| text_prompt | code_prompt |
|---|---|
| stringlengths 168–30.3k | stringlengths 67–124k |
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Load the Data
Step3: Preprocess the Data
Step5: Normalize the features
Step6: One-Hot Encode the labels
Step7: Keras Sequential Model
Step8: Training a Sequential Model
Step9: Convolutions
Step10: Pooling
Step11: Dropout
Step12: Optimization
Step14: Best Validation Accuracy
|
<ASSISTANT_TASK:>
Python Code:
from urllib.request import urlretrieve
from os.path import isfile
from tqdm import tqdm


class DLProgress(tqdm):
    """tqdm progress bar adapted to urlretrieve's reporthook callback."""

    # Number of blocks reported by the previous hook call; used to compute
    # the incremental byte count for tqdm.update().
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """reporthook for urlretrieve: advance the bar by the bytes just received.

        :param block_num: blocks transferred so far
        :param block_size: size of each block in bytes
        :param total_size: total download size in bytes (may be None/-1 if unknown)
        """
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


# Download the pickled German Traffic Sign datasets only if they are not
# already present in the working directory.
if not isfile('train.p'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Train Dataset') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/train.p',
            'train.p',
            pbar.hook)

if not isfile('test.p'):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Test Dataset') as pbar:
        urlretrieve(
            'https://s3.amazonaws.com/udacity-sdc/datasets/german_traffic_sign_benchmark/test.p',
            'test.p',
            pbar.hook)

print('Training and Test data downloaded.')
import pickle
import numpy as np
import math
# Fix error with TF and Keras
# NOTE(review): monkey-patch for old Keras 1.x releases that referenced
# tf.python.control_flow_ops (removed in later TensorFlow) — confirm it is
# still needed for the installed TF/Keras versions.
import tensorflow as tf
tf.python.control_flow_ops = tf
print('Modules loaded.')
# Load the pickled training set; the pickle holds a dict with (at least)
# 'features' (the images) and 'labels' (the class ids).
with open('train.p', 'rb') as f:
    data = pickle.load(f)

# TODO: Load the feature data to the variable X_train
X_train = data['features']
# TODO: Load the label data to the variable y_train
y_train = data['labels']

# STOP: Do not change the tests below. Your implementation should pass these tests.
assert np.array_equal(X_train, data['features']), 'X_train not set to data[\'features\'].'
assert np.array_equal(y_train, data['labels']), 'y_train not set to data[\'labels\'].'
print('Tests passed.')
# TODO: Shuffle the data
# Shuffle features and labels in unison so every image keeps its own label.
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert X_train.shape == data['features'].shape, 'X_train has changed shape. The shape shouldn\'t change when shuffling.'
assert y_train.shape == data['labels'].shape, 'y_train has changed shape. The shape shouldn\'t change when shuffling.'
assert not np.array_equal(X_train, data['features']), 'X_train not shuffled.'
assert not np.array_equal(y_train, data['labels']), 'y_train not shuffled.'
print('Tests passed.')
# TODO: Normalize the data features to the variable X_normalized
def normalize_grayscale(image_data):
    """Normalize the image data with Min-Max scaling to a range of [-0.5, 0.5].

    Maps pixel intensities from [0, 255] linearly onto [-0.5, 0.5] so the
    network trains on zero-centred inputs.

    :param image_data: The image data to be normalized (scalar or ndarray)
    :return: Normalized image data, same shape as the input
    """
    a = -0.5
    b = 0.5
    grayscale_min = 0
    grayscale_max = 255
    return a + (((image_data - grayscale_min) * (b - a)) / (grayscale_max - grayscale_min))
# Normalize the shuffled training features once, up front.
X_normalized = normalize_grayscale(X_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert math.isclose(np.min(X_normalized), -0.5, abs_tol=1e-5) and math.isclose(np.max(X_normalized), 0.5, abs_tol=1e-5), 'The range of the training data is: {} to {}. It must be -0.5 to 0.5'.format(np.min(X_normalized), np.max(X_normalized))
print('Tests passed.')
# TODO: One Hot encode the labels to the variable y_one_hot
# LabelBinarizer learns the 43 distinct class ids from y_train and maps each
# label to a 43-wide one-hot row.
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
y_one_hot = lb.fit_transform(y_train)
# STOP: Do not change the tests below. Your implementation should pass these tests.
import collections
assert y_one_hot.shape == (39209, 43), 'y_one_hot is not the correct shape. It\'s {}, it should be (39209, 43)'.format(y_one_hot.shape)
assert next((False for y in y_one_hot if collections.Counter(y) != {0: 42, 1: 1}), True), 'y_one_hot not one-hot encoded.'
print('Tests passed.')
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
model = Sequential()
# TODO: Build a Multi-layer feedforward neural network with Keras here.
# 1st Layer - Add a flatten layer (32x32x3 image -> 3072-wide vector)
model.add(Flatten(input_shape=(32, 32, 3)))
# 2nd Layer - Add a fully connected layer
model.add(Dense(128))
# 3rd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 4th Layer - Add a fully connected layer (one output per traffic-sign class)
model.add(Dense(43))
# 5th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
# (Re-imports below are redundant with the cell above but kept so the test
# cell can run standalone in a notebook.)
from keras.layers.core import Dense, Activation, Flatten
from keras.activations import relu, softmax
def check_layers(layers, true_layers):
    """Assert that a model's layers match the expected layer classes, in order.

    :param layers: expected layer classes, e.g. [Flatten, Dense, Activation]
    :param true_layers: actual layer instances (typically model.layers)
    :raises AssertionError: if the model is empty, has the wrong number of
        layers, or any layer is not an instance of the expected class
    """
    assert len(true_layers) != 0, 'No layers found'
    # Check the count before indexing so a too-short model produces the
    # intended assertion message instead of an IndexError inside the loop.
    assert len(true_layers) == len(layers), '{} layers found, should be {} layers'.format(len(true_layers), len(layers))
    for layer_i in range(len(layers)):
        assert isinstance(true_layers[layer_i], layers[layer_i]), 'Layer {} is not a {} layer'.format(layer_i+1, layers[layer_i].__name__)
# Verify layer order, shapes and activations of the feedforward model.
check_layers([Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[1].output_shape == (None, 128), 'Second layer output is wrong, it should be (128)'
assert model.layers[2].activation == relu, 'Third layer not a relu activation layer'
assert model.layers[3].output_shape == (None, 43), 'Fourth layer output is wrong, it should be (43)'
assert model.layers[4].activation == softmax, 'Fifth layer not a softmax activation layer'
print('Tests passed.')
# TODO: Compile and train the model here.
# Configures the learning process and metrics
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
print(model.summary())
# Train the model
# History is a record of training loss and metrics
# NOTE(review): nb_epoch is the Keras 1.x argument name (Keras 2 renamed it
# to epochs) — matches the Keras 1 API used throughout this notebook.
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=10, validation_split=0.2)
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.optimizers import Adam
assert model.loss == 'categorical_crossentropy', 'Not using categorical_crossentropy loss function'
assert isinstance(model.optimizer, Adam), 'Not using adam optimizer'
assert len(history.history['acc']) == 10, 'You\'re using {} epochs when you need to use 10 epochs.'.format(len(history.history['acc']))
assert history.history['acc'][-1] > 0.92, 'The training accuracy was: %.3f. It shoud be greater than 0.92' % history.history['acc'][-1]
assert history.history['val_acc'][-1] > 0.85, 'The validation accuracy is: %.3f. It shoud be greater than 0.85' % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
model = Sequential()
# 1st Layer - Add a convolution with 32 filters, 3x3 kernel, and valid padding
# (Convolution2D/border_mode is the Keras 1.x API)
model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(32, 32, 3)))
# 2nd Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 3rd Layer - Add a flatten layer
model.add(Flatten())
# 4th Layer - Add a fully connected layer
model.add(Dense(128))
# 5th Layer - Add a ReLU activation layer
model.add(Activation('relu'))
# 6th Layer - Add a fully connected layer
model.add(Dense(43))
# 7th Layer - Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
check_layers([Convolution2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[0].input_shape == (None, 32, 32, 3), 'First layer input shape is wrong, it should be (32, 32, 3)'
assert model.layers[0].nb_filter == 32, 'Wrong number of filters, it should be 32'
assert model.layers[0].nb_col == model.layers[0].nb_row == 3, 'Kernel size is wrong, it should be a 3x3'
assert model.layers[0].border_mode == 'valid', 'Wrong padding, it should be valid'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
# Add a convolution with 32 filters, 3x3 kernel, and valid padding
model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(32, 32, 3)))
# Add a max pooling of 2x2 (halves the spatial dimensions)
model.add(MaxPooling2D(pool_size=(2, 2)))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a flatten layer
model.add(Flatten())
# Add a fully connected layer
model.add(Dense(128))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a fully connected layer
model.add(Dense(43))
# Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[1].pool_size == (2, 2), 'Second layer must be a max pool layer with pool size of 2x2'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=4, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# TODO: Re-construct the network and add dropout after the pooling layer.
# TODO: Re-construct the network and add a pooling layer after the convolutional layer.
# TODO: Re-construct the network and add a convolutional layer before the flatten layer.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
model = Sequential()
# Add a convolution with 32 filters, 3x3 kernel, and valid padding
model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(32, 32, 3)))
# Add a max pooling of 2x2
model.add(MaxPooling2D(pool_size=(2, 2)))
# Add a dropout of 50% (regularization: only active during training)
model.add(Dropout(0.5))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a flatten layer
model.add(Flatten())
# Add a fully connected layer
model.add(Dense(128))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a fully connected layer
model.add(Dense(43))
# Add a softmax activation layer
model.add(Activation('softmax'))
# STOP: Do not change the tests below. Your implementation should pass these tests.
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
check_layers([Convolution2D, MaxPooling2D, Dropout, Activation, Flatten, Dense, Activation, Dense, Activation], model.layers)
assert model.layers[2].p == 0.5, 'Third layer should be a Dropout of 50%'
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=128, nb_epoch=2, validation_split=0.2)
assert(history.history['val_acc'][-1] > 0.91), "The validation accuracy is: %.3f. It should be greater than 0.91" % history.history['val_acc'][-1]
print('Tests passed.')
# Final architecture: two conv/pool/dropout stages followed by L2-regularized
# dense layers, trained for 20 epochs to maximise validation accuracy.
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.regularizers import l2, activity_l2
model = Sequential()
# Add a convolution with 32 filters, 3x3 kernel, and valid padding
model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=(32, 32, 3)))
# Add a max pooling of 2x2
model.add(MaxPooling2D(pool_size=(2, 2)))
# Add a dropout of 50%
model.add(Dropout(0.5))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a convolution with 64 filters, 2x2 kernel, and valid padding
model.add(Convolution2D(64, 2, 2, border_mode='valid'))
# Add a max pooling of 2x2
model.add(MaxPooling2D(pool_size=(2, 2)))
# Add a dropout of 50%
model.add(Dropout(0.5))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a flatten layer
model.add(Flatten())
# Add a fully connected layer with L2 weight and activity regularization
model.add(Dense(128, W_regularizer=l2(0.0001), activity_regularizer=activity_l2(0.0001)))
# Add a ReLU activation layer
model.add(Activation('relu'))
# Add a fully connected layer
model.add(Dense(43, W_regularizer=l2(0.0001), activity_regularizer=activity_l2(0.0001)))
# Add a softmax activation layer
model.add(Activation('softmax'))
print(model.summary())
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, batch_size=256, nb_epoch=20, validation_split=0.2)
# TODO: Load test data
# Load the held-out test set from the pickle downloaded earlier.
with open('test.p', 'rb') as f:
    data = pickle.load(f)

X_test = data['features']
y_test = data['labels']
# TODO: Preprocess data & one-hot encode the labels
def normalize_grayscale(image_data):
    """Normalize the image data with Min-Max scaling to a range of [-0.5, 0.5].

    Identical to the earlier definition; re-declared so this notebook cell
    can run standalone.

    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
    a = -0.5
    b = 0.5
    grayscale_min = 0
    grayscale_max = 255
    return a + (((image_data - grayscale_min) * (b - a)) / (grayscale_max - grayscale_min))
X_test_normalized = normalize_grayscale(X_test)
from sklearn import preprocessing
# NOTE(review): fitting a fresh LabelBinarizer on y_test assumes the test set
# contains all 43 classes; otherwise its one-hot columns would not line up
# with the training encoding — consider reusing the lb fitted on y_train.
lb = preprocessing.LabelBinarizer()
y_test_one_hot = lb.fit_transform(y_test)
# TODO: Evaluate model on test data
print(model.metrics_names)
model.evaluate(X_test_normalized, y_test_one_hot, batch_size=256, verbose=1, sample_weight=None)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'messy-consortium', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# --- Section 1-3: Key Properties (overview, software, timestep framework) ---
# Each stanza selects a CV property id, then the user supplies values via
# DOC.set_value(...). The quoted "Valid Choices" strings below mirror the
# CMIP6 controlled vocabulary verbatim (including any apparent typos or
# duplicates) and must be entered exactly as shown.
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# --- Sections 4-8: split operator order, tuning applied, grid, transport ---
# Integer split_operator_order values give the position of each process in
# the operator-splitting sequence.
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# --- Sections 9-12: emissions & concentrations (surface, atmospheric,
# prescribed boundary concentrations) ---
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# --- Sections 13-14: gas phase chemistry and stratospheric heterogeneous
# chemistry (this chunk appears truncated mid-section in the source) ---
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step2: We just defined a JulianDay class that takes one argument of type string or binary, and returns a float.
Step 2
Step3: Interlude
Step4: Step 3
Step5: Step 4
Step6: Create and execute a julianday expression
Step7: Because we've defined our operation on StringValue, and not just on StringColumn we get operations on both string scalars and string columns for free
|
<ASSISTANT_TASK:>
Python Code:
import ibis.expr.datatypes as dt
import ibis.expr.rules as rlz
from ibis.expr.operations import ValueOp
class JulianDay(ValueOp):
arg = rlz.string
output_dtype = dt.float32
output_shape = rlz.shape_like('arg')
from ibis.expr.types import BinaryValue, StringValue
def julianday(string_value):
return JulianDay(string_value).to_expr()
StringValue.julianday = julianday
import ibis
t = ibis.table([('string_col', 'string')], name='t')
t.string_col.julianday()
import sqlalchemy as sa
@ibis.sqlite.add_operation(JulianDay)
def _julianday(translator, expr):
# pull out the arguments to the expression
(arg,) = expr.op().args
# compile the argument
compiled_arg = translator.translate(arg)
# return a SQLAlchemy expression that calls into the SQLite julianday function
return sa.func.julianday(compiled_arg)
!curl -LsS -o $TEMPDIR/geography.db 'https://storage.googleapis.com/ibis-tutorial-data/geography.db'
import os
import tempfile
import ibis
db_fname = os.path.join(tempfile.gettempdir(), 'geography.db')
con = ibis.sqlite.connect(db_fname)
independence = con.table('independence')
independence
day = independence.independence_date.cast('string')
day
julianday_expr = day.julianday().name("jday")
julianday_expr
sql_expr = julianday_expr.compile()
print(sql_expr)
result = julianday_expr.execute()
result.head()
scalar = ibis.literal('2010-03-14')
scalar
julianday_scalar = scalar.julianday()
con.execute(julianday_scalar)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If the iterables to be combined are not all known in advance, or need to be evaluated lazily, chain.from_iterable() can be used to construct the chain instead.
Step2: The built-in function zip() returns an iterator that combines the elements of several iterators into tuples.
Step3: zip() stops when the first input iterator is exhausted. To process all of the inputs, even if the iterators produce different numbers of values, use zip_longest().
Step4: The islice() function returns an iterator which returns selected items from the input iterator, by index.
Step5: The tee() function returns several independent iterators (defaults to 2) based on a single original input.
Step6: The new iterators created by tee() share their input, so the original iterator should not be used after the new ones are created.
Step7: Converting Inputs
Step8: The starmap() function is similar to map(), but instead of constructing a tuple from multiple iterators, it splits up the items in a single iterator as arguments to the mapping function using the * syntax.
Step9: Producing new values
Step10: The start and step arguments to count() can be any numerical values that can be added together.
Step11: The cycle() function returns an iterator that repeats the contents of the arguments it is given indefinitely. Since it has to remember the entire contents of the input iterator, it may consume quite a bit of memory if the iterator is long.
Step12: The repeat() function returns an iterator that produces the same value each time it is accessed.
Step13: Filtering
Step14: The opposite of dropwhile() is takewhile(). It returns an iterator that returns items from the input iterator as long as the test function returns true.
Step15: The built-in function filter() returns an iterator that includes only items for which the test function returns true.
Step16: filterfalse() returns an iterator that includes only items where the test function returns false.
Step17: Grouping Data
Step18: Combining Inputs
Step19: It is possible to combine accumulate() with any other function that takes two input values to achieve different results.
Step20: Nested for loops that iterate over multiple sequences can often be replaced with product(), which produces a single iterable whose values are the Cartesian product of the set of input values.
Step21: The values produced by product() are tuples, with the members taken from each of the iterables passed in as arguments in the order they are passed. The first tuple returned includes the first value from each iterable. The last iterable passed to product() is processed first, followed by the next to last, and so on. The result is that the return values are in order based on the first iterable, then the next iterable, etc.
Step22: To limit the values to unique combinations rather than permutations, use combinations(). As long as the members of the input are unique, the output will not include any repeated values.
Step23: While combinations() does not repeat individual input elements, sometimes it is useful to consider combinations that do include repeated elements. For those cases, use combinations_with_replacement().
|
<ASSISTANT_TASK:>
Python Code:
from itertools import *
for i in chain([1, 2, 3], ['a', 'b', 'c']):
print(i, end=' ')
print()
from itertools import *
def make_iterables_to_chain():
yield [1, 2, 3]
yield ['a', 'b', 'c']
for i in chain.from_iterable(make_iterables_to_chain()):
print(i, end=' ')
print()
for i in zip([1, 2, 3], ['a', 'b', 'c']):
print(i)
from itertools import *
r1 = range(3)
r2 = range(2)
print('zip stops early:')
print(list(zip(r1, r2)))
r1 = range(3)
r2 = range(2)
print('\nzip_longest processes all of the values:')
print(list(zip_longest(r1, r2)))
from itertools import *
print('Stop at 5:')
for i in islice(range(100), 5):
print(i, end=' ')
print('\n')
print('Start at 5, Stop at 10:')
for i in islice(range(100), 5, 10):
print(i, end=' ')
print('\n')
print('By tens to 100:')
for i in islice(range(100), 0, 100, 10):
print(i, end=' ')
print('\n')
from itertools import *
r = islice(count(), 5)
i1, i2 = tee(r)
print('i1:', list(i1))
print('i2:', list(i2))
from itertools import *
r = islice(count(), 5)
i1, i2 = tee(r)
print('r:', end=' ')
for i in r:
print(i, end=' ')
if i > 1:
break
print()
print('i1:', list(i1))
print('i2:', list(i2))
def times_two(x):
return 2 * x
def multiply(x, y):
return (x, y, x * y)
print('Doubles:')
for i in map(times_two, range(5)):
print(i)
print('\nMultiples:')
r1 = range(5)
r2 = range(5, 10)
for i in map(multiply, r1, r2):
print('{:d} * {:d} = {:d}'.format(*i))
print('\nStopping:')
r1 = range(5)
r2 = range(2)
for i in map(multiply, r1, r2):
print(i)
from itertools import *
values = [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]
for i in starmap(lambda x, y: (x, y, x * y), values):
print('{} * {} = {}'.format(*i))
from itertools import *
for i in zip(count(1), ['a', 'b', 'c']):
print(i)
import fractions
from itertools import *
start = fractions.Fraction(1, 3)
step = fractions.Fraction(1, 3)
for i in zip(count(start, step), ['a', 'b', 'c']):
print('{}: {}'.format(*i))
from itertools import *
for i in zip(range(7), cycle(['a', 'b', 'c'])):
print(i)
from itertools import *
for i in repeat('over-and-over', 5):
print(i)
from itertools import *
def should_drop(x):
print('Testing:', x)
return x < 1
for i in dropwhile(should_drop, [-1, 0, 1, 2, -2]):
print('Yielding:', i)
from itertools import *
def should_take(x):
# print('Testing:', x)
return x < 2
for i in takewhile(should_take, [-1, 0, 1, 2, -2]):
print('Yielding:', i)
from itertools import *
def check_item(x):
print('Testing:', x)
return x < 1
for i in filter(check_item, [-1, 0, 1, 2, -2]):
print('Yielding:', i)
from itertools import *
every_third = cycle([False, False, True])
data = range(1, 10)
for i in compress(data, every_third):
print(i, end=' ')
print()
import functools
from itertools import *
import operator
import pprint
@functools.total_ordering
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return '({}, {})'.format(self.x, self.y)
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def __gt__(self, other):
return (self.x, self.y) > (other.x, other.y)
# Create a dataset of Point instances
data = list(map(Point,
cycle(islice(count(), 3)),
islice(count(), 7)))
print('Data:')
pprint.pprint(data, width=35)
print()
# Try to group the unsorted data based on X values
print('Grouped, unsorted:')
for k, g in groupby(data, operator.attrgetter('x')):
print(k, list(g))
print()
# Sort the data
data.sort()
print('Sorted:')
pprint.pprint(data, width=35)
print()
# Group the sorted data based on X values
print('Grouped, sorted:')
for k, g in groupby(data, operator.attrgetter('x')):
print(k, list(g))
print()
from itertools import *
print(list(accumulate(range(5))))
print(list(accumulate('abcde')))
from itertools import *
def f(a, b):
print(a, b)
return b + a + b
print(list(accumulate('abcde', f)))
from itertools import *
import pprint
FACE_CARDS = ('J', 'Q', 'K', 'A')
SUITS = ('H', 'D', 'C', 'S')
DECK = list(
product(
chain(range(2, 11), FACE_CARDS),
SUITS,
)
)
for card in DECK:
print('{:>2}{}'.format(*card), end=' ')
if card[1] == SUITS[-1]:
print()
from itertools import *
def show(iterable):
first = None
for i, item in enumerate(iterable, 1):
if first != item[0]:
if first is not None:
print()
first = item[0]
print(''.join(item), end=' ')
print()
print('All permutations:\n')
show(permutations('abcd'))
print('\nPairs:\n')
show(permutations('abcd', r=2))
from itertools import *
def show(iterable):
first = None
for i, item in enumerate(iterable, 1):
if first != item[0]:
if first is not None:
print()
first = item[0]
print(''.join(item), end=' ')
print()
print('Unique pairs:\n')
show(combinations('abcd', r=2))
from itertools import *
def show(iterable):
first = None
for i, item in enumerate(iterable, 1):
if first != item[0]:
if first is not None:
print()
first = item[0]
print(''.join(item), end=' ')
print()
print('Unique pairs:\n')
show(combinations_with_replacement('abcd', r=2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2016-11-04
Step2: 1. Decision trees
Step3: Question Compute the mean and standard deviation of the area under the ROC curve of these 5 trees. Plot the ROC curves of these 5 trees.
Step4: Question What parameters of DecisionTreeClassifier can you play with to define trees differently than with the default parameters? Cross-validate these using a grid search, and plot the optimal decision tree on the previous plot. Did you manage to improve performance?
Step5: Question How does the performance of decision trees compare to the performance of classifiers we have used previously on this data? Does this match your expectations?
Step7: Question Use cross_validate_optimize (as defined in the previous lab) to optimize the number of decision trees to use in the bagging method. How many trees did you find to be an optimal choice?
Step8: Question Plot the ROC curve of the optimized cross-validated bagging tree classifier obtained with cross_validate_optimize, and compare it to the previous ROC curves (non-optimized bagging tree, decision trees).
Step9: Question Use cross_validate_optimize (as defined in the previous lab) to optimize the number of decision trees to use in the random forest. How many trees do you find to be an optimal choice? How does the optimal random forest compare to the optimal bagging trees? How do the training times of the random forest and the bagging trees compare?
Step10: Question How do your tree-based classifiers compare to the linear regression (regularized or not)? Plot ROC curves.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%pylab inline
# Load the data
# TODO
# Normalize the data
from sklearn import preprocessing
X = preprocessing.normalize(X)
# Set up a stratified 10-fold cross-validation
from sklearn import cross_validation
folds = cross_validation.StratifiedKFold(y, 10, shuffle=True)
def cross_validate(design_matrix, labels, classifier, cv_folds):
Perform a cross-validation and returns the predictions.
Parameters:
-----------
design_matrix: (n_samples, n_features) np.array
Design matrix for the experiment.
labels: (n_samples, ) np.array
Vector of labels.
classifier: sklearn classifier object
Classifier instance; must have the following methods:
- fit(X, y) to train the classifier on the data X, y
- predict_proba(X) to apply the trained classifier to the data X and return probability estimates
cv_folds: sklearn cross-validation object
Cross-validation iterator.
Return:
-------
pred: (n_samples, ) np.array
Vectors of predictions (same order as labels).
pred = np.zeros(labels.shape)
for tr, te in cv_folds:
# Restrict data to train/test folds
Xtr = design_matrix[tr, :]
ytr = labels[tr]
Xte = design_matrix[te, :]
#print Xtr.shape, ytr.shape, Xte.shape
# Fit classifier
classifier.fit(Xtr, ytr)
# Predict probabilities (of belonging to +1 class) on test data
yte_pred = classifier.predict_proba(Xte)
index_of_class_1 = (1-classifier.classes_[0])/2 # 0 if the first sample is positive, 1 otherwise
pred[te] = yte_pred[:, index_of_class_1]
return pred
from sklearn import tree
from sklearn import metrics
# Use: clf = tree.DecisionTreeClassifier()
ypred_dt = [] # will hold the 5 arrays of predictions (1 per tree)
for tree_index in range(5):
# TODO
fpr_dt = [] # will hold the 5 arrays of false positive rates (1 per tree)
tpr_dt = [] # will hold the 5 arrays of true positive rates (1 per tree)
auc_dt = [] # will hold the 5 areas under the ROC curve (1 per tree)
for tree_index in range(5):
# TODO
for tree_index in range(4):
plt.plot(fpr_dt[tree_index], tpr_dt[tree_index], '-', color='orange')
plt.plot(fpr_dt[-1], tpr_dt[-1], '-', color='orange',
label='DT (AUC = %0.2f (+/- %0.2f))' % (np.mean(auc_dt), np.std(auc_dt)))
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curves', fontsize=16)
plt.legend(loc="lower right")
from sklearn import grid_search
param_grid = # TODO
clf = grid_search.GridSearchCV(tree.DecisionTreeClassifier(), param_grid,
scoring='roc_auc')
ypred_dt_opt = cross_validate(X, y, clf, folds)
fpr_dt_opt, tpr_dt_opt, thresholds = metrics.roc_curve(y, ypred_dt_opt, pos_label=1)
auc_dt_opt = metrics.auc(fpr_dt_opt, tpr_dt_opt)
# Plot the 5 decision trees from earlier
for tree_index in range(4):
plt.plot(fpr_dt[tree_index], tpr_dt[tree_index], '-', color='blue')
plt.plot(fpr_dt[-1], tpr_dt[-1], '-', color='blue',
label='DT (AUC = %0.2f (+/- %0.2f))' % (np.mean(auc_dt), np.std(auc_dt)))
# Plot the optimized decision tree
plt.plot(fpr_dt_opt, tpr_dt_opt, color='orange', label='DT optimized (AUC=%0.2f)' % auc)
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curves', fontsize=16)
plt.legend(loc="lower right")
from sklearn import ensemble
# By default, the base estimator is a decision tree with default parameters
# TODO: Use clf = ensemble.BaggingClassifier(n_estimators=5)
def cross_validate_optimize(design_matrix, labels, classifier, cv_folds):
Perform a cross-validation and returns the predictions.
Parameters:
-----------
design_matrix: (n_samples, n_features) np.array
Design matrix for the experiment.
labels: (n_samples, ) np.array
Vector of labels.
classifier: sklearn GridSearchCV object
GridSearchCV instance; must have the following methods/attributes:
- fit(X, y) to train the classifier on the data X, y
- predict_proba(X) to apply the trained classifier to the data X and return probability estimates
cv_folds: sklearn cross-validation object
- best_params_ the best parameter dictionary
Cross-validation iterator.
Return:
-------
pred: (n_samples, ) np.array
Vector of predictions (same order as labels).
pred = np.zeros(labels.shape)
for tr, te in cv_folds:
# Restrict data to train/test folds
Xtr = design_matrix[tr, :]
ytr = labels[tr]
Xte = design_matrix[te, :]
#print Xtr.shape, ytr.shape, Xte.shape
# Fit classifier
classifier.fit(Xtr, ytr)
# Print best parameter
print classifier.best_params_
# Predict probabilities (of belonging to +1 class) on test data
yte_pred = classifier.predict_proba(Xte)
index_of_class_1 = 1 - ytr[0] # 0 if the first sample is positive, 1 otherwise
pred[te] = yte_pred[:, index_of_class_1]
return pred
param_grid = {'n_estimators': [5, 15, 25, 50]}
# TODO
clf = ensemble.RandomForestClassifier(n_estimators=5)
# TODO
param_grid = {'n_estimators': [5, 15, 25, 50]}
# TODO
from sklearn import linear_model
param_grid = {'C':[1e-3, 1e-2, 1e-1, 1., 1e2, 1e3]}
clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l1'),
param_grid, scoring='roc_auc')
ypred_l1 = cross_validate_optimize(X, y, clf, folds)
fpr_l1, tpr_l1, thresholds_l1 = metrics.roc_curve(y, ypred_l1, pos_label=1)
clf = grid_search.GridSearchCV(linear_model.LogisticRegression(penalty='l2'),
param_grid, scoring='roc_auc')
ypred_l2 = cross_validate_optimize(X, y, clf, folds)
fpr_l2, tpr_l2, thresholds_l2 = metrics.roc_curve(y, ypred_l2, pos_label=1)
# TODO
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.title('ROC curves', fontsize=16)
plt.legend(loc="lower right")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Neural Network Embeddings
Step3: The following are descriptions of the remaining data columns in the play-by-play dataset. Note that the one-hot encoded columns do not follow the structure listed below, but for the sake of readability they are presented as if they were not one-hot encoded.
Step4: Modeling
Step5: Some Setup Code for TensorFlow
Step6: Calculating Costs for a Model
Step7: Grid Search
Step8: First Deep Learning Architecture
Step9: Grid Searching on the Model
Step10: Although the grid search returned the optimal step count and rate, it is meaningful to visualize the grid that was generated, to get an idea for how much better these particular hyperparameters are than the other possible combinations in the grid.
Step11: The above grid search shows that, while there is no obvious pattern with respect to the performance, it seems that the major diagonal generally has the lowest costs. Interestingly, the lower-left tile has a significantly higher score than all of the others. This is likely due to overlearning, as it comes from a section in the grid with the highest possible step count.
Step12: The costs and auc scores computed above are hard-coded below for later use, so that they don't need to be computed again.
Step14: Second Deep Learning Architecture
Step15: Grid Searching on the Model
Step16: Although the grid search, once again, gave us the optimal step count and rate, it is worthwhile to visualize the grid that was generated, to get an idea for how much better these particular hyperparameters are than the other possible combinations in the grid.
Step17: This heatmap shows a stronger pattern than the map for model 1. It shows the optimal to be centered around 1500 steps and a learning rate of 0.005, with somewhat constant cost increase as the two parameters grow greater or smaller than thoses values.
Step18: The costs and auc scores computed above are hard-coded below for later use, so that they don't need to be computed again.
Step19: Third Deep Learning Architecture
Step20: Grid Searching on the Model
Step21: Although the grid search returned the optimal step count and rate, it is meaningful to visualize the grid that was generated, to get an idea for how much better these particular hyperparameters are than the other possible combinations in the grid.
Step22: The grid search for model 3, interestingly, shows a significant trend toward lower costs in the lower-left part of the map (higher step count and lower learning rate). This stands to reason, and aligns much more with what is expected than the unruly heatmap that was generated by the model 1 grid search. This also aligns with what one might expect of a simple multi-layer perceptron model, which this network architecture closely resembles.
Step23: The costs and auc scores computed above are hard-coded below for later use, so that they don't need to be computed again.
Step24: Architecture Comparison
Step25: Cost Difference
Step26: The above 3 confidence intervals show that the first two architectures performed similarly, as the model 1 to model 2 difference confidence interval contains zero. However, the third architecture, which we created specifically because of how poorly the first two architectures performed, did significantly outperform both of the first two models, as zero does not appear in either of the confidence intervals including model 3. Therefore, it can be concluded that, with respect to cost, our "home-grown" model 3 does significantly outperform the other architectures.
Step27: The confidence intervals for area under ROC curve provide the same insight as the confidence intervals for the cost function. The first 2 models are statistically similar with 95% confidence, but the third model statistically outperforms both of the first 2, with 95% confidence, because zero does not fall in the ROC difference confidence interval for model 3 vs the other 2 models.
Step28: Next, we produce cost scores for the random forest classifier.
Step29: Finally, we produce AUC_ROC scores for the random forest classifier.
Step30: The cost and auc scores for the random forest are saved below so that they don't need to be re-computed.
|
<ASSISTANT_TASK:>
Python Code:
#For final version of report, remove warnings for aesthetics.
import warnings
warnings.filterwarnings('ignore')
#Libraries used for data analysis
import pandas as pd
import numpy as np
from sklearn import preprocessing
df = pd.read_csv('data/cleaned.csv') # read in the csv file
colsToInclude = [ 'Drive', 'qtr', 'down',
'TimeSecs', 'yrdline100','ydstogo','ydsnet',
'GoalToGo','posteam','DefensiveTeam',
'PosTeamScore','ScoreDiff', 'PlayType']
df = df[colsToInclude]
df = df[[p not in ["Sack", "No Play", "QB Kneel", "Spike"] for p in df.PlayType]]
df.info()
from sklearn.feature_extraction import DictVectorizer
#Simple function for 1 hot encoding
def encode_onehot(df, cols):
One-hot encoding is applied to columns specified in a pandas DataFrame.
Modified from: https://gist.github.com/kljensen/5452382
Details:
http://en.wikipedia.org/wiki/One-hot
http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html
@param df pandas DataFrame
@param cols a list of columns to encode
@return a DataFrame with one-hot encoding
vec = DictVectorizer()
vec_data = pd.DataFrame(vec.fit_transform(df[cols].to_dict(outtype='records')).toarray())
vec_data.columns = vec.get_feature_names()
vec_data.index = df.index
df = df.drop(cols, axis=1)
df = df.join(vec_data)
return df
df = encode_onehot(df, cols=['posteam', 'DefensiveTeam'])
from sklearn.model_selection import KFold
#Using a 10-fold sequential split.
#Note that this cv object is unused, but is here for reference
cv = KFold(n_splits=10)
y,levels = pd.factorize(df.PlayType.values)
X = df.drop('PlayType', 1).values.astype(np.float32)
num_classes = len(levels)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc,make_scorer
from scipy import interp
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
cost_mat = [[0 ,.1 , 1 , .15 , 0.15, .1 , 1 ],
[.1 , 0 , 1 , 0.15, 0.15, 0.1, 1 ],
[1 , 1 , 0 , 1 , 1 , 1 , 0.75],
[.25,0.25, 1 , 0 , 1 ,0.15, 1 ],
[0.4, 0.4, 1 , 1 , 0 , 1 , 1 ],
[0.4, 0.4, 1 , 0.1 , 1 , 0 , 1 ],
[1 , 1 , 0.25, 1 , 1 , 1 , 0 ]]
def cost(Y, yhat):
return np.sum(np.multiply(confusion_matrix(Y,yhat), cost_mat))
def auc_of_roc(Y,yhat, levels=['Pass', 'Run', 'Kickoff', 'Punt', 'Extra Point', 'Field Goal', 'Onside Kick']):
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
for c in levels:
tempY = [x==c for x in Y]
tempYhat = [x==c for x in yhat]
fpr, tpr, thresholds = roc_curve(tempY, tempYhat)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
mean_tpr /= len(levels)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
return mean_auc
#For use in the final deployment section
scorer = make_scorer(cost)
auc_roc_scorer = make_scorer(auc_of_roc)
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib import layers
#Suppress all non-error warnings
tf.logging.set_verbosity(tf.logging.ERROR)
def get_scores_for_model(model_fn, X, y, steps=1000, learning_rate=0.05, num_splits = 10):
auc = []
costs = []
for train_index, test_index in KFold(n_splits=num_splits).split(X, y):
classifier = learn.TensorFlowEstimator(model_fn=model_fn,
n_classes=7, batch_size=1000,
steps=steps, learning_rate=learning_rate)
classifier.fit(X[train_index], y[train_index])
yhat = classifier.predict(X[test_index])
costs.append(cost(y[test_index], yhat))
auc.append(auc_of_roc(y[test_index], yhat, levels=range(0,7)))
return costs, auc
def grid_search(model_fn, steps_list, learning_rate_list):
costs = []
for steps in steps_list:
step_costs = []
for rate in learning_rate_list:
step_costs.append(np.mean(get_scores_for_model(model_fn, X[0:5000, :], y[0:5000], steps, rate, 3)[0]))
print(step_costs)
print(costs)
costs.append(step_costs)
min_idx = np.argmin(costs)
return costs, steps_list[min_idx//len(costs[0])], learning_rate_list[min_idx%len(costs[0])]
import seaborn as sns
def grid_search_heatmap(costs, steps, rates):
ax = sns.heatmap(np.array(costs))
ax.set(xlabel='Learning Rate', ylabel='Step Count')
ax.set_xticklabels(rates[::-1])
ax.set_yticklabels(steps[::-1])
ax.set_title("Grid Search Heatmap")
def deep_model_1(X, y):
#Embeddings layer
teamembeddings = layers.stack(X[:,11:75], layers.fully_connected, [700 for _ in range(6)])
teamembeddings = tf.nn.tanh(teamembeddings)
#Non-embeddings features
otherfeatures = X[:,0:10]
otherfeatures = layers.stack(otherfeatures, layers.fully_connected, [700 for _ in range(6)])
tensors = tf.concat(1, [teamembeddings, otherfeatures])
tensors = tf.nn.tanh(tensors)
pred,loss = learn.models.logistic_regression(tensors, y)
return pred, loss
costs, optimal_steps, optimal_rate = grid_search(deep_model_1, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
print((optimal_steps, optimal_rate))
grid_search_heatmap(costs, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
costs_model_1, auc_roc_model_1 = get_scores_for_model(deep_model_1, X, y, optimal_steps, optimal_rate)
print(costs_model_1)
print(auc_roc_model_1)
costs_model_1 = [1843.45, 2005.0, 2017.0, 1971.05, 2069.25, 2021.75, 1942.8499999999999, 2048.25, 2022.0, 2042.0999999999999]
auc_roc_model_1 = [0.51052936522990444, 0.50011988848000888, 0.49999999999999994, 0.50082942390789931, 0.49999999999999994, 0.49999999999999994, 0.50244062722123584, 0.49999999999999994, 0.50046048738783466, 0.49987541305436189]
def deep_model_2(X, y):
    """Pyramid network: each branch is reduced to 7 class scores before combining.

    Fix: the original body first built a complete network (team + play branches
    merged through 100->7 layers) and then immediately rebuilt both branches
    from scratch, overwriting every intermediate tensor.  The first network was
    never connected to the returned prediction/loss, so it only added unused
    ops and variables to the graph; that dead section has been removed.
    """
    # Team-embedding branch (columns 11..74), reduced all the way to 7 softmax scores.
    teamembeddings = layers.stack(X[:,11:75], layers.fully_connected, [200,1,3], activation_fn=tf.nn.relu)
    teamembeddings = layers.stack(teamembeddings, layers.fully_connected, [50,2,3], activation_fn=tf.nn.relu)
    teamembeddings = layers.stack(teamembeddings, layers.fully_connected, [100], activation_fn=tf.nn.relu)
    teamembeddings = layers.stack(teamembeddings, layers.fully_connected, [7], activation_fn=tf.nn.softmax)
    # Play-feature branch (columns 0..9), likewise reduced to 7 softmax scores.
    otherfeatures = X[:,0:10]
    otherfeatures = layers.stack(otherfeatures, layers.fully_connected, [50,1,3], activation_fn=tf.nn.relu)
    otherfeatures = layers.stack(otherfeatures, layers.fully_connected, [12,2,3], activation_fn=tf.nn.relu)
    otherfeatures = layers.stack(otherfeatures, layers.fully_connected, [100], activation_fn=tf.nn.relu)
    otherfeatures = layers.stack(otherfeatures, layers.fully_connected, [7], activation_fn=tf.nn.softmax)
    # Combine the two per-branch score vectors and fit the final classifier.
    tensors = tf.concat(1, [teamembeddings, otherfeatures])
    pred, loss = learn.models.logistic_regression(tensors, y)
    return pred, loss
# Tune deep_model_2 over the same grid and score the best configuration.
costs, optimal_steps, optimal_rate = grid_search(deep_model_2, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
print((optimal_steps, optimal_rate))
grid_search_heatmap(costs, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
costs_model_2, auc_roc_model_2 = get_scores_for_model(deep_model_2, X, y, optimal_steps, optimal_rate)
print(costs_model_2)
print(auc_roc_model_2)
# Saved per-fold results from an earlier run (overwrite the live values above).
costs_model_2 = [2042.75, 2007.5, 2017.0, 1978.0, 2069.25, 2021.75, 1974.0, 2048.25, 2027.0, 2041.25]
auc_roc_model_2 = [ 0.5085592, 0.4820538, 0.48592543, 0.48381174, 0.47515301, 0.475777, 0.47903248, 0.48797259, 0.47638676, 0.4891856]
def deep_model_3(X, y):
    """Compress the team embeddings to 4 units, concatenate them with the raw
    play features, and run a deep 1000/1000/1000/500/200/1000 stack with a
    final ReLU into a logistic output.
    """
    # Team-embedding bottleneck: 64 one-hot team columns -> 20 -> 4 units.
    team_codes = layers.stack(X[:, 11:75], layers.fully_connected, [20, 4])
    # Raw play-level features pass through untouched.
    play_features = X[:, 0:10]
    net = tf.concat(1, [team_codes, play_features])
    net = layers.stack(net, layers.fully_connected, [1000, 1000, 1000, 500, 200, 1000])
    net = tf.nn.relu(net)
    pred, loss = learn.models.logistic_regression(net, y)
    return pred, loss
# Tune deep_model_3 over the same grid and score the best configuration.
costs, optimal_steps, optimal_rate = grid_search(deep_model_3, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
print((optimal_steps, optimal_rate))
grid_search_heatmap(costs, [250,500,1000,1500,2000], [.05, .01, .005, .001, .0005])
costs_model_3, auc_roc_model_3 = get_scores_for_model(deep_model_3, X, y, optimal_steps, optimal_rate)
print(costs_model_3)
print(auc_roc_model_3)
# Saved per-fold results from an earlier run (best of the three architectures).
costs_model_3 = [ 1750.0 , 1722.85, 1709.0, 1710.75, 1716.25, 1742.0, 1650.0 , 1714.0, 1699.50, 1684.50]
auc_roc_model_3 = [0.62998537, 0.61758681, 0.61135905, 0.61209804, 0.6146912 , 0.62633358, 0.5845431 , 0.61362667, 0.60702243, 0.60023413]
def getDifference(cost1, cost2, z_val=2.26, size=10):
    """Paired confidence interval for the mean difference of two score lists.

    Computes mean(d) +/- z_val * s_d / sqrt(size) for d = cost1 - cost2,
    where s_d is the sample standard deviation of the paired differences.
    The default z_val=2.26 is the two-sided 95% t critical value for 9
    degrees of freedom, matching the 10-fold scores used in this notebook.

    Fix: the original estimated sigma as sqrt(sum(d_i**2)/(size-1)) without
    subtracting the mean difference, which overstates the spread whenever
    the mean is non-zero; the centered sample standard deviation (ddof=1)
    is used instead.

    Returns an (upper, lower) tuple bounding the mean difference.
    """
    diff = np.asarray(cost1) - np.asarray(cost2)
    mean_diff = np.mean(diff)
    # Centered sample standard deviation of the paired differences.
    sigma = np.std(diff, ddof=1)
    half_width = z_val * sigma / np.sqrt(size)
    return (mean_diff + half_width, mean_diff - half_width)
# 95% confidence intervals on the pairwise differences of per-fold costs
# (an interval entirely above zero means the first model is more expensive).
d_one_two = np.array(getDifference(costs_model_1, costs_model_2))
d_one_three = np.array(getDifference(costs_model_1, costs_model_3))
d_two_three = np.array(getDifference(costs_model_2, costs_model_3))
print("Average Model 1 vs Model 2 Difference:", d_one_two)
print("Average Model 1 vs Model 3 Difference:", d_one_three)
print("Average Model 2 vs Model 3 Difference:", d_two_three)
# Same comparison on the per-fold AUC-of-ROC scores.
d_one_two = np.array(getDifference(auc_roc_model_1, auc_roc_model_2))
d_one_three = np.array(getDifference(auc_roc_model_1, auc_roc_model_3))
d_two_three = np.array(getDifference(auc_roc_model_2, auc_roc_model_3))
print("Average Model 1 vs Model 2 Difference:", d_one_two)
print("Average Model 1 vs Model 3 Difference:", d_one_three)
print("Average Model 2 vs Model 3 Difference:", d_two_three)
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
# Build a class-weight map so the random forest optimizes the same
# misclassification costs used for the deep models.
# NOTE(review): zip() pairs PlayTypes (ordered by frequency) with the row sums
# of cost_mat -- this assumes cost_mat rows follow the value_counts order; confirm.
PlayTypes = df.PlayType.value_counts().index.tolist()
Costs = [sum(x) for x in cost_mat]
y = df.PlayType.values
ClassWeights = dict(zip(PlayTypes, Costs))
# Pipeline for cost evaluation (standardize, then a 250-tree weighted forest).
clf = Pipeline([('sca',StandardScaler()),
                ('clf',RandomForestClassifier(class_weight=ClassWeights, n_estimators=250))])
per_fold_eval_criteria = cross_val_score(estimator=clf,
                                         X=X,
                                         y=y,
                                         cv=cv,
                                         scoring=scorer,
                                         n_jobs=-1
                                         )
RFCosts = per_fold_eval_criteria
# Second run of the identical pipeline, scored with AUC-of-ROC instead of cost.
clf = Pipeline([('sca',StandardScaler()),
                ('clf',RandomForestClassifier(class_weight=ClassWeights, n_estimators=250))])
per_fold_eval_criteria = cross_val_score(estimator=clf,
                                         X=X,
                                         y=y,
                                         cv=cv,
                                         scoring=auc_roc_scorer,
                                         n_jobs=-1
                                         )
RF_auc = per_fold_eval_criteria
# Saved per-fold results from a previous run (overwrite the live values above).
RFCosts = [ 988.05, 1007.9 , 976.75, 956.75, 971.75, 949.55, 919. , 992.5 , 985.75, 956. ]
RF_auc = [ 0.89186154, 0.84936065, 0.85641263, 0.84052327, 0.84994278, 0.84799384, 0.86985361, 0.88280675, 0.87433496, 0.85303625]
# Head-to-head: best deep model (model 3) vs the random forest baseline.
auc_diff = getDifference(auc_roc_model_3, RF_auc)
cost_diff = getDifference(costs_model_3, RFCosts)
print("Deep Learning vs Random Forests (Cost):", cost_diff)
print("Deep Learning vs Random Forests (AUC_ROC):", auc_diff)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here, each component of the values tensor has one more sample point in the direction it is facing.
Step2: Staggered grids can also be created from other fields using field.at() or @ by passing an existing StaggeredGrid.
Step3: Grids do not support slicing along spatial dimensions because the result would be ambiguous with StaggeredGrids.
Step4: Slicing along batch dimensions has no special effect, this just slices the values.
|
<ASSISTANT_TASK:>
Python Code:
# !pip install --quiet phiflow
from phi.flow import *
# A staggered grid stores each velocity component on the cell faces, so every
# component carries one extra sample point along its own direction.
grid = StaggeredGrid(0, extrapolation.BOUNDARY, x=10, y=10)
grid.values
# The many ways to construct a StaggeredGrid over the same domain:
domain = dict(x=10, y=10, bounds=Box(x=1, y=1), extrapolation=extrapolation.ZERO)
grid = StaggeredGrid((1, -1), **domain) # from constant vector
grid = StaggeredGrid(Noise(), **domain) # sample analytic field
grid = StaggeredGrid(grid, **domain) # resample existing field
grid = StaggeredGrid(lambda x: math.exp(-x), **domain) # function value(location)
grid = StaggeredGrid(Sphere([0, 0], radius=1), **domain) # no anti-aliasing
grid = StaggeredGrid(SoftGeometryMask(Sphere([0, 0], radius=1)), **domain) # with anti-aliasing
# Slicing: components by name, spatial slices via .values, batch slices directly.
grid.vector['x'] # select component
grid.values.x[3:4] # spatial slice
grid.values.x[0] # spatial slice
grid.batch[0] # batch slice
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The easiest way to load image data is with datasets.ImageFolder from torchvision (documentation). In general you'll use ImageFolder like so
Step2: If you loaded the data correctly, you should see something like this (your image will be different)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import helper
data_dir = 'Cat_Dog_data/train'
# TODO: compose transforms here
# Resize to 255x255, crop the central 224x224 patch, and convert to a float tensor.
transform = transforms.Compose([transforms.Resize(size=(255, 255)),
                                transforms.CenterCrop(size=[224, 224]),
                                transforms.ToTensor()])
# TODO: create the ImageFolder
# ImageFolder infers each image's class label from its sub-directory name.
dataset = datasets.ImageFolder(root=data_dir,
                               transform=transform)
# TODO: use the ImageFolder dataset to create the DataLoader
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=32, shuffle=True)
# Run this to test your data loader
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
# Shape=(batch_size, channels, width, height)
images.shape
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
# The training pipeline adds random augmentation (rotation, crop, flip);
# the test pipeline applies only the deterministic resize/center-crop.
train_transforms = transforms.Compose(transforms=[transforms.RandomRotation(degrees=30),
                                                  transforms.RandomResizedCrop(size=(224, 224)),
                                                  transforms.RandomHorizontalFlip(),
                                                  transforms.ToTensor()])
test_transforms = transforms.Compose(transforms=[transforms.Resize(size=(255, 255)),
                                                 transforms.CenterCrop(size=(224, 224)),
                                                 transforms.ToTensor()])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train',
                                  transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test',
                                 transform=test_transforms)
trainloader = torch.utils.data.DataLoader(dataset=train_data,
                                          batch_size=32)
testloader = torch.utils.data.DataLoader(dataset=test_data,
                                         batch_size=32)
# change this to the trainloader or testloader
data_iter = iter(testloader)
images, labels = next(data_iter)
# Show the first four images of the batch side by side.
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax, normalize=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Descarga de datos
Step2: 3. Estandarizacion de datos de Parรกmetros
Step3: Exportar Dataset
|
<ASSISTANT_TASK:>
Python Code:
# Human-readable description of the CFE parameter handled by this notebook.
descripciones = {
    'P0609': 'Usuarios Electricos'
}
# Libraries used
import pandas as pd
import sys
import urllib
import os
import csv
import zipfile
# Runtime environment report.
print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
# Download the raw CFE CSV once; skip the download when a local copy exists.
url = r'http://datos.cfe.gob.mx/Datos/Usuariosyconsumodeelectricidadpormunicipio.csv'
archivo_local = r'D:\PCCS\00_RawData\01_CSV\CFE\UsuariosElec.csv'
if os.path.isfile(archivo_local):
    print('Ya existe el archivo: {}'.format(archivo_local))
else:
    print('Descargando {} ... ... ... ... ... '.format(archivo_local))
    urllib.request.urlretrieve(url, archivo_local)
    print('se descargó {}'.format(archivo_local))
# Numeric values in the CSV are stored as strings like " 000,000 " and need
# cleanup, so every year/month column is read as text first.
dtypes = {
    'Cve Mun':'str',
    '2010':'str',
    '2011':'str',
    '2012':'str',
    '2013':'str',
    '2014':'str',
    '2015':'str',
    '2016':'str',
    'ene-17':'str',
    'feb-17':'str',
    'mar-17':'str',
    'abr-17':'str',
    'may-17':'str',
    'jun-17':'str',
    'jul-17':'str',
    'ago-17':'str',
    'sep-17':'str',
    'oct-17':'str',
    'nov-17':'str',
    'dic-17':'str'}
# Read the dataset: skip 2 header rows, stop after the data block, treat ' - ' as NA.
dataset = pd.read_csv(archivo_local, skiprows = 2, nrows = 82236, na_values = ' - ',
                      dtype=dtypes) # Read the dataset
dataset['CVE_EDO'] = dataset['Cve Inegi'].apply(lambda x: '{0:0>2}'.format(x)) # zero-pad the state code to 2 digits
dataset['CVE_MUN'] = dataset['CVE_EDO'].map(str) + dataset['Cve Mun'] # state + municipality key
dataset.head()
# Strip blanks and thousands separators from the year/month columns.
columnums = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', 'ene-17', 'feb-17', 'mar-17', 'abr-17', 'may-17', 'jun-17', 'jul-17', 'ago-17', 'sep-17', 'oct-17', 'nov-17', 'dic-17']
for columna in columnums:
    dataset[columna] = dataset[columna].str.replace(' ','')
    dataset[columna] = dataset[columna].str.replace(',','')
dataset.head()
# Convert the cleaned text columns to numbers (unparseable cells become NaN).
columnasanios = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', 'ene-17', 'feb-17',
                 'mar-17', 'abr-17', 'may-17', 'jun-17', 'jul-17', 'ago-17', 'sep-17', 'oct-17', 'nov-17', 'dic-17']
for columna in columnasanios:
    dataset[columna] = pd.to_numeric(dataset[columna], errors='coerce', downcast = 'integer')
dataset.head()
# Drop the raw key/name columns that are no longer needed.
dropcols = ['Cve Edo', 'Cve Inegi', 'Cve Mun', 'Entidad Federativa', 'Municipio', 'Unnamed: 25', 'CVE_EDO']
dataset = dataset.drop(dropcols, axis = 1)
# Index by the combined state+municipality key CVE_MUN.
dataset = dataset.set_index('CVE_MUN')
dataset.head()
# Collapse the twelve 2017 monthly columns into a single yearly total.
# NOTE(review): for a user-count parameter a monthly *sum* yields user-months,
# not users -- confirm whether a sum or a mean/December value is intended.
columnas2017 = ['ene-17', 'feb-17', 'mar-17', 'abr-17', 'may-17', 'jun-17', 'jul-17', 'ago-17', 'sep-17', 'oct-17', 'nov-17', 'dic-17']
dataset['2017'] = dataset[columnas2017].sum(axis = 1)
# Remove the now-redundant monthly columns.
dataset = dataset.drop(columnas2017, axis = 1)
dataset.head()
len(dataset)
dataset.head(40)
# Keep only the per-municipality TOTAL rows (other rows are per-tariff detail).
dataset_total = dataset[dataset['Tarifa'] == 'TOTAL']
dataset_total.head()
len(dataset_total)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1.2 (10 pts)
Step2: The following code reads the data, subselects the $y$ and $X$ variables, and makes a training and test split. This is the Abalone dataset and we will be predicting the age. V9 is age, 1 represents old, 0 represents young.
Step3: Exercise 2.1 (10 pts) Perform logistic regression using Newton conjugate gradient. You should save the predicted probabilities, and save the roc and pr curves (using roc_curve and precision_recall_curve) computed using the test set.
Step4: Exercise 2.2 (10 pts) Do the same for linear discriminant analysis.
Step5: Exercise 2.3 (10 pts) Do the same for support vector machines.
Step6: Exercise 2.4 (10 pts) Plot and compare the ROC and PR curves for the above methods.
|
<ASSISTANT_TASK:>
Python Code:
# Compare four common classification surrogate losses as a function of the
# margin x (x > 0 means a correct, confident prediction).
xx = np.linspace(-3,5,100)
exp = np.exp(-xx)
# Fix: map() returns a lazy iterator in Python 3, which matplotlib cannot plot;
# materialize the truncated-quadratic and hinge losses as lists.
qua = list(map(lambda x: (1-x)**2 if x<1 else 0, xx))
hin = list(map(lambda x: 1-x if x<1 else 0, xx))
sig = 1 - np.tanh(xx)
plt.figure(figsize=(10,7))
plt.plot(xx,sig,label='sigmoid loss')
plt.plot(xx,exp,label='exponential loss')
plt.plot(xx,qua,label='truncated quadratic loss')
plt.plot(xx,hin,label='hinge loss')
plt.xlabel('x')
plt.ylabel('Loss')
plt.title('loss of different function')
plt.legend()
plt.show()
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn.linear_model as skl_lm
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, roc_curve
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
# dataset path
data_dir = "."
sample_data = pd.read_csv(data_dir+"/hw3.csv", delimiter=',')
# Encode the categorical first column as integer codes.
sample_data.V1=sample_data.V1.factorize()[0]
# V1..V8 are features; V9 (1 = old, 0 = young, per the exercise text) is the target.
X = np.array(sample_data.iloc[:,range(0,8)])
y = np.array(sample_data.iloc[:,8])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, random_state=0)
# Logistic regression via Newton conjugate gradient; keep test-set
# probabilities plus the ROC and precision-recall curves.
logit = skl_lm.LogisticRegression(solver='newton-cg')
logit.fit(X_train,y_train)
prob = logit.predict_proba(X_test)
logfpr, logtpr, logthr = roc_curve(y_test, prob[:,1])
logpre, logrec, logthresh = precision_recall_curve(y_test, prob[:,1])
# Same evaluation for linear discriminant analysis.
lda = LinearDiscriminantAnalysis()
lda.fit(X_train,y_train)
pred = lda.predict_proba(X_test)
ldafpr, ldatpr, ldathr = roc_curve(y_test, pred[:,1])
ldapre, ldarec, ldathresh = precision_recall_curve(y_test, pred[:,1])
# And for a support vector classifier (probability=True enables Platt scaling).
svc = SVC(probability=True)
svc.fit(X_train,y_train)
pred = svc.predict_proba(X_test)
svcfpr, svctpr, svcthr = roc_curve(y_test, pred[:,1])
svcpre, svcrec, svcthresh = precision_recall_curve(y_test, pred[:,1])
# Overlay the three ROC curves; the dashed diagonal is the random-guess baseline.
plt.figure()
lw = 2
plt.plot(logfpr,logtpr,lw=lw, label='logistic regression')
plt.plot(ldafpr,ldatpr,lw=lw, label='linear discriminant analysis')
plt.plot(svcfpr,svctpr,lw=lw, label='support vector machines')
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for three methods')
plt.legend(loc="lower right")
plt.show()
# Overlay the matching precision-recall curves.
plt.figure()
lw = 2
plt.plot(logrec,logpre,lw=lw, label='logistic regression')
plt.plot(ldarec,ldapre,lw=lw, label='linear discriminant analysis')
plt.plot(svcrec,svcpre,lw=lw, label='support vector machines')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision Recall curve for three methods')
plt.legend(loc="lower right")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Tutorial
Step13: Quick peek at your data
Step14: Create the Dataset
Step15: Create and run training pipeline
Step16: Run the training pipeline
Step17: Review model evaluation scores
Step18: Deploy the model
Step19: Send a online prediction request
Step20: Make the prediction
Step21: Undeploy the model
Step22: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
# Managed notebooks require pip's --user flag; elsewhere install normally.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.environ["IS_TESTING"]:
! pip3 install --upgrade tensorflow $USER_FLAG
import os
# Restart the kernel so the freshly installed packages are importable
# (skipped in CI, where the process must keep running).
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# Fall back to the active gcloud project when no ID was filled in.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1"  # @param {type: "string"}
from datetime import datetime
# Timestamp suffix used to keep resource names unique across runs.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    # Colab: interactive OAuth flow.
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        %env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
# Derive a default bucket name when none was provided.
# NOTE(review): there is no separator between the project id and "aip-",
# producing names like "gs://myprojectaip-<ts>"; a "-" is probably intended.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# Create the staging bucket, then verify access by listing its contents.
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
# Initialize the Vertex SDK against this project and staging bucket.
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
# Quick peek: row count and the first ten lines of the source CSV.
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
# The label is the last column of the CSV header.
# NOTE(review): this parses str() of the shell magic's result list and strips a
# trailing quote -- fragile; confirm it yields the expected column name.
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
    raise Exception("label column missing")
# Register the CSV as a Vertex AI tabular dataset.
dataset = aip.TabularDataset.create(
    display_name="Bank Marketing" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
# AutoML tabular classification job, minimizing log loss.
dag = aip.AutoMLTabularTrainingJob(
    display_name="bank_" + TIMESTAMP,
    optimization_prediction_type="classification",
    optimization_objective="minimize-log-loss",
)
print(dag)
# Train with a 60/20/20 split and an 8 node-hour budget (blocks until done).
model = dag.run(
    dataset=dataset,
    model_display_name="bank_" + TIMESTAMP,
    training_fraction_split=0.6,
    validation_fraction_split=0.2,
    test_fraction_split=0.2,
    budget_milli_node_hours=8000,
    disable_early_stopping=False,
    target_column=label_column,
)
# Get model resource ID
models = aip.Model.list(filter="display_name=bank_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
# Print the first evaluation produced for the trained model.
model_evaluations = model_service_client.list_model_evaluations(
    parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
# Deploy the model to a new endpoint on a single n1-standard-4 machine.
endpoint = model.deploy(machine_type="n1-standard-4")
# One sample row in the dataset's schema (all values passed as strings).
# NOTE(review): "managment"/"teritary" look like typos for the usual
# "management"/"tertiary" categories -- confirm against the CSV's actual values.
INSTANCE = {
    "Age": "58",
    "Job": "managment",
    "MaritalStatus": "married",
    "Education": "teritary",
    "Default": "no",
    "Balance": "2143",
    "Housing": "yes",
    "Loan": "no",
    "Contact": "unknown",
    "Day": "5",
    "Month": "may",
    "Duration": "261",
    "Campaign": "1",
    "PDays": "-1",
    "Previous": "0",
    "POutcome": "unknown",
}
# Send one online prediction request and show the response.
instances_list = [INSTANCE]
prediction = endpoint.predict(instances_list)
print(prediction)
# Remove the deployed model from the endpoint to stop serving charges.
endpoint.undeploy_all()
# Clean up every billable resource created by this notebook (set False to keep them).
# Each deletion is wrapped in try/except so a missing resource does not abort the rest.
delete_all = True
if delete_all:
    # Delete the dataset using the Vertex dataset object
    try:
        if "dataset" in globals():
            dataset.delete()
    except Exception as e:
        print(e)
    # Delete the model using the Vertex model object
    try:
        if "model" in globals():
            model.delete()
    except Exception as e:
        print(e)
    # Delete the endpoint using the Vertex endpoint object
    try:
        if "endpoint" in globals():
            endpoint.delete()
    except Exception as e:
        print(e)
    # Delete the AutoML or Pipeline training job
    try:
        if "dag" in globals():
            dag.delete()
    except Exception as e:
        print(e)
    # Delete the custom training job
    try:
        if "job" in globals():
            job.delete()
    except Exception as e:
        print(e)
    # Delete the batch prediction job using the Vertex batch prediction object
    try:
        if "batch_predict_job" in globals():
            batch_predict_job.delete()
    except Exception as e:
        print(e)
    # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
    try:
        if "hpt_job" in globals():
            hpt_job.delete()
    except Exception as e:
        print(e)
# Finally remove the Cloud Storage staging bucket and everything in it.
if "BUCKET_NAME" in globals():
    ! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating Wick's theorem contractions
Step2: Wick's theorem contractions
Step3: To apply Wick's theorem to this product, you need to create a WickTheorem object. You can compute the contraction with the contraction function.
Step4: You can get a LaTeX expression with the latex() function
Step5: Or render the expression directly in a notebook
Step6: Let's evaluate a more complex expression
Step7: Example
Step8: Next, we compute the operator $e^{-\hat{T}} \hat{H} e^{\hat{T}}$ up to the term quadratic in $\hat{T}$ and compute the fully contracted term
|
<ASSISTANT_TASK:>
Python Code:
import wicked as w
from IPython.display import display, Math, Latex
def latex(expr):
    """Typeset any object exposing a .latex() method as display math."""
    display(Math(expr.latex()))
# Define the spin-orbital spaces: occupied (hole) and unoccupied (particle) indices.
w.reset_space()
w.add_space("o", "fermion", "occupied", ['i','j','k','l','m'])
w.add_space("v", "fermion", "unoccupied", ['a','b','c','d','e','f'])
# One-body operator f over occupied/virtual blocks and a singles operator T1.
F = w.utils.gen_op('f',1,'ov','ov')
T1 = w.op('t',['v+ o'])
# Operator product F T1, prior to applying Wick's theorem.
FT1 = F @ T1
print(FT1)
wt = w.WickTheorem()
# All contractions of F T1 with resulting operator rank between 0 and 4.
expr = wt.contract(FT1,minrank=0,maxrank=4)
expr
print(expr.latex())
latex(expr)
# Fully contracted (rank-0) part of Lambda1 [F, T1].
L1 = w.op('lambda',['o+ v'])
opexpr = L1 @ w.commutator(F,T1)
expr = wt.contract(opexpr,0,0)
latex(expr)
# Similarity-transformed Hamiltonian e^{-T} H e^{T}, truncated at second
# order in T via the BCH series, contracted to its scalar component.
E0 = w.op("E_0",[""])
F = w.utils.gen_op('f',1,'ov','ov')
V = w.utils.gen_op('v',2,'ov','ov')
H = E0 + F + V
T = w.op("t",["v+ o", "v+ v+ o o", "v+ v+ v+ o o o"])
Hbar = w.bch_series(H,T,2)
expr = wt.contract(Hbar,0,0)
latex(expr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can also plot a combined set of labels (23 per hemisphere).
Step2: We can add another custom parcellation
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#         Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import mne
Brain = mne.viz.get_brain_class()
# Fetch the fsaverage subject's HCP-MMP and aparc_sub parcellation annotations.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
mne.datasets.fetch_hcp_mmp_parcellation(subjects_dir=subjects_dir,
                                        verbose=True)
mne.datasets.fetch_aparc_sub_parcellation(subjects_dir=subjects_dir,
                                          verbose=True)
# Full HCP-MMP1 parcellation (left hemisphere) with primary auditory cortex highlighted.
labels = mne.read_labels_from_annot(
    'fsaverage', 'HCPMMP1', 'lh', subjects_dir=subjects_dir)
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1')
aud_label = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
brain.add_label(aud_label, borders=False)
# Combined HCP-MMP1 parcellation (23 labels per hemisphere, per the exercise text).
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('HCPMMP1_combined')
# Subdivided aparc parcellation.
brain = Brain('fsaverage', 'lh', 'inflated', subjects_dir=subjects_dir,
              cortex='low_contrast', background='white', size=(800, 600))
brain.add_annotation('aparc_sub')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step3: Details of the "Happy" dataset
Step4: You have now built a function to describe your model. To train and test this model, there are four steps in Keras
Step5: Exercise
Step6: Exercise
Step7: Note that if you run fit() again, the model will continue to train with the parameters it has already learnt instead of reinitializing them.
Step8: If your happyModel() function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets. To pass this assignment, you have to get at least 75% accuracy.
Step9: 5 - Other useful functions in Keras (Optional)
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
# Load the "Happy" dataset (images with a binary happy/not-happy label).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors to the [0, 1] range.
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape the label arrays to (examples, 1).
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """
    ### START CODE HERE ###
    # Exercise stub: build the Keras graph here and assign it to `model`.
    # Until the student fills this in, the return below raises NameError.
    ### END CODE HERE ###
    return model
# Exercise stubs: the four cells below expect one line of student code each
# (create, compile, fit, and evaluate the model).  `preds` and `happyModel`
# remain undefined until those cells are completed.
### START CODE HERE ### (1 line)
### END CODE HERE ###
### START CODE HERE ### (1 line)
### END CODE HERE ###
### START CODE HERE ### (1 line)
### END CODE HERE ###
### START CODE HERE ### (1 line)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# Test with your own image: set img_path to a picture file below.
### START CODE HERE ###
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
# Convert to a (1, 64, 64, 3) batch and apply the ImageNet preprocessing.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
# Model inspection utilities: text summary and rendered architecture graph.
happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Section 2
Step3: In the cell below, describe some of the conclusions that you've drawn from the data you have just explored!
|
<ASSISTANT_TASK:>
Python Code:
# put your code here, and add additional cells as necessary.
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
# Columns alternate value/uncertainty: x, xerr, y, yerr, z, zerr.
alldata = np.loadtxt('datafile_1.csv',comments='#',unpack=True,delimiter=',')
xval = alldata[0]
xerr = alldata[1]
yval = alldata[2]
yerr = alldata[3]
zval = alldata[4]
zerr = alldata[5]
# Pairwise scatter plots with error bars in both directions.
plt.errorbar(xval,yval,xerr=xerr,yerr=yerr,fmt='ro',ecolor='b')
plt.errorbar(xval,zval,xerr=xerr,yerr=zerr,fmt='ro',ecolor='b')
plt.errorbar(yval,zval,xerr=yerr,yerr=zerr,fmt='ro',ecolor='b')
# 3-D scatter of the same points, viewed from a fixed elevation/azimuth.
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(xval, yval, zval,'bo')
ax.view_init(elev=30., azim=20)
# put your code here, and add additional cells as necessary.
# column 0 is years, column 13 is yearly average
global_temp = np.loadtxt('GLB.Ts.csv',skiprows=1,unpack=True,delimiter=',')
# plot the monthly data so we can get some sense of range.
for i in range(1,13):
    plt.plot(global_temp[0],global_temp[i]/100.0,'r-')
# plot yearly data over this
plt.plot(global_temp[0],global_temp[13]/100.0,'b.')
# NaN-aware summary statistics (the nan* variants skip missing entries).
# http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.nanmin.html
print("min, max, mean: {:3f} {:3f} {:3f}".format(np.nanmin(global_temp[13])/100.0,
                                                 np.nanmax(global_temp[13])/100.0,
                                                 np.nanmean(global_temp[13])/100.0))
# ~20-year moving average of the yearly series, window clipped at both ends.
# NOTE(review): plain .sum()/.size propagates NaNs into the smoothed curve;
# a nansum with a non-NaN count would be needed if missing years occur.
smoothed = np.zeros_like(global_temp[13])
for i in range(global_temp[13].size):
    start = i - 10
    end = i + 10
    if start < 0:
        start = 0
    if end > global_temp[13].size:
        end = global_temp[13].size
    smoothed[i] = global_temp[13][start:end].sum() / global_temp[13][start:end].size
plt.plot(global_temp[0],smoothed/100.0,'g-',linewidth=3)
# calculate standard deviation around the smoothed value
print(np.nanstd(global_temp[13]-smoothed)/100.0)
# Bintanja (2008) reconstruction: skip the 110-line header, columns unpacked
# so each variable is a row (0 = time in kyr, per the axis labels below).
global_temp_3myr = np.loadtxt('bintanja2008.txt',skiprows=110,unpack=True)
# Show only the most recent 100 kyr; xlim(tbegin, 0) puts "now" on the right.
tbegin = 100
tend = 0
plt.subplot(4, 1, 1)
plt.plot(global_temp_3myr[0],global_temp_3myr[4],'k-')
plt.title('Data over 3 Myr')
plt.ylabel('Sfc Temperature [K]')
plt.xlabel('time (kyr)')
plt.xlim(tbegin,0)
plt.subplot(4, 1, 2)
plt.plot(global_temp_3myr[0],global_temp_3myr[3],'r-')
plt.ylabel('deep sea temperature [K]')
plt.xlabel('time (kyr)')
plt.xlim(tbegin,0)
plt.subplot(4, 1, 3)
plt.plot(global_temp_3myr[0],global_temp_3myr[7],'g-')
plt.ylabel('ice volume [m]')
plt.xlabel('time (kyr)')
plt.xlim(tbegin,0)
plt.subplot(4, 1, 4)
# Column 8 is negated so the curve reads as sea level relative to present.
plt.plot(global_temp_3myr[0],-global_temp_3myr[8],'b-')
plt.ylabel('sea level [m]')
plt.xlabel('time (kyr)')
plt.xlim(tbegin,0)
# Cross-plots: ice thickness vs sea level, and temperature vs sea level.
plt.plot(global_temp_3myr[7],-global_temp_3myr[8],'b.')
plt.xlabel('ice thickness [m]')
plt.ylabel('sea level [m]')
plt.plot(global_temp_3myr[4],-global_temp_3myr[8],'r.')
plt.xlabel('temperature [K]')
plt.ylabel('sea level [m]')
from IPython.display import HTML
HTML(
<iframe
src="https://goo.gl/forms/Jg6Mxb0ZTvwiSe4R2?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up a color palette reputed to be color blind friendly. From
Step2: Read in the data. I am using a modified version of the UCI Auto MPG data set. Missing horsepower values have been looked up and a column to mark diesel vehicles added.
Step3: Lets check the effect of the number of cylinders a vehicle has vs. the fuel economy (mpg) by using a box plot.
Step4: What is with 3 cylinder vehicles, why do they break the pattern of decreasing fuel economy with more cylinders?
Step5: 3 cylinder cars are all Mazdas with rotary engines. These actually have 3 chambers per rotor and 2 rotors per engine - so 3 x 2 = 6. As plotted by the box plot they do indeed appear to share fuel economy characteristics with 6 cylinder engines. Possibly in a machine learning model it might be best to replace these by 6 cylinders, especially since there are so few of them.
Step6: It appears that the relationship is not really linear. So try a second order polynomial fit.
Step7: The second order polynomial does appear to fit better than the first order (straight line). Hence, a machine learning model should either try to capture the higher order effect with squared terms (and possibly interactions) or use a machine learning method that copes well with non-linearity such as Random Forests or other similar tree-based algorithms.
Step8: We can observe that more cylinders generally lead to increasing weight and lower mpg. Multi-collinearity could be a problem in this data set, here the relationship between weight and cylinders.
Step9: Mean mpg by the region of origin of a vehicle. The origin code is translated to a more human readable format. Origin is either related to fuel economy or lighter vehicles come from Japan and Europe than from the US in this time period (1970's to 1980's).
Step10: Box plot for fuel economy by vehicle origin.
Step11: A scatter plot for mpg vs. weight with 2nd order regression fit curves by vehicle origin. Shows that vehicle origin may have some effect on fuel economy even when vehicle weight is considered. It is a complex relationship since lighter Japanese cars have higher fuel economy and the heavier amongst the European cars have higher fuel economy than their equivalent American cars.
Step12: In the plot above the orange line shows weight vs. mpg for cars from the US. The regression fit curve covers the weight range for US cars. The green line is similar for European cars and the maroon line for Japanese cars with the curves covering the weight range for their respective origin codes.
Step13: Check for normal distributions of the data - the test fails - note the p-values are far below the normal level of significance of 0.05. All three cylinder types fail the normality test.
Step14: Histograms show skewed distributions.
Step15: Even with non-normal data Levene's test can be used to check for equal variances. This test shows low p-values for the 4 cylinder vehicles against 6 and 8 cylinder vehicles, so we cannot assume equal variances going forward.
Step16: The Kruskal ANOVA test can be used when conditions of normality and equal variances are not satisfied. The p-value is near zero, so cylinders seem have an effect on mpg, though we may need to consider multi-collinearity with other variables such as weight and displacement.
Step17: Mood's Median Test, another way of checking that the number of cylinders may be predictive of mpg. Or put another way, that automobiles with differing numbers of cylinders have significantly different mpg. This is a comparison for 4, 6 and 8 cylinder cars.
Step18: We can use the stats models multiple tests function to correct the p-values for the three test pairs so that we can see which pairs are significant. The adjustment for multiple tests gives a better result than simple pairwise comparison since it adjusts for the loss of power in doing multiple tests. Here the p-values are all still zero so each combination of engine cylinders has a significantly different median mpg value.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.sandbox.stats.multicomp as mc
colors = ('#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3', '#999999', '#e41a1c') # , '#dede00')
df = pd.read_csv('uci_auto_mpg_mod.csv')
print df.head()
mpg3cyl = df['mpg'][df['cylinders'] == 3]
mpg4cyl = df['mpg'][df['cylinders'] == 4]
mpg5cyl = df['mpg'][df['cylinders'] == 5]
mpg6cyl = df['mpg'][df['cylinders'] == 6]
mpg8cyl = df['mpg'][df['cylinders'] == 8]
cyl = [mpg3cyl, mpg4cyl, mpg5cyl, mpg6cyl, mpg8cyl]
bplot = plt.boxplot(cyl,
notch=False, # notch shape
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=['3 Cyl', '4 Cyl', '5 Cyl', '6 Cyl', '8 Cyl']) # will be used to label x-ticks
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
patch._alpha = 0.5
for line in bplot['medians']:
line._color = 'k'
plt.xlabel("Number of Engine Cylinders")
plt.ylabel("MPG")
plt.title("Engine Cylinders vs. MPG")
plt.show()
df3cyl = df.loc[df['cylinders'] == 3]
print ' 3 cylinder cars'
print df3cyl['car_name']
# fit the line
z = np.polyfit(df['weight'], df['mpg'], 1)
y_poly = [z[0] * x + z[1] for x in range(1500, 5600, 100)]
x_poly = [x for x in range(1500, 5600, 100)]
# plot
plt.plot(df['weight'], df['mpg'], ".", color=colors[0])
plt.plot(x_poly, y_poly, "-", color=colors[1])
plt.xlim(1500, 5500)
plt.ylim(0, 50)
plt.xlabel("Vehicle Weight")
plt.ylabel("MPG")
plt.title("Weight vs. MPG")
plt.show()
plt.close()
# second order fit
z = np.polyfit(df['weight'], df['mpg'], 2)
y_poly2 = [z[0] * x ** 2 + z[1] * x + z[2] for x in range(1500, 5600, 100)]
# plot it
plt.plot(df['weight'], df['mpg'], ".", color=colors[0])
plt.plot(x_poly, y_poly2, "-", color=colors[1])
plt.xlim(1500, 5500)
plt.ylim(0, 50)
plt.xlabel("Vehicle Weight")
plt.ylabel("MPG")
plt.title("Weight vs. MPG")
plt.show()
plt.close()
markers = ('s', 'D')
cyls = pd.unique(df.cylinders.values)
cyls = np.sort(cyls)
for i, cyl in enumerate(cyls):
if cyl ==3 or cyl == 5:
s = 16
m = markers[1]
else:
s = 4
m = markers[0]
plt.scatter(df['weight'].loc[df['cylinders'] == cyl],
df['mpg'].loc[df['cylinders'] == cyl],
s=s, c=colors[i-1], marker=m, label='%d Cylinders' % cyl)
plt.plot(x_poly, y_poly, "-", color=colors[5])
plt.plot(x_poly, y_poly2, "-", color=colors[6])
plt.xlim(1500, 5500)
plt.ylim(0, 50)
plt.xlabel("Vehicle Weight")
plt.ylabel("MPG")
plt.title("Weight vs. MPG")
plt.legend()
plt.show()
plt.close()
z = np.polyfit(df['displacement'], df['horsepower'], 1)
y_poly = [z[0] * x + z[1] for x in range(0, 600, 50)]
x_poly = [x for x in range(0, 600, 50)]
plt.plot(df['displacement'], df['horsepower'], '.', color=colors[0])
plt.plot(x_poly, y_poly, '-', color=colors[1])
plt.xlim(0, 500)
plt.ylim(0, 300)
plt.xlabel("Displacement")
plt.ylabel("Horsepower")
plt.title("Displacement vs. Horsepower")
plt.show()
plt.close()
df['origin_name'] = df['origin'] - 1
df['origin_name'] = df['origin_name'].astype('category')
df['origin_name'] = pd.Series(pd.Categorical.from_codes(df['origin_name'], categories=['US', 'EUR', 'JPN']))
mean_mpg = df.groupby('origin_name', as_index=False)['mpg'].mean()
print 'Mean value of mpg, by vehicle origin:'
print mean_mpg.to_string(index=False)
mpg_usa = df['mpg'][df['origin_name'] == 'US']
mpg_eur = df['mpg'][df['origin_name'] == 'EUR']
mpg_jpn = df['mpg'][df['origin_name'] == 'JPN']
origin_mpg = (mpg_usa, mpg_eur, mpg_jpn)
bplot = plt.boxplot(origin_mpg,
notch=False, # notch shape
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=['USA', 'EUR', 'JPN']) # will be used to label x-ticks
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
patch._alpha = 0.5
for line in bplot['medians']:
line._color = 'k'
plt.xlabel("Vehicle Origin")
plt.ylabel("MPG")
plt.title("Vehicle Origin vs. MPG")
plt.show()
plt.close()
df_usa = df.loc[df['origin_name'] == 'US']
z = np.polyfit(df_usa['weight'], df_usa['mpg'], 2)
x_poly_usa = [x for x in range(np.min(df_usa['weight']), np.max(df_usa['weight'] + 100), 100)]
y_poly_usa = [z[0] * x ** 2 + z[1] * x + z[2] for x in range(np.min(df_usa['weight']),
np.max(df_usa['weight'] + 100), 100)]
df_eur = df.loc[df['origin_name'] == 'EUR']
z = np.polyfit(df_eur['weight'], df_eur['mpg'], 2)
x_poly_eur = [x for x in range(np.min(df_eur['weight']), np.max(df_eur['weight'] + 100), 100)]
y_poly_eur = [z[0] * x ** 2 + z[1] * x + z[2] for x in range(np.min(df_eur['weight']),
np.max(df_eur['weight'] + 100), 100)]
df_jpn = df.loc[df['origin_name'] == 'JPN']
z = np.polyfit(df_jpn['weight'], df_jpn['mpg'], 2)
x_poly_jpn = [x for x in range(np.min(df_jpn['weight']), np.max(df_jpn['weight'] + 100), 100)]
y_poly_jpn = [z[0] * x ** 2 + z[1] * x + z[2] for x in range(np.min(df_jpn['weight']),
np.max(df_jpn['weight'] + 100), 100)]
plt.plot(df['weight'], df['mpg'], ".", color=colors[0], alpha=0.33)
plt.plot(x_poly_usa, y_poly_usa, "-", color=colors[1])
plt.plot(x_poly_eur, y_poly_eur, "-", color=colors[2])
plt.plot(x_poly_jpn, y_poly_jpn, "-", color=colors[4])
plt.xlim(1500, 5500)
plt.ylim(0, 50)
plt.xlabel("Vehicle Weight")
plt.ylabel("MPG")
plt.title("Weight vs. MPG, Fitted by Vehicle Origin")
plt.show()
plt.close()
df468 = df.loc[df.cylinders != 3]
df468 = df468.loc[df468.cylinders != 5]
df468 = df468.loc[df.diesel != 1]
s, p = stats.normaltest(df468['mpg'].loc[df468['cylinders'] == 4])
print 'Normality test for mpg with 4 cylinders, p-value: %.4f' % p
s, p = stats.normaltest(df468['mpg'].loc[df468['cylinders'] == 6])
print 'Normality test for mpg with 6 cylinders, p-value: %.4f' % p
s, p = stats.normaltest(df468['mpg'].loc[df468['cylinders'] == 8])
print 'Normality test for mpg with 8 cylinders, p-value: %.4f' % p
plt.hist(df468['mpg'].loc[df468['cylinders'] == 4], bins=32)
plt.show()
plt.close()
plt.hist(df468['mpg'].loc[df468['cylinders'] == 6], bins=32)
plt.show()
plt.close()
plt.hist(df468['mpg'].loc[df468['cylinders'] == 8], bins=32)
plt.show()
plt.close()
W, p = stats.levene(df468['mpg'].loc[df468['cylinders'] == 4], df468['mpg'].loc[df468['cylinders'] == 6])
print 'Levene test for equal variances, 4 vs 6 cylinder vehicles, p-value: %.4f' % p
W, p = stats.levene(df468['mpg'].loc[df468['cylinders'] == 4], df468['mpg'].loc[df468['cylinders'] == 8])
print 'Levene test for equal variances, 4 vs 8 cylinder vehicles, p-value: %.4f' % p
W, p = stats.levene(df468['mpg'].loc[df468['cylinders'] == 6], df468['mpg'].loc[df468['cylinders'] == 8])
print 'Levene test for equal variances, 6 vs 8 cylinder vehicles, p-value: %.4f' % p
grps = pd.unique(df468.cylinders.values)
d_data = {grp: df468['mpg'][df468.cylinders == grp] for grp in grps}
kruskal = stats.kruskal(d_data[4], d_data[6], d_data[8])
print 'kruskal'
print kruskal
stat, p, med, tbl = stats.median_test(d_data[4], d_data[6], d_data[8])
print "Mood's median p-value: %.4f" % p
print "Mood's median value: %.4f (median mpg)" % med
stat, p46, med, tbl = stats.median_test(d_data[4], d_data[6])
stat, p48, med, tbl = stats.median_test(d_data[4], d_data[8])
stat, p68, med, tbl = stats.median_test(d_data[6], d_data[8])
rej, pvc, aS, aB = mc.multipletests((p46, p48, p68))
print pvc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Functions and CEO Incomes
Step2: Question 1. When we first loaded this dataset, we tried to compute the average of the CEOs' pay like this
Step3: Question 2. Extract the first value in the "Total Pay" column. It's Mark Hurd's pay in 2015, in millions of dollars. Call it mark_hurd_pay_string.
Step4: Question 3. Convert mark_hurd_pay_string to a number of dollars. The string method strip will be useful for removing the dollar sign; it removes a specified character from the start or end of a string. For example, the value of "100%".strip("%") is the string "100". You'll also need the function float, which converts a string that looks like a number to an actual number. Last, remember that the answer should be in dollars, not millions of dollars.
Step6: To compute the average pay, we need to do this for every CEO. But that looks like it would involve copying this code 102 times.
Step7: Running that cell doesn't convert any particular pay string.
Step8: What have we gained? Well, without the function, we'd have to copy that 10**6 * float(pay_string.strip("$")) stuff each time we wanted to convert a pay string. Now we just call a function whose name says exactly what it's doing.
Step12: Like the built-in functions, you can use named values as arguments to your function.
Step13: Here's something important about functions
Step14: As we've seen with the built-in functions, functions can also take strings (or arrays, or tables) as arguments, and they can return those things, too.
Step16: Calls on calls on calls
Step17: Functions can also encapsulate code that does things rather than just computing values. For example, if you call print inside a function, and then call that function, something will get printed.
Step18: After writing this, you realize you also wanted to print out the 2nd and 3rd-highest years. Instead of copying your code, you decide to put it in a function. Since the rank varies, you make that an argument to your function.
Step19: 3. applying functions
Step20: The old name for max is still around
Step21: Try just writing max or our_name_for_max (or the name of any other function) in a cell, and run that cell. Python will print out a (very brief) description of the function.
Step22: Why is this useful? Since functions are just values, it's possible to pass them as arguments to other functions. Here's a simple but not-so-practical example
Step23: Question 1. Make an array containing any 3 other functions you've seen. Call it some_functions.
Step24: Working with functions as values can lead to some funny-looking code. For example, see if you can figure out why this works
Step25: Here's a simpler example that's actually useful
Step26: Here's an illustration of what that did
Step27: Now that we have the pay in numbers, we can compute things about them.
Step28: Question 4. Companies pay executives in a variety of ways
Step29: Check out the "% Change" column in compensation. It shows the percentage increase in the CEO's pay from the previous year. For CEOs with no previous year on record, it instead says "(No previous year)". The values in this column are strings, not numbers, so like the "Total Pay" column, it's not usable without a bit of extra work.
Step30: Question 6. What was the average pay of these CEOs in 2014? Does it make sense to compare this number to the number you computed in question 3?
Step31: Question 7. A skeptical student asks
Step32: Question 2. Looking at the histogram, how many CEOs made more than \$30 million? (Answer the question by filling in your answer manually. You'll have to do a bit of arithmetic; feel free to use Python as a calculator.)
Step33: Question 3. Answer the same question with code. Hint
Step34: Question 4. Do most CEOs make around the same amount, or are there some who make a lot more than the rest? Discuss with someone near you.
Step35: The big difference between the code above and all the other code we have run thus far is that the code above doesn't always return the same value. It can return either treatment or control, and we don't know ahead of time which one it will pick. We can repeat the process by providing a second argument, the number of times to repeat the process. In the choice function we just used, we can add an optional second argument that tells the function how many times to make a random selection. Try it below
Step36: If we wanted to determine whether the random choice made by the function random is really fair, we could make a random selection a bunch of times and then count how often each selection shows up. In the next few code blocks, write some code that calls the choice function on the two_groups array one thousand times. Then, print out the percentage of occurrences for each of treatment and control. A useful function called Counter will be helpful; look at the code comments to see how it works!
Step37: A fundamental question about random events is whether or not they occur. For example
Step38: The value True indicates that the comparison is valid; Python has confirmed this simple fact about the relationship between 3 and 1+1. The full set of common comparison operators are listed below.
Step39: An expression can contain multiple comparisons, and they all must hold in order for the whole expression to be True. For example, we can express that 1+1 is between 1 and 3 using the following expression.
Step40: The average of two numbers is always between the smaller number and the larger number. We express this relationship for the numbers x and y below. Try different values of x and y to confirm this relationship.
Step41: 7 Comparing Strings
Step42: Let's return to random selection. Recall the array two_groups which consists of just two elements, treatment and control. To see whether a randomly assigned individual went to the treatment group, you can use a comparison
Step43: As before, the random choice will not always be the same, so the result of the comparison won't always be the same either. It will depend on whether treatment or control was chosen. With any cell that involves random selection, it is a good idea to run the cell several times to get a sense of the variability in the result.
Step44: This function returns the correct sign if the input is a positive number. But if the input is not a positive number, then the if expression evaluates to a False value, and so the return statement is skipped and the function call has no value. See what happens when you run the next block.
Step45: So let us refine our function to return Negative if the input is a negative number. We can do this by adding an elif clause, where elif is Python's shorthand for the phrase "else, if".
Step46: Now sign returns the correct answer when the input is -3
Step47: What if the input is 0? To deal with this case, we can add another elif clause
Step48: Run the previous code block for different inputs to our sign() function to make sure it does what we want it to.
Step50: 9. The General Form
Step51: 11. Iteration
Step52: We might want to re-run code with slightly different input or other slightly different behavior. We could copy-paste the code multiple times, but that's tedious and prone to typos, and if we wanted to do it a thousand times or a million times, forget it.
Step53: It is instructive to imagine code that exactly replicates a for statement without the for statement. (This is called unrolling the loop.) A for statement simple replicates the code inside it, but before each iteration, it assigns a new value from the given sequence to the name we chose. For example, here is an unrolled version of the loop above
Step54: Notice that the name i is arbitrary, just like any name we assign with =.
Step55: In this case, we simply perform exactly the same (random) action several times, so the code inside our for statement does not actually refer to i.
Step56: This keeps the array pets unchanged
Step57: But often while using for loops it will be convenient to mutate an array โ that is, change it โ when augmenting it. This is done by assigning the augmented array to the same name as the original.
Step58: Example
Step59: Let us rewrite the cell with the for statement unrolled
Step60: By capturing the results in an array we have given ourselves the ability to use array methods to do computations. For example, we can use np.count_nonzero to count the number of heads in the five tosses.
Step61: Iteration is a powerful technique. For example, by running exactly the same code for 1000 tosses instead of 5, we can count the number of heads in 1000 tosses.
Step62: Example
Step63: Now let's study 100 tosses. We will start by creating an empty array called heads. Then, in each of the 10,000 repetitions, we will toss a coin 100 times, count the number of heads, and append it to heads.
Step64: Let us collect the results in a table and draw a histogram.
Step65: Here is a histogram of the data, with bins of width 1 centered at each value of the number of heads.
|
<ASSISTANT_TASK:>
Python Code:
# Run this cell to set up the notebook, but please don't change it.
# These lines import the Numpy and Datascience modules.
import numpy as np
from datascience import *
# These lines do some fancy plotting magic.
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import warnings
warnings.simplefilter('ignore', FutureWarning)
# These lines load the tests.
from client.api.assignment import load_assignment
tests = load_assignment('lab04.ok')
raw_compensation = Table.read_table('raw_compensation.csv')
raw_compensation
...
mark_hurd_pay_string = ...
mark_hurd_pay_string
_ = tests.grade('q1_2')
mark_hurd_pay = ...
mark_hurd_pay
_ = tests.grade('q1_3')
def convert_pay_string_to_number(pay_string):
    """Converts a pay string like '$100 ' (in millions) to a number of dollars."""
    # NOTE(review): returns the numeric value exactly as it appears in the
    # string (i.e. still in millions); the prose above mentions a 10**6
    # factor -- confirm which unit downstream cells expect.
    return float(pay_string.strip("$"))
_ = tests.grade('q1_4')
convert_pay_string_to_number(mark_hurd_pay_string)
# We can also compute Safra Catz's pay in the same way:
convert_pay_string_to_number(raw_compensation.where("Name", are.equal_to("Safra A. Catz*")).column("Total Pay").item(0))
...
...
...
...
twenty_percent = ...
twenty_percent
_ = tests.grade('q2_1')
a_proportion = 2**(.5) / 2
a_percentage = ...
a_percentage
_ = tests.grade('q2_2')
# You should see an error when you run this. (If you don't, you might
# have defined factor somewhere above.)
factor
def disemvowel(a_string):
    """Return a_string with all vowels (a, e, i, o, u, upper or lower case) removed."""
    # Keep every character whose lowercase form is not a vowel; join preserves order.
    return "".join(c for c in a_string if c.lower() not in "aeiou")
# An example call to your function. (It's often helpful to run
# an example call from time to time while you're writing a function,
# to see how it currently works.)
disemvowel("Can you read this without vowels?")
_ = tests.grade('q2_3')
def num_non_vowels(a_string):
    """The number of characters in a string, minus the vowels."""
    # Count characters (including spaces/punctuation) that are not vowels.
    return sum(1 for c in a_string if c.lower() not in "aeiou")
_ = tests.grade('q2_4')
movies_by_year = Table.read_table("movies_by_year.csv")
rank = 5
fifth_from_top_movie_year = movies_by_year.sort("Total Gross", descending=True).column("Year").item(rank-1)
print("Year number", rank, "for total gross movie sales was:", fifth_from_top_movie_year)
def print_kth_top_movie_year(k):
    """Print the year with the k-th highest total gross movie sales.

    Mirrors the worked example above (rank 5): sorts ``movies_by_year`` by
    "Total Gross" descending and reports the year at position k.
    """
    year = movies_by_year.sort("Total Gross", descending=True).column("Year").item(k - 1)
    print("Year number", k, "for total gross movie sales was:", year)
# Example calls to your function:
print_kth_top_movie_year(2)
print_kth_top_movie_year(3)
_ = tests.grade('q2_5')
our_name_for_max = max
our_name_for_max(2, 6)
max(2, 6)
max
make_array(max, np.average, are.equal_to)
some_functions = ...
some_functions
_ = tests.grade('q3_1')
make_array(max, np.average, are.equal_to).item(0)(4, -2, 7)
raw_compensation.apply(convert_pay_string_to_number, "Total Pay")
compensation = raw_compensation.with_column(
"Total Pay ($)",
...
compensation
_ = tests.grade('q3_2')
average_total_pay = ...
average_total_pay
_ = tests.grade('q3_3')
cash_proportion = ...
cash_proportion
_ = tests.grade('q3_4')
# For reference, our solution involved more than just this one line of code
...
with_previous_compensation = ...
with_previous_compensation
_ = tests.grade('q3_5')
average_pay_2014 = ...
average_pay_2014
_ = tests.grade('q3_6')
...
num_ceos_more_than_30_million = ...
num_ceos_more_than_30_million_2 = ...
num_ceos_more_than_30_million_2
_ = tests.grade('q4_3')
two_groups = make_array('treatment', 'control')
np.random.choice(two_groups)
np.random.choice(two_groups, 10)
# replace ... with code that will run the 'choice' function 1000 times;
# the resulting array of choices will then have the name 'exp_results'
exp_results = ...
from collections import Counter
Counter(exp_results)
# the output from Counter tells you how many times 'treatment' and 'control' appear in the array
# produced by 'choice'; run this cell to see the output
# use the info provided by 'Counter' to print the percentage of times 'treatment' and 'control'
# were selected
print(...) # print percentage for 'treatment' here
print(...) # print percentage for 'control' here
3 > 1 + 1
5 = 10/2
5 == 10/2
1 < 1 + 1 < 3
x = 12
y = 5
min(x, y) <= (x+y)/2 <= max(x, y)
'Dog' > 'Catastrophe' > 'Cat'
np.random.choice(two_groups) == 'treatment'
def sign(x):
    # First (deliberately incomplete) version: only the positive case is
    # handled, so calls with x <= 0 skip the return and yield None.
    if x > 0:
        return 'Positive'
sign(3)
sign(-3)
def sign(x):
    # Second version: adds the negative case via elif, but x == 0 still
    # falls through and returns None.
    if x > 0:
        return 'Positive'
    elif x < 0:
        return 'Negative'
sign(-3)
def sign(x):
    # Third version: explicitly tests x == 0 with a second elif, so all
    # three ordinary numeric cases now return a label.
    if x > 0:
        return 'Positive'
    elif x < 0:
        return 'Negative'
    elif x == 0:
        return 'Neither positive nor negative'
sign(0)
def sign(x):
    """Classify x as 'Positive', 'Negative', or 'Neither positive nor negative'.

    Final version: the catch-all else branch covers everything that is
    neither greater than nor less than zero.
    """
    if x > 0:
        label = 'Positive'
    elif x < 0:
        label = 'Negative'
    else:
        label = 'Neither positive nor negative'
    return label
sign(0)
def draw_card():
    """Print out a random suit and numeric value representing a card from a standard 52-card deck."""
    # Exercise skeleton: the TODO branches below are intentionally left for
    # the student to fill in; the function currently prints nothing.
    # pick a random number to determine the suit
    suit_num = np.random.uniform(0,1) # this function returns a random decimal number
    # between 0 and 1
    ### TODO: write an 'if' statement that prints out 'heart' if 0 < suit_num < 0.25,
    ###                                              'spade' if 0.25 < suit_num < 0.5,
    ###                                              'club' if 0.5 < suit_num < 0.75,
    ###                                              'diamond' if 0.75 < suit_num < 1
    # pick a random number to determine the suit
    val_num = np.random.uniform(0,13)
    ### TODO: write an if statement so that if 2 < val_num <= 12,
    ###       you print out the floor of val_num
    ###       (you can use the floor() function)
    ### TODO: write an 'if' statement that prints out the value of the card for the
    ###       non-numeric possibilities'A' for ace, 'J' for jack, 'Q' for 'queen', 'K'
    ###       for king;
    return
# test your function by running this block; do it multiple times and see what happens!
draw_card()
np.random.choice(make_array('Heads', 'Tails'))
for i in np.arange(3):
print(i)
i = np.arange(3).item(0)
print(i)
i = np.arange(3).item(1)
print(i)
i = np.arange(3).item(2)
print(i)
coin = make_array('Heads', 'Tails')
for i in np.arange(5):
print(np.random.choice(make_array('Heads', 'Tails')))
pets = make_array('Cat', 'Dog')
np.append(pets, 'Another Pet')
pets
pets = np.append(pets, 'Another Pet')
pets
coin = make_array('Heads', 'Tails')
tosses = make_array()
for i in np.arange(5):
tosses = np.append(tosses, np.random.choice(coin))
tosses
coin = make_array('Heads', 'Tails')
tosses = make_array()
i = np.arange(5).item(0)
tosses = np.append(tosses, np.random.choice(coin))
i = np.arange(5).item(1)
tosses = np.append(tosses, np.random.choice(coin))
i = np.arange(5).item(2)
tosses = np.append(tosses, np.random.choice(coin))
i = np.arange(5).item(3)
tosses = np.append(tosses, np.random.choice(coin))
i = np.arange(5).item(4)
tosses = np.append(tosses, np.random.choice(coin))
tosses
np.count_nonzero(tosses == 'Heads')
tosses = make_array()
for i in np.arange(1000):
tosses = np.append(tosses, np.random.choice(coin))
np.count_nonzero(tosses == 'Heads')
np.random.choice(coin, 10)
N = 10000
heads = make_array()
for i in np.arange(N):
tosses = np.random.choice(coin, 100)
heads = np.append(heads, np.count_nonzero(tosses == 'Heads'))
heads
results = Table().with_columns(
'Repetition', np.arange(1, N+1),
'Number of Heads', heads
)
results
results.select('Number of Heads').hist(bins=np.arange(30.5, 69.6, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (ไบ)่ณๆ้ไป็ดน
Step2: | ้กฏ็คบ | ่ชชๆ |
Step3:
|
<ASSISTANT_TASK:>
Python Code:
#้่กๆฏๅจipython notebook็ไป้ข่ฃๅฐ็จ๏ผๅฆๆๅจๅ
ถไปไป้ขๅๅฏไปฅๆฟๆ
%matplotlib inline
from sklearn import datasets
import matplotlib.pyplot as plt
#่ผๅ
ฅๆธๅญ่ณๆ้
digits = datasets.load_digits()
#็ซๅบ็ฌฌไธๅๅ็
plt.figure(1, figsize=(3, 3))
plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
for key,value in digits.items() :
try:
print (key,value.shape)
except:
print (key)
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
#ๆฅ่ๆๅๅ่ฉฆๅฐ้ๅๆฉๅจๅญธ็ฟ่ณๆไนๆ่ฟฐๆช้กฏ็คบๅบไพ
print(digits['DESCR'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initial set-up
Step2: Plot steady-state and time constant functions of original model
Step3: Activation gate ($r$) calibration
Step4: Set up prior ranges for each parameter in the model.
Step5: Run ABC calibration
Step6: Database results analysis
|
<ASSISTANT_TASK:>
Python Code:
import os, tempfile
import logging
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from ionchannelABC import theoretical_population_size
from ionchannelABC import IonChannelDistance, EfficientMultivariateNormalTransition, IonChannelAcceptor
from ionchannelABC.experiment import setup
from ionchannelABC.visualization import plot_sim_results, plot_kde_matrix_custom
import myokit
from pyabc import Distribution, RV, History, ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
from pyabc.populationstrategy import ConstantPopulationSize
from experiments.isus_wang import (wang_act_and_kin)
from experiments.isus_courtemanche import (courtemanche_deact)
modelfile = 'models/nygren_isus.mmt'
from ionchannelABC.visualization import plot_variables
sns.set_context('talk')
V = np.arange(-100, 40, 0.01)
nyg_par_map = {'ri': 'isus.r_inf',
'si': 'isus.s_inf',
'rt': 'isus.tau_r',
'st': 'isus.tau_s'}
f, ax = plot_variables(V, nyg_par_map, modelfile, figshape=(2,2))
observations, model, summary_statistics = setup(modelfile,
wang_act_and_kin,
courtemanche_deact)
assert len(observations)==len(summary_statistics(model({})))
g = plot_sim_results(modelfile,
wang_act_and_kin,
courtemanche_deact)
limits = {'isus.p1': (-100, 100),
'isus.p2': (1e-7, 50),
'log_isus.p3': (-5, 0),
'isus.p4': (-100, 100),
'isus.p5': (1e-7, 50),
'log_isus.p6': (-6, -1)}
prior = Distribution(**{key: RV("uniform", a, b - a)
for key, (a,b) in limits.items()})
db_path = ("sqlite:///" + os.path.join(tempfile.gettempdir(), "nygren_isus_rgate_unified.db"))
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
abc = ABCSMC(models=model,
parameter_priors=prior,
distance_function=IonChannelDistance(
exp_id=list(observations.exp_id),
variance=list(observations.variance),
delta=0.05),
population_size=ConstantPopulationSize(1000),
summary_statistics=summary_statistics,
transitions=EfficientMultivariateNormalTransition(),
eps=MedianEpsilon(initial_epsilon=100),
sampler=MulticoreEvalParallelSampler(n_procs=16),
acceptor=IonChannelAcceptor())
obs = observations.to_dict()['y']
obs = {str(k): v for k, v in obs.items()}
abc_id = abc.new(db_path, obs)
history = abc.run(minimum_epsilon=0., max_nr_populations=100, min_acceptance_rate=0.01)
history = History('sqlite:///results/nygren/isus/unified/nygren_isus_rgate_unified.db')
df, w = history.get_distribution()
df.describe()
# Posterior-predictive plots of the fitted model against the data.
sns.set_context('poster')
mpl.rcParams['font.size'] = 14
mpl.rcParams['legend.fontsize'] = 14
g = plot_sim_results(modelfile,
                     wang_act_and_kin,
                     courtemanche_deact,
                     df=df, w=w)
plt.tight_layout()
import pandas as pd
# Draw N weighted posterior samples and convert them to a list of
# {parameter: value} records for the plotting helper.
N = 100
nyg_par_samples = df.sample(n=N, weights=w, replace=True)
nyg_par_samples = nyg_par_samples.set_index([pd.Index(range(N))])
nyg_par_samples = nyg_par_samples.to_dict(orient='records')
sns.set_context('talk')
mpl.rcParams['font.size'] = 14
mpl.rcParams['legend.fontsize'] = 14
f, ax = plot_variables(V, nyg_par_map,
                       'models/nygren_isus.mmt',
                       [nyg_par_samples],
                       figshape=(2,2))
from ionchannelABC.visualization import plot_kde_matrix_custom
import myokit
import numpy as np
# Reference values: the original parameter values from the model file,
# converted to log10 where the parameter is sampled in log space.
m,_,_ = myokit.load(modelfile)
originals = {}
for name in limits.keys():
    if name.startswith("log"):
        # strip the "log_" prefix to recover the model variable name
        name_ = name[4:]
    else:
        name_ = name
    val = m.value(name_)
    if name.startswith("log"):
        val_ = np.log10(val)
    else:
        val_ = val
    originals[name] = val_
sns.set_context('paper')
# Pairwise KDE matrix of the posterior with the original values overlaid.
g = plot_kde_matrix_custom(df, w, limits=limits, refval=originals)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr/>
Step2: <hr/>
Step3: What kind of object did you receive as output? Call type on your result.
Step4: <hr/>
Step5: <hr/>
Step6: <hr/>
Step7: <hr/>
Step8: Examples
Step9: <hr />
Step10: <hr />
Step11: <hr />
Step12: <hr/>
Step13: <hr/>
|
<ASSISTANT_TASK:>
Python Code:
# Pandas baseline: load the iris CSV and compute a per-species aggregate.
import pandas as pd
df = pd.read_csv('iris.csv')
df.head()
df.groupby(df.Species).PetalLength.mean() # Average petal length per species
# odo converts between data formats/containers given (source, target).
from odo import odo
import numpy as np
import pandas as pd
odo("iris.csv", pd.DataFrame)
odo("iris.csv", list)
odo("iris.csv", np.ndarray)
# Targets can also be URIs, e.g. a table inside a sqlite database.
odo("iris.csv", "sqlite:///my.db::iris")
type(_)
# odo can read remote sources such as S3 as well.
odo('s3://nyqpug/tips.csv', pd.DataFrame)
import pandas as pd
df = pd.read_csv('iris.csv')
df.head(5)
df.Species.unique()
df.Species.drop_duplicates()
# Blaze provides a uniform expression interface over different backends.
import blaze as bz
d = bz.Data('iris.csv')
d.head(5)
d.Species.distinct()
!ls
# Point blaze at the sqlite database created above.
db = bz.Data('sqlite:///my.db')
#db.iris
#db.iris.head()
db.iris.Species.distinct()
db.iris[db.iris.Species == 'versicolor'][['Species', 'SepalLength']]
# Inspect SQL query
query = db.iris[db.iris.Species == 'versicolor'][['Species', 'SepalLength']]
print bz.compute(query)
query = bz.by(db.iris.Species, longest=db.iris.PetalLength.max(),
              shortest=db.iris.PetalLength.min())
print bz.compute(query)
odo(query, list)
# db = bz.Data('postgresql://postgres:postgres@ec2-54-159-160-163.compute-1.amazonaws.com') # Use Postgres if you don't have the sqlite file
# Explore the Lahman 2013 baseball database through blaze.
db = bz.Data('sqlite:///lahman2013.sqlite')
db.dshape
# View the Salaries table
t = bz.Data('sqlite:///lahman2013.sqlite::Salaries')
t.dshape
# What are the distinct teamIDs in the Salaries table?
t.teamID.distinct()
odo(t.teamID.distinct(), list)
query = t.teamID.distinct()
print bz.compute(query)
# What is the minimum and maximum yearID in the Salaries table?
t.yearID.min()
t.yearID.max()
# For the Oakland Athletics (teamID OAK), pick out the playerID, salary, and yearID columns
t[t.teamID=='OAK'][['playerID', 'salary', 'yearID']]
oak = t[t.teamID=='OAK'][['playerID', 'salary', 'yearID']]
oak
# Persist the selection to a CSV file.
odo(oak, 'oak.csv')
!ls
# Sort that result by salary.
# Use the ascending=False keyword argument to the sort function to find the highest paid players
oak.sort('salary',ascending=False)
# Same grouping computed with pandas, then with blaze, for comparison.
import pandas as pd
iris = pd.read_csv('iris.csv')
iris.groupby('Species').PetalLength.min()
iris = bz.Data('sqlite:///my.db::iris')
bz.by(iris.Species, largest=iris.PetalLength.max(),
      smallest=iris.PetalLength.min())
print(_)
iris = bz.Data('sqlite:///my.db::iris')
query = bz.by(iris.Species, largest=iris.PetalLength.max(), # A lazily evaluated result
              smallest=iris.PetalLength.min())
odo(query, list) # A concrete result
# Per-team salary statistics, ranked by the max/min salary ratio.
result = bz.by(db.Salaries.teamID, avg=db.Salaries.salary.mean(),
               max=db.Salaries.salary.max(),
               ratio=db.Salaries.salary.max() / db.Salaries.salary.min()
              ).sort('ratio', ascending=False)
odo(result, list)[:10]
# Store the result back into the sqlite database.
odo(result, 'sqlite:///my.db::result')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It is clear that in the code above we could change line 7 to result += y[i] * x[i] without affecting the result.
Step2: We define the vectors $x$ and $y$ using numpy
Step3: We can now calculate the $dot$ or $inner product$ using the dot function of numpy
Step4: The order of the arguments is irrelevant
Step5: Note that both vectors are actually row vectors in the above code. We can transpose them to column vectors by using the shape property
Step6: In fact, in our understanding of Linear Algebra, we take the arrays above to represent row vectors. Numpy treats them differently.
Step7: The problem here is that this does not do what we expect it to do. It only works if we declare the variables not to be arrays of numbers, but in fact a matrix
Step8: Note that the numpy functions dot and outer are not affected by this distinction. We can compute the dot product using the mathematical equation above in numpy using the new $x$ and $y$ row vectors
Step9: Or by reverting to
Step10: To read the result from this array of arrays, we would need to access the value this way
Step11: Outer Product of Two Vectors
Step12: We can now compute the outer product by multiplying the column vector $x$ with the row vector $y$
Step13: Numpy provides an outer function that does all that
Step14: Note, in this simple case using the simple arrays for the data structures of the vectors does not affect the result of the outer function
Step15: Matrix-Vector Products
Step16: Assume that we have a column vector $x$
Step17: We can thus describe the product writing $A$ by rows as
Step18: We can ask for the shape of $A$
Step19: The shape property of a matrix contains the $m$ (number of rows) and $n$ (number of columns) properties in a tuple, in that particular order. We can create an identity matrix for the use in $AI$ by using the $n$ value
Step20: Note that we specify the dtype parameter to identity as int, since the default would return a matrix of float values.
Step21: We can compute the dot product of $A$ and its identity matrix $I$
Step22: The same is true for the other direction
Step23: Diagonal Matrix
Step24: An optional parameter k to the diag function allows us to extract the diagonal above the main diagonal with a positive k, and below the main diagonal with a negative k
Step25: Transpose of a Matrix
Step26: The numpy function arange is similar to the standard Python function range. It returns an array with $n$ elements, specified in the one parameter version only. If we provide two parameters to arange, it generates an array starting from the value of the first parameter and ending with a value one less than the second parameter. The function reshape returns us a matrix with the corresponding number of rows and columns.
|
<ASSISTANT_TASK:>
Python Code:
# Dot product computed "by hand" over two plain Python tuples.
x = (1, 2, 3, 4)
y = (5, 6, 7, 8)
n = len(x)
if n == len(y):
    result = 0
    for i in range(n):
        result += x[i] * y[i]
    print(result)
# Same computation with numpy's dot; the argument order does not matter.
import numpy as np
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
print("x:", x)
print("y:", y)
np.dot(x, y)
np.dot(y, x)
# Reshape the 1-D arrays into (4, 1) column vectors via the shape attribute.
print("x:", x)
x.shape = (4, 1)
print("xT:", x)
print("y:", y)
y.shape = (4, 1)
print("yT:", y)
# .T on a 1-D array is a no-op: there is no second axis to swap.
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
print("x:", x)
print("y:", y)
print("xT:", x.T)
print("yT:", y.T)
# Explicit 1x4 row vectors (note the double brackets) make .T meaningful.
x = np.array([[1, 2, 3, 4]])
y = np.array([[5, 6, 7, 8]])
print("x:", x)
print("y:", y)
print("xT:", x.T)
print("yT:", y.T)
# Row vector times column vector: a 1x1 matrix holding the dot product.
print("x:", x)
print("y:", y.T)
np.dot(x, y.T)
print("x:", x.T)
print("y:", y)
np.dot(y, x.T)
# Extract the scalar from the 1x1 result.
np.dot(y, x.T)[0][0]
# Three equivalent ways to transpose: reshape, .T, and .transpose().
x = np.array([[1, 2, 3, 4]])
print("x:", x)
print("xT:", np.reshape(x, (4, 1)))
print("xT:", x.T)
print("xT:", x.transpose())
# Outer product: column vector times row vector gives a 4x4 matrix.
x = np.array([[1, 2, 3, 4]])
y = np.array([[5, 6, 7, 8]])
x.T * y
np.outer(x, y)
# np.outer accepts plain 1-D arrays as well.
x = np.array([1, 2, 3, 4])
y = np.array([5, 6, 7, 8])
np.outer(x, y)
# Scalar multiplication of a matrix.
import numpy as np
A = np.array([[4, 5, 6],
              [7, 8, 9]])
A * 2
# Matrix-vector product.
A = np.array([[4, 5, 6],
              [7, 8, 9]])
x = np.array([1, 2, 3])
A.dot(x)
# Identity matrices sized from A.shape == (m rows, n columns).
import numpy as np
A = np.array([[0, 1, 2],
              [3, 4, 5],
              [6, 7, 8],
              [9, 10, 11]])
print("A:", A)
A.shape
np.identity(A.shape[1], dtype="int")
np.identity(A.shape[0], dtype="int")
# A @ I_n leaves A unchanged ...
n = A.shape[1]
I = np.array(np.identity(n, dtype="int"))
np.dot(A, I)
# ... and so does I_m @ A.
m = A.shape[0]
I = np.array(np.identity(m, dtype="int"))
np.dot(I, A)
# np.diag extracts the main diagonal; k shifts above (+) or below (-) it.
import numpy as np
A = np.array([[0, 1, 2, 3],
              [4, 5, 6, 7],
              [8, 9, 10, 11],
              [12, 13, 14, 15]])
np.diag(A)
np.diag(A, k=1)
np.diag(A, k=-1)
# arange + reshape build matrices; elementwise addition and subtraction.
import numpy as np
print("np.arange(9):", np.arange(9))
print("np.arange(9, 18):", np.arange(9, 18))
A = np.arange(9, 18).reshape((3, 3))
B = np.arange(9).reshape((3, 3))
print("A:", A)
print("B:", B)
A + B
A - B
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For the example, we will use a linear regression model.
Step3: Now we will write the Stan code, keeping in mind that it must be able to compute the pointwise log likelihood on excluded data, i.e., data that is not used to fit the model. Thus, the backbone of the code must look like the following
Step4: We have defined a dictionary sample_kwargs that will be passed to the SamplingWrapper in order to make sure that all
Step5: We will create a subclass of {class}~arviz.PyStan2SamplingWrapper. Therefore, instead of having to implement all functions required by {func}~arviz.reloo we only have to implement {func}~arviz.PyStan2SamplingWrapper.sel_observations. As explained in its docs, it takes one argument which is the indices of the data to be excluded and returns modified_observed_data which is passed as data to sampling function of PyStan model and excluded_observed_data which is used to retrieve the log likelihood of the excluded data (as passing the excluded data would make no sense).
Step6: In this case, the Leave-One-Out Cross Validation (LOO-CV) approximation using Pareto Smoothed Importance Sampling (PSIS) works for all observations, so we will use modify loo_orig in order to make {func}~arviz.reloo believe that PSIS failed for some observations. This will also serve as a validation of our wrapper, as the PSIS LOO-CV already returned the correct value.
Step7: We initialize our sampling wrapper
Step8: And eventually, we can use this wrapper to call az.reloo, and compare the results with the PSIS LOO-CV results.
|
<ASSISTANT_TASK:>
Python Code:
# Simulate data from a known linear model: y = b0 + b1*x + Normal(0, sigma).
import arviz as az
import pystan
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(26)
xdata = np.linspace(0, 50, 100)
b0, b1, sigma = -2, 1, 3
ydata = np.random.normal(loc=b1 * xdata + b0, scale=sigma)
plt.plot(xdata, ydata)
# Stan program for LOO refits: it fits on (x, y) only while also computing
# the pointwise log likelihood of held-out observations (x_ex, y_ex).
# BUG FIX: the assignment was missing its triple-quote delimiters (a
# SyntaxError as written); the string literal is restored here.
refit_lr_code = """
data {
    // Define data for fitting
    int<lower=0> N;
    vector[N] x;
    vector[N] y;
    // Define excluded data. It will not be used when fitting.
    int<lower=0> N_ex;
    vector[N_ex] x_ex;
    vector[N_ex] y_ex;
}
parameters {
  real b0;
  real b1;
  real<lower=0> sigma_e;
}
model {
  b0 ~ normal(0, 10);
  b1 ~ normal(0, 10);
  sigma_e ~ normal(0, 10);
  for (i in 1:N) {
    y[i] ~ normal(b0 + b1 * x[i], sigma_e); // use only data for fitting
  }
}
generated quantities {
    vector[N] log_lik;
    vector[N_ex] log_lik_ex;
    vector[N] y_hat;
    for (i in 1:N) {
        // calculate log likelihood and posterior predictive, there are
        // no restrictions on adding more generated quantities
        log_lik[i] = normal_lpdf(y[i] | b0 + b1 * x[i], sigma_e);
        y_hat[i] = normal_rng(b0 + b1 * x[i], sigma_e);
    }
    for (j in 1:N_ex) {
        // calculate the log likelihood of the excluded data given data_for_fitting
        log_lik_ex[j] = normal_lpdf(y_ex[j] | b0 + b1 * x_ex[j], sigma_e);
    }
}
"""
# Compile the Stan program and fit it on the full dataset
# (no excluded observations in this initial fit).
sm = pystan.StanModel(model_code=refit_lr_code)
data_dict = {
    "N": len(ydata),
    "y": ydata,
    "x": xdata,
    # No excluded data in initial fit
    "N_ex": 0,
    "x_ex": [],
    "y_ex": [],
}
# Reused later by the refit wrapper so every refit shares these settings.
sample_kwargs = {"iter": 1000, "chains": 4}
fit = sm.sampling(data=data_dict, **sample_kwargs)
# Convert to InferenceData; keep both in-sample and excluded log likelihoods.
dims = {"y": ["time"], "x": ["time"], "log_likelihood": ["time"], "y_hat": ["time"]}
idata_kwargs = {
    "posterior_predictive": ["y_hat"],
    "observed_data": "y",
    "constant_data": "x",
    "log_likelihood": ["log_lik", "log_lik_ex"],
    "dims": dims,
}
idata = az.from_pystan(posterior=fit, **idata_kwargs)
class LinearRegressionWrapper(az.PyStan2SamplingWrapper):
    """PyStan sampling wrapper used by ``az.reloo``.

    Implements ``sel_observations`` so refits can exclude the observations
    indicated by ``idx`` and evaluate their log likelihood through the
    ``log_lik_ex`` generated quantity of the Stan program.
    """

    def sel_observations(self, idx):
        """Split the stored data into fitting and excluded subsets.

        Returns the Stan data dict for the refit and the name of the
        generated quantity holding the excluded-data log likelihood.
        """
        x_all = self.idata_orig.constant_data.x.values
        y_all = self.idata_orig.observed_data.y.values
        # Boolean selector: True for observations kept in the fit.
        keep = np.full_like(x_all, True, dtype=bool)
        keep[idx] = False
        n_total = len(keep)
        n_excluded = np.sum(~keep)
        stan_data = {
            "N": n_total - n_excluded,
            "x": x_all[keep],
            "y": y_all[keep],
            "N_ex": n_excluded,
            "x_ex": x_all[~keep],
            "y_ex": y_all[~keep],
        }
        return stan_data, "log_lik_ex"
# PSIS-LOO on the original fit.
loo_orig = az.loo(idata, pointwise=True)
loo_orig
# Artificially mark some observations as PSIS failures (pareto_k > 0.7) so
# that az.reloo refits the model for them; doubles as a wrapper validation.
loo_orig.pareto_k[[13, 42, 56, 73]] = np.array([0.8, 1.2, 2.6, 0.9])
pystan_wrapper = LinearRegressionWrapper(
    sm, idata_orig=idata, sample_kwargs=sample_kwargs, idata_kwargs=idata_kwargs
)
# Exact LOO refits for the flagged observations; should match PSIS-LOO.
loo_relooed = az.reloo(pystan_wrapper, loo_orig=loo_orig)
loo_relooed
loo_orig
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So the naive effect is around 60% high. Now, let's build a causal model for this data.
Step2: Now that we have a model, we can try to identify the causal effect.
Step3: Identification works! We didn't actually need to do this yet, since it will happen internally with the do sampler, but it can't hurt to check that identification works before proceeding. Now, let's build the sampler.
Step4: Now, we can just sample from the interventional distribution! Since we set the keep_original_treatment flag to True, any treatment we pass here will be ignored. Here, we'll just pass None to acknowledge that we know we don't want to pass anything.
|
<ASSISTANT_TASK:>
Python Code:
# Make the local dowhy checkout importable (repository layout assumption).
import os, sys
sys.path.append(os.path.abspath("../../../"))
import numpy as np
import pandas as pd
import dowhy.api
# Simulate a confounded dataset: Z confounds both treatment D and outcome Y;
# the true effect of D on Y is 1.
N = 5000
z = np.random.uniform(size=N)
d = np.random.binomial(1., p=1./(1. + np.exp(-5. * z)))
y = 2. * z + d + 0.1 * np.random.normal(size=N)
df = pd.DataFrame({'Z': z, 'D': d, 'Y': y})
# Naive (confounded) difference in means between treated and untreated.
(df[df.D == 1].mean() - df[df.D == 0].mean())['Y']
from dowhy import CausalModel
causes = ['D']
outcomes = ['Y']
common_causes = ['Z']
model = CausalModel(df,
                    causes,
                    outcomes,
                    common_causes=common_causes)
identification = model.identify_effect(proceed_when_unidentifiable=True)
# Weighting-based do-sampler; variable types: 'b' = binary, 'c' = continuous.
from dowhy.do_samplers.weighting_sampler import WeightingSampler
sampler = WeightingSampler(df,
                           causal_model=model,
                           keep_original_treatment=True,
                           variable_types={'D': 'b', 'Z': 'c', 'Y': 'c'}
                          )
# keep_original_treatment=True, so the treatment passed here (None) is ignored.
interventional_df = sampler.do_sample(None)
# Deconfounded estimate: should be close to the true effect of 1.
(interventional_df[interventional_df.D == 1].mean() - interventional_df[interventional_df.D == 0].mean())['Y']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample Info
Step2: The read mapping output from STAR is a BAM file. We convert the BAM file to BED file. You can do this using bedtools bamtobed command, or using the wrapper jgem.bedtools.bam2bed
Step3: Preparing Inputs to the assembler
Step4: To generate junction files from SJ.out.tab, use function jgem.gtfgffbed.sjtab2sjbed
Step5: Let's check the content of generated SJBED file.
Step6: SJBED file format
Step7: Generated figures visualize the process of finding coverage threshold for single exons. (See ...)
Step8: Main outputs are files with .ex.txt.gz, .sj.txt.gz suffixes. These contain exons and junctions in tab separated format. BED files are also generated for viewing elements (genes,exons,junctions) with browsers like IGV (https
Step9: INFO outputs are as follows
Step10: We can also plot the detection sensitivity against coverages. We expect the detection rate is better at the bigger coverage.
Step11: These plots show percent of detected exons calculated in bins sorted by coverage (binsize 100). Numbers in inset indicate normalized area under the curve (AUC, which ranges from 0 to 1). Dashed lines in 5',3' and single exon panels correspond to 5b, 3b, sb category.
Step12: Averages shown inside are geometric averages. Overall length tends to be a bit longer. (Matching of internal exons and junctions always produce ratio of 1, so they are not shown.)
Step13: Blue dots are genes, green dots are binned averages, red lines are sigmoid function fit to blue dots and red dashes are 99% position of the sigmoidal fit.
Step14: Preparing for merge
Step15: We need to assign sjexpre column to the sampleinfo
Step16: To create bigwigs
Step17: To make aggreagated junction files
Step18: To make average bigwig coverage file (average of original individual bigwig coverages, different from bigwigs generated above by make_ex_bigwigs(), which are based on assembled exons)
Step19: Output files are
Step20: Bigwigs generated from assembly exons are separated into multi-exon positive strand, negative strand and single exons
Step21: Output files are
Step22: Looking at .genes.bed file with IGV, we find previously incomplete reconstructions are now merged into one complete structure
Step23: To visualize detection percentage
Step24: So we have much better detection of internal exons (~10% improvement) and slight improvements in the detection of 5'exons, 3'exons and junctions.
Step25: Annotating assembly
Step26: When overwrite is True, it overwrites the original sj,ex files. If False, it creats files in the output direcotry
Step27: Let's check the contents of the exon file
|
<ASSISTANT_TASK:>
Python Code:
# This is to change logging level of jupyter notebook
try:
from importlib import reload # for python 3
except:
pass
import logging
reload(logging)
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO, datefmt='%I:%M:%S')
# This is to show matplotlib output in the notebook
%matplotlib inline
import matplotlib.pylab as P
import pandas as PD
si = PD.read_excel('data/sampleinfo.xlsx')
print(si[['name','mapbed','sjtab']])
import os
BASEDIR = './data'
si['bed_path'] = [os.path.join(BASEDIR, "BED", x) for x in si['mapbed']]
si['sjtab_path'] = [os.path.join(BASEDIR, "SJ", x) for x in si['sjtab']]
print(si[['name','bed_path','sjtab_path']])
%%time
# first prepare destination
OUTDIR = './tutorial_out'
si['bw_path'] = [os.path.join(OUTDIR, "bigwig/{0}.bw".format(x)) for x in si['name']]
# then make coverages BIGWIG from BED (~ 2 min)
from jgem import utils as UT
from jgem import bedtools as BT
chromsizes = UT.chromsizes('mm10') # a file containing chromosome names and sizes
for bedfile, bwfile in si[['bed_path','bw_path']].values:
    BT.bed2bw(bedfile, chromsizes, bwfile)
ls ./tutorial_out/bigwig/*
%%time
# sjbed destination
si['sjbed_path'] = [os.path.join(OUTDIR, "sjbed/{0}.sj.bed.gz".format(x)) for x in si['name']]
# SJ.out.tab => sjbed
from jgem import gtfgffbed as GGB
for sjtab, sjbed in si[['sjtab_path','sjbed_path']].values:
    GGB.sjtab2sjbed(sjtab,sjbed)
ls ./tutorial_out/sjbed/*
# Inspect the last converted junction BED file.
sj = GGB.read_sj(sjbed)
print(sj.head())
# assemble 4 samples takes about ~10 min
from jgem import filenames as FN
from jgem import assembler as AS
# Run the assembler once per sample; outputs go to OUTDIR/assemblies.
for name,bwfile,sjfile in si[['name','bw_path','sjbed_path']].values:
    fn = FN.FileNames(name, bwfile, sjfile, outdir=OUTDIR+'/assemblies')
    p = AS.Assembler(fn,saveintermediates=False)
    p.assemble()
ls ./tutorial_out/assemblies/Fev_DR_m70_1623*
# Evaluate against Gencode.VM4 (~1 min)
from jgem import evaluate as EV
ens = {} # keep EvalNames objects
ems = {} # keep EvalMatch object
# Gencode.VM4
en_gen4 = EV.EvalNames('./data/REF/gencode.vM4.chr1', code='gen4', outdir=OUTDIR+'/eval')
# assign sample code
si['scode'] = [x.split('_')[-1].split('.')[0] for x in si['name']]
# For each sample
for scode, name, bwfile, sjfile in si[['scode','name','bw_path','sjbed_path']].values:
    print('process {0}...'.format(name))
    sjexbase = os.path.join(OUTDIR,'assemblies',name) # prefix to .ex.txt.gz and .sj.txt.gz files
    assemblycode = 'a'+str(scode) # assembly identifier
    datacode = 'd'+str(scode) # data identifier
    ens[name] = EV.EvalNames(sjexbase, code=assemblycode, outdir=OUTDIR+'/eval')
    ems[name] = EV.EvalMatch(en_gen4, ens[name], bwfile, sjfile, datacode=datacode, binsize=100)
    ems[name].calculate()
# EvalMatch.get_detection_percentages returns a dataframe containing detection percentages
dp = PD.DataFrame({n:ems[n].get_detection_percentages().stack() for n in ems})
# Element categories: i=internal, 5=5' exon, 3=3' exon, s=single exon, j=junction.
tgt = ['i','5','3','s','j']
# NOTE(review): .ix is long-deprecated pandas indexing; modern pandas uses .loc.
dp1 = dp.ix[[(x,'%detected 1') for x in tgt]][si['name'].values]
dp1.index = tgt
# Bar chart of per-sample detection percentage per element category.
ax = dp1.plot(kind='bar', legend=False, figsize=(6,4))
ax.set_ylabel('detection %')
ax.set_xlabel('exons/junctions')
ax.legend(loc='center left', bbox_to_anchor=(1,0.5))
# Overlay sensitivity-vs-coverage curves for all samples on shared axes.
colors = ['ro-','go-','bo-','mo-']
for i,name in enumerate(ems):
    em = ems[name]
    if i==0:
        axr = em.plot_sensitivity(color=colors[i])
    else:
        axr = em.plot_sensitivity(color=colors[i], axr=axr, ypos=i)
# Length-ratio and completeness plots for each sample individually.
for name in ems:
    ems[name].plot_ratio()
    axr = ems[name].plot_completeness(pw='fdat', ca='g.-', cf='r-', cd='b.', xlim=[0,15], xlimjcc=[0,40])
# Combined completeness comparison across samples.
colors = ['ro-','go-','bo-','mo-']
for i,name in enumerate(ems):
    em = ems[name]
    if i==0:
        axr = em.plot_completeness(pw='a', ca=colors[i], label=name, title='')
    else:
        axr = em.plot_completeness(pw='a', ca=colors[i], xlim=(0,15),xlimjcc=(0,40), axr=axr, label=name)
P.legend(bbox_to_anchor=(1,1),loc='upper left')
# Prepare inputs for merging the per-sample assemblies into one.
from jgem import merge as MG
fni = MG.MergeInputNames(sampleinfo=si, code='FevMI', outdir=OUTDIR+'/merge')
mi = MG.MergeInputs(fnobj=fni, genome='mm10', np=3, th_detected=0, th_maxcnt1=0)
# prefix to *.ex.txt.gz and *.sj.txt.gz files
si['sjexpre'] = [os.path.join(OUTDIR, 'assemblies', x) for x in si['name']]
%%time
mi.make_ex_bigwigs()
%%time
mi.make_sj_bed()
%%time
mi.aggregate_bigwigs()
ls ./tutorial_out/merge/*
# Run the merge assembly on the aggregated inputs.
fna = MG.MergeAssemblyNames('FevMA', outdir=OUTDIR+'/merge')
ma = MG.MergeAssemble(fni,fna,np=3,saveintermediates=False)
# Reload the modules and re-bind the class (development convenience).
reload(AS)
reload(MG)
ma.__class__ = MG.MergeAssemble
ma.assemble()
ls ./tutorial_out/merge/FevMA*
# Evaluate the merged assembly (FevMA) against Gencode.VM4 with each
# sample's data.
ems2 = {} # keep EvalMatch object
en_m = EV.EvalNames('./tutorial_out/merge/FevMA', code='FevMA', outdir='./tutorial_out/eval/')
# for each sample data generate EvalMatch between the merged assembly and the gencode4
for scode, name, bwfile, sjfile in si[['scode','name','bw_path','sjbed_path']].values:
    print('process {0}...'.format(name))
    datacode = 'd'+str(scode)
    ems2[name] = EV.EvalMatch(en_gen4, en_m, bwfile, sjfile, datacode=datacode, binsize=100)
    ems2[name].calculate()
# Append merged-assembly detection percentages next to the per-sample ones.
for n in ems2:
    dp[n+'_merged'] = ems2[n].get_detection_percentages().stack()
tgt = ['i','5','3','s','j']
cols = list(si['name'])+[x+'_merged' for x in si['name']]
dp1 = dp.ix[[(x,'%detected 1') for x in tgt]][cols]
dp1.index = tgt
ax = dp1.plot(kind='bar', legend=False, figsize=(10,4))
ax.set_ylabel('detection %')
ax.set_xlabel('exons/junctions')
ax.legend(loc='center left', bbox_to_anchor=(1,0.5))
# Completeness: single sample (blue) vs merged assembly (red), per sample.
for i,name in enumerate(ems):
    if i==0:
        axr = ems[name].plot_completeness(pw='a', ca='bo-', label=name, title='')
        axr = ems2[name].plot_completeness(pw='a', ca='ro-', label=name+'_merged', axr=axr)
    else:
        axr = ems[name].plot_completeness(pw='a', ca='bo-', label=name, axr=axr)
        axr = ems2[name].plot_completeness(pw='a', ca='ro-', label=name+'_merged', axr=axr,
                                           xlim=(0,15), xlimjcc=(0,40))
P.legend(bbox_to_anchor=(1,1),loc='upper left')
# Annotate the merged assembly against the Gencode reference.
from jgem import annotate as AN
g4sjexpre = './data/REF/gencode.vM4.chr1'
mergepre = './tutorial_out/merge/FevMA'
outdir = './tutorial_out/annotate'
cref = AN.ComparatorNames(g4sjexpre, 'gen4', outdir)
ctgt = AN.ComparatorNames(mergepre, 'FevMA', outdir)
cp = AN.Comparator(cref, ctgt, gnamecol='gene_name', gidxcol='gene_id')
# overwrite=False: write annotated sj/ex files into `outdir` instead of
# overwriting the originals.
cp.annotate(overwrite=False)
ls ./tutorial_out/annotate
# Inspect the annotated exon table.
ex = UT.read_pandas('./tutorial_out/annotate/FevMA.gen4.ex.txt.gz')
ex.columns
acols = ['chr','st','ed','strand','cat','gname','eknown_gen4','etcode_gen4',
        'intergenic_gen4', 'ex_as_ovl_gen4', 'gene_as_ovl_gen4',
        'gknown_gen4', 'gtcode_gen4','gen4_sym0']
print(ex.head(10)[acols])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Agenda
Step2: Clusterhypothese
Step3: Vorkommenshäufigkeit des Terms im Dokument
Step4: Trennschärfe des Terms
Step5: Bewertung des Vektorraummodells
|
<ASSISTANT_TASK:>
Python Code:
# This cell has to be run to prepare the Jupyter notebook
# The %... is a Jupyter thing, and is not part of the Python language.
# In this case we're just telling the plotting library to draw things on
# the notebook, instead of on a separate window.
%matplotlib inline
# See all the "as ..." constructs? They're just aliasing the package names.
# That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot().
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
from time import time
from math import log
# import the k-means algorithm
from sklearn.cluster import KMeans
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("poster")
# create sample data: ten documents as (term2, term1) frequency pairs
d = {'doc01': (1,2), 'doc02': (10,10),'doc03':(60,40),'doc04':(100,80),'doc05':(99,81),'doc06':(1,1),'doc07':(45,55),'doc08':(9,10),'doc09':(11,11),'doc10':(1,11)}
# create a data frame from our sample data
sampleDF=pd.DataFrame(data=d)
# transpose the data frame for later use (rows = documents, columns = terms)
sampleDF=sampleDF.transpose()
sampleDF.columns = ['term 2 (z.B. Retrieval)', 'term 1 (z.B. Information)']
sampleDF.head(10)
# plot the sample data
sampleDF.plot(x=0,y=1,kind='scatter',alpha=0.75,s=70) # we have to define explicitly which data is used for the x and y axes
# Toy term vectors: inner products serve as document-query similarity scores.
vq=[0,1,1,0]
vd1=[50,5,0,0]
vd2=[0,2,2,0]
# inner product is another name for the scalar/dot product
print("similarity between Vq and Vd1: %i"%np.inner(vq,vd1))
print("similarity between Vq and Vd2: %i"%np.inner(vq,vd2))
# with stopword in first dimension
vq=[100,0,0,1,2]
b=[99,1,2,0,0]
c=[80,0,0,2,2]
print("Before stopword elimination")
print("\tsimilarity between Vq and b: %i"%np.inner(vq,b))
# BUG FIX: the original referenced an undefined name `a`; the query vector is `vq`.
print("\tsimilarity between Vq and c: %i"%np.inner(vq,c))
# after stopword elimination
vq2=[0,0,1,2]
b2=[1,2,0,0]
c2=[0,0,2,2]
print("\nAfter stopword elimination")
print("\tsimilarity between Vq and b: %i"%np.inner(vq2,b2))
# BUG FIX: `a2` was undefined; use the stopword-free query vector `vq2`.
print("\tsimilarity between Vq and c: %i"%np.inner(vq2,c2))
# Contrast the raw quotient N/ni with the logarithmic idf log(N/ni):
# the log damps the weight given to very rare terms.
N=1000.0
ni=[]
quotient=[]
quotientLog=[]
for i in range(1,500):
    ni.append(i)
for i in ni:
    quotient.append(N/i)
    quotientLog.append(log(N/i))
plt.plot(ni,quotient,label="N/ni")
plt.plot(ni,quotientLog,label="log(N/ni)")
plt.axis([0, 500, 0, 100])
plt.ylabel('Result (limited to 100)')
plt.xlabel("ni")
plt.title("Sample results of idf, N=1000")
plt.legend()
# define the number of clusters to be found
true_k=3
# initialize the k-means algorithm
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1)
# apply the algorithm on the data
km.fit(sampleDF)
# add the detected clusters as a new column to the original data frame
sampleDF['cluster']=km.labels_
# NOTE(review): DataFrame.sort() is pandas<0.20 API; modern pandas uses sort_values().
sampleDF=sampleDF.sort('cluster')
sampleDF.head(10)
# cluster centroids as a data frame (one row per cluster)
clusterCenters=pd.DataFrame(data=km.cluster_centers_)
clusterCenters.head()
# plot the sample data and save the plot in the variable "ax"
ax=sampleDF.plot(x=0,y=1,kind='scatter',alpha=0.75,s=70)
# plot the centroids in red
plt.scatter(x=clusterCenters[0],y=clusterCenters[1],color='red')
# next, define the circles' centers surrounding the clusters for a better visualization result
cirlePos1=(clusterCenters[0][0],clusterCenters[1][0])
cirlePos2=(clusterCenters[0][1],clusterCenters[1][1])
cirlePos3=(clusterCenters[0][2],clusterCenters[1][2])
# create the unfilled circles with a radius of 20 (this value is arbitrary)
circ1=plt.Circle(cirlePos1,20,color='r',fill=False)
circ2=plt.Circle(cirlePos2,20,color='r',fill=False)
circ3=plt.Circle(cirlePos3,20,color='r',fill=False)
# add the circles to your plot
ax.add_patch(circ1)
ax.add_patch(circ2)
ax.add_patch(circ3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With our setup in place, let's do a few standard imports to get the ball rolling.
Step2: The ntext step we want to do is load our dataset. We're using a small dataset we've prepared that's pulled out of the larger GDB benchmarks. The dataset contains the atomization energies for 1K small molecules.
Step3: We now need a way to transform molecules that is useful for prediction of atomization energy. This representation draws on foundational work [1] that represents a molecule's 3D electrostatic structure as a 2D matrix $C$ of distances scaled by charges, where the $ij$-th element is represented by the following charge structure.
Step4: Note that in this case, we set the maximum number of atoms to $N = 23$. Let's now load our dataset file into DeepChem. As in the previous tutorials, we use a Loader class, in particular dc.data.SDFLoader to load our .sdf file into DeepChem. The following snippet shows how we do this
Step5: For the purposes of this tutorial, we're going to do a random split of the dataset into training, validation, and test. In general, this split is weak and will considerably overestimate the accuracy of our models, but for now in this simple tutorial isn't a bad place to get started.
Step6: One issue that Coulomb matrix featurizations have is that the range of entries in the matrix $C$ can be large. The charge $q_1q_2/r^2$ term can range very widely. In general, a wide range of values for inputs can throw off learning for the neural network. For this, a common fix is to normalize the input values so that they fall into a more standard range. Recall that the normalization transform applies to each feature $X_i$ of datapoint $X$
Step7: Now that we have the data cleanly transformed, let's do some simple machine learning. We'll start by constructing a random forest on top of the data. We'll use DeepChem's hyperparameter tuning module to do this.
Step8: Let's build one more model, a kernel ridge regression, on top of this raw data.
|
<ASSISTANT_TASK:>
Python Code:
# Colab setup: pin TF 1.x and install DeepChem 2.3.0 via the installer script.
%tensorflow_version 1.x
!curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py
import deepchem_installer
%time deepchem_installer.install(version='2.3.0')
import os
import unittest
import numpy as np
import deepchem as dc
import numpy.random
from deepchem.utils.evaluate import Evaluator
from sklearn.ensemble import RandomForestRegressor
from sklearn.kernel_ridge import KernelRidge
# Load the GDB-1k SDF and featurize each molecule with the eigenvalues of
# its Coulomb matrix (max 23 atoms, hydrogens kept).
tasks = ["atomization_energy"]
dataset_file = "../../datasets/gdb1k.sdf"
smiles_field = "smiles"
mol_field = "mol"
featurizer = dc.feat.CoulombMatrixEig(23, remove_hydrogens=False)
loader = dc.data.SDFLoader(
      tasks=["atomization_energy"], smiles_field="smiles",
      featurizer=featurizer,
      mol_field="mol")
dataset = loader.featurize(dataset_file)
# Random train/valid/test split (weak split; tends to overestimate accuracy).
random_splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = random_splitter.train_valid_test_split(dataset)
# Normalize features and targets using statistics of the training set.
transformers = [
    dc.trans.NormalizationTransformer(transform_X=True, dataset=train_dataset),
    dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
# NOTE(review): the transformed dataset is bound only to the loop variable,
# so train/valid/test names still refer to the untransformed objects — confirm intent.
for dataset in [train_dataset, valid_dataset, test_dataset]:
  for transformer in transformers:
    dataset = transformer.transform(dataset)
def rf_model_builder(model_params, model_dir):
  """Build a DeepChem SklearnModel wrapping a RandomForestRegressor.

  model_params: dict of keyword arguments forwarded to RandomForestRegressor.
  model_dir: directory used by the wrapper to persist the model.
  """
  return dc.models.SklearnModel(RandomForestRegressor(**model_params), model_dir)
# Grid search over the random forest, scored by validation-set MAE.
params_dict = {
    "n_estimators": [10, 100],
    "max_features": ["auto", "sqrt", "log2", None],
}
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error)
optimizer = dc.hyper.HyperparamOpt(rf_model_builder)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
    params_dict, train_dataset, valid_dataset, transformers,
    metric=metric)
def krr_model_builder(model_params, model_dir):
    """Build a DeepChem-wrapped scikit-learn kernel ridge regressor.

    model_params -- keyword arguments forwarded to KernelRidge
    model_dir -- directory where the wrapped model is stored
    """
    ridge = KernelRidge(**model_params)
    return dc.models.SklearnModel(ridge, model_dir)
# Single-point "grid" for kernel ridge regression with a Laplacian kernel;
# same MAE metric and validation split as the random-forest search.
params_dict = {
    "kernel": ["laplacian"],
    "alpha": [0.0001],
    "gamma": [0.0001]
    }
metric = dc.metrics.Metric(dc.metrics.mean_absolute_error)
optimizer = dc.hyper.HyperparamOpt(krr_model_builder)
best_krr, best_krr_hyperparams, all_krr_results = optimizer.hyperparam_search(
    params_dict, train_dataset, valid_dataset, transformers,
    metric=metric)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up data paths and loading raw data (skip some data for speed)
Step2: Since downsampling reduces the timing precision of events, we recommend
Step3: When resampling epochs is unwanted or impossible, for example when the data
Step4: Because resampling also affects the stim channels, some trigger onsets might
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
# Load 120 s of the MNE sample dataset and epoch it around event id 2.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data()
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.copy().resample(100, npad='auto')
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))
n_samples_to_plot = int(0.5 * epochs.info['sfreq'])  # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
         epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
         epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
         '-o', color='red')
plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()
# Resample to 300 Hz
raw_resampled_300 = raw.copy().resample(300, npad='auto')
# Resampling the raw data also resamples the stim channel, which can merge
# or drop trigger onsets — compare the event counts before and after.
print('Number of events before resampling:', len(mne.find_events(raw)))
# Resample to 100 Hz (suppress the warning that would be emitted)
raw_resampled_100 = raw.copy().resample(100, npad='auto', verbose='error')
print('Number of events after resampling:',
      len(mne.find_events(raw_resampled_100)))
# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.copy().resample(
    100, npad='auto', events=events)
print('Number of events after resampling:', len(events_resampled))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Using interact for animation with data
Step3: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. To set this up, we create the following variables and arrays
Step4: Compute a 2d NumPy array called phi
Step6: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful.
Step7: Use interact to animate the plot_soliton_data function versus time.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def soliton(x, t, c, a):
    """Return phi(x, t) for a soliton wave with constants c and a.

    phi(x, t) = 0.5 * sech^2(sqrt(c) * (x - c*t - a) / 2), a traveling
    wave moving at speed c with initial offset a.

    x -- position(s), scalar or NumPy array
    t -- time, scalar
    c -- wave speed (must be >= 0 for a real sqrt)
    a -- spatial offset of the wave peak at t = 0
    """
    # BUG FIX: the docstring lost its triple quotes in conversion, leaving a
    # bare prose line that made the function a SyntaxError; restored here.
    phi = 0.5/(np.cosh((c**(0.5))*(x-c*t-a)/2)**2)
    return phi
assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5]))
# Time grid: 100 samples on [0, 10].
tmin = 0.0
tmax = 10.0
tpoints = 100
t = np.linspace(tmin, tmax, tpoints)
# Spatial grid: 200 samples on [0, 10].
xmin = 0.0
xmax = 10.0
xpoints = 200
x = np.linspace(xmin, xmax, xpoints)
c = 1.0
a = 0.0
# phi[i, j] = soliton(x[i], t[j]); filled pointwise (could equivalently be
# computed in one broadcasting call, soliton(x[:, None], t[None, :], c, a)).
phi = np.empty((xpoints, tpoints), dtype=float)
for i in range(len(x)):
    for j in range(len(t)):
        phi[i,j] = soliton(x[i], t[j], c, a)
assert phi.shape==(xpoints, tpoints)
assert phi.ndim==2
assert phi.dtype==np.dtype(float)
assert phi[0,0]==soliton(x[0],t[0],c,a)
def plot_soliton_data(i=0):
    """Plot the soliton data at t[i] versus x.

    i -- index into the precomputed time grid (column of the global phi)
    """
    # BUG FIX: restored the triple quotes around the docstring, which were
    # stripped in conversion and left an invalid bare expression line.
    plt.plot(phi[:,i])
    plt.tick_params(direction='out')
# Static plot of the initial frame, then an interactive slider over time
# indices 0..99 (step 0.1 gives a fine-grained FloatSlider).
plot_soliton_data(0)
assert True # leave this for grading the plot_soliton_data function
interact(plot_soliton_data, i=(0,99,0.1))
assert True # leave this for grading the interact with plot_soliton_data cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here, I want to find the percentage of people that survived based on their class
Step2: As expected, most of the upper class survived, while the lower class had less odds to survive.
|
<ASSISTANT_TASK:>
Python Code:
# imports
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql import functions as F
from graphframes import *
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# make graphs beautiful
plt.style.use('ggplot')
plt.rc('font', family='sans-serif')
%matplotlib inline
sqlContext = SQLContext(sc)
train = pd.read_csv('train.csv')
train.head()
# Scatter of passenger class versus age.
sorted_by_age = train.sort_values(['Age'])
plt.plot(sorted_by_age.Age, sorted_by_age.Pclass, 'bo')
plt.title('Class per Age')
plt.ylim([0, 5])
plt.show()
# drop useless columns
drop_elements = ['PassengerId', 'Name']
train = train.drop(drop_elements, axis = 1)
train.head()
# Survival fraction per class: survivors per class / passengers per class.
total_elements = train.groupby(train.Pclass).Survived.count()
total_survived = train[train.Survived == 1].groupby(train.Pclass).Survived.count()
(total_survived / total_elements).plot.bar()
# Mean fare per class.
avg_prices = train.groupby(train.Pclass).Fare.mean()
avg_prices.plot.bar()
# NOTE(review): SQLContext was already created above; this re-creates it.
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
df = spark.read.csv('train.csv', header='true', inferSchema='true')
df.limit(5).toPandas()
df.dtypes
# Survival rates by sex computed three equivalent ways in Spark.
df.filter((df.Survived == 1) & (df.Sex == 'male')).count() / df.filter(df.Sex == 'male').count()
df.filter((df.Survived == 1) & (df.Sex == 'female')).count() / df.filter(df.Sex == 'female').count()
total_by_sex = df.groupBy(df.Sex).count().withColumnRenamed('count', 'total')
survived_by_sex = df.filter(df.Survived == 1).groupBy(df.Sex).count()
survived_by_sex.join(total_by_sex, survived_by_sex.Sex == total_by_sex.Sex, 'outer')\
    .rdd.map(lambda x: (x.Sex, x['count'] / x.total)).collect()
# best way to do this calculation
df[["Sex", "Survived"]].groupby('Sex').mean().show()
# Age histograms faceted by survival, then by class x survival.
g = sns.FacetGrid(train, col='Survived')
g.map(plt.hist, 'Age', bins=40)
grid = sns.FacetGrid(train, col='Pclass', row='Survived')
grid.map(plt.hist, 'Age', bins=40)
grid.add_legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Just check that analytical solution coincides with the solution of ODE for the variance
Step2: Test of different SME solvers
Step3: Plotting the figure - Constant case
Step4: Deterministic part time dependent
Step5: Both d1 and d2 time-dependent
Step6: Multiple sc_ops, time-dependent
Step7: Versions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_formats = ['svg']
from qutip import *
from qutip.ui.progressbar import BaseProgressBar
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
y_sse = None
import time
def arccoth(x):
    """Inverse hyperbolic cotangent: 0.5 * ln((1 + x) / (x - 1)).

    Valid for |x| > 1; accepts scalars or NumPy arrays.
    """
    ratio = (1. + x) / (x - 1.)
    return 0.5 * np.log(ratio)
############ parameters #############
th = 0.1 # Interaction parameter
alpha = np.cos(th)
beta = np.sin(th)
gamma = 1.  # constant damping rate (time-dependent variant below)
def gammaf(t):
    """Time-dependent damping rate: gamma(t) = 1/4 + t/12 + t^2/6."""
    quadratic = t*t/6
    return 0.25 + t/12 + quadratic
def f_gamma(t, *args):
    """Square root of the time-dependent rate 1/4 + t/12 + t^2/6.

    Extra positional args are accepted and ignored (time-dependent
    coefficient call signature).
    """
    rate = 0.25 + t/12 + t*t/6
    return rate ** 0.5
################# Solution of the differential equation for the variance Vc ####################
T = 6.
N_store = 200
tlist = np.linspace(0,T,N_store)
y0 = 0.5
def func(y, t):
return -(gammaf(t) - alpha*beta)*y - 2*alpha*alpha*y*y + 0.5*gammaf(t)
y_td = odeint(func, y0, tlist)
def func(y, t):
return -(gamma - alpha*beta)*y - 2*alpha*alpha*y*y + 0.5*gamma
y = odeint(func, y0, tlist)
############ Exact steady state solution for Vc #########################
Vc = (alpha*beta - gamma + np.sqrt((gamma-alpha*beta)**2 + 4*gamma*alpha**2))/(4*alpha**2)
#### Analytic solution
A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5
B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A)
y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2)
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
ax.set_title('Variance as a function of time')
ax.plot(tlist,y)
ax.plot(tlist,Vc*np.ones_like(tlist))
ax.plot(tlist,y_an)
ax.set_ylim(0,0.5)
ax2.set_title('Deviation of odeint from analytic solution')
ax2.set_xlabel('t')
ax2.set_ylabel(r'$\epsilon$')
ax2.plot(tlist,y_an - y.T[0]);
####################### Model ###########################
N = 30 # number of Fock states
Id = qeye(N)
a = destroy(N)
s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag())
x = (a + a.dag())/np.sqrt(2)
H = Id
c_op = [np.sqrt(gamma)*a]
c_op_td = [[a,f_gamma]]
sc_op = [s]
e_op = [x, x*x]
rho0 = fock_dm(N,0) # initial vacuum state
#sc_len=1 # one stochastic operator
############## time steps and trajectories ###################
ntraj = 1 #100 # number of trajectories
T = 6. # final time
N_store = 200 # number of time steps for which we save the expectation values/density matrix
tlist = np.linspace(0,T,N_store)
ddt = (tlist[1]-tlist[0])
Nsubs = list((13*np.logspace(0,1,10)).astype(np.int))
stepsizes = [ddt/j for j in Nsubs] # step size is doubled after each evaluation
Nt = len(Nsubs) # number of step sizes that we compare
Nsubmax = Nsubs[-1] # Number of intervals for the smallest step size;
dtmin = (tlist[1]-tlist[0])/(Nsubmax)
# Analetical solution not available:
# Compute the evolution with the best solver and very small step size and use it as the reference
sol = ssesolve(H, fock(N), tlist, [sc_op[0]+c_op[0]], e_op, nsubsteps=2000, method="homodyne",solver="taylor2.0")
y_sse = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
ntraj = 1  # stochastic trajectories averaged per step size

def run_sse(**kwargs):
    """Sweep the substep counts in Nsubs and return the mean deviation.

    For each substep count, solves the SSE with the solver options given
    in **kwargs and integrates |y_sse - Var(x)| over the time grid, using
    the high-accuracy y_sse reference computed above.  Returns an array
    of length Nt (one deviation per step size).
    """
    epsilon = np.zeros(Nt)
    sq_acc = np.zeros(Nt)  # accumulates epsilon_j**2 for the variance estimate
    print(kwargs)
    for jj in range(0, Nt):
        for j in range(0, ntraj):
            Nsub = Nsubs[jj]
            sol = ssesolve(H, fock(N), tlist, [sc_op[0]+c_op[0]], e_op, nsubsteps=Nsub, **kwargs)
            epsilon_j = 1/T * np.sum(np.abs(y_sse - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
            epsilon[jj] += epsilon_j
            # BUG FIX: original accumulated epsilon_j (not squared), which made
            # the E[eps^2] term of the variance formula below incorrect.
            sq_acc[jj] += epsilon_j**2
    epsilon /= ntraj
    # Standard error of the mean: sqrt((E[eps^2] - E[eps]^2) / ntraj).
    # Computed for diagnostics; not part of the return value (as before).
    std = np.sqrt(1/ntraj * (sq_acc/ntraj - epsilon**2))
    return epsilon
def get_stats(**kw):
    """Run the constant-case sweep and summarize it.

    Returns (deviation array, solver label, fitted log-log slope, wall time).
    The slope estimates the solver's convergence order in the step size.
    """
    t_start = time.time()
    errors = run_sse(**kw)
    label = str(kw["solver"])
    slope = np.polyfit(np.log(stepsizes), np.log(errors), 1)[0]
    return errors, label, slope, time.time() - t_start
# Convergence sweep over all available solvers for the constant case.
stats_cte = []
stats_cte.append(get_stats(solver='euler-maruyama'))
stats_cte.append(get_stats(solver='platen'))
stats_cte.append(get_stats(solver='pred-corr'))
stats_cte.append(get_stats(solver='milstein'))
stats_cte.append(get_stats(solver='milstein-imp', tol=1e-9))
stats_cte.append(get_stats(solver='pred-corr-2'))
stats_cte.append(get_stats(solver='explicit1.5'))
stats_cte.append(get_stats(solver="taylor1.5"))
stats_cte.append(get_stats(solver="taylor1.5-imp", tol=1e-9))
stats_cte.append(get_stats(solver="taylor2.0"))
stats_cte.append(get_stats(solver="taylor2.0", noiseDepth=500))
# Log-log plot of deviation vs step size with reference slopes for orders
# 1/2, 1, 3/2 and 2; legend labels carry the fitted slope per solver.
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_cte):
    ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.003*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.01*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.001*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.01*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**2.0, label="$\propto\Delta t^{2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
# Time-dependent deterministic part: reference solution for the next sweep.
def H_f(t,args):
    """Time-dependent coefficient for the deterministic term."""
    return 0.125+t/12+t*t/72
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op, e_op,
               nsubsteps=2500, method="homodyne",solver="taylor2.0")
y_sse_td = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_td)
ntraj = 1  # stochastic trajectories averaged per step size

def run_sse_td(**kwargs):
    """Step-size sweep for the time-dependent deterministic-part case.

    Same structure as run_sse, but with the time-dependent Hamiltonian
    coefficient H_f and the y_sse_td reference.  Returns an array of
    length Nt with the mean deviation per step size.
    """
    epsilon = np.zeros(Nt)
    sq_acc = np.zeros(Nt)  # accumulates epsilon_j**2 for the variance estimate
    print(kwargs)
    for jj in range(0, Nt):
        for j in range(0, ntraj):
            Nsub = Nsubs[jj]
            sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op, e_op, nsubsteps=Nsub, **kwargs)
            epsilon_j = 1/T * np.sum(np.abs(y_sse_td - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
            epsilon[jj] += epsilon_j
            # BUG FIX: original accumulated epsilon_j (not squared).
            sq_acc[jj] += epsilon_j**2
    epsilon /= ntraj
    # Standard error of the mean; diagnostic only, not returned (as before).
    std = np.sqrt(1/ntraj * (sq_acc/ntraj - epsilon**2))
    return epsilon
def get_stats(**kw):
    """Run the time-dependent sweep and summarize it.

    Returns (deviation array, solver label, fitted log-log slope).
    """
    errors = run_sse_td(**kw)
    label = str(kw["solver"])
    slope = np.polyfit(np.log(stepsizes), np.log(errors), 1)[0]
    return errors, label, slope
# Convergence sweep for the time-dependent deterministic-part case.
stats_td = []
stats_td.append(get_stats(solver='euler-maruyama'))
stats_td.append(get_stats(solver='platen'))
stats_td.append(get_stats(solver='pred-corr'))
stats_td.append(get_stats(solver='milstein'))
stats_td.append(get_stats(solver='milstein-imp'))
stats_td.append(get_stats(solver='pred-corr-2'))
stats_td.append(get_stats(solver='explicit1.5'))
stats_td.append(get_stats(solver="taylor1.5"))
stats_td.append(get_stats(solver="taylor1.5-imp", tol=1e-9))
stats_td.append(get_stats(solver="taylor2.0"))
stats_td.append(get_stats(solver="taylor2.0", noiseDepth=500))
# Log-log plot with reference slopes for orders 1/2, 1, 3/2, 2.
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_td):
    ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.loglog(stepsizes, 0.5*np.array(stepsizes)**2.0, label="$\propto\Delta t^{2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
# Both d1 and d2 time-dependent: reference solution for the next sweep.
def H_f(t,args):
    """Time-dependent coefficient for the deterministic term."""
    return 0.125+t/12+t*t/72
def H_bf(t,args):
    """Time-dependent coefficient for the stochastic collapse operator."""
    return 0.125+t/10+t*t/108
sc_op_td = [[sc_op[0],H_bf]]
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op_td, e_op,
               nsubsteps=2000, method="homodyne",solver="taylor15")
y_sse_btd = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_btd)
ntraj = 1  # stochastic trajectories averaged per step size

def run_sse_td(**kwargs):
    """Step-size sweep with both d1 and d2 time-dependent.

    Uses the time-dependent stochastic operator list sc_op_td and the
    y_sse_btd reference.  Returns an array of length Nt with the mean
    deviation per step size.
    """
    epsilon = np.zeros(Nt)
    sq_acc = np.zeros(Nt)  # accumulates epsilon_j**2 for the variance estimate
    print(kwargs)
    for jj in range(0, Nt):
        for j in range(0, ntraj):
            Nsub = Nsubs[jj]
            sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op_td, e_op, nsubsteps=Nsub, **kwargs)
            epsilon_j = 1/T * np.sum(np.abs(y_sse_btd - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
            epsilon[jj] += epsilon_j
            # BUG FIX: original accumulated epsilon_j (not squared).
            sq_acc[jj] += epsilon_j**2
    epsilon /= ntraj
    # Standard error of the mean; diagnostic only, not returned (as before).
    std = np.sqrt(1/ntraj * (sq_acc/ntraj - epsilon**2))
    return epsilon
def get_stats_b(**kw):
    """Run the d1+d2 time-dependent sweep and summarize it.

    Returns (deviation array, solver label, fitted log-log slope).
    """
    errors = run_sse_td(**kw)
    label = str(kw["solver"])
    slope = np.polyfit(np.log(stepsizes), np.log(errors), 1)[0]
    return errors, label, slope
# Convergence sweep with both deterministic and stochastic parts
# time-dependent (no order-2 solvers in this case).
stats_d2_td = []
stats_d2_td.append(get_stats_b(solver='euler-maruyama'))
stats_d2_td.append(get_stats_b(solver='platen'))
stats_d2_td.append(get_stats_b(solver='pred-corr'))
stats_d2_td.append(get_stats_b(solver='milstein'))
stats_d2_td.append(get_stats_b(solver='milstein-imp'))
stats_d2_td.append(get_stats_b(solver='pred-corr-2'))
stats_d2_td.append(get_stats_b(solver='explicit1.5'))
stats_d2_td.append(get_stats_b(solver="taylor1.5"))
stats_d2_td.append(get_stats_b(solver="taylor1.5-imp", tol=1e-9))
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_d2_td):
    ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
# Multiple stochastic operators, time-dependent: reference for the last sweep.
def H_f(t,args):
    """Time-dependent coefficient (note: t^2/36 here, unlike earlier cells)."""
    return 0.125+t/12+t*t/36
def H_bf(t,args):
    """Time-dependent coefficient for the second stochastic operator."""
    return 0.125+t/10+t*t/108
sc_op_td = [[sc_op[0]],[sc_op[0],H_bf],[sc_op[0],H_f]]
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist/3, sc_op_td, e_op,
               nsubsteps=2000, method="homodyne",solver="taylor15")
y_sse_multi = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_multi)
ntraj = 1  # stochastic trajectories averaged per step size

def run_sss_multi(**kwargs):
    """Step-size sweep with multiple time-dependent stochastic operators.

    Uses a shortened time grid (tlist/3) and the y_sse_multi reference.
    Returns an array of length Nt with the mean deviation per step size.
    """
    epsilon = np.zeros(Nt)
    sq_acc = np.zeros(Nt)  # accumulates epsilon_j**2 for the variance estimate
    print(kwargs)
    for jj in range(0, Nt):
        for j in range(0, ntraj):
            Nsub = Nsubs[jj]
            sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist/3, sc_op_td, e_op, nsubsteps=Nsub, **kwargs)
            epsilon_j = 1/T * np.sum(np.abs(y_sse_multi - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
            epsilon[jj] += epsilon_j
            # BUG FIX: original accumulated epsilon_j (not squared).
            sq_acc[jj] += epsilon_j**2
    epsilon /= ntraj
    # Standard error of the mean; diagnostic only, not returned (as before).
    std = np.sqrt(1/ntraj * (sq_acc/ntraj - epsilon**2))
    return epsilon
def get_stats_multi(**kw):
    """Run the multi-operator sweep and summarize it.

    Returns (deviation array, solver label, fitted log-log slope).
    """
    errors = run_sss_multi(**kw)
    label = str(kw["solver"])
    slope = np.polyfit(np.log(stepsizes), np.log(errors), 1)[0]
    return (errors, label, slope)
# Convergence sweep for the multi-operator time-dependent case.
stats_multi = []
stats_multi.append(get_stats_multi(solver='euler-maruyama'))
stats_multi.append(get_stats_multi(solver="platen"))
stats_multi.append(get_stats_multi(solver='pred-corr'))
stats_multi.append(get_stats_multi(solver='milstein'))
stats_multi.append(get_stats_multi(solver='milstein-imp'))
stats_multi.append(get_stats_multi(solver='pred-corr-2'))
stats_multi.append(get_stats_multi(solver='explicit1.5'))
stats_multi.append(get_stats_multi(solver="taylor1.5"))
stats_multi.append(get_stats_multi(solver="taylor1.5-imp", tol=1e-9))
fig = plt.figure()
ax = plt.subplot(111)
# NOTE(review): unlike earlier cells, the loop below uses the fixed marker
# 'o' for every series, so `mark` is unused here.
mark = "o*vspx+^<>Dd"
for run in stats_multi:
    ax.loglog(stepsizes, run[0], 'o', label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
# Report the package versions used for this run.
from qutip.ipynbtools import version_table
version_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1 - The problem of very deep neural networks
Step4: Expected Output
Step6: Expected Output
Step7: Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running model.fit(...) below.
Step8: As seen in the Keras Tutorial Notebook, prior training a model, you need to configure the learning process by compiling the model.
Step9: The model is now ready to be trained. The only thing you need is a dataset.
Step10: Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch.
Step11: Expected Output
Step12: Expected Output
Step13: ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to perform state-of-the-art accuracy.
Step14: You can also print a summary of your model by running the following code.
Step15: Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
# GRADED FUNCTION: identity_block
def identity_block(X, f, filters, stage, block):
    """Implementation of the identity block as defined in Figure 3.

    The shortcut path carries the input unchanged, so the third filter
    count F3 must equal the input channel count for the final Add.

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # BUG FIX: the docstring above lost its triple quotes during conversion,
    # leaving bare prose lines that made this function a SyntaxError.
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2, F3 = filters
    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X
    # First component of main path: 1x1 conv -> batch norm -> ReLU
    X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    # Second component of main path: f x f conv ('same' padding) -> batch norm -> ReLU
    X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    # Third component of main path: 1x1 conv -> batch norm (no activation yet)
    X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X,X_shortcut])
    X = Activation('relu')(X)
    return X
# Smoke test for identity_block on a random (3, 4, 4, 6) input; prints one
# output pixel for comparison against the expected value.
tf.reset_default_graph()
with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # learning_phase 0 = inference mode (batch norm uses moving statistics)
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, stage, block, s = 2):
    """Implementation of the convolutional block as defined in Figure 4.

    Unlike the identity block, the shortcut path has its own 1x1 CONV +
    batch norm so the spatial size and channel count can change.

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    s -- Integer, specifying the stride to be used

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """
    # BUG FIX: restored the triple quotes around the docstring, which were
    # stripped in conversion and made this function a SyntaxError.
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Retrieve Filters
    F1, F2, F3 = filters
    # Save the input value
    X_shortcut = X
    ##### MAIN PATH #####
    # First component of main path: 1x1 conv with stride s (downsamples)
    X = Conv2D(F1, (1, 1), strides = (s,s),padding='valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    # Second component of main path: f x f conv ('same' padding)
    X = Conv2D(F2, (f, f), strides = (1,1),padding='same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    # Third component of main path: 1x1 conv (no activation yet)
    X = Conv2D(F3, (1, 1), strides = (1,1),padding='valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
    ##### SHORTCUT PATH ##### (matches the main path's stride and channels)
    X_shortcut = Conv2D(F3, (1, 1), strides = (s,s),padding='valid', name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X,X_shortcut])
    X = Activation('relu')(X)
    return X
# Smoke test for convolutional_block on a random (3, 4, 4, 6) input.
tf.reset_default_graph()
with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # learning_phase 0 = inference mode
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
# GRADED FUNCTION: ResNet50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
    """Implementation of the popular ResNet50 with the following architecture:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """
    # BUG FIX: restored the triple quotes around the docstring, which were
    # stripped in conversion and made this function a SyntaxError.
    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)
    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)
    # Stage 1: 7x7 conv stride 2, batch norm, ReLU, 3x3 max pool
    X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)
    # Stage 2 (stride 1: no spatial downsampling here)
    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
    X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3
    X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2)
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
    X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4
    X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
    X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')
    # Stage 5
    X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
    X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
    # AVGPOOL
    X = AveragePooling2D((2, 2), padding='same')(X)
    # output layer: flatten + fully-connected softmax over `classes`
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
    # Create model
    model = Model(inputs = X_input, outputs = X, name='ResNet50')
    return model
# Build and compile the model for 64x64 RGB inputs and 6 classes.
model = ResNet50(input_shape = (64, 64, 3), classes = 6)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Load the SIGNS dataset (helper from resnets_utils).
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# Short training run (2 epochs) and evaluation on the test set.
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# Replace the briefly-trained model with pre-trained weights and re-evaluate.
model = load_model('ResNet50.h5')
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
# Predict on a single custom image, resized to the network's input size.
img_path = 'images/image222.jpg'
img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
my_image = scipy.misc.imread(img_path)
imshow(my_image)
print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ")
print(model.predict(x))
# Architecture summary and diagram export.
model.summary()
plot_model(model, to_file='model.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: basic photometric addtions to the catalogs
Step2: Basic residual comparisons
Step3: Inspect objects with failed residuals
Step4: ???? Why are so many of the residuals NaN/infs ???
Step5: Inspect objects with high residuals vs. flux
Step6: X-matching to SDSS
Step8: now inspect those that are in the upper-left of that plot
|
<ASSISTANT_TASK:>
Python Code:
# Locate the SAGA host "AnaK" and download/stack the DECaLS DR3 tractor
# catalogs for its bricks.
hsts = hosts.get_saga_hosts_from_google(clientsecretjsonorfn='client_secrets.json', useobservingsummary=False)
anak = [h for h in hsts if h.name=='AnaK']
assert len(anak)==1
anak = anak[0]
# Brick names for this host, one per non-empty line of the file.
bricknames = []
with open('decals_dr3/anakbricks') as f:
    for l in f:
        l = l.strip()
        if l != '':
            bricknames.append(l)
print(bricknames)
base_url = 'http://portal.nersc.gov/project/cosmo/data/legacysurvey/dr3/tractor/{first3}/tractor-{brickname}.fits'
# Download each brick's catalog unless it is already cached locally.
for brickname in ProgressBar(bricknames, ipython_widget=True):
    url = base_url.format(brickname=brickname, first3=brickname[:3])
    target = os.path.join('decals_dr3/catalogs/', url.split('/')[-1])
    if not os.path.isfile(target):
        !wget $url -O $target
    else:
        print(target, 'already exists, not downloading')
bricks = Table.read('decals_dr3/survey-bricks.fits.gz')
bricksdr3 = Table.read('decals_dr3/survey-bricks-dr3.fits.gz')
catalog_fns = ['decals_dr3/catalogs/tractor-{}.fits'.format(bnm) for bnm in bricknames]
decals_catalogs = [Table.read(fn) for fn in catalog_fns]
dcatall = table.vstack(decals_catalogs, metadata_conflicts='silent')
sdss_catalog = Table.read('catalogs/base_sql_nsa{}.fits.gz'.format(anak.nsaid))
# Add g/r/z magnitudes (decam_flux indices 1/2/4) and r-band surface
# brightness in several apertures to the stacked catalog.
for dcat in [dcatall]:
    for magnm, idx in zip('grz', [1, 2, 4]):
        mag, mag_err = fluxivar_to_mag_magerr(dcat['decam_flux'][:, idx], dcat['decam_flux_ivar'][:, idx])
        dcat[magnm] = mag
        dcat[magnm + '_err'] = mag_err
    dcat['sb_r_0.5'] = compute_sb(0.5*u.arcsec, dcat['decam_apflux'][:, 2, :])
    dcat['sb_r_0.75'] = compute_sb(0.75*u.arcsec, dcat['decam_apflux'][:, 2, :])
    dcat['sb_r_1'] = compute_sb(1.0*u.arcsec, dcat['decam_apflux'][:, 2, :])
    dcat['sb_r_2'] = compute_sb(2.0*u.arcsec, dcat['decam_apflux'][:, 2, :])
DECALS_AP_SIZES  # display the catalog's aperture radii
# Aperture magnitudes and residual-image magnitudes; apdiff is the
# residual-minus-flux magnitude difference in r.
apmag, apmagerr = fluxivar_to_mag_magerr(dcatall['decam_apflux'], dcatall['decam_apflux_ivar'])
apmagres, _ = fluxivar_to_mag_magerr(dcatall['decam_apflux_resid'], dcatall['decam_apflux_ivar'])
apdiff = subselect_aperture(apmagres - apmag, 'r')
# BUG FIX: the r term used `subselect(...)`, inconsistent with the
# `subselect_aperture` helper used on the adjacent lines (assumed typo;
# confirm no separate `subselect` helper exists elsewhere in the notebook).
apcolor = subselect_aperture(apmag, 'g') - subselect_aperture(apmag, 'r')
apmagx = subselect_aperture(apmag, 'r')
good = ~np.isnan(apdiff)&~np.isnan(apcolor)&~np.isnan(apmagx)
# Color-magnitude scatter colored by the residual difference.
plt.scatter(apcolor[good], apmagx[good], c=apdiff[good], cmap='viridis', alpha=.1, lw=0, s=1)
plt.colorbar()
plt.xlim(-2,5)
plt.ylim(26,15)
# Fraction of finite residual aperture magnitudes per band and aperture.
# NOTE(review): iterates 'ugrizy' although only g/r/z magnitudes were added
# above — confirm subselect_aperture supports the u/i/y bands here.
for band in 'ugrizy':
    reses = subselect_aperture(apmagres, band, None)
    print('Band', band)
    for ap, res in zip(DECALS_AP_SIZES, reses.T):
        print('Aperture', ap,'has', 100*np.sum(np.isfinite(res))/len(res),'% good')
# Inspect a random sample of objects whose r-band residual is NaN/inf.
rs = subselect_aperture(apmagres, 'r')
catnotfin = dcatall[~np.isfinite(rs)]
catnotfin['apmagres_allaps'] = subselect_aperture(apmagres, 'r', None)[~np.isfinite(rs)]
make_cutout_comparison_table(catnotfin[np.random.permutation(len(catnotfin))[:10]],
                             inclres=True, inclmod=True, inclsdss=False, doprint=False,
                             add_annotation=['apmagres_allaps'])
# Distribution of flux-minus-residual magnitude per aperture (bright objects).
dmag_of_ap_distr = {}
for ap in DECALS_AP_SIZES:
    rs = subselect_aperture(apmag, 'r', ap)
    rres = subselect_aperture(apmagres, 'r', ap)
    dmag_of_ap_distr[ap] = dmag = rs - rres
    plt.hist(dmag[np.isfinite(dmag)&(rs<22*u.mag)], bins=100, histtype='step', label=str(ap), normed=True)
plt.legend(loc=0)
plt.xlabel('r_flux - r_resid')
# Inspect the worst 5% of residuals in the 1" aperture for r < 22.
ap = 1.0*u.arcsec
rs = subselect_aperture(apmag, 'r', ap)
rres = subselect_aperture(apmagres, 'r', ap)
dmag = rs - rres
perc = 95
p = np.percentile(dmag[np.isfinite(dmag)&(rs<22*u.mag)], perc)
print('nobjs in', perc,'percentile:', np.sum(dmag.value>p), 'cutoff is', p)
msk = np.isfinite(dmag)&(dmag.value>p)&(rs<22*u.mag)
dcatbadres = dcatall[msk]
dcatbadres['dmag'] = dmag[msk]
dcatbadres['r'] = rs[msk]
make_cutout_comparison_table(dcatbadres[:10],
                             inclres=True, inclmod=True, inclsdss=False, doprint=False,
                             add_annotation=['dmag', 'r'])
#cut out the non-overlap region
dsc = SkyCoord(dcatall['ra'], dcatall['dec'], unit=u.deg)
dcutall = dcatall[dsc.separation(anak.coords) < 1*u.deg]
dsc = SkyCoord(dcutall['ra'], dcutall['dec'], unit=u.deg)
ssc = SkyCoord(sdss_catalog['ra'], sdss_catalog['dec'], unit=u.deg)
# Cross-match SDSS -> DECaLS with a 1" threshold; histogram the separations.
threshold = 1*u.arcsec
idx, d2d, _ = ssc.match_to_catalog_sky(dsc)
plt.hist(d2d.arcsec, bins=100, range=(0, 3),histtype='step', log=True)
plt.axvline(threshold.to(u.arcsec).value, c='k')
None
dmatchmsk = idx[d2d<threshold]
dmatch = dcutall[dmatchmsk]
smatch = sdss_catalog[d2d<threshold]
# Reverse match DECaLS -> SDSS to find DECaLS objects without SDSS matches.
idx, d2d, _ = dsc.match_to_catalog_sky(ssc)
dnomatchmsk = d2d>threshold
dnomatch = dcutall[dnomatchmsk]
plt.figure(figsize=(12, 10))
xnm = 'r'
ynm = 'sb_r_0.5'
ap = 1*u.arcsec
apmag, apmagerr = fluxivar_to_mag_magerr(dnomatch['decam_apflux'], dnomatch['decam_apflux_ivar'])
apmagres, _ = fluxivar_to_mag_magerr(dnomatch['decam_apflux_resid'], dnomatch['decam_apflux_ivar'])
rs = subselect_aperture(apmag, xnm, ap)
rres = subselect_aperture(apmagres, xnm, ap)
dmag = rs - rres
dnstar = dnomatch['type']=='PSF '
dnoext = -2.5*np.log10(dnomatch['decam_mw_transmission'][:, 2])
r0 = (dnomatch[xnm] - dnoext)
sb = dnomatch[ynm] - dnoext
plt.scatter(r0[~dnstar], sb[~dnstar],
c=dmag[~dnstar], lw=0, alpha=1, s=3, label='Glx in DECALS, not in SDSS', vmax=0, vmin=-5,
cmap='viridis_r')
plt.colorbar().set_label('r_ap - r_res [{}]'.format(ap))
plt.axvline(20.75, color='k', ls=':')
plt.xlim(17, 23)
plt.ylim(18, 28)
plt.xlabel(r'$r_{0, {\rm DECaLS}}$', fontsize=28)
plt.ylabel(r'$SB_{0.5^{\prime \prime}, {\rm DECaLS}}$', fontsize=28)
plt.xticks(fontsize=24)
plt.yticks(fontsize=24)
plt.legend(loc='lower right', fontsize=20)
msk = (r0[~dnstar]<20.75)&(sb[~dnstar]>24)
cat = dnomatch[~dnstar][msk]
cat['dmag'] = dmag[~dnstar][msk]
# Select the 10th-percentile tail of the dmag distribution and show cutouts
# for a random sample of it.
p = np.percentile(cat['dmag'][np.isfinite(cat['dmag'])], 10)
catlower = cat[cat['dmag'] < p]
print(len(catlower))
# BUG FIX: the permutation must span len(catlower); the original referenced
# len(catupper), which is not defined until the 90th-percentile cell below.
make_cutout_comparison_table(catlower[np.random.permutation(len(catlower))[:10]],
                             inclres=True, inclmod=True, inclsdss=False, doprint=False,
                             add_annotation=['dmag', 'r'])
p = np.percentile(cat['dmag'][np.isfinite(cat['dmag'])], 90)
catupper = cat[cat['dmag']>p]
print(len(catupper))
make_cutout_comparison_table(catupper[np.random.permutation(len(catupper))[:10]],
inclres=True, inclmod=True, inclsdss=False, doprint=False,
add_annotation=['dmag', 'r'])
# things we want to identify automatically
# Candidate "disky" objects to identify automatically, as a whitespace-separated
# ASCII table.  The triple-quote delimiters of this string literal were lost
# when the notebook was flattened; restore them so the assignment parses.
disky_things_from_marla = """
mag ra dec unk
19.4785 354.157853072 0.102747198705 false
19.3311 354.255069696 0.619242808225 false
19.1127 354.284237813 0.160859730123 false
18.6914 354.069400831 -0.115904590235 false
19.3392 354.415038652 -0.0645766794736 false
19.4624 354.114525096 0.00532483801292 false
19.1087 354.534841322 0.436958919955 false
19.0242 354.447705125 0.266811924681 false
19.0354 354.136706143 0.691858529943 false
19.0534 354.53888635 0.236989192453 false
19.2452 354.568916976 0.837946572935 false
19.3481 353.473924073 -0.0912764847886 false
19.268 354.011615043 -0.445983276629 false
19.3429 354.408722681 0.904017267882 true
19.1914 354.851596507 0.401264434888 true
19.2529 353.412613626 0.640755638979 false
19.3848 354.878706758 0.364835196656 false
"""
# Parse the ASCII table into an astropy Table (first row is the header).
disky_things_from_marla = Table.read([disky_things_from_marla], format='ascii')
sc_marla = SkyCoord(disky_things_from_marla['ra'], disky_things_from_marla['dec'], unit=u.deg)
idx, d2d, _ = sc_marla.match_to_catalog_sky(SkyCoord(dcatall['ra'], dcatall['dec'], unit=u.deg))
np.sum(d2d < 1*u.arcsec)/len(d2d)
matchcat = dcatall[idx]
apmag, apmagerr = fluxivar_to_mag_magerr(matchcat['decam_apflux'], matchcat['decam_apflux_ivar'])
apmagres, _ = fluxivar_to_mag_magerr(matchcat['decam_apflux_resid'], matchcat['decam_apflux_ivar'])
rs = subselect_aperture(apmag, xnm, None)
rres = subselect_aperture(apmagres, xnm, None)
matchcat['dmag'] = rs - rres
# Build a per-aperture "significance" of each matched object's dmag against the
# field-wide dmag distribution for that aperture.
# NOTE(review): the zip pairs matchcat['dmag'] columns with dmag_of_ap_distr
# values positionally — assumes the dict's insertion order matches the aperture
# order of the dmag columns (DECALS_AP_SIZES); confirm.
dmagsigs = []
for dmag, dmagdistr in zip(matchcat['dmag'].T, dmag_of_ap_distr.values()):
    msk = np.isfinite(dmagdistr)
    # NOTE(review): operator precedence evaluates this as
    # mean(...) - (dmag / std(...)).  A z-score-style significance would be
    # (mean(...) - dmag) / std(...) — confirm the intended expression.
    dmagsigs.append(np.mean(dmagdistr[msk].value)-dmag/np.std(dmagdistr[msk].value))
    print(np.mean(dmagdistr[msk].value).shape, dmag.shape, np.std(dmagdistr[msk].value).shape, dmagsigs[-1].shape)
# One significance value per (object, aperture).
matchcat['dmag_sig'] = np.array(dmagsigs).T
make_cutout_comparison_table(matchcat[np.random.permutation(len(matchcat))[:10]],
inclres=True, inclmod=True, inclsdss=False, doprint=False,
add_annotation=['dmag', 'r', 'dmag_sig'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Read epochs for all channels, removing a bad one
Step3: Transform to source space
Step4: Transform to common cortical space
Step5: It's a good idea to spatially smooth the data, and for visualization
Step6: Now we need to prepare the group matrix for the ANOVA statistic. To make the
Step7: Prepare function for arbitrary contrast
Step8: Finally we will pick the interaction effect by passing 'A:B' as the effects argument
Step9: A stat_fun must deal with a variable number of input arguments.
Step10: Compute clustering statistic
Step11: Visualize the clusters
Step12: Finally, let's investigate interaction effect by reconstructing the time
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Denis Engemannn <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from numpy.random import randn
import matplotlib.pyplot as plt
import mne
from mne.stats import (spatio_temporal_cluster_test, f_threshold_mway_rm,
f_mway_rm, summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path / 'subjects'
src_fname = subjects_dir / 'fsaverage' / 'bem' / 'fsaverage-ico-5-src.fif'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
# we'll load all four conditions that make up the 'two ways' of our ANOVA
event_id = dict(l_aud=1, r_aud=2, l_vis=3, r_vis=4)
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
epochs.equalize_event_counts(event_id)
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE, sLORETA, or eLORETA)
inverse_operator = read_inverse_operator(fname_inv)
# we'll only use one hemisphere to speed up this example
# instead of a second vertex array we'll pass an empty array
sample_vertices = [inverse_operator['src'][0]['vertno'], np.array([], int)]
# Let's average and compute inverse, then resample to speed things up
conditions = []
for cond in ['l_aud', 'r_aud', 'l_vis', 'r_vis']: # order is important
evoked = epochs[cond].average()
evoked.resample(30).crop(0., None)
condition = apply_inverse(evoked, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition.crop(0, None)
conditions.append(condition)
tmin = conditions[0].tmin
tstep = conditions[0].tstep * 1000 # convert to milliseconds
n_vertices_sample, n_times = conditions[0].lh_data.shape
n_subjects = 6
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 4) * 10
for ii, condition in enumerate(conditions):
X[:, :, :, ii] += condition.lh_data[:, :, np.newaxis]
# Read the source space we are morphing to (just left hemisphere)
src = mne.read_source_spaces(src_fname)
fsave_vertices = [src[0]['vertno'], []]
morph_mat = mne.compute_source_morph(
src=inverse_operator['src'], subject_to='fsaverage',
spacing=fsave_vertices, subjects_dir=subjects_dir, smooth=20).morph_mat
morph_mat = morph_mat[:, :n_vertices_sample] # just left hemi from src
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 4)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 4)
X = np.transpose(X, [2, 1, 0, 3]) #
X = [np.squeeze(x) for x in np.split(X, 4, axis=-1)]
factor_levels = [2, 2]
effects = 'A:B'
# Tell the ANOVA not to compute p-values which we don't need for clustering
return_pvals = False
# a few more convenient bindings
n_times = X[0].shape[1]
n_conditions = 4
def stat_fun(*args):
    """Return F-values of the two-way repeated-measures ANOVA for clustering.

    ``args`` is one array per condition with subjects on the first axis; the
    stack is swapped so subjects become the leading axis that
    :func:`mne.stats.f_mway_rm` expects.  Uses the module-level
    ``factor_levels``, ``effects`` and ``return_pvals`` settings.
    """
    # get f-values only.
    return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels,
                     effects=effects, return_pvals=return_pvals)[0]
# as we only have one hemisphere we need only need half the adjacency
print('Computing adjacency.')
adjacency = mne.spatial_src_adjacency(src[:1])
# Now let's actually do the clustering. Please relax, on a small
# notebook and one single thread only this will take a couple of minutes ...
pthresh = 0.005
f_thresh = f_threshold_mway_rm(n_subjects, factor_levels, effects, pthresh)
# To speed things up a bit we will ...
n_permutations = 50 # ... run way fewer permutations (reduces sensitivity)
print('Clustering.')
F_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_test(X, adjacency=adjacency, n_jobs=1,
threshold=f_thresh, stat_fun=stat_fun,
n_permutations=n_permutations,
buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# The brighter the color, the stronger the interaction between
# stimulus modality and stimulus location
brain = stc_all_cluster_vis.plot(subjects_dir=subjects_dir, views='lat',
time_label='temporal extent (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('cluster-lh.png')
brain.show_view('medial')
inds_t, inds_v = [(clusters[cluster_ind]) for ii, cluster_ind in
enumerate(good_cluster_inds)][0] # first cluster
times = np.arange(X[0].shape[1]) * tstep * 1e3
plt.figure()
colors = ['y', 'b', 'g', 'purple']
event_ids = ['l_aud', 'r_aud', 'l_vis', 'r_vis']
for ii, (condition, color, eve_id) in enumerate(zip(X, colors, event_ids)):
# extract time course at cluster vertices
condition = condition[:, :, inds_v]
# normally we would normalize values across subjects but
# here we use data from the same subject so we're good to just
# create average time series across subjects and vertices.
mean_tc = condition.mean(axis=2).mean(axis=0)
std_tc = condition.std(axis=2).std(axis=0)
plt.plot(times, mean_tc.T, color=color, label=eve_id)
plt.fill_between(times, mean_tc + std_tc, mean_tc - std_tc, color='gray',
alpha=0.5, label='')
ymin, ymax = mean_tc.min() - 5, mean_tc.max() + 5
plt.xlabel('Time (ms)')
plt.ylabel('Activation (F-values)')
plt.xlim(times[[0, -1]])
plt.ylim(ymin, ymax)
plt.fill_betweenx((ymin, ymax), times[inds_t[0]],
times[inds_t[-1]], color='orange', alpha=0.3)
plt.legend()
plt.title('Interaction between stimulus-modality and location.')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convergence analysis for a cantilever beam
Step2: The particular solution for parameters $E=1000.0$, $P=-50$ $\nu=0.30$, $I=42.67$,
Step3: We also want to analyze this problem using the FEM with bilineal square elements under plane stress conditions.
Step4: Notice that in the finite elment models the load is applied as nodal point loads of the same magnitud distributed along all the nodes at $x=-12$. This implies a uniform load distribution instead of the parabolic load that is consistent with the shear stress in the analytic solution.
Step5: Refences
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib import rcParams
from solidspy.preprocesor import rect_grid
import solidspy.postprocesor as pos
import solidspy.assemutil as ass
import solidspy.solutil as sol
%matplotlib notebook
rcParams['figure.figsize'] = 6, 4
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 12
def beam_sln(x, y, nu, P, E, I, L, h):
    """Compute the analytic plane-stress solution for an end-loaded cantilever beam.

    The triple-quote delimiters of this docstring were lost when the notebook
    was flattened, leaving the function syntactically invalid; they are
    restored here.  The formulas themselves are kept verbatim.

    Parameters
    ----------
    x : ndarray (float)
        Array with x coordinates.
    y : ndarray (float)
        Array with y coordinates.
    nu : float, (-1, 0.5)
        Poisson coefficient.
    P : float
        Applied force at the end of the beam.
    E : float, >0
        Young modulus.
    I : float, >0
        Moment of inertia.
    L : float, >0
        Length of the beam.
    h : float, >0
        Height of the beam.

    Returns
    -------
    u : ndarray (float)
        Horizontal displacement at the nodes.
    v : ndarray (float)
        Vertical displacement at the nodes.
    exx : ndarray (float)
        xx component of the strain tensor.
    eyy : ndarray (float)
        yy component of the strain tensor.
    gammaxy : ndarray (float)
        xy component of the strain tensor.

    References
    ----------
    .. [1] Timoshenko, S. & Goodier, J., 1970. Theory of Elasticity,
       McGraw-Hill, 3rd Ed.
    """
    G = E/(2*(1 + nu))  # shear modulus
    c = h/2             # half-height of the cross section
    # Integration constants of the Timoshenko-Goodier solution.
    C1 = -P/(2*E*I)
    C2 = -(nu*P)/(6*E*I)
    C3 = P/(2*I*G)
    C4 = (P*L**2)/(2*E*I)
    C5 = -(P*c**2)/(2*I*G)
    C6 = C4 + C5
    C7 = (nu*P)/(2*E*I)
    C8 = P/(6*E*I)
    C9 = -(P*L**2)/(2*E*I)
    C10 = (P*L**3)/(3*E*I)
    B1 = -P/(E*I)
    B2 = (nu*P)/(E*I)
    B3 = P/(2*I*G)
    u = C1*y*x**2 + C2*y**3 + C3*y**3 + (C5 + C6)*y
    v = C7*x*y**2 + C8*x**3 + C9*x + C10
    exx = B1*x*y
    eyy = B2*x*y
    gammaxy = B3*(y**2 - c**2)
    return u, v, exx, eyy, gammaxy
nu, P, E, I, L, h = 0.3, -50, 1000, 42.67, 24, 4
x = np.linspace(0, L, 100)
y = np.linspace(-h, h, 100)
x, y = np.meshgrid(x, y)
u, v, exx, eyy, gammaxy = beam_sln(x, y, nu, P, E, I, L, h)
plt.figure()
plt.contourf(x, y, u, 10)
plt.axis("image");
plt.figure()
plt.contourf(x, y, v, 10)
plt.axis("image");
P = -50
E = 1000
nu = 0.3
L = 24
h = 8
I = 42.67
niter = 8
err = np.zeros((niter))
hvec = np.zeros((niter))
mats = np.array([[E, nu]])
fname = "error_vs_h.txt"
fid = open(fname, "w")
fid.write("Iteration, Elements, h, error\n")
fid.close()
for cont in range(1, niter + 1):
print("Starting iteration %i, " % cont +
"h=%g, %i elements" % (8/2**(cont - 1), 3*4**(cont - 1)))
nx = 3*2**(cont - 1)
ny = 2**(cont - 1)
x, y, els = rect_grid(L, h, nx, ny)
nodes = np.zeros(((nx + 1)*(ny + 1), 5))
nodes[:, 0] = range((nx + 1)*(ny + 1))
nodes[:, 1] = x
nodes[:, 2] = y
nodes[x==L/2, 3] = -1
nodes[nx*(ny//2 + 1) - 1, 4] = -1
loads = np.zeros((ny + 1, 3))
loads[:, 0] = nodes[x==-L/2, 0]
loads[:, 2] = P/ny
# Assembly
DME, IBC, neq = ass.DME(nodes, els)
stiff = ass.assembler(els, mats, nodes, neq, DME)
rhs = ass.loadasem(loads, IBC, neq)
# Solution
disp = sol.static_sol(stiff, rhs)
disp_comp = pos.complete_disp(IBC, nodes, disp)
# Interpolation and error evaluation
if cont > 1:
disp_interp = np.column_stack([u_interp, v_interp])
aux = np.linalg.norm(disp_interp - disp_comp)
err[cont - 1] = aux/np.linalg.norm(disp_comp)
hvec[cont - 1] = 8*1/2**(cont - 1)
fid = open(fname, "a")
fid.write("%i, %i, %g, %g\n" %
(cont, 3*4**(cont - 1), 8*1/2**(cont - 1), err[cont-1]))
fid.close()
x_new, y_new, _ = rect_grid(L, h, 2*nx, 2*ny)
u_interp = griddata((x, y), disp_comp[:, 0], (x_new, y_new))
v_interp = griddata((x, y), disp_comp[:, 1], (x_new, y_new))
data = np.loadtxt("error_vs_h.txt", skiprows=1, delimiter=",")
els = data[:, 1]
h = data[:, 2]
err = data[:, 3]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.loglog(h, err, '-bo')
plt.xlabel(r"Element size: $h$")
plt.ylabel(r"Relative error: $\frac{\Vert u - u_h \Vert}{\Vert u \Vert}$",
fontsize=14)
xticks, xlabels = plt.xticks()
plt.grid(which="both", alpha=0.3, lw=0.5)
ax2 = ax1.twiny()
ax2.set_xscale("log")
ax2.set_xlim(ax1.get_xlim())
ax2.set_xticks(xticks[1:-1])
labels = 192/4**np.log2(xticks[1:-1])
ax2.set_xticklabels(["%g"%label for label in labels])
ax2.xaxis.set_ticks_position("none")
plt.xlabel("Number of elements")
plt.tight_layout()
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it for rich display.

    Returns an IPython ``HTML`` object wrapping the contents of
    ``../custom_barba.css``.
    """
    # Use a context manager so the file handle is closed promptly;
    # the original `open(...).read()` leaked the handle.
    with open('../custom_barba.css', 'r') as css_file:
        styles = css_file.read()
    return HTML(styles)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def encode_cyclic(s: str):
    """Encode *s* by cycling every complete group of three characters one
    position to the left; a trailing group shorter than three is unchanged."""
    # split string to groups. Each of length 3.
    groups = [s[(3 * i):min((3 * i + 3), len(s))] for i in range((len(s) + 2) // 3)]
    # cycle elements in each group. Unless group has fewer elements than 3.
    groups = [(group[1:] + group[0]) if len(group) == 3 else group for group in groups]
    return "".join(groups)


def decode_cyclic(s: str):
    """Take as input a string encoded with the encode_cyclic function and
    return the decoded string.

    Cycling a 3-character group is a permutation of order 3, so applying the
    encoder twice more undoes a single application.  (The stripped docstring
    delimiters had fused this body into prose; restored here.)
    """
    return encode_cyclic(encode_cyclic(s))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we instantiate the channel library. By default bbndb will use an sqlite database at the location specified by the BBN_DB environment variabe, but we override this behavior below in order to use a specific filename. Also shown (but commented out) is the syntax for creating a temporary in-memory database for testing purposes.
Step2: The channel library has a number of convenience functions defined to create instruments and qubits, as well as functions to define the relationships between them. Let us create a qubit first
Step3: In order to compile the QGL program into pulse sequences, we need to define a minimal hardware configuration. Basically, we need to specify AWG resources for output pulse compilation and digitizer resources for signal measurement.
Step4: Commit the changes to the channel library.
Step5: Basic sequence construction and plotting
Step6: This sequence of pulses can be plotted for visual review. First, you must compile the QGL into pulses based on the hardware defined above. Since our Qubit object is a quadrature channel, you see two colors corresponding to the I and Q control signals.
Step7: Now, let's plot the second sequence.
Step8: Constructing more sophisticated sequences
Step9: To rotate about an arbitrary axis, use the "U" primitives
Step10: Z rotations are performed in "software"
Step11: Sequences can act on multiple qubits, i.e., channels. Let's create another "logical" qubit channel as well as a "physical" channel.
Step12: When you plot a sequence with multiple logical channels, each channel (both I and Q) is plotted seperately.
Step13: One can express simultaneous operations with the * operator (meant to evoke a tensor product). If no operation is specified for a channel in a given time slot, an identity (no-op) operation is inserted.
Step14: Constructing sequences with measurements
Step15: Single-sideband modulation
Step16: For measurement pulses, we define the SSB frequency as
Step17: Longer sequences using list comprehensions
Step18: T1
Step19: Ramsey
|
<ASSISTANT_TASK:>
Python Code:
from QGL import *
cl = ChannelLibrary("example")
# This would be a temporary, in memory database
# cl = ChannelLibrary(":memory:")
q1 = cl.new_qubit("q1")
# Most calls required label and address. Let's define
# an AWG for control pulse generation
aps2_1 = cl.new_APS2("BBNAPS1", address="192.168.5.101")
# an AWG for measurement pulse generation
aps2_2 = cl.new_APS2("BBNAPS2", address="192.168.5.102")
# and digitizer for measurement collection
dig_1 = cl.new_X6("X6_1", address=0)
# Qubit q1 is controlled by AWG aps2_1
cl.set_control(q1, aps2_1)
# Qubit q1 is measured by AWG aps2_2 and digitizer dig_1
cl.set_measure(q1, aps2_2, dig_1.ch(1))
cl.commit()
seq1 = [[X(q1), Y(q1)]]
seq2 = [[X90(q1),Y90(q1),X(q1),Id(q1),Y(q1)]]
mf = compile_to_hardware(seq1, 'Test1')
plot_pulse_files(mf)
mf = compile_to_hardware(seq2, 'Test2')
plot_pulse_files(mf)
seq = [[Xtheta(q1, 0.2), Xtheta(q1, 0.4), Xtheta(q1, 0.6), Xtheta(q1, 0.8), Xtheta(q1, 1.0)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[U(q1, 0.0), U(q1, np.pi/8), U(q1, np.pi/4), U(q1, 3*np.pi/8), U(q1, np.pi/2)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[X(q1), Z90(q1), X(q1), Z90(q1), X(q1), Z90(q1), X(q1), Z90(q1), X(q1)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
q2 = cl.new_qubit("q2")
aps2_3 = cl.new_APS2("BBNAPS3", address="192.168.5.103")
cl.set_control(q2, aps2_3)
seq = [[X(q1), X(q2), Y(q1), Y(q2)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[X(q1)*X(q2), X(q1)*Y(q2), Y(q1), X(q2)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[MEAS(q1)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
q1.frequency = 50e6
seq= [[X(q1), Y(q1)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
# set the modulation frequency
cl["q1"].measure_chan.autodyne_freq = 10e6
seq = [[X(q1), MEAS(q1)]]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[Xtheta(q1, a), MEAS(q1)] for a in np.linspace(0,2,11)]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[X(q1), Id(q1, d), MEAS(q1)] for d in np.linspace(0, 10e-7, 11)]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
seq = [[X90(q1), Id(q1, delay), X90(q1), MEAS(q1)] for delay in np.linspace(0, 5e-7, 11)]
mf = compile_to_hardware(seq, 'Test')
plot_pulse_files(mf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic classification
Step2: Import the Fashion MNIST dataset
Step3: Loading the dataset returns four NumPy arrays
Step4: Explore the data
Step5: Likewise, there are 60,000 labels in the training set
Step6: Each label is an integer between 0 and 9
Step7: There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels
Step8: And the test set contains 10,000 images labels
Step9: Preprocess the data
Step10: Scale these values to a range of 0 to 1 before feeding them to the neural network model. To do so, divide the values by 255. It's important that the training set and the testing set be preprocessed in the same way
Step11: To verify that the data is in the correct format and that you're ready to build and train the network, let's display the first 25 images from the training set and display the class name below each image.
Step12: Build the model
Step13: The first layer in this network, tf.keras.layers.Flatten, transforms the format of the images from a two-dimensional array (of 28 by 28 pixels) to a one-dimensional array (of 28 * 28 = 784 pixels). Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
Step14: Train the model
Step15: As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.91 (or 91%) on the training data.
Step16: It turns out that the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy represents overfitting. Overfitting happens when a machine learning model performs worse on new, previously unseen inputs than it does on the training data. An overfitted model "memorizes" the noise and details in the training dataset to a point where it negatively impacts the performance of the model on the new data. For more information, see the following
Step17: Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction
Step18: A prediction is an array of 10 numbers. They represent the model's "confidence" that the image corresponds to each of the 10 different articles of clothing. You can see which label has the highest confidence value
Step19: So, the model is most confident that this image is an ankle boot, or class_names[9]. Examining the test label shows that this classification is correct
Step20: Graph this to look at the full set of 10 class predictions.
Step21: Verify predictions
Step22: Let's plot several images with their predictions. Note that the model can be wrong even when very confident.
Step23: Use the trained model
Step24: tf.keras models are optimized to make predictions on a batch, or collection, of examples at once. Accordingly, even though you're using a single image, you need to add it to a list
Step25: Now predict the correct label for this image
Step26: tf.keras.Model.predict returns a list of listsโone list for each image in the batch of data. Grab the predictions for our (only) image in the batch
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 Franรงois Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# TensorFlow and tf.keras
import tensorflow as tf
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
    """Show test image *i* with its predicted and true labels.

    The caption is blue when the prediction matches the true label and
    red otherwise.
    """
    actual = true_label[i]
    picture = img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(picture, cmap=plt.cm.binary)
    guess = np.argmax(predictions_array)
    caption_color = 'blue' if guess == actual else 'red'
    caption = "{} {:2.0f}% ({})".format(class_names[guess],
                                        100 * np.max(predictions_array),
                                        class_names[actual])
    plt.xlabel(caption, color=caption_color)
def plot_value_array(i, predictions_array, true_label):
    """Bar-plot the 10 class confidences for sample *i*.

    The predicted class bar is colored red and the true class bar blue
    (blue wins when they coincide, since it is set last).
    """
    actual = true_label[i]
    plt.grid(False)
    plt.xticks(range(10))
    plt.yticks([])
    bars = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(predictions_array)
    bars[guess].set_color('red')
    bars[actual].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# Grab an image from the test dataset.
img = test_images[1]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = probability_model.predict(img)
print(predictions_single)
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
plt.show()
np.argmax(predictions_single[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple popups
Step2: Vega Popup
Step4: Fancy HTML popup
Step5: Note that you can put another Figure into an IFrame ; this should let you do stange things...
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.insert(0,'..')
import folium
import branca
print (folium.__file__)
print (folium.__version__)
m = folium.Map([45,0], zoom_start=4)
folium.Marker([45,-30], popup="inline implicit popup").add_to(m)
folium.CircleMarker([45,-10], radius=1e5, popup=folium.Popup("inline explicit Popup")).add_to(m)
ls = folium.PolyLine([[43,7],[43,13],[47,13],[47,7],[43,7]], color='red')
ls.add_children(folium.Popup("outline Popup on Polyline"))
ls.add_to(m)
gj = folium.GeoJson({ "type": "Polygon", "coordinates": [[[27,43],[33,43],[33,47],[27,47]]]})
gj.add_children(folium.Popup("outline Popup on GeoJSON"))
gj.add_to(m)
m
# Builds one Vega scatter chart with vincent and embeds it in map popups in
# three equivalent forms: the chart object itself, its JSON string, and the
# parsed dict -- folium.Vega accepts all three.
import vincent, json
import numpy as np
scatter_points = {
    'x' : np.random.uniform(size=(100,)),
    'y' : np.random.uniform(size=(100,)),
}
# Let's create the vincent chart.
scatter_chart = vincent.Scatter(scatter_points,
                                iter_idx='x',
                                width=600,
                                height=300)
# Let's convert it to JSON.
scatter_json = scatter_chart.to_json()
# Let's convert it to dict.
scatter_dict = json.loads(scatter_json)
m = folium.Map([43,-100], zoom_start=4)
# Let's create a Vega popup based on scatter_chart.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_chart, height=350, width=650).add_to(popup)
folium.Marker([30,-120], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_json.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_json, height=350, width=650).add_to(popup)
folium.Marker([30,-100], popup=popup).add_to(m)
# Let's create a Vega popup based on scatter_dict.
popup = folium.Popup(max_width=800)
folium.Vega(scatter_dict, height=350, width=650).add_to(popup)
folium.Marker([30,-80], popup=popup).add_to(m)
m  # render the map with the three Vega-popup markers
# Embed arbitrary HTML in a popup via a branca IFrame.
m = folium.Map([43,-100], zoom_start=4)
# BUG FIX: the triple quotes around this HTML literal were lost during
# notebook extraction, leaving a syntax error; restore the string literal.
html = """
<h1> This is a big popup</h1><br>
With a few lines of code...
<p>
<code>
from numpy import *<br>
exp(-2*pi)
</code>
</p>
"""
iframe = branca.element.IFrame(html=html, width=500, height=300)
popup = folium.Popup(iframe, max_width=2650)
folium.Marker([30,-100], popup=popup).add_to(m)
m  # render the map with the HTML popup
# Nesting demo: a full folium Map is rendered inside a branca Figure, which is
# placed in an IFrame, which becomes the popup of a marker on a *second* map.
# Let's create a Figure, with a map inside.
f = branca.element.Figure()
folium.Map([-25,150], zoom_start=3).add_to(f)
# Let's put the figure into an IFrame.
iframe = branca.element.IFrame(width=500, height=300)
f.add_to(iframe)
# Let's put the IFrame in a Popup
popup = folium.Popup(iframe, max_width=2650)
# Let's create another map.
m = folium.Map([43,-100], zoom_start=4)
# Let's put the Popup on a marker, in the second map.
folium.Marker([30,-100], popup=popup).add_to(m)
# We get a map in a Popup. Not really useful, but powerful.
m
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We're going to want to see our vectors rendered in some way. Visual Python, or VPython, provides an excellent solution. However, we're going to stay with still ray tracings in this Chapter and work with POV-Ray.
|
<ASSISTANT_TASK:>
Python Code:
class Vector:
    """A point in space"""


class Edge:
    """A pair of vectors"""


class Face:
    """A set of vectors in clockwise or counter-clockwise order"""


class Polyhedron:
    """A set of faces"""
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Set up the input DataLoader
Step2: 2. Define the model and settings
Step3: 3. Restore model paramter from saved file
Step4: 6. Predict & Evaluate
Step5: 5. plot weights
|
<ASSISTANT_TASK:>
Python Code:
# Notebook setup: imports, CUDA detection, checkpoint filename, and the
# MNIST test-set DataLoader (evaluation only -- no training loader here).
%matplotlib inline
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
import matplotlib.pyplot as plt
is_cuda = torch.cuda.is_available()  # True when CUDA is available
# NOTE(review): "minist" looks like a typo for "mnist", but the file on disk
# was saved under this name -- keep it consistent with the saved checkpoint.
checkpoint_filename = 'minist.ckpt'
# Deterministic iteration order (shuffle=False) over the 10k MNIST test images.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, transform=transforms.ToTensor()),
    batch_size=100, shuffle=False)
class MnistModel(nn.Module):
    """Two convolutional layers + two fully connected layers for MNIST.

    Input: float tensor of shape (batch, 1, 28, 28).
    Output: log-probabilities over the 10 digit classes, shape (batch, 10).
    """

    def __init__(self):
        super(MnistModel, self).__init__()
        # input is 28x28
        # padding=2 for same padding with a 5x5 kernel
        self.conv1 = nn.Conv2d(1, 32, 5, padding=2)
        # feature map size is 14*14 by pooling
        # padding=2 for same padding
        self.conv2 = nn.Conv2d(32, 64, 5, padding=2)
        # feature map size is 7*7 by pooling
        self.fc1 = nn.Linear(64*7*7, 1024)
        self.fc2 = nn.Linear(1024, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, 64*7*7)  # flatten to (batch, 64*7*7)
        x = F.relu(self.fc1(x))
        # Dropout is active only in training mode (self.training).
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # FIX: an implicit dim in log_softmax is deprecated and warns; the
        # class axis of this 2-D (batch, classes) tensor is dim=1.
        return F.log_softmax(x, dim=1)
# Restore the trained parameters from the checkpoint and measure test-set
# accuracy (fraction of argmax predictions matching the labels).
model = MnistModel()
if is_cuda : model.cuda()
checkpoint = torch.load(checkpoint_filename)
model.load_state_dict(checkpoint)
model.eval()  # disable dropout for evaluation
correct = 0
for image, target in test_loader:
    if is_cuda : image, target = image.cuda(), target.cuda()
    # NOTE(review): Variable(..., volatile=True) is the legacy (pre-0.4)
    # no-grad idiom; modern PyTorch uses torch.no_grad() instead.
    image, target = Variable(image, volatile=True), Variable(target)
    output = model(image)
    # max(1) over the class axis returns (values, indices); [1] = predicted class.
    prediction = output.data.max(1)[1]
    correct += prediction.eq(target.data).sum()
print('\nTest set: Accuracy: {:.2f}%'.format(100. * correct / len(test_loader.dataset)))
# Visualize the learned convolution kernels on a shared color scale
# (seismic colormap centered between the global min and max weight).
model.state_dict().keys()
plt.rcParams["figure.figsize"] = [8, 4]
weight = model.state_dict()['conv1.weight']
wmax, wmin = torch.max(weight), torch.min(weight)
# make_grid tiles the 32 (1,5,5) kernels into one image; transpose to HWC.
gridimg = torchvision.utils.make_grid(weight).cpu().numpy().transpose((1,2,0))
plt.imshow(gridimg[:,:,0], vmin = wmin, vmax =wmax, interpolation='nearest', cmap='seismic') # gridimg[:, :, 0] shows a single color channel
plt.rcParams["figure.figsize"] = [8, 8]
weight = model.state_dict()['conv2.weight'] # 64 x 32 x 5 x 5
# Keep only the first input channel so make_grid can tile the 64 kernels.
weight = weight[:, 0:1, :, :] # 64 x 1 x 5 x 5
wmax, wmin = torch.max(weight), torch.min(weight)
gridimg = torchvision.utils.make_grid(weight).cpu().numpy().transpose((1,2,0))
plt.imshow(gridimg[:,:,0], vmin = wmin, vmax =wmax, interpolation='nearest', cmap='seismic') # gridimg[:, :, 0] shows a single color channel
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Remember, we can use loops with dummy variables to iterate over lists and perform operations on each element. For example, say we want to print ourList with each element multiplied by 10. We can use a while loop to do this
Step2: We can also use conditional statements like if and else to implement more complex logic. What if we wanted to print out all the elements in ourList that are smaller than 5? We can use conditional statements to do this
Step3: Finally, recall that we can use the matplotlib module to plot data. Say we wanted to plot each element of ourList versus the square of each element.
Step4: Lecture 2 - NumPy, Functions, and Data
Step5: What if we wanted to square each element of the list? Give it a try
Step6: It doesn't work! Regular Python lists don't have this functionality. Let's convert our list to a NumPy array and see what happens.
Step7: To convert our list to a numpy array, we'll use np.array().
Step8: Now what happens if we try to square each element?
Step9: Did it work?
Step10: What if we want to fill an array with a different list of numbers? Sometimes it is handy to create an array of all zeros, which can then be replaced later with data. This can be done by using the command np.zeros().
Step11: We can also assign new values to elements of existing arrays, using the following "square bracket" notation
Step12: Now we move onto slightly more sophisticated ways of accessing elements of an array.
Step13: Sometimes it's useful to access more than one element of an array. Let's say that we have an array spanning the range [0,10] (including endpoints), with a step size of 0.1. If you recall, this can be done via the np.linspace() or np.arange() functions.
Step14: In order to get a range of elements rather than simply a single one, we use the notation
Step15: If you want all the elements beyond a certain point of the array (including that point), then you would leave out the second index, for example
Step16: To practice with this, try to select just the first half of the array.
Step17: Then, pick out middle sixty elements of the array.
Step18: Finally, use only the ":" to select the entire array.
Step19: B. Defining Your Own Functions
Step20: So the outline for a function is
Step22: If you want more information on this, the documentation of possible functions that can be applied to integers and floats (i.e. single numbers), as well as numpy arrays, can be found here
Step23: If we call this function on the vector (3,4), we should get 5.
Step24: In this lecture, we've learned about numpy arrays, loops, and defining functions. You'll have a chance to test these skills in the following exercises!
Step25: Use the lines below to test the function you wrote
Step26: Exercise 2 - Calculate path length.
Step27: Test your function on the example below. Your answer should come out to $4\sqrt{2} \approx 5.657$
Step28: C. Loading And Saving Data Arrays
Step29: Now then, let's say we are doing a timing experiment, where we look at the brightness of an object as a function of time. This is actually a very common type of measurement that you may do in research, such as looking for dips in the brightness of stars as a way to detect planets.
Step30: Now we have the data loaded into Python as a numpy array, and one handy thing you can do is to use Python to find the dimensions of the array. This is done by using the array.shape method as so.
Step31: From the output of this function, we know that timeseriesData is a 2x1000 array (two rows, 1000 columns).
Step32: By convention, we first specify the row index followed by the column index.
Step33: Looking at our data, you see clear spikes that jump well above most of the signal. (I've added this to the data to represent outliers that may sometimes appear when you're messing with raw data, and those must be dealt with). In astronomy, you sometimes have relativistic charged particles, not from your source, that hit the detector known as cosmic rays, and we often have to remove these.
Step34: In this case, the conditional statement that we have used is signal < cutOff.
Step35: Now let's plot it. You try.
Step36: Now that you have your data all cleaned up, it would be nice if we could save it for later and not have to go through the process of cleaning it up every time. Fear not! Python has you covered.
Step37: Then, we can use either the np.save() function or the np.savetxt() function, the first saving the array into a '.npy' file and the other, into a '.txt' file. The syntax is pretty much the same for each.
Step38: Now that your data files are saved, you can load them up again, using np.loadtxt() and np.load() for .txt and .npy files respectively. We used np.loadtxt() above, and np.load works the same way. So, let's load in the .npy file and see if our data was saved correctly.
Step39: Now, let's see if you can do the same thing, but with the .txt file that we saved.
Step40: D. Loading data files automatically
Step41: You can also cast an integer to a string using the str command.
Step42: Now you try
Step43: This is an odd idea, defining an list variable without any elements, so instead think of it as a basket without anything inside of it yet. We will use the append() function to fill it.
Step44: Now it's your turn. Can you figure out how to load the rest of the data files into datalist automatically?
|
<ASSISTANT_TASK:>
Python Code:
# Tutorial cells: iterate a list with a while loop and a dummy index,
# branch with if/else, then plot the list against its squares.
ourList = [0,1,2,3,4,5,6,7,8,9]
i = 0
while i < 10:
    num = ourList[i] *10  # each element scaled by 10
    print(num)
    i = i+1
i = 0
while (i<10):
    num = ourList[i]
    if num < 5:
        print(num)
    else:
        print("The number is not less than 5")
    i = i+1
import matplotlib.pyplot as plt
%matplotlib inline
squareList = [0,1,4,9,16,25,36,49,64,81]
plt.plot(ourList,squareList)
# plt.show()
# Tutorial cells: why plain lists can't be squared elementwise, converting to
# a NumPy array, creating/filling arrays, and indexing/slicing exercises.
c = [0,1,2,3,4,5,6,7,8,9]
# Intentional demo: ** is not defined for lists, so this raises TypeError.
d = c**2
import numpy as np
c = np.array(c)
d = c**2  # elementwise square now works on the ndarray
print(d)
np.arange(0,11,1)
np.linspace(0,9,10)
data = np.ones(10) * 2#what do i put here?
print(data)
data = np.zeros(10)
print(data[0])
data[0] = 137#
print(data[0])
#Your code goes here
a = np.arange(0,10,1)
print(a)
print(a[-1])  # negative indices count from the end
print(a[-2])
x = np.arange(0,10,.1)  # 100 points: 0.0, 0.1, ..., 9.9
print(len(x))
y = np.linspace(0,9.9,100)
x[0:3]
#Your code goes here
# NOTE(review): x[:10] is the first 10 of 100 elements; if the exercise asks
# for the first *half*, the intended answer is x[:50] -- verify against the text.
x[:10]
#Your code goes here
x[:50]
#Your code goes here
x[20:80]  # middle sixty elements
#Your code goes here
x[:]  # the whole array
# What happens if you take square root of a negative number?
# In Python 3, (-2)**(1/2) evaluates to a complex number rather than raising.
n = (-2)**(1/2)
print(n)
#Defining a square root function
def sqrt(n):
    """Return the square root of a non-negative number.

    For negative input, print a warning and return None instead of
    producing a complex result.
    """
    if n < 0:
        print("Negative numbers not allowed")
        return None
    return n ** 0.5
# Exercise the custom sqrt, then demonstrate NumPy's built-in math functions.
sqrt(4)
sqrt(-4)  # prints the warning and returns None
import numpy as np
pi = np.pi
print(np.sqrt(25))
print(np.sin(pi/2))
print(np.exp(pi)-pi)
np.log(10)
x = np.linspace(0,10,100)
# NOTE(review): x[0] is 0, so np.log emits a divide-by-zero RuntimeWarning
# and y[0] is -inf; matplotlib simply skips the non-finite point.
y = np.log(x)
plt.plot(x,y)
#Make sure you run this cell!
def length(x, y):
    """Calculates the length of a vector (x,y) using the Pythagorean theorem.

    FIX: the triple quotes around this docstring were stripped during
    notebook extraction, leaving a syntax error; restored here.
    Returns a NumPy scalar: sqrt(x**2 + y**2).
    """
    return np.sqrt(x**2+y**2)
length(3,4)
def even(array):
    """Return a list of the even numbers in *array*.

    Exercise solution -- the original cell held only '#Your code here'.
    NOTE(review): presumably "Exercise 1" asks for the even elements;
    adjust if the intended output differs.
    """
    return [x for x in array if x % 2 == 0]

testList = [0,1,2,3,4,5,6,7,8,9,10]
even(testList)
def pathLength(x_array, y_array):
    """Total length of the path through the points (x_array[i], y_array[i]).

    Sums the straight-line distance between each pair of consecutive
    points using the ``length`` helper defined above.  Returns 0.0 for
    paths with fewer than two points.
    """
    # Idiomatic for-range replaces the manual while/index bookkeeping;
    # start the accumulator as a float so the empty-path result is 0.0.
    total = 0.0
    for i in range(1, len(x_array)):
        total += length(x_array[i] - x_array[i-1], y_array[i] - y_array[i-1])
    return total

x = np.array([1,2,3,4,5])
y = np.array([1,2,3,4,5])
pathLength(x,y)  # 4 unit-diagonal segments: 4*sqrt(2) ~= 5.657
# Load a (2, 1000) brightness-vs-time dataset, plot it, then remove
# cosmic-ray spikes with a boolean mask.
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
timeseriesData = np.loadtxt("./lecture2_data/timeseries_data.txt")
timeseriesData.shape
t = timeseriesData[0,:] # this represents the time
signal = timeseriesData[1,:] # this represents the brightness
%matplotlib nbagg
import matplotlib.pyplot as plt
#Your code goes here
plt.close()
plt.plot(t,signal)
cutOff = 15.
# Boolean masking: keep only the samples whose signal is below the cutoff,
# and apply the *same* mask to t so the two arrays stay aligned.
signalFix = signal[signal < cutOff]
tFix = t[signal < cutOff]
#Your code goes here
plt.close()
plt.plot(tFix,signalFix)
# Save the cleaned data as .npy and .txt, reload the .npy to verify it,
# then demonstrate string concatenation and start a list of loaded files.
dataFix = np.array([tFix,signalFix])
np.save('./lecture2_data/dataFix.npy',dataFix)
np.savetxt('./lecture2_data/dataFix.txt',dataFix)
data = np.load('./lecture2_data/dataFix.npy')
t = data[0,:]
signal = data[1,:]
plt.close()
plt.plot(t,signal)
plt.show()
#Your code goes here
# String demos: * repeats a string, + concatenates, str() casts an int.
'5'*5
first_string = 'a'
second_string = 'b'
print(first_string + second_string)
first_string = 'a'
second_string = str(1)
print(first_string + second_string)
datalist = []  # empty "basket" to be filled with loaded arrays
loadedFile = np.loadtxt('./lecture2_data/c1.dat')
datalist.append(loadedFile)
# Your code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some possibly useful links
Step2: Tutorial on functions in python
Step3: Question 3
Step5: Assignment wrapup
|
<ASSISTANT_TASK:>
Python Code:
# Imports the functionality that we need to display YouTube videos in a Jupyter Notebook.
# You need to run this cell before you run ANY of the YouTube videos.
from IPython.display import YouTubeVideo
# WATCH THE VIDEO IN FULL-SCREEN MODE
YouTubeVideo("fF841G53fGo",width=640,height=360) # random numbers
# put your code here.
import random
random.seed(8675309)  # fixed seed makes the sequence reproducible
for i in range(10):
    # random.random() is uniform on [0, 1), so x is uniform on [5, 10).
    x = 5 + 5.0*random.random()
    print(x)
# WATCH THE VIDEO IN FULL-SCREEN MODE
YouTubeVideo("o_wzbAUZWQk",width=640,height=360) # functions
# put your code here.
mylist = [2.7, 3.5, 5.7, 9.1, 10.3, -7.0]

def getvals(thislist):
    """Return the minimum, maximum, and mean of a non-empty list of numbers.

    Raises IndexError if *thislist* is empty.
    """
    # FIX: use local names that do not shadow the built-ins ``min``/``max``.
    lo = thislist[0]
    hi = thislist[0]
    total = 0.0
    for val in thislist:
        if val < lo:
            lo = val
        if val > hi:
            hi = val
        total += val
    return lo, hi, total / len(thislist)

lowest, highest, avg = getvals(mylist)
print(lowest, highest, avg)
# put your code here
mylist = [2.7, 3.5, 5.7, 9.1, 10.3, -7.0]

def getvals(thislist, return_quant='mean'):
    """Return one summary statistic of a non-empty list of numbers.

    Parameters
    ----------
    thislist : sequence of numbers
    return_quant : {'min', 'max', 'mean'}
        Which statistic to return (default 'mean').

    Returns
    -------
    The requested statistic, or -99999 (after printing a warning) when
    *return_quant* is not recognized.
    """
    # FIX: local names chosen so the built-ins ``min``/``max`` are not shadowed.
    lo = thislist[0]
    hi = thislist[0]
    total = 0.0
    for val in thislist:
        if val < lo:
            lo = val
        if val > hi:
            hi = val
        total += val
    mean = total / len(thislist)
    if return_quant == 'min':
        return lo
    elif return_quant == 'max':
        return hi
    elif return_quant == 'mean':
        return mean
    else:
        # Kept from the original: warn and return a sentinel rather than raise.
        print("I don't understand your keyword:", return_quant)
        return -99999

returnval = getvals(mylist,'min')
print(returnval)
from IPython.display import HTML
# BUG FIX: the triple quotes around the HTML literal were stripped during
# notebook extraction, leaving a syntax error; restore the string argument.
HTML(
"""
<iframe
src="https://goo.gl/forms/rTmsyHG72q8pF0cT2?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
"""
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Array operations are very similar to that of the Python list. For example, the following code snippet creates a Python list and then converts it to a NumPy array
Step2: In this case, array1 is known as a rank 1 (one dimensional) array. You can print out the array as usual using the print() function
Step3: You can print out the shape of the array using the shape property
Step4: The shape property returns a tuple containing the dimension of the array. In the above example, array1 is a 1-dimensional array of five items.
Step5: You can also pass in a list containing the index of the items you want to extract to the array
Step6: The following code snippet shows how you can create a two-dimensional array
Step7: <h1>Creating and Initializing Arrays using NumPy</h1>
Step8: If you want to create a rank 2 array, simply pass in a tuple
Step9: To initialize the array to some other values other than zeroes, use the full() function
Step10: In linear algebra, you often need to deal with an identity matrix, and you can create this in NumPy easily with the eye() function
Step11: And if you need to populate an array with some random values, you can use the random.random() function to generate random values between 0.0 and 1.0
Step12: Finally, you can create a range of values from 0 to n-1 using the arange() function
Step13: <h1>Boolean Array Indexing</h1>
Step14: You first specify the condition, testing for even numbers, and then assign it to a variable
Step15: The even_nums variable is now a NumPy array, containing a collection of Boolean values. If you print it out, youโll see just that
Step16: The True values indicate that the particular item is an even number. Using this Boolean array, you can now use it as an index to the numbers array
Step17: The above statements could be written succinctly like this
|
<ASSISTANT_TASK:>
Python Code:
# Tutorial cells: creating rank-1 and rank-2 NumPy arrays from lists and
# indexing/slicing them (including fancy indexing with a list of indices).
import numpy as np
l1 = [1,2,3,4,5]
array1 = np.array(l1) # rank 1 array
print (array1) # [1 2 3 4 5]
print (array1.shape) # (5,)
print ('array1:', array1) # [1 2 3 4 5]
print ('array1.shape: ', array1.shape) # (5,)
print ('array1[0]:', array1[0]) # 1
print ('array1[1]:', array1[1]) # 2
print ('array1[1:3]:', array1[1:3]) # [2 3]
print ('array1[:-2]:', array1[:-2]) # [1 2 3]
print ('array1[3:]', array1[3:]) # [4 5]
# Fancy indexing: a list of indices selects those elements.
print ('array1[[2,3]]:', array1[[2,3]]) # [3,4]
l2 = [6,7,8,9,0]
array2 = np.array([l1,l2]) # rank 2 array
print ('array2:', array2)
'''
[[1 2 3 4 5]
[6 7 8 9 0]]
'''
print ('shape:', array2.shape) # (2,5) - 2 rows and
# 5 columns
# Row index first, then column index.
print ('array2[0,0]:', array2[0,0]) # 1
print ('array2[0,1]:', array2[0,1]) # 2
print ('array2[1,0]:', array2[1,0]) # 6
# Tutorial cells: array constructors (zeros, full, eye, random, arange)
# followed by boolean-array indexing to select even numbers.
a1 = np.zeros(2) # array of rank 1 with all 0s
print ('a1.shape:', a1.shape) # (2,)
print ('a1[0]:', a1[0]) # 0.0
print ('a1[1]:', a1[1]) # 0.0
a2 = np.zeros((2,3)) # array of rank 2 with all 0s;
# 2 rows and 3 columns
print ('a2.shape:', a2.shape) # (2,3)
print ('a2:', a2)
a3 = np.full((2,3), 8.0) # array of rank 2
# with all 8s
print ('a3:', a3)
a4 = np.eye(4) # 4x4 identity matrix
print ('a4:', a4)
a5 = np.random.random((2,4)) # populate a rank 2
# array (2 rows
# 4 columns) with
# random values
print ('a5:', a5)
a6 = np.arange(10) # creates a range from 0 to 9
print ('a6:', a6) # [0 1 2 3 4 5 6 7 8 9]
nums = np.array([23,45,78,89,23,11,22])
# Elementwise comparison yields a boolean array; using it as an index
# keeps only the True positions.
even_nums = nums % 2 == 0
print (even_nums)
print (nums[even_nums])
print (nums[nums % 2 == 0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This dataset is composed of 30 subjects, who each performed 8 study/test blocks of 16 words each. Here are some of the presented words
Step2: and some of the recalled words
Step3: We can start with the simplest analysis - recall accuracy - which is just the proportion of stimuli recalled that were in the encoding lists. To compute accuracy, simply call the analyze method, with the analysis key word argument set to accuracy
Step4: The result is a FriedEgg data object. The accuracy data can be retrieved using the get_data method, which returns a multi-index Pandas DataFrame where the first-level index is the subject identifier and the second level index is the list number. By default, note that each list is analyzed separately. However, you can easily return the average over lists using the listgroup kew word argument
Step5: Now, the result is a single value for each subject representing the average accuracy across the 16 lists. The listgroup kwarg can also be used to do some fancier groupings, like splitting the data into the first and second half of the experiment
Step6: These analysis results can be passed directly into the plot function like so
Step7: For more details on plotting, see the advanced plotting tutorial. Next, lets take a look at the serial position curve analysis. As stated above the serial position curve (or spc) computes recall accuracy as a function of the encoding position of the stimulus. To use it, use the same analyze method illustrated above, but set the analysis kwarg to spc. Let's also average across lists within subject
Step8: The result is a df where each row is a subject and each column is the encoding position of the word. To plot, simply pass the result of the analysis function to the plot function
Step9: Probability of First Recall
Step10: This df is set up just like the serial position curve. To plot
Step11: Lag-CRP
Step12: Unlike the previous two analyses, the result of this analysis returns a df where the number of columns are double the length of the lists. To view the results
Step13: Temporal clustering
Step14: Memory Fingerprint
Step15: Like the other analyses, computing the memory fingerprint can be done using the analyze method with the analysis kwarg set to fingerprint
Step16: The result of this analysis is a df, where each row is a subject's fingerprint and each column is a feature dimensions. The values represent a subjects tendency to cluster their recall responses along a particular feature dimensions. They are probability values, and thus, greater values indicate more clustering along that feature dimension. To plot, simply pass the result to the plot function
Step17: This result suggests that subjects in this example dataset tended to cluster their recall responses by category as well as the size (bigger or smaller than a shoebox) of the word. List length and other properties of your experiment can bias these clustering scores. To help with this, we implemented a permutation clustering procedure which shuffles the order of each recall list and recomputes the clustering score with respect to that distribution. Note
|
<ASSISTANT_TASK:>
Python Code:
# Free-recall analyses with quail: accuracy, serial position curve (spc),
# probability of first recall (pfr), lag-CRP, temporal clustering, and the
# memory fingerprint, each computed via egg.analyze(...) and plotted.
import quail
%matplotlib inline
egg = quail.load_example_data()  # 30 subjects x 8 study/test lists of 16 words
egg.get_pres_items().head()
egg.get_rec_items().head()
acc = egg.analyze('accuracy')  # per-list by default
acc.get_data().head()
# listgroup averages (or otherwise groups) the lists within each subject.
accuracy_avg = egg.analyze('accuracy', listgroup=['average']*8)
accuracy_avg.get_data().head()
accuracy_split = egg.analyze('accuracy', listgroup=['First Half']*4+['Second Half']*4)
accuracy_split.get_data().head()
accuracy_split.plot()
spc = egg.analyze('spc', listgroup=['average']*8)
spc.get_data().head()
spc.plot(ylim=[0, 1])
pfr = egg.analyze('pfr', listgroup=['average']*8)
pfr.get_data().head()
pfr.plot()
lagcrp = egg.analyze('lagcrp', listgroup=['average']*8)
lagcrp.get_data().head()
lagcrp.plot()
temporal = egg.analyze('temporal', listgroup=['First Half']*4+['Second Half']*4)
temporal.plot(plot_style='violin', ylim=[0,1])
egg.feature_names
fingerprint = egg.analyze('fingerprint', listgroup=['average']*8)
fingerprint.get_data().head()
order=sorted(egg.feature_names)
fingerprint.plot(order=order, ylim=[0, 1])
# warning: this can take a little while. Setting parallel=True will help speed up the permutation computation
# fingerprint = quail.analyze(egg, analysis='fingerprint', listgroup=['average']*8, permute=True, n_perms=100)
# ax = quail.plot(fingerprint, ylim=[0,1.2])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nuist', 'sandbox-1', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Word counting
Step5: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
Step7: Write a function sort_word_counts that return a list of sorted word counts
Step8: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt
Step9: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
s = "this is a test\n here it is"
print(s.splitlines())
s.split(" ")
def tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t\n'):
    """Split a string into a list of lowercase words.

    Punctuation characters are stripped from each token, tokens emptied by
    that stripping are dropped, and (optionally) stop words are removed.

    Parameters
    ----------
    s : str
        Text to tokenize; may span multiple lines.
    stop_words : iterable of str or None
        Words to discard. NOTE: matching happens *before* lowercasing, so a
        capitalized stop word (e.g. "The") is kept -- this mirrors the
        behavior the notebook's asserts rely on.
    punctuation : str
        Characters removed from every token.

    Returns
    -------
    list of str
    """
    # Collapse the text onto one line so a single split(" ") suffices.
    words = " ".join(s.splitlines()).split(" ")
    # Strip punctuation characters from each raw token.
    stripped = ["".join(ch for ch in word if ch not in punctuation) for word in words]
    # Optionally drop stop words (before lowercasing -- see docstring note).
    if stop_words is not None:
        stripped = [word for word in stripped if word not in stop_words]
    # Lowercase and discard tokens emptied by punctuation removal.
    return [word.lower() for word in stripped if word]
# Sanity-check tokenize() against a known sentence.
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
    ['this', 'way', 'that', 'things', 'will', 'end']

# Multi-line input: the opening of "The Waste Land".
# (Restored triple quotes -- the string literal lost its delimiters.)
wasteland = """
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
"""

# NOTE(review): stop_words is a *string* here, so each token is matched as a
# substring of "is the of and"; that happens to work for this input, but a
# list of words would be safer in general.
assert tokenize(wasteland, stop_words='is the of and') == \
    ['april', 'cruellest', 'month', 'breeding', 'lilacs', 'out', 'dead', 'land',
     'mixing', 'memory', 'desire', 'stirring', 'dull', 'roots', 'with', 'spring',
     'rain']
from collections import Counter
def count_words(data):
    """Return a word-count mapping for the words in *data*.

    Parameters
    ----------
    data : iterable of str

    Returns
    -------
    collections.Counter
        Maps each distinct word to its number of occurrences. A Counter
        compares equal to a plain dict of counts, as the notebook's
        assert expects.
    """
    # Counter's constructor tallies an iterable directly; no manual loop needed.
    return Counter(data)
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
def sort_word_counts(wc):
    """Return a list of (word, count) tuples sorted by count, descending.

    Parameters
    ----------
    wc : mapping of str -> int
        Word counts -- either a plain dict or a collections.Counter.

    Returns
    -------
    list of (str, int)
    """
    # Bug fix: the original built Counter(wc) but then called wc.most_common(),
    # which raises AttributeError when wc is a plain dict. Use the Counter copy,
    # which works for both dicts and Counters.
    return Counter(wc).most_common()
sort_word_counts(count_words(tokenize('this and the this from and a a a')))
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
# import file
f = open('mobydick_chapter1.txt', mode = 'r')
data = f.read()
f.close
# get individual word frequencies
count_words(tokenize(data))
swc = sort_word_counts(count_words(tokenize(data, stop_words = ['the','of','and','a','to','in','is','it','that','as'])))
assert swc[0]==('i',43)
assert len(swc)==848
len([float(w[1]) for w in swc])
list(range(10,1,-1))
f = plt.figure(figsize=(10,10))
plt.subplot(1,2,1)
revd = list(reversed(swc))
plt.scatter([n[1] for n in revd], range(len(revd)))
# plt.xlabel('word count')
# plt.box(False)
plt.ylim(-10,900)
plt.xlim(0,45)
plt.yticks([])
plt.title("All words")
plt.subplot(1,2,2)
smallDat = swc[0:50]
revd = list(reversed(smallDat))
plt.scatter([n[1] for n in revd], range(len(revd)))
# # plt.xlabel('word count')
# #plt.box(False)
plt.ylim(-1,50)
plt.yticks(range(50), [w[0] for w in revd])
plt.grid(axis = "y")
plt.title("Top 50 words")
assert True # use this for grading the dotplot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, define the network
Step2: Preprocess
Step3: Counting cycles
Step4: The first list shows $N_l^+ - N_l^-$ and the second list shows $N_l^+ + N_l^-$ for $l \in {0,1,...,5}$ where $N_l^+$ and $N_l^-$ are number of positive and negative simple cycles of length l. For weighted networks, the weight of a cycle is equal to multiplication of the weights of the edges in the cycle.
Step5: Estimating balance ratios defining the sampling algorithm and number of samples needed.
Step6: As you can see, the ratios are not accurate, but good enough. We can also use multiple processes to calculate the ratio.
Step7: The PC I am using has only two cores, so the improvement is not that much. When more cores are available, the algorithm uses all of them.
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
from cycleindex.sampling import nrsampling, vxsampling
from cycleindex import clean_matrix, cycle_count, balance_ratio
gama_pos = np.array(
[[0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[0,0,0,1,0,1,1,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0],
[0,0,1,0,0,0,1,1,0,0,1,1,0,0,0,0],
[0,0,1,0,1,1,0,1,0,0,1,1,1,0,0,0],
[0,0,1,1,0,1,1,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,0,0,0,0,1,0,0,1,0,0,0],
[0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0],
[0,0,0,0,0,1,1,1,0,0,0,1,0,0,0,0],
[0,0,0,0,0,1,1,1,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,1,0,1,1,0,0,0,1,0,0],
[0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0]]
)
gama_neg = np.array(
[[0,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0],
[0,0,1,0,1,1,0,0,1,1,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1],
[1,1,0,0,0,0,0,0,1,0,0,0,1,0,0,1],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0],
[0,1,0,0,0,0,0,0,0,0,1,0,0,0,1,0],
[0,0,0,0,0,0,0,0,1,1,0,0,1,0,1,1],
[1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1],
[0,0,0,0,0,1,0,0,0,0,1,0,0,0,1,1],
[0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,1],
[0,0,0,0,1,0,0,0,1,1,1,1,1,0,0,0],
[0,0,0,0,1,1,0,0,0,0,1,1,1,1,0,0]]
)
gama = gama_pos - gama_neg
print("# nodes: {}".format(len(gama)))
print("# positive edges: {}".format(np.sum(np.where(gama > 0))))
print("# negative edges: {}".format(np.sum(np.where(gama < 0))))
gama_reduced = clean_matrix(gama)
print("# nodes: {}".format(len(gama_reduced)))
print("# positive edges: {}".format(np.sum(np.where(gama_reduced > 0))))
print("# negative edges: {}".format(np.sum(np.where(gama_reduced < 0))))
?cycle_count # run to see the documentation on the pager.
start = time.time()
counts = cycle_count(gama_reduced,5)
print("Runtime: {:.2f}s".format(time.time() - start))
print(counts)
print(np.array(counts)) # Numpy deals with floating-point issues better.
start = time.time()
ratios = balance_ratio(gama_reduced, 5, exact=True)
print("Runtime: {:.2f}s".format(time.time() - start))
print(ratios)
start = time.time()
ratios = balance_ratio(gama_reduced, 5, exact=False, n_samples=3000, parallel=False, sampling_func=vxsampling)
print("Runtime: {:.2f}s".format(time.time() - start))
print(ratios)
start = time.time()
ratios = balance_ratio(gama_reduced, 5, exact=False, n_samples=3000, parallel=True, sampling_func=vxsampling)
print("Runtime: {:.2f}s".format(time.time() - start))
print(ratios)
start = time.time()
ratios = balance_ratio(gama_reduced, 5, exact=False, accuracy=0.01, parallel=True, sampling_func=vxsampling)
print("Runtime: {:.2f}s".format(time.time() - start))
print(ratios)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One of the Classics
Step2: https
Step3: This is the output of all 3 hidden neurons, but what we really want is a category for iris category
Step4: <h1 style="color
Step5: Evaluation
Step6: Save Model in Keras Format
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import matplotlib.pylab as plt
import numpy as np
from distutils.version import StrictVersion
import sklearn
print(sklearn.__version__)
assert StrictVersion(sklearn.__version__ ) >= StrictVersion('0.18.1')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
assert StrictVersion(tf.__version__) >= StrictVersion('1.1.0')
import keras
print(keras.__version__)
assert StrictVersion(keras.__version__) >= StrictVersion('2.0.0')
import pandas as pd
print(pd.__version__)
assert StrictVersion(pd.__version__) >= StrictVersion('0.20.0')
from sklearn.datasets import load_iris
iris = load_iris()
iris.data[0]
print(iris.DESCR)
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
CMAP = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
pd.plotting.scatter_matrix(iris_df, c=iris.target, edgecolor='black', figsize=(15, 15), cmap=CMAP)
plt.show()
# keras.layers.Input?
from keras.layers import Input
inputs = Input(shape=(4, ))
# keras.layers.Dense?
from keras.layers import Dense
# just linear activation (like no activation function at all)
fc = Dense(3)(inputs)
from keras.models import Model
model = Model(input=inputs, output=fc)
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
# this is just random stuff, no training has taken place so far
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
# Functional-API model: 4 input features -> 10-unit hidden layer -> 3-way softmax.
inputs = Input(shape=(4, ))
fc = Dense(10)(inputs)
# Softmax turns the 3 outputs into class probabilities (one per iris species).
predictions = Dense(3, activation='softmax')(fc)
# NOTE(review): `input=`/`output=` are legacy Keras keyword names; modern
# Keras spells them `inputs=`/`outputs=` -- confirm against the installed version.
model = Model(input=inputs, output=predictions)
model.summary()
# Cross-entropy loss matches the softmax/one-hot-label setup used below.
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Untrained prediction on a single sample: output is still essentially random.
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X = np.array(iris.data)
y = np.array(iris.target)
X.shape, y.shape
y[100]
# tiny little pieces of feature engeneering
from keras.utils.np_utils import to_categorical
num_categories = 3
y = to_categorical(y, num_categories)
y[100]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
!rm -r tf_log
tb_callback = keras.callbacks.TensorBoard(log_dir='./tf_log')
# https://keras.io/callbacks/#tensorboard
# To start tensorboard
# tensorboard --logdir=/mnt/c/Users/olive/Development/ml/tf_log
# open http://localhost:6006
%time model.fit(X_train, y_train, epochs=500, validation_split=0.2, callbacks=[tb_callback])
# %time model.fit(X_train, y_train, epochs=500, validation_split=0.2)
model.predict(np.array([[ 5.1, 3.5, 1.4, 0.2]]))
X[0], y[0]
train_loss, train_accuracy = model.evaluate(X_train, y_train)
train_loss, train_accuracy
test_loss, test_accuracy = model.evaluate(X_test, y_test)
test_loss, test_accuracy
# Keras format
model.save('nn-iris.hdf5')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up our source space
Step2: Get a surface-based source space, here with few source points for speed
Step3: Now we create a mixed src space by adding the volume regions specified in the
Step4: View the source space
Step5: We could write the mixed source space with
Step6: Compute the fwd matrix
Step7: Compute inverse solution
Step8: Plot the mixed source estimate
Step9: Plot the surface
Step10: Plot the volume
Step11: Process labels
|
<ASSISTANT_TASK:>
Python Code:
# Author: Annalisa Pascarella <a.pascarella@iac.cnr.it>
#
# License: BSD-3-Clause
import os.path as op
import matplotlib.pyplot as plt
from nilearn import plotting
import mne
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Set dir
data_path = mne.datasets.sample.data_path()
subject = 'sample'
data_dir = op.join(data_path, 'MEG', subject)
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
# Set file names
fname_mixed_src = op.join(bem_dir, '%s-oct-6-mixed-src.fif' % subject)
fname_aseg = op.join(subjects_dir, subject, 'mri', 'aseg.mgz')
fname_model = op.join(bem_dir, '%s-5120-bem.fif' % subject)
fname_bem = op.join(bem_dir, '%s-5120-bem-sol.fif' % subject)
fname_evoked = data_dir + '/sample_audvis-ave.fif'
fname_trans = data_dir + '/sample_audvis_raw-trans.fif'
fname_fwd = data_dir + '/sample_audvis-meg-oct-6-mixed-fwd.fif'
fname_cov = data_dir + '/sample_audvis-shrunk-cov.fif'
labels_vol = ['Left-Amygdala',
'Left-Thalamus-Proper',
'Left-Cerebellum-Cortex',
'Brain-Stem',
'Right-Amygdala',
'Right-Thalamus-Proper',
'Right-Cerebellum-Cortex']
src = mne.setup_source_space(subject, spacing='oct5',
add_dist=False, subjects_dir=subjects_dir)
vol_src = mne.setup_volume_source_space(
subject, mri=fname_aseg, pos=10.0, bem=fname_model,
volume_label=labels_vol, subjects_dir=subjects_dir,
add_interpolator=False, # just for speed, usually this should be True
verbose=True)
# Generate the mixed source space
src += vol_src
print(f"The source space contains {len(src)} spaces and "
f"{sum(s['nuse'] for s in src)} vertices")
src.plot(subjects_dir=subjects_dir)
nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject)
src.export_volume(nii_fname, mri_resolution=True, overwrite=True)
plotting.plot_img(nii_fname, cmap='nipy_spectral')
fwd = mne.make_forward_solution(
fname_evoked, fname_trans, src, fname_bem,
mindist=5.0, # ignore sources<=5mm from innerskull
meg=True, eeg=False, n_jobs=1)
del src # save memory
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
print(f"The fwd source space contains {len(fwd['src'])} spaces and "
f"{sum(s['nuse'] for s in fwd['src'])} vertices")
# Load data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname_evoked, condition=condition,
baseline=(None, 0))
noise_cov = mne.read_cov(fname_cov)
snr = 3.0 # use smaller SNR for raw data
inv_method = 'dSPM' # sLORETA, MNE, dSPM
parc = 'aparc' # the parcellation to use, e.g., 'aparc' 'aparc.a2009s'
loose = dict(surface=0.2, volume=1.)
lambda2 = 1.0 / snr ** 2
inverse_operator = make_inverse_operator(
evoked.info, fwd, noise_cov, depth=None, loose=loose, verbose=True)
del fwd
stc = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori=None)
src = inverse_operator['src']
initial_time = 0.1
stc_vec = apply_inverse(evoked, inverse_operator, lambda2, inv_method,
pick_ori='vector')
brain = stc_vec.plot(
hemi='both', src=inverse_operator['src'], views='coronal',
initial_time=initial_time, subjects_dir=subjects_dir,
brain_kwargs=dict(silhouette=True), smoothing_steps=7)
brain = stc.surface().plot(initial_time=initial_time,
subjects_dir=subjects_dir, smoothing_steps=7)
fig = stc.volume().plot(initial_time=initial_time, src=src,
subjects_dir=subjects_dir)
# Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi
labels_parc = mne.read_labels_from_annot(
subject, parc=parc, subjects_dir=subjects_dir)
label_ts = mne.extract_label_time_course(
[stc], labels_parc, src, mode='mean', allow_empty=True)
# plot the times series of 2 labels
fig, axes = plt.subplots(1)
axes.plot(1e3 * stc.times, label_ts[0][0, :], 'k', label='bankssts-lh')
axes.plot(1e3 * stc.times, label_ts[0][-1, :].T, 'r', label='Brain-stem')
axes.set(xlabel='Time (ms)', ylabel='MNE current (nAm)')
axes.legend()
mne.viz.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we've got our array and we've got a method or two that we can use to create them. Let's learn a bit more about some of the functions that arrays give us.
Step2: First let's check out the type we've got; you can see it's an ndarray, which is the fundamental array in numpy. We can get the data type of the array. It's worth elaborating a bit on dtypes, or data types.
Step3: Data type Description
Step4: It's worth stating that numpy arrays can be created on the fly simply by passing a list into the 'array' function.
Step5: This allows for consistent memory allocation and vectorization.
Step6: If you have any worry about what kind of array you're creating, just specify the type.
Step7: Numpy arrays also have some nifty functions that make our lives a lot easier. We can get the size, which is functionally equivalent to len in raw python. We can get the shape, which gives us a hint as to where we'll be going next: towards multidimensional arrays.
Step8: We can also get things like the maximum, the minimum, the mean, the standard deviation, and variance. This makes it super easy to perform calculations quickly.
Step9: We can also just get the locations of certain values with argmin, argmax.
Step10: Now that we've seen how we can play around with arrays and some of their helper functions, we can elaborate on all the different ways of creating them. We've seen arange, but we can actually do some cool things with it. We can count up or down. We can also fill in between two numbers. Say we want 1 to 2 broken up into 5 numbers.
Step11: What about converting that to integers? We can use the astype method to convert it.
Step12: a 32bit float or any other type
Step13: Now there is one type that hasn't been listed yet and is a bit difficult to describe: nan, or "not a number". This will certainly come up and can throw off your analysis. NaNs are technically floats and can be created when you divide two floating-point zeros.
Step14: If you've got a nan value in your array, it will throw off all your calculations. You've got to explicitly check for it in numpy.
Step15: We're going to build on this knowledge, but at this point all that we need to know is the following:
|
<ASSISTANT_TASK:>
Python Code:
import sys
print(sys.version)
import numpy as np
print(np.__version__)
npa = np.arange(25)
npa
print(type(npa))
print(npa.dtype)
np.array(range(20))
np.array([1.0,0,2,3])
np.array([1.0,0,2,3]).dtype
np.array([True,2,2.0]).dtype
True == 1
np.array([True,2,2.0])
np.array([True, 1, 2.0], dtype='bool_')
np.array([True, 1, 2.0], dtype='float_')
np.array([True, 1, 2.0], dtype='uint8')
npa
npa.size
npa.shape
print(npa.min(), npa.max())
npa.std()
npa.var()
npa.argmin()
npa.argmax()
np.arange(1,10)
np.arange(10,1,-1)
np.linspace(1,2,5)
np.linspace(0,10,11)
np.linspace(0,10,11).astype('int')
np.linspace(0,10,11).astype(np.float32)
np.float32(5)
problem = np.array([0.])/np.array([0.])
# python throws an error because it knows something is wrong
problem[0]
np.nan
ar = np.linspace(0,10,11).astype(np.float32)
ar
ar[0] = np.nan
ar
ar.min()
ar.max()
ar.mean()
np.isnan(ar.max())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Quick Python Refresher
Step2: 2. pandas Series and DataFrames
Step3: 2b. pandas DataFrames
Step4: 3. Loading data
Step5: 4. Selecting and slicing data from a DataFrame
Step6: 4a. Select DataFrame's column(s) by name
Step7: 4b. Select row(s) by label
Step8: 4c. Select by (integer) position
Step9: 4d. Select/filter by value
Step10: 5. Merge and concatenate
Step11: 5b. Concatenating DataFrames
Step12: 6. Grouping and summarizing
Step13: 7. Vectorization, Map, and Apply
Step14: .map() applies a function element-wise on a series
Step15: A lambda function is a simple, one-off, anonymous function. You can't call it again later because it doesn't have a name. It just lets you repeatedly perform some operation across a series of values (in our case, a column in our dataframe) using a minimal amount of code.
Step16: .apply() is like .map(), but it works on a row or column basis on an entire DataFrame (specify the axis)
Step17: .applymap() works element-wise on an entire DataFrame. This is like doing a .map() to each column in the DataFrame.
Step18: 8. Hierarchical Indexing
Step19: So what's the benefit of multi-indexing like this? Your dataframe becomes a very fast look-up table. A good index is one that gives you a useful handle to select rows.
Step20: Stacking, unstacking, and pivoting are common ways to reshape data with a MultiIndex. Depending on the form your original data come in, they may be useful in the data prep phase to make your data easier to analyze. We'll discuss data prep, cleaning, and (review) visualization next week.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
# integers (int)
x = 100
type(x)
# floating-point numbers (float)
x = 100.5
type(x)
# sequence of characters (str)
x = 'Los Angeles, CA 90089'
len(x)
# list of items
x = [1, 2, 3, 'USC']
len(x)
# sets are unique
x = {2, 2, 3, 3, 1}
x
# tuples are immutable sequences
latlng = (34.019425, -118.283413)
type(latlng)
# you can unpack a tuple
lat, lng = latlng
type(lat)
# dictionary of key:value pairs
person = {'first_name': 'Geoff', 'last_name': 'Boeing', 'employer': 'USC'}
type(person)
# you can convert types
x = '100'
print(type(x))
y = int(x)
print(type(y))
# you can loop through an iterable, such as a list or tuple
for coord in latlng:
print('Current coordinate is:', coord)
# loop through a dictionary keys and values as tuples
for key, value in person.items():
print(key, value)
# booleans are trues/falses
x = 101
x > 100
# use two == for equality and one = for assignment
x == 100
# if, elif, else for conditional branching execution
x = 101
if x > 100:
print('Value is greater than 100.')
elif x < 100:
print('Value is less than 100.')
else:
print('Value is 100.')
# use functions to encapsulate and reuse bits of code
def convert_items(my_list, new_type=str):
    """Return a new list with every element of *my_list* cast to *new_type*."""
    return list(map(new_type, my_list))
l = [1, 2, 3, 4]
convert_items(l)
# a pandas series is based on a numpy array: it's fast, compact, and has more functionality
# perhaps most notably, it has an index which allows you to work naturally with tabular data
my_list = [8, 5, 77, 2]
my_series = pd.Series(my_list)
my_series
# look at a list-representation of the index
my_series.index.tolist()
# look at the series' values themselves
my_series.values
# what's the data type of the series' values?
type(my_series.values)
# what's the data type of the individual values themselves?
my_series.dtype
# a dict can contain multiple lists and label them
my_dict = {'hh_income' : [75125, 22075, 31950, 115400],
'home_value' : [525000, 275000, 395000, 985000]}
my_dict
# a pandas dataframe can contain one or more columns
# each column is a pandas series
# each row is a pandas series
# you can create a dataframe by passing in a list, array, series, or dict
df = pd.DataFrame(my_dict)
df
# the row labels in the index are accessed by the .index attribute of the DataFrame object
df.index.tolist()
# the column labels are accessed by the .columns attribute of the DataFrame object
df.columns
# the data values are accessed by the .values attribute of the DataFrame object
# this is a numpy (two-dimensional) array
df.values
# load a data file
# note the relative filepath! where is this file located?
# use dtype argument if you don't want pandas to guess your data types
df = pd.read_csv('../../data/world_cities.csv')
# dataframe shape as rows, columns
df.shape
# or use len to just see the number of rows
len(df)
# view the dataframe's "head"
df.head()
# view the dataframe's "tail"
df.tail()
# CHEAT SHEET OF COMMON TASKS
# Operation Syntax Result
#------------------------------------------------------------
# Select column by name df[col] Series
# Select columns by name df[col_list] DataFrame
# Select row by label df.loc[label] Series
# Select row by integer location df.iloc[loc] Series
# Slice rows by label df.loc[a:c] DataFrame
# Select rows by boolean vector df[mask] DataFrame
# select a single column by column name
# this is a pandas series
df['resident_pop']
# select multiple columns by a list of column names
# this is a pandas dataframe that is a subset of the original
df[['resident_pop', 'built_up_area']]
# create a new column by assigning df['new_col'] to some values
df['pop_density'] = df['resident_pop'] / df['built_up_area']
# you can do vectorized math operations on any numeric columns
df['pop_density_1000s'] = df['pop_density'] / 1000
# inspect the results
df[['resident_pop', 'built_up_area', 'pop_density', 'pop_density_1000s']].head()
# use .loc to select by row label
# returns the row as a series whose index is the dataframe column names
df.loc[0]
# use .loc to select single value by row label, column name
df.loc[0, 'resident_pop']
# slice of rows from label 5 to label 7, inclusive
# this returns a pandas dataframe
df.loc[5:7]
# slice of rows from label 5 to label 7, inclusive
# slice of columns from uc_names to world_subregion, inclusive
df.loc[1:3, 'uc_names':'world_subregion']
# subset of rows from with labels in list
# subset of columns with names in list
df.loc[[1, 3], ['uc_names', 'world_subregion']]
# you can use a column of identifiers as the index (indices do not *need* to be unique)
# uc_id values uniquely identify each row (but verify!)
df = df.set_index('uc_id')
df.index.is_unique
df.head()
# .loc works by label, not by position in the dataframe
try:
df.loc[0]
except KeyError as e:
print('label not found')
# the index now contains uc_id values, so you have to use .loc accordingly to select by row label
df.loc[14]
# get the row in the zero-th position in the dataframe
df.iloc[0]
# you can slice as well
# note, while .loc is inclusive, .iloc is not
# get the rows from position 0 up to but not including position 3 (ie, rows 0, 1, and 2)
df.iloc[0:3]
# get the value from the row in position 3 and the column in position 2 (zero-indexed)
df.iloc[3, 2]
# filter the dataframe by urban areas with more than 25 million residents
df[df['resident_pop'] > 25000000]
# what exactly did that do? let's break it out.
df['resident_pop'] > 25000000
# essentially a true/false mask that filters by value
mask = df['resident_pop'] > 25000000
df[mask]
# you can chain multiple conditions together
# pandas logical operators are: | for or, & for and, ~ for not
# these must be grouped by using parentheses due to order of operations
mask = (df['resident_pop'] > 25000000) & (df['built_up_area'] > 2000)
df[mask]
# which urban areas have 25 million residents and either 2000 km2 area or >200 meter avg elevation?
mask1 = df['resident_pop'] > 25000000
mask2 = df['built_up_area'] > 2000
mask3 = df['avg_elevation'] > 200
mask = mask1 & (mask2 | mask3)
df[mask]
# see the mask
mask
# ~ means not... it essentially flips trues to falses and vice-versa
~mask
# which urban areas contain a city with "New " in its name?
mask = df['uc_names'].str.contains('New ')
df.loc[mask, ['uc_names', 'country']]
# now it's your turn
# create a new subset dataframe containing all urban areas in the US with >5 million residents
# how many rows did you get?
# create a subset dataframe with climate related variables
climate_cols = ['core_city', 'avg_elevation', 'avg_precipitation', 'avg_temperature', 'climate_classes']
df_climate = df[climate_cols].sample(2000).sort_values('avg_temperature', ascending=True)
df_climate.head()
# create a subset dataframe with economic variables
econ_cols = ['core_city', 'gdp_ppp', 'night_light_em', 'un_income_class']
df_econ = df[econ_cols].sample(2000).sort_values('gdp_ppp', ascending=False)
df_econ.head()
# merge them together, aligning rows based on their labels in the index
df_merged = pd.merge(left=df_econ, right=df_climate, how='inner', left_index=True, right_index=True)
df_merged.head()
# reset df_econ's index
df_econ = df_econ.reset_index()
df_econ.head()
# merge them together, aligning rows based on left's column values and right's index labels
df_merged = pd.merge(left=df_econ, right=df_climate, how='inner', left_on='uc_id', right_index=True)
df_merged
# inner join: only retain rows where a match is found in both dataframes
# left join: retain all rows in left and bring in attributes from rows that matched in right
# right join: retain all rows in right and bring in attributes from rows that matched in left
# outer join: retain all rows in both dataframes
# create two subset dataframes
df_us = df[df['country'] == 'united_states']
df_uk = df[df['country'] == 'united_kingdom']
# concatenate them together
df_us_uk = pd.concat([df_us, df_uk], sort=False)
df_us_uk
# calculate per capita GDP then group the rows by region
df['gdp_percap'] = df['gdp_ppp'] / df['resident_pop']
groups = df.groupby('world_subregion')
# what is the median per capita GDP across the urban areas in each region?
groups['gdp_percap'].median().sort_values(ascending=False)
# look at several columns' medians by region
groups[['gdp_percap', 'avg_temperature', 'pop_density']].median()
# now it's your turn
# re-group the urban areas by country and find the highest then lowest urban area avg elevation in each country
# calculate resident population z-scores, vectorized
z = (df['resident_pop'] - df['resident_pop'].mean()) / df['resident_pop'].std()
z
mean = df['resident_pop'].mean()
std = df['resident_pop'].std()
def calculate_zscore(x, mean=mean, std=std):
    """Standardize *x* against the resident-population mean/std captured at definition time."""
    deviation = x - mean
    return deviation / std
# map the function to the series
z = df['resident_pop'].map(calculate_zscore)
mean = df['resident_pop'].mean()
std = df['resident_pop'].std()
# map a lambda function to the series
z = df['resident_pop'].map(lambda x: (x - mean) / std)
%%timeit
# check timings of vectorized vs map
z = (df['resident_pop'] - df['resident_pop'].mean()) / df['resident_pop'].std()
%%timeit
mean = df['resident_pop'].mean()
std = df['resident_pop'].std()
z = df['resident_pop'].map(lambda x: (x - mean) / std)
# find the difference between the min and max values in each column (ie, row-wise)
df_subset = df[['area', 'built_up_area', 'avg_elevation']]
df_subset.apply(lambda col: col.max() - col.min(), axis='rows')
# find the difference between the min and max values in each row (ie, column-wise)
df_subset.apply(lambda row: row.max() - row.min(), axis='columns')
# now it's your turn
# how would you vectorize the above .apply() methods to make the calculation faster?
# then compare the timings of your vectorized version to the .apply version above
# this uses applymap, but you could (should) vectorize it
# we'll see that next week
df_subset = df[['country', 'world_region', 'world_subregion']]
df_subset.applymap(lambda x: x.upper().replace('_', ' '))
df = df.reset_index().set_index(['country', 'core_city']).sort_index()
df
# index isn't unique, but doesn't need to be
# you could make it unique by adding uc_id as a 3rd level, for instance
df.index.is_unique
# select all urban areas in china (ie, first index level)
df.loc['china']
# or select rows by multiple index levels
# lots of unnamed core cities in china in this dataset
df.loc[('china', 'unnamed')]
# select every row with an unnamed core city in the dataset
# that is, only select by the 2nd level of the index
# the first : slices everything in the first index level, and the trailing : slices all columns
df.loc[pd.IndexSlice[:, ['unnamed']], :]
# select every row in argentina or spain with core city of cordoba
df.loc[(['argentina', 'spain'], ['cordoba']), :]
# now it's your turn
# multi-index the dataframe by world region, subregion, and uc_id
# use .loc to select all the rows from southern africa and eastern europe
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us solve the following problem
Step2: Firstly, define initial domain.
Step3: To update the domain we can define a function that returns a new domain, or None if the domain should not be updated. In our case the function update_domain accepts the research results as a pandas.DataFrame, takes the model with the highest accuracy, and creates a new domain with that model and n_epochs=2.
Step4: We add the update function into the research as a parameter of the update_domain method. The each parameter defines how often the function will be applied. If each='last', the update function is applied when the current domain is exhausted. The n_updates parameter defines the number of domain updates. All other parameters are used as kwargs for the update_domain function.
Step5: Resulting pandas.DataFrame will have 'update' column with the number of updates before we get current config.
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import shutil
import numpy as np
import matplotlib
%matplotlib inline
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
sys.path.append('../../..')
from batchflow import Pipeline, B, C, V, D, L
from batchflow.opensets import CIFAR10
from batchflow.models.torch import VGG7, VGG16, ResNet18
from batchflow.research import Research, Option, Results, PrintLogger, RP, RR
BATCH_SIZE=64
ds = CIFAR10()
def clear_previous_results(res_name):
if os.path.exists(res_name):
shutil.rmtree(res_name)
model_config={
'inputs/images/shape': B('image_shape'),
'inputs/labels/classes': D('num_classes'),
'inputs/labels/name': 'targets',
'initial_block/inputs': 'images'
}
train_pipeline = (Pipeline()
.set_dataset(C('dataset'))
.init_variable('loss')
.init_model('dynamic', C('model'), 'conv', config=model_config)
.to_array(dtype='float32')
.train_model('conv', B('images'), B('labels'),
fetches='loss', save_to=V('loss', mode='w'))
.run_later(batch_size=BATCH_SIZE, n_epochs=C('n_epochs'))
)
test_pipeline = (Pipeline()
.init_variable('predictions')
.init_variable('metrics')
.import_model('conv', C('import_from'))
.to_array(dtype='float32')
.predict_model('conv', B('images'),
fetches='predictions', save_to=V('predictions'))
.gather_metrics('class', targets=B('labels'), predictions=V('predictions'),
fmt='logits', axis=-1, save_to=V('metrics', mode='a'))
.run_later(batch_size=BATCH_SIZE, n_epochs=1)) << CIFAR10().test
domain = Option('model', [VGG7, VGG16, ResNet18]) * Option('n_epochs', [1])
def update_domain(results):
    """Build the next research domain: the best-accuracy model so far, trained for 2 epochs."""
    winner = results.iloc[results['accuracy'].idxmax()].model
    return Option('model', [winner]) * Option('n_epochs', [2])
def create_dataset(config):
    """Choose the dataset for a run: a 1000-item subset before the first domain update, the full train set afterwards."""
    if config['update'] == 0:
        chosen = ds.train.create_subset(np.arange(1000))
    else:
        chosen = ds.train
    return {'dataset': chosen}
research = (Research()
.add_pipeline(train_pipeline, variables='loss', name='train_ppl')
.add_pipeline(test_pipeline, run=False, name='test_ppl',
import_from=RP('train_ppl'), execute='last')
.get_metrics(pipeline='test_ppl', metrics_var='metrics', metrics_name='accuracy',
returns='accuracy', execute='last')
.init_domain(domain)
.update_config(create_dataset)
.update_domain(update_domain, each='last', n_updates=1,
results=RR(names='test_ppl_metrics', use_alias=False).df))
res_name = 'dynamic_domain_research'
clear_previous_results(res_name)
research.run(n_iters=None, name=res_name, bar=True)
acc = research.load_results(names='test_ppl_metrics', update=1).df
print('Best model: ', acc.model.values[0])
print('Final accuracy:', acc.accuracy.values[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import Policy, RL agent, ...
Step3: Define a Callback Function
Step5: Create and wrap the environment
Step6: Define and train the PPO agent
Step9: Plotting helpers
Step10: Make a GIF of a Trained Agent
Step11: Evaluate the trained agent
|
<ASSISTANT_TASK:>
Python Code:
import stable_baselines
stable_baselines.__version__
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from stable_baselines.bench import Monitor
from stable_baselines.common.callbacks import BaseCallback
from stable_baselines.results_plotter import load_results, ts2xy
from stable_baselines import results_plotter
sns.set_context("talk")
class SaveOnBestTrainingRewardCallback(BaseCallback):
    """Callback for saving a model (the check is done every ``check_freq`` steps)
    based on the training reward (in practice, we recommend using ``EvalCallback``).

    :param check_freq: (int)
    :param log_dir: (str) Path to the folder where the model will be saved.
      It must contain the file created by the ``Monitor`` wrapper.
    :param verbose: (int)
    """
    def __init__(self, check_freq: int, log_dir: str, verbose=1):
        super(SaveOnBestTrainingRewardCallback, self).__init__(verbose)
        self.check_freq = check_freq
        self.log_dir = log_dir
        # Best-model checkpoints are written under <log_dir>/best_model.
        self.save_path = os.path.join(log_dir, 'best_model')
        # -inf so the first evaluated mean reward always becomes the new best.
        self.best_mean_reward = -np.inf

    def _init_callback(self) -> None:
        # Create folder if needed
        if self.save_path is not None:
            os.makedirs(self.save_path, exist_ok=True)

    def _on_step(self) -> bool:
        # Only evaluate every `check_freq` calls to keep the overhead low.
        if self.n_calls % self.check_freq == 0:
            # Retrieve training reward from the Monitor wrapper's log file.
            x, y = ts2xy(load_results(self.log_dir), 'timesteps')
            if len(x) > 0:
                # Mean training reward over the last 100 episodes
                mean_reward = np.mean(y[-100:])
                if self.verbose > 0:
                    print("Num timesteps: {}".format(self.num_timesteps))
                    print("Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}".format(self.best_mean_reward, mean_reward))
                # New best model, you could save the agent here
                if mean_reward > self.best_mean_reward:
                    self.best_mean_reward = mean_reward
                    # Example for saving best model
                    if self.verbose > 0:
                        print("Saving new best model to {}".format(self.save_path))
                    self.model.save(self.save_path)
        # Returning True keeps training running; False would stop it early.
        return True
from gym import spaces
import random
# https://stable-baselines.readthedocs.io/en/master/guide/custom_env.html
# https://towardsdatascience.com/creating-a-custom-openai-gym-environment-for-stock-trading-be532be3910e
POSITION_MIN = -1000.
POSITION_MAX = 1000.
VELOCITY_MIN = -100.
VELOCITY_MAX = 100.
ACTION_MIN = -2.
ACTION_MAX = 2.
DT = 0.1
MASS = 0.1
MAX_STEPS = 1000
class PointMassEnv(gym.Env):
    """Custom Environment that follows gym interface.

    A 1-D point mass driven by a scalar force action; the reward is a
    Gaussian bump peaking at position 0 (see ``_reward``), so the agent is
    encouraged to drive the mass toward the origin and keep it there.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, mass=MASS, dt=DT):
        super(PointMassEnv, self).__init__()  # Define action and observation space
        self.mass = mass
        self.dt = dt
        # Kinematic state; populated by reset() before the first step().
        self.position = None
        self.velocity = None
        self.current_step = None
        self.viewer = None
        # Actions: force
        self.action_space = spaces.Box(low=ACTION_MIN, high=ACTION_MAX, shape=(1,), dtype=np.float32)
        # State: (position, velocity)
        # NOTE: observations are ordered (velocity, position), matching these bounds.
        self.observation_space = spaces.Box(low=np.array([VELOCITY_MIN, POSITION_MIN]), high=np.array([VELOCITY_MAX, POSITION_MAX]), dtype=np.float32)

    def _reward(self, position):
        # Unnormalized Gaussian centered at the origin.
        # https://en.wikipedia.org/wiki/Gaussian_function
        a = 1.  # The height of the curve's peak
        b = 0.  # The position of the center of the peak
        c = 100.  # The width of the "bell"
        x = position
        return a * np.exp(-(x - b)**2/(2.*c**2))

    def step(self, action):
        # Execute one time step within the environment
        self.current_step += 1
        # Kinetics of point mass
        force = action[0]
        acceleration = force / self.mass
        # Semi-implicit Euler: velocity is updated first, then position is
        # advanced with the *new* velocity.
        self.velocity += acceleration * self.dt
        self.position += self.velocity * self.dt
        #print("force: {}, acceleration: {}, velocity: {}, position: {}".format(force, acceleration, self.velocity, self.position))
        obs = np.array([self.velocity, self.position])
        # Compute reward and done
        reward = self._reward(self.position)
        done = self.current_step > MAX_STEPS
        return obs, reward, done, {}

    def reset(self):
        # Reset the state of the environment to an initial state
        self.position = random.uniform(POSITION_MIN, POSITION_MAX)
        self.velocity = 0.
        self.current_step = 0
        return np.array([self.velocity, self.position])

    # def render(self, mode='human', close=False):
    #     # Render the environment to the screen
    #     print(self.velocity, self.position)
    #
    #
    # def close(self):
    #     pass

    def render(self, mode='human'):
        screen_width = 600
        screen_height = 400
        # Map world x-coordinates onto screen pixels.
        world_width = POSITION_MAX - POSITION_MIN
        scale = screen_width/world_width
        carty = 100  # TOP OF CART
        cartwidth = 50.0
        cartheight = 30.0
        if self.viewer is None:
            # Lazily build the viewer and the static geometry on first call.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_width, screen_height)
            l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
            axleoffset = cartheight / 4.0
            cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            self.carttrans = rendering.Transform()
            cart.add_attr(self.carttrans)
            self.viewer.add_geom(cart)
            self.track = rendering.Line((0, carty), (screen_width, carty))
            self.track.set_color(0, 0, 0)
            self.viewer.add_geom(self.track)
        if self.position is None:
            # Nothing to draw before reset() has been called.
            return None
        x = self.position
        cartx = x * scale + screen_width / 2.0  # MIDDLE OF CART
        self.carttrans.set_translation(cartx, carty)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')

    def close(self):
        if self.viewer:
            self.viewer.close()
            self.viewer = None
env = PointMassEnv()
env.reset()
#env = PointMassEnv()
#env.reset()
#a = env.action_space.sample()
#s = env.step(a)
#a, s
##env = gym.make('CartPole-v1')
#env = gym.make('MountainCarContinuous-v0')
#env.reset()
#a = env.action_space.sample()
#s = env.step(a)
#a, s
#env.action_space.sample()
y = []
T = range(1000)
for t in T:
action = np.array([2.])
next_velocity, next_pos = env.step(action)[0]
y.append(next_pos)
plt.plot(list(T), y);
#x = np.arange(-100, 100, 0.1)
#y = np.array([env._reward(_x) for _x in x])
#plt.plot(x, y);
# Create log dir
log_dir = "/tmp/gym/"
os.makedirs(log_dir, exist_ok=True)
# Create and wrap the environment
#env = gym.make('CartPole-v1')
env = PointMassEnv()
# Logs will be saved in log_dir/monitor.csv
env = Monitor(env, log_dir)
env = DummyVecEnv([lambda: env]) # PPO2 requires a vectorized environment to run
# Create the callback: check every 1000 steps
callback = SaveOnBestTrainingRewardCallback(check_freq=1000, log_dir=log_dir)
%%time
model = PPO2(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=100000, callback=callback)
# Helper from the library
results_plotter.plot_results([log_dir], 1e5, results_plotter.X_TIMESTEPS, "PPO Point Mass")
def moving_average(values, window):
    """Smooth values by doing a moving average.

    :param values: (numpy array)
    :param window: (int) number of samples averaged per output point
    :return: (numpy array) of length ``len(values) - window + 1``
    """
    # Uniform weights summing to 1; 'valid' keeps only full-window positions.
    weights = np.repeat(1.0, window) / window
    return np.convolve(values, weights, 'valid')
def plot_results(log_folder, title='Learning Curve'):
    """Plot cumulative episode rewards against timesteps for a monitored run.

    :param log_folder: (str) the save location of the results to plot
    :param title: (str) the title of the task to plot
    """
    timesteps, rewards = ts2xy(load_results(log_folder), 'timesteps')
    # Optional de-noising: smooth rewards with moving_average(rewards, window=50)
    # and truncate timesteps to the same length before plotting.
    fig = plt.figure(title, figsize=(16, 6))
    plt.plot(timesteps, rewards)
    plt.xlabel('Number of Timesteps')
    plt.ylabel('Rewards')
    plt.title(title + " Smoothed")
    plt.show()
import imageio
images = []
obs = model.env.reset()
img = model.env.render(mode='rgb_array')
for i in range(350):
images.append(img)
action, _ = model.predict(obs)
obs, _, _ ,_ = model.env.step(action)
img = model.env.render(mode='rgb_array')
imageio.mimsave('ppo_point_mass_env.gif', [np.array(img) for i, img in enumerate(images) if i%2 == 0], fps=29)
import IPython
from IPython.display import Image
Image(open('ppo_point_mass_env.gif','rb').read()) # https://stackoverflow.com/questions/61110188/how-to-display-a-gif-in-jupyter-notebook-using-google-colab
env._max_episode_steps = 1000
reward_list = []
NUM_EPISODES = 100
for episode_index in range(NUM_EPISODES):
reward_sum = 0
obs = env.reset()
done = False
while not done:
action, _states = model.predict(obs)
obs, reward, done, info = env.step(action)
reward_sum += reward
#env.render() # Cannot render on Google Colab
reward_list.append(reward_sum)
print("Mean reward:", sum(reward_list) / NUM_EPISODES)
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Chassagnole2002
Step2: A Network object contains other objects stored in arrays
Step3: The following calls are required before generating the jacobians for the tracer and concentration perturbation.
Step4: To find the jacobian that accounts for the tracers dynamics the algorithm first searches for the steady state of the model. At steady state the probability for a labelled molecule $A^t$ to be transformed through a reaction $v^+$ is proportional to the fraction of $A$ that is labelled. The tracer reaction releases labelled carbons that are shared between the substrate of the reaction proportionally to their stoichiometry and to the number of carbons they contain.
Step5: Model Teusink 2000
Step6: Model Mosca 2012
Step7: Model Curto 1998
|
<ASSISTANT_TASK:>
Python Code:
from imp import reload
import re
import numpy as np
from scipy.integrate import ode
import NetworkComponents
chassagnole = NetworkComponents.Network("chassagnole2002")
chassagnole.readSBML("./published_models/Chassagnole2002.xml")
chassagnole.readInformations("./published_models/Chassagnole2002_info.csv")
chassagnole.separateForwardBackwardFluxes()
chassagnole.updateNetwork()
chassagnole.generateDerivatives()
chassagnole.generateRates()
chassagnole.testCarbonBalance()
Jtracer = chassagnole.generateTracerJacobian()
Jperturbation = chassagnole.generatePerturbationJacobian()
tauc,Tc = chassagnole.computeCharacteristicTimes("perturbation",method="integration")
taut,Tt = chassagnole.computeCharacteristicTimes("tracer",method="inverseJacobian")
print("tau_c = %f s"%(tauc))
print("tau_t = %f s"%(taut))
print("T_c = %f s"%(Tc))
print("T_t = %f s"%(Tt))
teusink = NetworkComponents.Network("Teusink2000")
teusink.readSBML("./published_models/Teusink2000.xml")
teusink.readInformations("./published_models/Teusink2000_info.csv")
teusink.separateForwardBackwardFluxes()
teusink.updateNetwork()
teusink.generateDerivatives()
teusink.generateRates()
teusink.testCarbonBalance()
Jtracer = teusink.generateTracerJacobian()
Jperturbation = teusink.generatePerturbationJacobian()
tauc,Tc = teusink.computeCharacteristicTimes("perturbation",method="integration")
taut,Tt = teusink.computeCharacteristicTimes("tracer",method="integration")
print("tau_c = %f s"%(tauc*60))
print("tau_t = %f s"%(taut*60))
print("T_c = %f s"%(Tc*60))
print("T_t = %f s"%(Tt*60))
mosca = NetworkComponents.Network("Mosca2012")
mosca.readSBML("./published_models/Mosca2012.xml")
mosca.readInformations("./published_models/Mosca2012_info.csv")
mosca.separateForwardBackwardFluxes()
mosca.updateNetwork()
mosca.generateDerivatives()
mosca.generateRates()
mosca.testCarbonBalance()
Jtracer = mosca.generateTracerJacobian()
Jperturbation = mosca.generatePerturbationJacobian()
tauc,Tc = mosca.computeCharacteristicTimes("perturbation",method="integration")
taut,Tt = mosca.computeCharacteristicTimes("tracer",method="inverseJacobian")
print("tau_c = %f s"%(tauc*60))
print("tau_t = %f s"%(taut*60))
print("T_c = %f s"%(Tc*60))
print("T_t = %f s"%(Tt*60))
curto = NetworkComponents.Network("Curto1998")
curto.readSBML("./published_models/Curto1998.xml")
curto.readInformations("./published_models/Curto1998_info.csv")
curto.separateForwardBackwardFluxes()
curto.updateNetwork()
curto.generateDerivatives()
curto.generateRates()
curto.testCarbonBalance()
Jtracer = curto.generateTracerJacobian()
Jperturbation = curto.generatePerturbationJacobian()
tauc,Tc = curto.computeCharacteristicTimes("perturbation",method="inverseJacobian")
taut,Tt = curto.computeCharacteristicTimes("tracer",method="inverseJacobian")
print("tau_c = %f s"%(tauc*60))
print("tau_t = %f s"%(taut*60))
print("T_c = %f s"%(Tc*60))
print("T_t = %f s"%(Tt*60))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The sex and race columns contain potentially interesting information on how gun deaths in the US vary by gender and race. Exploring both of these columns can be done with a similar dictionary counting technique to what we did earlier.
Step2: However, our analysis only gives us the total number of gun deaths by race in the US. Unless we know the proportion of each race in the US, we won't be able to meaningfully compare those numbers.
Step3: Finding
|
<ASSISTANT_TASK:>
Python Code:
import csv
data = list(csv.reader(open('guns.csv', 'r')))
print(data[:5])
#removing header row
headers = data[:1]
data = data[1:]
print(data[:5])
#count in the dictionary of how many times each element occurs in the year column
years = [each[1] for each in data]
years
year_counts = {}
for each in years:
if each in year_counts:
year_counts[each] += 1
else:
year_counts[each] = 1
print(year_counts)
#Let's see if gun deaths in the US change by month and year
import datetime
dates = [datetime.datetime(year=int(each[1]), month=int(each[2]), day=1) for each in data]
date_counts = {}
for each in dates:
if each in date_counts:
date_counts[each] += 1
else:
date_counts[each] = 1
dates[:5]
sex_counts = {}
race_counts = {}
for each in data:
sex = each[5]
if sex in sex_counts:
sex_counts[sex] += 1
else:
sex_counts[sex] = 1
for each in data:
race = each[7]
if race in race_counts:
race_counts[race] += 1
else:
race_counts[race] = 1
print(race_counts)
print(sex_counts)
f = open ('census.csv', 'r')
census = list(csv.reader(f))
census
mapping = {
'Asian/Pacific Islander': int(census[1][14]) + int(census[1][15]),
'Black': int(census[1][12]),
'Native American/Native Alaskan': int(census[1][13]),
'Hispanic': int(census[1][11]),
'White': int(census[1][10])
}
race_per_hundredk = {}
for key, value in race_counts.items():
result = race_counts[key] / mapping[key] * 100000
race_per_hundredk[key] = result
race_per_hundredk
#We can filter our results, and restrict them to the Homicide intent
intents = [each[3] for each in data]
races = [each[7] for each in data]
homicide_race_counts = {}
for i, each in enumerate(races):
if intents[i] == 'Homicide':
if each not in homicide_race_counts:
homicide_race_counts[each] = 0
else:
homicide_race_counts[each] += 1
homicide_race_counts
homicide_race_per_hundredk = {}
for key, value in homicide_race_counts.items():
result = homicide_race_counts[key] / mapping[key] * 100000
homicide_race_per_hundredk[key] = result
homicide_race_per_hundredk
month_homicide_rate = {}
months = [int(each[2]) for each in data]
# Count homicide gun deaths per month; the first sighting of a month must be
# recorded as 1 (not 0), otherwise every month is undercounted by one.
for i, each in enumerate(months):
    if intents[i] == 'Homicide':
        if each not in month_homicide_rate:
            month_homicide_rate[each] = 1
        else:
            month_homicide_rate[each] += 1
month_homicide_rate
def months_diff(input_dict):
    """Print the months with the highest and lowest counts and the ratio between them.

    :param input_dict: dict mapping month -> count (counts must be > 0)
    """
    # Scan all keys for the extremes. The previous seeded-loop version never
    # updated min_key when the seed value input_dict[1] was already the
    # minimum (reporting month 0), and raised KeyError for dicts without key 1.
    max_key = max(input_dict, key=input_dict.get)
    min_key = min(input_dict, key=input_dict.get)
    max_value = input_dict[max_key]
    min_value = input_dict[min_key]
    gap = round((max_value / min_value), 2)
    print('max month is', max_key, 'has', max_value, 'and min month is', min_key, 'has', min_value, '. The gap between min and max months is', gap, '!')
months_diff(month_homicide_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
def insertion_sort(data):
    """Sort *data* in place in ascending order using insertion sort.

    :param data: mutable sequence of comparable items; modified in place.
    :return: None (in-place mutation, following list.sort() convention)
    """
    # Grow a sorted prefix one element at a time: shift larger elements right,
    # then drop the current key into its slot. Shifting only on strict '>'
    # keeps the sort stable.
    for i in range(1, len(data)):
        key = data[i]
        j = i - 1
        while j >= 0 and data[j] > key:
            data[j + 1] = data[j]
            j -= 1
        data[j + 1] = key
# %load test_insertion_sort.py
from nose.tools import assert_equal
class TestInsertionSort(object):
    """Exercises insertion_sort on empty, single-element, and general inputs."""

    def test_insertion_sort(self):
        """Run all insertion_sort cases; assert_equal raises on failure."""
        print('Empty input')
        data = []
        insertion_sort(data)
        assert_equal(data, [])
        print('One element')
        data = [5]
        insertion_sort(data)
        assert_equal(data, [5])
        print('Two or more elements')
        data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        insertion_sort(data)
        # NOTE(review): comparing the mutated list against sorted(itself) checks
        # ordering only, not that the original elements were preserved.
        assert_equal(data, sorted(data))
        print('Success: test_insertion_sort')
def main():
    """Entry point: run the insertion-sort test case."""
    suite = TestInsertionSort()
    suite.test_insertion_sort()


if __name__ == '__main__':
    main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ## Experiment
Step2: Balancing the data
Step3: Cross Validation tests
Step4: Result
Step5: Next time, we can reload the results as follows
Step6: The importance of features
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv("provstore/data.csv")
df.head()
df.describe()
# The number of each label in the dataset
df.label.value_counts()
from analytics import balance_smote, test_classification
df = balance_smote(df)
results, importances = test_classification(df)
results.to_pickle("provstore/results.pkl")
importances.to_pickle("provstore/importances.pkl")
import pandas as pd
results = pd.read_pickle("provstore/results.pkl")
importances = pd.read_pickle("provstore/importances.pkl")
results.shape, importances.shape # showing the shape of the data (for checking)
%matplotlib inline
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("talk")
# Rename the columns with Math notation for consistency with the metrics symbols in the paper
feature_name_maths_mapping = {
"entities": "$n_e$", "agents": "$n_{ag}$", "activities": "$n_a$", "nodes": "$n$", "edges": "$e$",
"diameter": "$d$", "assortativity": "$r$", "acc": "$\\mathsf{ACC}$",
"acc_e": "$\\mathsf{ACC}_e$", "acc_a": "$\\mathsf{ACC}_a$", "acc_ag": "$\\mathsf{ACC}_{ag}$",
"mfd_e_e": "$\\mathrm{mfd}_{e \\rightarrow e}$", "mfd_e_a": "$\\mathrm{mfd}_{e \\rightarrow a}$",
"mfd_e_ag": "$\\mathrm{mfd}_{e \\rightarrow ag}$", "mfd_a_e": "$\\mathrm{mfd}_{a \\rightarrow e}$",
"mfd_a_a": "$\\mathrm{mfd}_{a \\rightarrow a}$", "mfd_a_ag": "$\\mathrm{mfd}_{a \\rightarrow ag}$",
"mfd_ag_e": "$\\mathrm{mfd}_{ag \\rightarrow e}$", "mfd_ag_a": "$\\mathrm{mfd}_{ag \\rightarrow a}$",
"mfd_ag_ag": "$\\mathrm{mfd}_{ag \\rightarrow ag}$", "mfd_der": "$\\mathrm{mfd}_\\mathit{der}$", "powerlaw_alpha": "$\\alpha$"
}
importances.rename(columns=feature_name_maths_mapping, inplace=True)
plot = sns.barplot(data=importances)
for i in plot.get_xticklabels():
i.set_rotation(90)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exemplo 2
Step2: Exemplo 3
Step3: A funรงรฃo linspace(a,b,c) retorna um vetor de c elementos, iniciando em a, terminando em b, igualmente espaรงados. Ela gera a sequรชncia de valores de x que vamos passar ร funรงรฃo para plotagem.
Step4: Normalmente o Python nรฃo mostra instantaneamente os grรกficos gerados, entรฃo precisamos pedir para que eles sejam mostrados atravรฉs da funรงรฃo show do mรณdulo Matplotlib. Mas o Jupyter Notebook รฉ configurado para exibir os grรกficos da funรงรฃo plot automaticamente sem precisarmos invocar o show.
Step5: Exercรญcio 2
Step6: Exercรญcio 3
Step7: Parte 2 - Integraรงรฃo Numรฉrica
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pylab as py
v1 = np.array([3,2,1])
v1
v1.sort()
v1
v2 = np.empty([2,2])
v2
class Complex:
    """Minimal complex-number holder, used to demonstrate that the first
    method parameter (conventionally named ``self``) may have any name."""

    def __init__(x, realpart, imagpart): # this "x" belongs only to the scope of this class-definition block
        x.r = realpart
        x.i = imagpart
x = Complex(2,1) # este "x" รฉ diferente do x utilizado na definiรงรฃo de classe acima
x.i,x.r
def f(x, c):
    """Quadratic map: return x squared plus the constant offset c."""
    return c + x * x
x = np.linspace(-2,2,30)
x
p1 = py.plot(x, f(x,.05))
#p2 = py.plot([0,2],[0,2])
#show()
# experimente apagar a porรงรฃo "p1 = " do cรณdigo acima
# experimente descomentar a segunda linha
v1 = [1,2,3]
v2 = [v1,v1]
v3 = range(10)
v1,v2,v3
# exclua este comentรกrio
x = np.ones(10)
for i in x:
if np.sin(i) > .5:
print(i)
# exclua este comentรกrio
x = np.ones(10)
def media(vetor):
    """Return the arithmetic mean of *vetor* (sum of entries over length)."""
    total = np.sum(vetor)
    return total / len(vetor)
a = np.random.rand(30,3)
#y = np.zeros(30)
y = [media(i) for i in a] #opa! Trapaรงa!
py.plot(y,'b.')
# exclua este comentรกrio
def Fun(f, t=0):
    """Right-hand side of the autonomous ODE dF/dt = 2 - F**2.

    What happens if you change the constant (2)? Can you explain why?
    And what if you flip the sign of the -f**2 term?
    """
    return 2 - f ** 2
def solveF(F, F_t0, time, dt):
    """Integrate the autonomous ODE dF/dt = F(F) with the explicit Euler method.

    F      -- right-hand-side function of the state value
    F_t0   -- initial condition at t = 0
    time   -- total integration time
    dt     -- fixed time step
    Returns the trajectory as a list of int(time/dt) state values.
    """
    n_steps = int(time / dt)
    trajectory = list(np.zeros(n_steps))
    trajectory[0] = F_t0
    for step in range(1, n_steps):
        prev = trajectory[step - 1]
        trajectory[step] = prev + F(prev) * dt
    return trajectory
t_0 = 0
time = 10
dt = .01
F_t0 = -1.4
x_axis = np.arange(0,time,dt)
y_axis = solveF(Fun,F_t0,time,dt)
py.plot(x_axis,y_axis)
from scipy.integrate import odeint
t = np.arange(t_0,time,dt)
y = odeint(Fun,F_t0,t)
py.plot(t,y)
y_0 = solveF(Fun,F_t0,time,dt)
y_1 = odeint(Fun,F_t0,t)
py.plot(t,(y_1.T - y_0.T)[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And take a look at what our data looks like
Step2: Look how far the minimum and maximum values for the density are from the top and bottom quartile breakpoints! We have some outliers in our data that are well outside the meat of most of the distribution. Let's look into this to find the culprits within the sample.
Step3: Looks like Washington D.C. and Alaska were the culprits on each end of the range. Washington was more dense than the next most dense state, New Jersey, than the least dense state, Alaska was from Wyoming, however. Washington D.C. has a has a relatively small land area for the amount of people that live there, so it makes sense that it's pretty dense. And Alaska has a lot of land area, but not much of it is habitable for humans.
Step4: This looks better. Our min and max values for the colorscale are much closer to the mean value now. Let's run with these values, and make a colorscale. I'm just going to use a sequential light-to-dark color palette from the ColorBrewer.
Step5: Let's narrow down these cities to United states cities, by using GeoPandas' spatial join functionality between two GeoDataFrame objects, using the Point 'within' Polygon functionality.
Step6: Ok, now we have a new GeoDataFrame with our top 20 populated cities. Let's see the top 5.
Step7: Alright, let's build a map!
|
<ASSISTANT_TASK:>
Python Code:
import geopandas
states = geopandas.read_file(
"https://raw.githubusercontent.com/PublicaMundi/MappingAPI/master/data/geojson/us-states.json",
driver="GeoJSON",
)
cities = geopandas.read_file(
"https://d2ad6b4ur7yvpq.cloudfront.net/naturalearth-3.3.0/ne_50m_populated_places_simple.geojson",
driver="GeoJSON",
)
states.describe()
states_sorted = states.sort_values(by="density", ascending=False)
states_sorted.head(5).append(states_sorted.tail(5))[["name", "density"]]
def rd2(x):
    """Round *x* to two decimal places."""
    return round(x, ndigits=2)
minimum, maximum = states["density"].quantile([0.05, 0.95]).apply(rd2)
mean = round(states["density"].mean(), 2)
print(f"minimum: {minimum}", f"maximum: {maximum}", f"Mean: {mean}", sep="\n\n")
import branca
colormap = branca.colormap.LinearColormap(
colors=["#f2f0f7", "#cbc9e2", "#9e9ac8", "#756bb1", "#54278f"],
index=states["density"].quantile([0.2, 0.4, 0.6, 0.8]),
vmin=minimum,
vmax=maximum,
)
colormap.caption = "Population Density in the United States"
colormap
us_cities = geopandas.sjoin(cities, states, how="inner", predicate="within")
pop_ranked_cities = us_cities.sort_values(by="pop_max", ascending=False)[
["nameascii", "pop_max", "geometry"]
].iloc[:20]
pop_ranked_cities.head(5)
import folium
from folium.plugins import Search
m = folium.Map(location=[38, -97], zoom_start=4)
def style_function(x):
    """Folium GeoJson style callback: fill each state polygon by its
    population density via the module-level colormap."""
    density = x["properties"]["density"]
    style = {
        "fillColor": colormap(density),
        "color": "black",
        "weight": 2,
        "fillOpacity": 0.5,
    }
    return style
stategeo = folium.GeoJson(
states,
name="US States",
style_function=style_function,
tooltip=folium.GeoJsonTooltip(
fields=["name", "density"], aliases=["State", "Density"], localize=True
),
).add_to(m)
citygeo = folium.GeoJson(
pop_ranked_cities,
name="US Cities",
tooltip=folium.GeoJsonTooltip(
fields=["nameascii", "pop_max"], aliases=["", "Population Max"], localize=True
),
).add_to(m)
statesearch = Search(
layer=stategeo,
geom_type="Polygon",
placeholder="Search for a US State",
collapsed=False,
search_label="name",
weight=3,
).add_to(m)
citysearch = Search(
layer=citygeo,
geom_type="Point",
placeholder="Search for a US City",
collapsed=True,
search_label="nameascii",
).add_to(m)
folium.LayerControl().add_to(m)
colormap.add_to(m)
m
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Manipulating an Image in Python
Step2: We can now take a look at our image to see if we uploaded it properly. To do this we will use Matplotlib to display the image. But first we must load the image from the virtual machine hosting this Colab. Right now that image is stored on the virtual machine's hard drive.
Step3: Reshaping
Step4: As expected, we have dimensions that indicate that we have an image in landscape
Step5: Now we need to find out how much padding we need to add to each side of the image. The longer side shouldn't get any extra padding, and since we want to make the image a square, the shorter side should get enough padding to make it equal to the longer side.
Step6: We don't want all of the padding to be on one side of the image, though. We need to split the amount of padding in half and then add each half of the padding to each side of the shorter dimension.
Step7: Now that we know how much padding to add to the image, we can do so by asking Pillow to expand the image.
Step8: We will do one final check to confirm that the image is indeed a square now. You should now have a 1920x1920 image.
Step9: Scale the Image
Step10: We can see the exact size of the resized image.
Step11: Padding before resizing ensures that we don't distort the shape of the contents of our image, but it did require that we apply an artificial background.
Step12: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
from PIL import Image
import matplotlib.pyplot as plt
image_file = "running-shoe-371624_1920.jpg"
with Image.open(image_file) as sneaker:
plt.imshow(sneaker)
plt.show()
from PIL import Image
import matplotlib.pyplot as plt
image_width_height = None
with Image.open(image_file) as sneaker:
image_width_height = sneaker.size
print(image_width_height)
max_dimension = max(image_width_height)
print(max_dimension)
width_padding = max_dimension - image_width_height[0]
height_padding = max_dimension - image_width_height[1]
print("Width padding: {}, Height padding: {}".format(width_padding, height_padding))
left_padding = width_padding // 2
right_padding = width_padding - left_padding
top_padding = height_padding // 2
bottom_padding = height_padding - top_padding
print("Left padding: {}, Top padding {}, Right padding: {}, Bottom padding {}".format(
left_padding,
top_padding,
right_padding,
bottom_padding))
from PIL import ImageOps
padding = (
left_padding,
top_padding,
right_padding,
bottom_padding
)
image = Image.open("running-shoe-371624_1920.jpg")
padded_image = ImageOps.expand(image, padding, (255,255,255,255))
image.close()
_ = plt.imshow(padded_image)
padded_image.size
desired_size = (200, 200)
resized_image = padded_image.resize(desired_size, Image.ANTIALIAS)
_ = plt.imshow(resized_image)
resized_image.size
# Upload the file you just downloaded from your computer to the Colab runtime
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
### YOUR CODE HERE ###
# Open the image file and plot the image
# Print the dimension of the image
# Find the longer dimension
# Compute the delta width and height
# Compute the padding amounts
# Pad and plot the image
# Resize and plot the image
### YOUR CODE HERE ###
# Flip the image horizontally (left to right)
# Plot the image to show the image is indeed flipped horizontally
### YOUR CODE HERE ###
# Save newly generated image to the folder
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introducing Principal Component Analysis
Step2: We can see that there is a definite trend in the data. What PCA seeks to do is to find the Principal Axes in the data, and explain how important those axes are in describing the data distribution
Step3: To see what these numbers mean, let's view them as vectors plotted on top of the data
Step4: Notice that one vector is longer than the other. In a sense, this tells us that that direction in the data is somehow more "important" than the other direction.
Step5: By specifying that we want to throw away 5% of the variance, the data is now compressed by a factor of 50%! Let's see what the data look like after this compression
Step6: The light points are the original data, while the dark points are the projected version. We see that after truncating 5% of the variance of this dataset and then reprojecting it, the "most important" features of the data are maintained, and we've compressed the data by 50%!
Step7: This gives us an idea of the relationship between the digits. Essentially, we have found the optimal stretch and rotation in 64-dimensional space that allows us to see the layout of the digits, without reference to the labels.
Step8: But the pixel-wise representation is not the only choice. We can also use other basis functions, and write something like
Step9: Here we see that with only six PCA components, we recover a reasonable approximation of the input!
Step10: Here we see that our two-dimensional projection loses a lot of information (as measured by the explained variance) and that we'd need about 20 components to retain 90% of the variance. Looking at this plot for a high-dimensional dataset can help you understand the level of redundancy present in multiple observations.
Step11: Let's take another look at this by using IPython's interact functionality to view the reconstruction of several images at once
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(1)
X = np.dot(np.random.random(size=(2, 2)), np.random.normal(size=(2, 200))).T
plt.plot(X[:, 0], X[:, 1], 'o')
plt.axis('equal');
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
print(pca.explained_variance_ratio_)
print(pca.components_)
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.5)
for length, vector in zip(pca.explained_variance_, pca.components_):
v = vector * 3 * np.sqrt(length)
plt.plot([0, v[0]], [0, v[1]], '-k', lw=3)
plt.axis('equal');
clf = PCA(0.95) # keep 95% of variance
X_trans = clf.fit_transform(X)
print(X.shape)
print(X_trans.shape)
X_new = clf.inverse_transform(X_trans)
plt.plot(X[:, 0], X[:, 1], 'o', alpha=0.2)
plt.plot(X_new[:, 0], X_new[:, 1], 'ob', alpha=0.8)
plt.axis('equal');
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
y = digits.target
pca = PCA(2) # project from 64 to 2 dimensions
Xproj = pca.fit_transform(X)
print(X.shape)
print(Xproj.shape)
plt.scatter(Xproj[:, 0], Xproj[:, 1], c=y, edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('nipy_spectral', 10))
plt.colorbar();
from fig_code.figures import plot_image_components
sns.set_style('white')
plot_image_components(digits.data[0])
from fig_code.figures import plot_pca_interactive
plot_pca_interactive(digits.data)
sns.set()
pca = PCA().fit(X)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
fig, axes = plt.subplots(8, 8, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
pca = PCA(i + 1).fit(X)
im = pca.inverse_transform(pca.transform(X[20:21]))
ax.imshow(im.reshape((8, 8)), cmap='binary')
ax.text(0.95, 0.05, 'n = {0}'.format(i + 1), ha='right',
transform=ax.transAxes, color='green')
ax.set_xticks([])
ax.set_yticks([])
from IPython.html.widgets import interact
def plot_digits(n_components):
    """Fit PCA with *n_components* on the module-level digits matrix X and
    plot a 10x10 mosaic of the reconstructed images, titled with the total
    explained variance."""
    fig = plt.figure(figsize=(8, 8))
    plt.subplot(1, 1, 1, frameon=False, xticks=[], yticks=[])
    nside = 10  # mosaic is nside x nside = 100 digit images
    pca = PCA(n_components).fit(X)
    # Project the first 100 digits down to n_components dims, then
    # reconstruct back to the original 64-dim pixel space.
    Xproj = pca.inverse_transform(pca.transform(X[:nside ** 2]))
    Xproj = np.reshape(Xproj, (nside, nside, 8, 8))
    total_var = pca.explained_variance_ratio_.sum()
    # Tile the 8x8 reconstructions into a single (8*nside)x(8*nside) image.
    im = np.vstack([np.hstack([Xproj[i, j] for j in range(nside)])
                    for i in range(nside)])
    plt.imshow(im)
    plt.grid(False)
    plt.title("n = {0}, variance = {1:.2f}".format(n_components, total_var),
              size=18)
    plt.clim(0, 16)  # sklearn digits pixel intensities span 0..16
interact(plot_digits, n_components=[1, 64], nside=[1, 8]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png" style="height
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png" style="height
Step9: Test
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """Download *url* to the local path *file*, skipping the download if
    the file already exists.

    :param url: URL to file
    :param file: Local file path
    """
    # NOTE: the original docstring lines had lost their triple-quote
    # delimiters (bare text inside the body), which is invalid Python;
    # restored here as a proper docstring.
    if not os.path.isfile(file):
        print('Downloading ' + file + '...')
        urlretrieve(url, file)
        print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """Uncompress features and labels from a zip file.

    :param file: The zip file to extract the data from
    :return: (features, labels) as numpy arrays; each feature is a
        flattened float32 pixel vector, each label is the first letter
        of the corresponding filename.
    """
    # NOTE: the original docstring lines had lost their triple-quote
    # delimiters (bare text inside the body), which is invalid Python;
    # restored here as a proper docstring.
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress Bar
        filenames_pbar = tqdm(zipf.namelist(), unit='files')
        # Get features and labels from all files
        for filename in filenames_pbar:
            # Check if the file is a directory
            if not filename.endswith('/'):
                with zipf.open(filename) as image_file:
                    image = Image.open(image_file)
                    image.load()
                    # Load image data as 1 dimensional array
                    # We're using float32 to save on memory space
                    feature = np.array(image, dtype=np.float32).flatten()
                    # Get the letter from the filename. This is the letter of the image.
                    label = os.path.split(filename)[1][0]
                    features.append(feature)
                    labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
# TODO: Implement Min-Max scaling for grayscale image data
a, b = 0.1, 0.9 # range point
x_min, x_max = 0, 255
return a + (image_data - x_min)*(b - a) / (x_max - x_min)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# TODO: Set the features and labels tensors
# features =
# labels =
weights = tf.Variable(tf.truncated_normal((features_count, labels_count)))
bias = tf.Variable(tf.zeros(labels_count))
# TODO: Set the weights and biases tensors
# weights =
# biases =
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
# epochs =
# learning_rate =
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, generate pseudodata. The bulk of the data will be linear with noise distributed normally, but additionally several outliers will be interjected.
Step2: Plot this data. The three data points in the top left are the interjected data.
Step3: To highlight the problem, first fit a standard normally-distributed linear regression.
Step4: Remember, the true intercept was 1, the true slope was 2. The recovered intercept is much higher, and the slope is much lower, so the influence of the outliers is apparent.
Step5: The recovered regression line, as well as the $0.5\sigma$ and $1\sigma$ bands are shown.
Step6: As we can see, the tails of the Student T are much larger, which means values far from the mean are more likely when compared to the normal distribution.
Step7: In Bambi, the way to specify a regression with Student T distributed data is by passing "t" to the family parameter of a Model.
Step8: Note the new parameter in the model, y_nu. This is the aforementioned degrees of freedom. If this number were very high, we would expect it to be well described by a normal distribution. However, the HDI of this spans from 1.5 to 3.7, meaning that the tails are much heavier than a normal distribution. As a result of the heavier tails, y_sigma has also dropped precipitously from the normal model, meaning the oversized uncertainty bands from above have shrunk.
Step9: Here we can see the mean recovered values of both the slope and intercept are far closer to the true values using the robust regression model compared to the normally distributed one.
Step10: This is much better. The true and recovered regression lines are much closer, and the uncertainty bands are appropriate sized. The effect of the outliers is not entirely gone, the recovered line still slightly differs from the true line, but the effect is far smaller, which is a result of the Student T likelihood function ascribing a higher probability to outliers than the normal distribution. Additionally, this inference is based on sampling methods, so it is expected to have small differences (especially given a relatively small number of samples).
Step11: Here it is quite obvious that the Student T model is much better, due to having a clearly larger value of LOO.
|
<ASSISTANT_TASK:>
Python Code:
import arviz as az
import bambi as bmb
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
az.style.use("arviz-darkgrid")
np.random.seed(1111)
size = 100
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
# y = a + b*x
true_regression_line = true_intercept + true_slope * x
# add noise
y = true_regression_line + np.random.normal(scale=0.5, size=size)
# Add outliers
x_out = np.append(x, [0.1, 0.15, 0.2])
y_out = np.append(y, [8, 6, 9])
data = pd.DataFrame({
"x": x_out,
"y": y_out
})
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, xlabel="x", ylabel="y", title="Generated data and underlying model")
ax.plot(x_out, y_out, "x", label="sampled data")
ax.plot(x, true_regression_line, label="true regression line", lw=2.0)
plt.legend(loc=0);
# Note, "gaussian" is the default argument for family. Added to be explicit.
gauss_model = bmb.Model("y ~ x", data, family="gaussian")
gauss_fitted = gauss_model.fit(draws=2000)
gauss_model.predict(gauss_fitted, kind="pps", draws=1000)
az.summary(gauss_fitted)
plt.figure(figsize=(7, 5))
# Plot Data
plt.plot(x_out, y_out, "x", label="data")
# Plot recovered linear regression
x_range = np.linspace(min(x_out), max(x_out), 2000)
y_pred = gauss_fitted.posterior.x.values.mean() * x_range + gauss_fitted.posterior.Intercept.values.mean()
plt.plot(x_range, y_pred,
color="black",linestyle="--",
label="Recovered regression line"
)
# Plot HDIs
for interval in [0.38, 0.68]:
az.plot_hdi(x_out, gauss_fitted.posterior_predictive.y,
hdi_prob=interval, color="firebrick")
# Plot true regression line
plt.plot(x, true_regression_line,
label="True regression line", lw=2.0, color="black")
plt.legend(loc=0);
normal_data = np.random.normal(loc=0, scale=1, size=100_000)
t_data = np.random.standard_t(df=1, size=100_000)
bins = np.arange(-8,8,0.15)
plt.hist(normal_data,
bins=bins, density=True,
alpha=0.6,
label="Normal"
)
plt.hist(t_data,
bins=bins,density=True,
alpha=0.6,
label="Student T"
)
plt.xlabel("x")
plt.ylabel("Probability density")
plt.xlim(-8,8)
plt.legend();
bins = np.arange(-8,8,0.15)
for ndof in [0.1, 1, 10]:
t_data = np.random.standard_t(df=ndof, size=100_000)
plt.hist(t_data,
bins=bins,density=True,
label=f"$\\nu = {ndof}$",
histtype="step"
)
plt.hist(normal_data,
bins=bins, density=True,
histtype="step",
label="Normal"
)
plt.xlabel("x")
plt.ylabel("Probability density")
plt.xlim(-6,6)
plt.legend();
t_model = bmb.Model("y ~ x", data, family="t")
t_fitted = t_model.fit(draws=2000)
t_model.predict(t_fitted, kind="pps", draws=100)
az.summary(t_fitted)
def get_slope_intercept(mod):
return (
mod.posterior.x.values.mean(),
mod.posterior.Intercept.values.mean()
)
gauss_slope, gauss_int = get_slope_intercept(gauss_fitted)
t_slope, t_int = get_slope_intercept(t_fitted)
pd.DataFrame({
"Model":["True","Normal","T"],
"Slope":[2, gauss_slope, t_slope],
"Intercept": [1, gauss_int, t_int]
}).set_index("Model").T.round(decimals=2)
plt.figure(figsize=(7, 5))
# Plot Data
plt.plot(x_out, y_out, "x", label="data")
# Plot recovered robust linear regression
x_range = np.linspace(min(x_out), max(x_out), 2000)
y_pred = t_fitted.posterior.x.values.mean() * x_range + t_fitted.posterior.Intercept.values.mean()
plt.plot(x_range, y_pred,
color="black",linestyle="--",
label="Recovered regression line"
)
# Plot HDIs
for interval in [0.05, 0.38, 0.68]:
az.plot_hdi(x_out, t_fitted.posterior_predictive.y,
hdi_prob=interval, color="firebrick")
# Plot true regression line
plt.plot(x, true_regression_line,
label="true regression line", lw=2.0, color="black")
plt.legend(loc=0);
models = {
"gaussian": gauss_fitted,
"Student T": t_fitted
}
df_compare = az.compare(models)
df_compare
az.plot_compare(df_compare, insample_dev=False);
%load_ext watermark
%watermark -n -u -v -iv -w
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
    """Create the graph placeholders feeding the GAN.

    real_dim : flattened size of one real input image
    z_dim    : size of the latent (noise) vector fed to the generator

    Returns a ``(inputs_real, inputs_z)`` pair of float32 placeholders
    with a free batch dimension.
    """
    real_images = tf.placeholder(tf.float32, (None, real_dim), name='inputs_real')
    latent_vectors = tf.placeholder(tf.float32, (None, z_dim), name='inputs_z')
    return real_images, latent_vectors
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    ''' Build the generator network.

    Maps a latent vector `z` to a flattened image through a single
    leaky-ReLU hidden layer and a tanh output, so generated pixels land
    in [-1, 1] (matching the rescaled MNIST inputs used in training).

    Arguments
    ---------
    z : Input tensor for the generator (latent noise vector)
    out_dim : Shape of the generator output (flattened image size)
    n_units : Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope
    alpha : leak parameter for leaky ReLU

    Returns
    -------
    out : tanh-activated output tensor of shape (batch, out_dim)
    '''
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(inputs=z, activation=None,units=n_units)
        # Leaky ReLU: max(alpha*x, x) keeps a small gradient for x < 0
        h1 = tf.maximum(h1 * alpha,h1)
        # Logits and tanh output
        logits = tf.layers.dense(inputs=h1, activation=None, units=out_dim)
        out = tf.tanh(logits)
        return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    ''' Build the discriminator network.

    Scores an image `x` through a single leaky-ReLU hidden layer and one
    sigmoid output unit (probability that `x` is a real image).

    Arguments
    ---------
    x : Input tensor for the discriminator
    n_units: Number of units in hidden layer
    reuse : Reuse the variables with tf.variable_scope
    alpha : leak parameter for leaky ReLU

    Returns
    -------
    out, logits : sigmoid output and its pre-activation logits (the
        logits are returned separately because the losses below use
        `tf.nn.sigmoid_cross_entropy_with_logits`)
    '''
    with tf.variable_scope('discriminator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(inputs=x, activation=None,units=n_units)
        # Leaky ReLU: max(alpha*x, x) keeps a small gradient for x < 0
        h1 = tf.maximum(h1 * alpha,h1)
        logits = tf.layers.dense(inputs=h1, activation=None, units=1)
        out = tf.sigmoid(logits)
        return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, out_dim=input_size, n_units=g_hidden_size, reuse=False, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(x=input_real, n_units=d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(x=g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1-smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = tf.add(d_loss_real, d_loss_fake)
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
    """Display the 16 generator samples saved at `epoch` on a 4x4 grid.

    Returns the matplotlib (figure, axes) pair so the caller can tweak it.
    """
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for axis, image in zip(axes.flatten(), samples[epoch]):
        # Hide tick marks/labels; only the raw pixels matter here.
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(image.reshape((28,28)), cmap='Greys_r')
    return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image.n_px
Step2: Image.shape
Step3: Image.aspect
Step4: Basic types
Step5: Images
Step6: Segmentation masks
Step7: Points
Step8: Points are expected to come as an array/tensor of shape (n,2) or as a list of lists with two elements. Unless you change the defaults in PointScaler (see later on), coordinates should go from 0 to width/height, with the first one being the column index (so from 0 to width) and the second one being the row index (so from 0 to height).
Step9: Bounding boxes
Step10: Test get_annotations on the coco_tiny dataset against both image filenames and bounding box labels.
Step11: Bounding boxes are expected to come as tuple with an array/tensor of shape (n,4) or as a list of lists with four elements and a list of corresponding labels. Unless you change the defaults in PointScaler (see later on), coordinates for each bounding box should go from 0 to width/height, with the following convention
Step12: Basic Transforms
Step13: Any data augmentation transform that runs on PIL Images must be run before this transform.
Step14: Let's confirm we can pipeline this with PILImage.create.
Step15: To work with data augmentation, and in particular the grid_sample method, points need to be represented with coordinates going from -1 to 1 (-1 being top or left, 1 bottom or right), which will be done unless you pass do_scale=False. We also need to make sure they are following our convention of points being x,y coordinates, so pass along y_first=True if you have your data in an y,x format to add a flip.
Step16: To work with data augmentation, and in particular the grid_sample method, points need to be represented with coordinates going from -1 to 1 (-1 being top or left, 1 bottom or right), which will be done unless you pass do_scale=False. We also need to make sure they are following our convention of points being x,y coordinates, so pass along y_first=True if you have your data in an y,x format to add a flip.
Step17: Export -
|
<ASSISTANT_TASK:>
Python Code:
#|export
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
mnist_stats = ([0.131], [0.308])
im = Image.open(TEST_IMAGE).resize((30,20))
#|export
if not hasattr(Image,'_patched'):
_old_sz = Image.Image.size.fget
@patch(as_prop=True)
def size(x:Image.Image): return fastuple(_old_sz(x))
Image._patched = True
#|export
@patch(as_prop=True)
def n_px(x: Image.Image): return x.size[0] * x.size[1]
test_eq(im.n_px, 30*20)
#|export
@patch(as_prop=True)
def shape(x: Image.Image): return x.size[1],x.size[0]
test_eq(im.shape, (20,30))
#|export
@patch(as_prop=True)
def aspect(x: Image.Image): return x.size[0]/x.size[1]
test_eq(im.aspect, 30/20)
#|export
@patch
def reshape(x: Image.Image, h, w, resample=0):
    "`resize` `x` to `(w,h)`"
    # Note the argument order: callers pass (rows, cols) but PIL's
    # `resize` expects (width, height), hence the swap below.
    return x.resize((w,h), resample=resample)
show_doc(Image.Image.reshape)
test_eq(im.reshape(12,10).shape, (12,10))
#|export
@patch
def to_bytes_format(im:Image.Image, format='png'):
    "Convert to bytes, default to PNG format"
    # Encode through an in-memory buffer rather than touching disk.
    arr = io.BytesIO()
    im.save(arr, format=format)
    return arr.getvalue()
show_doc(Image.Image.to_bytes_format)
#|export
@patch
def to_thumb(self:Image.Image, h, w=None):
    "Same as `thumbnail`, but uses a copy"
    # A single size argument means a square thumbnail.
    if w is None: w=h
    # `Image.thumbnail` resizes in place; work on a copy so the original
    # image is left untouched.
    im = self.copy()
    im.thumbnail((w,h))
    return im
show_doc(Image.Image.to_thumb)
#|export
@patch
def resize_max(x: Image.Image, resample=0, max_px=None, max_h=None, max_w=None):
    "`resize` `x` to `max_px`, or `max_h`, or `max_w`"
    # Each constraint is applied in turn while preserving the aspect
    # ratio: first total pixel count, then height, then width. Only
    # constraints that are exceeded trigger a resize.
    h,w = x.shape
    if max_px and x.n_px>max_px: h,w = fastuple(h,w).mul(math.sqrt(max_px/x.n_px))
    if max_h and h>max_h: h,w = (max_h ,max_h*w/h)
    if max_w and w>max_w: h,w = (max_w*h/w,max_w )
    return x.reshape(round(h), round(w), resample=resample)
test_eq(im.resize_max(max_px=20*30).shape, (20,30))
test_eq(im.resize_max(max_px=300).n_px, 294)
test_eq(im.resize_max(max_px=500, max_h=10, max_w=20).shape, (10,15))
test_eq(im.resize_max(max_h=14, max_w=15).shape, (10,15))
test_eq(im.resize_max(max_px=300, max_h=10, max_w=25).shape, (10,15))
show_doc(Image.Image.resize_max)
#|export
def to_image(x):
    "Convert a tensor or array to a PIL int8 Image"
    # Already a PIL image: nothing to do.
    if isinstance(x,Image.Image): return x
    # Tensors are channel-first (c,h,w); move channels last for PIL.
    if isinstance(x,Tensor): x = to_np(x.permute((1,2,0)))
    # Float input is presumably in [0,1] — rescale to byte range.
    if x.dtype==np.float32: x = (x*255).astype(np.uint8)
    # NOTE(review): the mode is chosen from x.shape[0] (the row count
    # after the channel-last permute), not the channel axis — confirm
    # the intended shapes upstream before relying on CMYK handling.
    return Image.fromarray(x, mode=['RGB','CMYK'][x.shape[0]==4])
#|export
def load_image(fn, mode=None):
    "Open and load a `PIL.Image` and convert to `mode`"
    im = Image.open(fn)
    # Force the lazy loader to actually read the pixel data.
    im.load()
    # Re-wrap the loaded core image in a fresh Image object —
    # presumably to detach from the file handle held by the lazy
    # `Image.open` object; TODO confirm against PIL internals.
    im = im._new(im.im)
    return im.convert(mode) if mode else im
#|export
def image2tensor(img):
    "Transform image to byte tensor in `c*h*w` dim order."
    t = tensor(img)
    # Grayscale input arrives as (h,w); give it a trailing channel axis.
    if t.dim() == 2:
        t = t.unsqueeze(-1)
    # (h,w,c) -> (c,h,w)
    return t.permute(2, 0, 1)
#|export
class PILBase(Image.Image, metaclass=BypassNewMeta):
    # BypassNewMeta lets `cls(existing_image)` re-type an existing
    # `_bypass_type` instance rather than constructing a new one.
    _bypass_type=Image.Image
    _show_args = {'cmap':'viridis'}   # default kwargs for `show`
    _open_args = {'mode': 'RGB'}      # default kwargs passed to `load_image`
    @classmethod
    def create(cls, fn:(Path,str,Tensor,ndarray,bytes), **kwargs)->None:
        "Open an `Image` from path `fn`"
        # Normalize every supported source down to something that
        # `Image.fromarray` or `load_image` understands.
        if isinstance(fn,TensorImage): fn = fn.permute(1,2,0).type(torch.uint8)
        if isinstance(fn, TensorMask): fn = fn.type(torch.uint8)
        if isinstance(fn,Tensor): fn = fn.numpy()
        if isinstance(fn,ndarray): return cls(Image.fromarray(fn))
        if isinstance(fn,bytes): fn = io.BytesIO(fn)
        return cls(load_image(fn, **merge(cls._open_args, kwargs)))
    def show(self, ctx=None, **kwargs):
        "Show image using `merge(self._show_args, kwargs)`"
        return show_image(self, ctx=ctx, **merge(self._show_args, kwargs))
    def __repr__(self): return f'{self.__class__.__name__} mode={self.mode} size={"x".join([str(d) for d in self.size])}'
#|export
class PILImage(PILBase): pass
#|export
class PILImageBW(PILImage): _show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
im = PILImage.create(TEST_IMAGE)
test_eq(type(im), PILImage)
test_eq(im.mode, 'RGB')
test_eq(str(im), 'PILImage mode=RGB size=1200x803')
im.resize((64,64))
ax = im.show(figsize=(1,1))
test_fig_exists(ax)
timg = TensorImage(image2tensor(im))
tpil = PILImage.create(timg)
tpil.resize((64,64))
#|hide
test_eq(np.array(im), np.array(tpil))
#|export
class PILMask(PILBase): _open_args,_show_args = {'mode':'L'},{'alpha':0.5, 'cmap':'tab20'}
im = PILMask.create(TEST_IMAGE)
test_eq(type(im), PILMask)
test_eq(im.mode, 'L')
test_eq(str(im), 'PILMask mode=L size=1200x803')
#|export
OpenMask = Transform(PILMask.create)
OpenMask.loss_func = CrossEntropyLossFlat(axis=1)
PILMask.create = OpenMask
mnist = untar_data(URLs.MNIST_TINY)
fns = get_image_files(mnist)
mnist_fn = TEST_IMAGE_BW
timg = Transform(PILImageBW.create)
mnist_img = timg(mnist_fn)
test_eq(mnist_img.size, (28,28))
assert isinstance(mnist_img, PILImageBW)
mnist_img
#|export
class AddMaskCodes(Transform):
    "Add the code metadata to a `TensorMask`"
    def __init__(self, codes=None):
        # `codes` maps mask integer values to class names; `vocab`/`c`
        # are presumably consumed downstream (not visible here) — the
        # count of classes and their names.
        self.codes = codes
        if codes is not None: self.vocab,self.c = codes,len(codes)
    def decodes(self, o:TensorMask):
        # Attach the vocabulary to the decoded mask so it can label itself.
        if self.codes is not None: o.codes=self.codes
        return o
camvid = untar_data(URLs.CAMVID_TINY)
fns = get_image_files(camvid/'images')
cam_fn = fns[0]
mask_fn = camvid/'labels'/f'{cam_fn.stem}_P{cam_fn.suffix}'
cam_img = PILImage.create(cam_fn)
test_eq(cam_img.size, (128,96))
tmask = Transform(PILMask.create)
mask = tmask(mask_fn)
test_eq(type(mask), PILMask)
test_eq(mask.size, (128,96))
_,axs = plt.subplots(1,3, figsize=(12,3))
cam_img.show(ctx=axs[0], title='image')
mask.show(alpha=1, ctx=axs[1], vmin=1, vmax=30, title='mask')
cam_img.show(ctx=axs[2], title='superimposed')
mask.show(ctx=axs[2], vmin=1, vmax=30);
#|export
class TensorPoint(TensorBase):
    "Basic type for points in an image"
    _show_args = dict(s=10, marker='.', c='r')   # default scatter style
    @classmethod
    def create(cls, t, img_size=None)->None:
        "Convert an array or a list of points `t` to a `Tensor`"
        # Reshape to (n,2) so a flat list of coordinates is also accepted.
        return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
    def show(self, ctx=None, **kwargs):
        # `figsize` belongs to the figure, not to `scatter`; drop it.
        if 'figsize' in kwargs: del kwargs['figsize']
        x = self.view(-1,2)
        ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
        return ctx
#|export
TensorPointCreate = Transform(TensorPoint.create)
TensorPointCreate.loss_func = MSELossFlat()
TensorPoint.create = TensorPointCreate
pnt_img = TensorImage(mnist_img.resize((28,35)))
pnts = np.array([[0,0], [0,35], [28,0], [28,35], [9, 17]])
tfm = Transform(TensorPoint.create)
tpnts = tfm(pnts)
test_eq(tpnts.shape, [5,2])
test_eq(tpnts.dtype, torch.float32)
ctx = pnt_img.show(figsize=(1,1), cmap='Greys')
tpnts.show(ctx=ctx);
#|export
def get_annotations(fname, prefix=None):
    "Open a COCO style json in `fname` and returns the lists of filenames (with maybe `prefix`) and labelled bboxes."
    # Use a context manager so the file handle is released; the original
    # `json.load(open(fname))` leaked it.
    with open(fname) as f:
        annot_dict = json.load(f)
    classes = {o['id']:o['name'] for o in annot_dict['categories']}
    id2bboxes, id2cats = collections.defaultdict(list), collections.defaultdict(list)
    for o in annot_dict['annotations']:
        # COCO stores boxes as [x, y, width, height]; convert to corner
        # coordinates [x1, y1, x2, y2].
        x, y, w, h = o['bbox']
        id2bboxes[o['image_id']].append([x, y, x+w, y+h])
        id2cats[o['image_id']].append(classes[o['category_id']])
    pre = '' if prefix is None else prefix
    # Keep only images that actually carry annotations.
    id2images = {o['id']:pre + o['file_name'] for o in annot_dict['images'] if o['id'] in id2bboxes}
    ids = list(id2images.keys())
    return [id2images[k] for k in ids], [(id2bboxes[k], id2cats[k]) for k in ids]
coco = untar_data(URLs.COCO_TINY)
test_images, test_lbl_bbox = get_annotations(coco/'train.json')
annotations = json.load(open(coco/'train.json'))
categories, images, annots = map(lambda x:L(x),annotations.values())
test_eq(test_images, images.attrgot('file_name'))
def bbox_lbls(file_name):
img = images.filter(lambda img:img['file_name']==file_name)[0]
bbs = annots.filter(lambda a:a['image_id'] == img['id'])
i2o = {k['id']:k['name'] for k in categories}
lbls = [i2o[cat] for cat in bbs.attrgot('category_id')]
bboxes = [[bb[0],bb[1], bb[0]+bb[2], bb[1]+bb[3]] for bb in bbs.attrgot('bbox')]
return [bboxes, lbls]
for idx in random.sample(range(len(images)),5):
test_eq(test_lbl_bbox[idx], bbox_lbls(test_images[idx]))
#|export
from matplotlib import patches, patheffects
#|export
def _draw_outline(o, lw):
    # Stroke the artist with a black outline of width `lw` so it stays
    # visible on both light and dark image regions.
    o.set_path_effects([patheffects.Stroke(linewidth=lw, foreground='black'), patheffects.Normal()])
def _draw_rect(ax, b, color='white', text=None, text_size=14, hw=True, rev=False):
    # Draw one bounding box `b` on `ax`, optionally labelled with `text`.
    #   hw=True  -> b is (x, y, width, height); hw=False -> corner coords.
    #   rev=True -> b is given y-first and is flipped to x-first.
    lx,ly,w,h = b
    if rev: lx,ly,w,h = ly,lx,h,w
    if not hw: w,h = w-lx,h-ly
    patch = ax.add_patch(patches.Rectangle((lx,ly), w, h, fill=False, edgecolor=color, lw=2))
    _draw_outline(patch, 4)
    if text is not None:
        patch = ax.text(lx,ly, text, verticalalignment='top', color=color, fontsize=text_size, weight='bold')
        _draw_outline(patch,1)
#|export
class TensorBBox(TensorPoint):
    "Basic type for a tensor of bounding boxes in an image"
    @classmethod
    def create(cls, x, img_size=None)->None:
        # One row per box: (x1, y1, x2, y2).
        return cls(tensor(x).view(-1, 4).float(), img_size=img_size)
    def show(self, ctx=None, **kwargs):
        x = self.view(-1,4)
        # hw=False: rows are corner coordinates, not (x, y, w, h).
        for b in x: _draw_rect(ctx, b, hw=False, **kwargs)
        return ctx
#|export
class LabeledBBox(L):
    "Basic type for a list of bounding boxes in an image"
    def show(self, ctx=None, **kwargs):
        # Draw each box with its label; '#na#' marks padding entries
        # that should not be drawn.
        for b,l in zip(self.bbox, self.lbl):
            if l != '#na#': ctx = retain_type(b, self.bbox).show(ctx=ctx, text=l)
        return ctx
    # Convenience accessors: self[0] -> boxes, self[1] -> labels.
    bbox,lbl = add_props(lambda i,self: self[i])
coco = untar_data(URLs.COCO_TINY)
images, lbl_bbox = get_annotations(coco/'train.json')
idx=2
coco_fn,bbox = coco/'train'/images[idx],lbl_bbox[idx]
coco_img = timg(coco_fn)
tbbox = LabeledBBox(TensorBBox(bbox[0]), bbox[1])
ctx = coco_img.show(figsize=(3,3), cmap='Greys')
tbbox.show(ctx=ctx);
#|export
PILImage ._tensor_cls = TensorImage
PILImageBW._tensor_cls = TensorImageBW
PILMask ._tensor_cls = TensorMask
#|export
@ToTensor
def encodes(self, o:PILBase): return o._tensor_cls(image2tensor(o))
@ToTensor
def encodes(self, o:PILMask): return o._tensor_cls(image2tensor(o)[0])
tfm = ToTensor()
print(tfm)
print(type(mnist_img))
print(type(tfm(mnist_img)))
tfm = ToTensor()
test_eq(tfm(mnist_img).shape, (1,28,28))
test_eq(type(tfm(mnist_img)), TensorImageBW)
test_eq(tfm(mask).shape, (96,128))
test_eq(type(tfm(mask)), TensorMask)
pipe_img = Pipeline([PILImageBW.create, ToTensor()])
img = pipe_img(mnist_fn)
test_eq(type(img), TensorImageBW)
pipe_img.show(img, figsize=(1,1));
def _cam_lbl(x): return mask_fn
cam_tds = Datasets([cam_fn], [[PILImage.create, ToTensor()], [_cam_lbl, PILMask.create, ToTensor()]])
show_at(cam_tds, 0);
#|export
def _scale_pnts(y, sz, do_scale=True, y_first=False):
    # Optionally flip (row, col) input to (x, y), then rescale pixel
    # coordinates from [0, sz] to the [-1, 1] range used by grid_sample.
    if y_first: y = y.flip(1)
    res = y * 2/tensor(sz).float() - 1 if do_scale else y
    return TensorPoint(res, img_size=sz)
def _unscale_pnts(y, sz):
    # Inverse of `_scale_pnts`: map [-1, 1] back to pixel coordinates.
    return TensorPoint((y+1) * tensor(sz).float()/2, img_size=sz)
#|export
class PointScaler(Transform):
    "Scale a tensor representing points"
    order = 1
    def __init__(self, do_scale=True, y_first=False): self.do_scale,self.y_first = do_scale,y_first
    def _grab_sz(self, x):
        # Remember the size of the image flowing through so the points of
        # the same sample can be (un)scaled relative to it.
        self.sz = [x.shape[-1], x.shape[-2]] if isinstance(x, Tensor) else x.size
        return x
    # NOTE(review): `self.sz` is only set once an image has passed
    # through `_grab_sz`; `_get_sz` falls back to the point tensor's own
    # `img_size` attribute when it is None — confirm the attribute is
    # always present in that case.
    def _get_sz(self, x): return getattr(x, 'img_size') if self.sz is None else self.sz
    def setups(self, dl):
        # Infer `c` (total number of point coordinates) from one sample.
        res = first(dl.do_item(None), risinstance(TensorPoint))
        if res is not None: self.c = res.numel()
    def encodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
    def decodes(self, x:(PILBase,TensorImageBase)): return self._grab_sz(x)
    def encodes(self, x:TensorPoint): return _scale_pnts(x, self._get_sz(x), self.do_scale, self.y_first)
    def decodes(self, x:TensorPoint): return _unscale_pnts(x.view(-1, 2), self._get_sz(x))
def _pnt_lbl(x): return TensorPoint.create(pnts)
def _pnt_open(fn): return PILImage(PILImage.create(fn).resize((28,35)))
pnt_tds = Datasets([mnist_fn], [_pnt_open, [_pnt_lbl]])
pnt_tdl = TfmdDL(pnt_tds, bs=1, after_item=[PointScaler(), ToTensor()])
test_eq(pnt_tdl.after_item.c, 10)
#|hide
#Check the size was grabbed by PointScaler and added to y
tfm = PointScaler()
tfm.as_item=False
x,y = tfm(pnt_tds[0])
test_eq(tfm.sz, x.size)
test_eq(y.img_size, x.size)
x,y = pnt_tdl.one_batch()
#Scaling and flipping properly done
#NB: we added a point earlier at (9,17); formula below scales to (-1,1) coords
test_close(y[0], tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.], [9/14-1, 17/17.5-1]]))
a,b = pnt_tdl.decode_batch((x,y))[0]
test_eq(b, tensor(pnts).float())
#Check types
test_eq(type(x), TensorImage)
test_eq(type(y), TensorPoint)
test_eq(type(a), TensorImage)
test_eq(type(b), TensorPoint)
test_eq(b.img_size, (28,35)) #Automatically picked the size of the input
pnt_tdl.show_batch(figsize=(2,2), cmap='Greys');
#|export
class BBoxLabeler(Transform):
    # Recombines decoded (TensorBBox, TensorMultiCategory) halves of a
    # sample into one `LabeledBBox`: whichever half is decoded first is
    # stashed until the other arrives.
    def setups(self, dl): self.vocab = dl.vocab
    def decode (self, x, **kwargs):
        # Reset per-call state before dispatching over the tuple.
        self.bbox,self.lbls = None,None
        return self._call('decodes', x, **kwargs)
    def decodes(self, x:TensorMultiCategory):
        # Map class indices back to their names via the vocab.
        self.lbls = [self.vocab[a] for a in x]
        return x if self.bbox is None else LabeledBBox(self.bbox, self.lbls)
    def decodes(self, x:TensorBBox):
        self.bbox = x
        return self.bbox if self.lbls is None else LabeledBBox(self.bbox, self.lbls)
#|export
#LabeledBBox can be sent in a tl with MultiCategorize (depending on the order of the tls) but it is already decoded.
@MultiCategorize
def decodes(self, x:LabeledBBox): return x
#|export
@PointScaler
def encodes(self, x:TensorBBox):
pnts = self.encodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
@PointScaler
def decodes(self, x:TensorBBox):
pnts = self.decodes(cast(x.view(-1,2), TensorPoint))
return cast(pnts.view(-1, 4), TensorBBox)
def _coco_bb(x): return TensorBBox.create(bbox[0])
def _coco_lbl(x): return bbox[1]
coco_tds = Datasets([coco_fn], [PILImage.create, [_coco_bb], [_coco_lbl, MultiCategorize(add_na=True)]], n_inp=1)
coco_tdl = TfmdDL(coco_tds, bs=1, after_item=[BBoxLabeler(), PointScaler(), ToTensor()])
#|hide
#Check the size was grabbed by PointScaler and added to y
tfm = PointScaler()
tfm.as_item=False
x,y,z = tfm(coco_tds[0])
test_eq(tfm.sz, x.size)
test_eq(y.img_size, x.size)
Categorize(add_na=True)
coco_tds.tfms
x,y,z
x,y,z = coco_tdl.one_batch()
test_close(y[0], -1+tensor(bbox[0])/64)
test_eq(z[0], tensor([1,1,1]))
a,b,c = coco_tdl.decode_batch((x,y,z))[0]
test_close(b, tensor(bbox[0]).float())
test_eq(c.bbox, b)
test_eq(c.lbl, bbox[1])
#Check types
test_eq(type(x), TensorImage)
test_eq(type(y), TensorBBox)
test_eq(type(z), TensorMultiCategory)
test_eq(type(a), TensorImage)
test_eq(type(b), TensorBBox)
test_eq(type(c), LabeledBBox)
test_eq(y.img_size, (128,128))
coco_tdl.show_batch();
#|hide
#test other direction works too
coco_tds = Datasets([coco_fn], [PILImage.create, [_coco_lbl, MultiCategorize(add_na=True)], [_coco_bb]])
coco_tdl = TfmdDL(coco_tds, bs=1, after_item=[BBoxLabeler(), PointScaler(), ToTensor()])
x,y,z = coco_tdl.one_batch()
test_close(z[0], -1+tensor(bbox[0])/64)
test_eq(y[0], tensor([1,1,1]))
a,b,c = coco_tdl.decode_batch((x,y,z))[0]
test_eq(b, bbox[1])
test_close(c.bbox, tensor(bbox[0]).float())
test_eq(c.lbl, b)
#Check types
test_eq(type(x), TensorImage)
test_eq(type(y), TensorMultiCategory)
test_eq(type(z), TensorBBox)
test_eq(type(a), TensorImage)
test_eq(type(b), MultiCategory)
test_eq(type(c), LabeledBBox)
test_eq(z.img_size, (128,128))
#|hide
from nbdev.export import notebook2script
notebook2script()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I've already pulled down The Shunned House from Project Gutenberg (https
Step2: Now we'll go through every sentence in the story and get the 'sentiment' of each one. Sentiment analysis in TextBlob returns a polarity and a subjectivity number. Here we'll just extract the polarity
Step3: Now we instantiate a dataframe by pulling in that csv
Step4: Let's plot our data! First let's just look at how the sentiment polarity changes from sentence to sentence
Step5: Very up and down from sentence to sentence! Some dark sentences (the ones below 0.0 polarity), some positive sentences (greater than 0.0 polarity) but overall kind of hovers around 0.0 polarity.
Step6: So, now let's plot the results-- How does the sentiment of Lovecraft's story change over the course of the book?
Step7: The climax of Lovecraft's story appears to be around sentence 255 or so. Things really drop off at that point and get dark, according to the TextBlob sentiment analysis.
Step8: Let's get some basic statistical information about sentence sentiments
Step9: For fun, let's just see what TextBlob thinks are the most negatively polar sentences in the short story
Step10: Let's take a quick peak at word frequencies by using the re and collections library. Here we'll use the Counter() and most_common() methods to return a list of tuples of the most common words in the story
|
<ASSISTANT_TASK:>
Python Code:
from textblob import TextBlob
import pandas as pd
import pylab as plt
import collections
import re
%matplotlib inline
with open (r'lovecraft.txt', 'r') as myfile:
shunned = myfile.read()
ushunned = unicode(shunned, 'utf-8')
tb = TextBlob(ushunned)
paragraph = tb.sentences
# Write one CSV row per sentence: its index and its sentiment polarity.
# Open the output file once instead of reopening it for every sentence
# as the original did (with a 'w'-then-'a' mode state machine), and
# write the header before the loop.
with open('shunned.csv', 'w') as text_file:
    text_file.write('number,polarity\n')
    for i, sentence in enumerate(paragraph):
        text_file.write('{},{}\n'.format(i, sentence.sentiment.polarity))
df = pd.DataFrame.from_csv('shunned.csv')
df.polarity.plot(figsize=(12,5), color='b', title='Sentiment Polarity for HP Lovecraft\'s The Shunned House')
plt.xlabel('Sentence number')
plt.ylabel('Sentiment polarity')
df['cum_sum'] = df.polarity.cumsum()
df.cum_sum.plot(figsize=(12,5), color='r',
title='Sentiment Polarity cumulative summation for HP Lovecraft\'s The Shunned House')
plt.xlabel('Sentence number')
plt.ylabel('Cumulative sum of sentiment polarity')
df.head()
df.describe()
for i in df[df.polarity < -0.5].index:
print i, tb.sentences[i]
words = re.findall(r'\w+', open('lovecraft.txt').read().lower())
collections.Counter(words).most_common(10)
words = re.findall(r'\w+', ushunned.lower())
common = collections.Counter(words).most_common()
df_freq = pd.DataFrame(common, columns=['word', 'freq'])
df_freq.set_index('word').head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
Step2: Exercise 3
|
<ASSISTANT_TASK:>
Python Code:
import cPickle as pickle
with open('GPD1_seq.fasta', 'r') as f:
lines = f.readlines()
a = 0
t = 0
g = 0
c = 0
for line in lines:
if line.startswith('>'):
continue
else:
a = a + line.count('A')
t = t + line.count('T')
g = g + line.count('G')
c = c + line.count('C')
# print 'Die Sequenz hat', a, '"A"s,', t, '"T"s,', g, '"G"s und', c, '"C"s.'
def compute_nt_composition(sequence):
    """Count the nucleotides in `sequence`.

    Returns (and stores in the global `data`, which the following cell
    reads) a dict mapping each of 'A', 'T', 'G', 'C' to its number of
    occurrences in `sequence`.

    The original version ignored its `sequence` argument and re-counted
    the global `lines` on top of the already-populated global counters,
    inflating the totals on every call; it also kept `data` local, so
    the later module-level `data['A']` lookup raised a NameError.
    """
    global data
    data = {nt: sequence.count(nt) for nt in ('A', 'T', 'G', 'C')}
    # Single-argument print works identically under Python 2 and 3.
    print('Die Sequenz hat {} "A"s, {} "T"s, {} "G"s und {} "C"s.'.format(
        data['A'], data['T'], data['G'], data['C']))
    return data
compute_nt_composition(line)
print data['A']
%matplotlib inline
from pylab import *
import scipy.integrate
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.Drucke alle die Zahlen von 0 bis 4 aus
Step2: 4.Baue einen For-Loop, indem Du alle geraden Zahlen ausdruckst, die tiefer sind als 237.
Step3: 5.Addiere alle Zahlen in der Liste
Step4: 6.Addiere nur die Zahlen, die gerade sind
Step5: 7.Drucke mit einem For Loop 5 Mal hintereinander Hello World aus
Step6: 8.Entwickle ein Programm, das alle Nummern zwischen 2000 und 3200 findet, die durch 7, aber nicht durch 5 teilbar sind. Das Ergebnis sollte auf einer Zeile ausgedruckt werden. Tipp
Step7: 9.Schreibe einen For Loop, der die Nummern in der folgenden Liste von int in str verwandelt.
Step8: 10.Schreibe nun ein Programm, das alle Ziffern 4 mit dem Buchstaben A ersetzte, alle Ziffern 5 mit dem Buchtaben B.
|
<ASSISTANT_TASK:>
Python Code:
primzweibissieben = [2, 3, 5, 7]
for prime in primzweibissieben:
print(prime)
for x in range(5):
print(x)
for x in range(3, 6):
print(x)
numbers = [
951, 402, 984, 651, 360, 69, 408, 319, 601, 485, 980, 507, 725, 547, 544,
615, 83, 165, 141, 501, 263, 617, 865, 575, 219, 390, 984, 592, 236, 105, 942, 941,
386, 462, 47, 418, 907, 344, 236, 375, 823, 566, 597, 978, 328, 615, 953, 345,
399, 162, 758, 219, 918, 237, 412, 566, 826, 248, 866, 950, 626, 949, 687, 217,
815, 67, 104, 58, 512, 24, 892, 894, 767, 553, 81, 379, 843, 831, 445, 742, 717,
958, 609, 842, 451, 688, 753, 854, 685, 93, 857, 440, 380, 126, 721, 328, 753, 470,
743, 527
]
# Hier kommt Dein Code:
new_lst = [] # braucht es nicht
for elem in numbers:
if elem < 238 and elem % 2 == 0:
new_lst.append(elem)
else:#braucht es nicht
continue #braucht es nicht
print(new_lst) #aber dann muss print in der if-clause sein(eingezogen)
#Lรถsung:
sum(numbers)
count = 0
for x in numbers:
count = count + x
print(count)
#Lรถsung:
evennumber = []
for elem in numbers:
if elem % 2 == 0:
evennumber.append(elem)
sum(evennumber)
Satz = ['Hello World', 'Hello World','Hello World','Hello World','Hello World']
for elem in Satz:
print(elem)
hello = 'Hello World'
for x in range(5):
print(hello)
#Lรถsung
l=[]
for i in range(2000, 3201):
if (i % 7==0) and (i % 5!=0):#!= entspricht nichtgleich
l.append(str(i))
print(','.join(l))#join verbindet alle elemente einer liste, hier setze ich noch ein komma dazwischen
lst = range(45,99)
newlst = []
for i in lst:
i = str(i)
newlst.append(i)
print(newlst)#return kommt immer in Funktionen!
newnewlist = [] #Replace ist ein wichtiger Befehl, da man so grosse Datenmengen bereinigt. Man ersetzt dann einfach durch ''
for elem in newlst:
if '4' in elem:
elem = elem.replace('4', 'A')
if '5' in elem:
elem = elem.replace('5', 'B')
newnewlist.append(elem)
newnewlist
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ่ฎก็ฎๅๅๅๅจๅๆฏ๏ผ
Step2: ๅฐๅฎ้
ๅผๅฝ็ฎๆ6kvไธบๅบๅ็ๅฝ็ฎๅผ๏ผ
Step3: 2.10kvไธบๅบๅ๏ผๅ ไธบไนๅ็ฎๅบไบๅฎ้
ๅผ๏ผๅ ๆญคๅช้่ฆ้็ฎk
Step4: 3.ไปฅ110kvไธบๅบๅผ
Step5: 10๏ผ ๅฏนไธ้ขๆ็คบ็ตๅ็ณป็ป๏ผ่ฏไฝไปฅๆ ๅนบๅผ่กจ็คบ็้ปๆๅพใๅนถๅฐๅๆฐๆณจๅจๅพไธใๅๅบๅๅ็$S_{B}=100MVA$ ๏ผ$110KV$็บง็ๅบๅ็ตๅ$U_{B}=110kV$ ใ
Step6: ็ฌฌไบ็งๅๆณ
|
<ASSISTANT_TASK:>
Python Code:
x1=0.4
L1=100
X_L1=x1*L1
x2=0.4
L2=80
X_L2=x2*L2
#T1 SF7-16000/110
Sn_T1=16 #MVA
Uk1=10.5 #%
Un_T1=121#KV
X_T1=Uk1*Un_T1**2/(100*Sn_T1)
#T2 S
Sn_T2=31.5 #MVA
Uk2=10.5 #%
Un_T2=121#KV
X_T2=Uk2*Un_T2**2/(100*Sn_T2)
X_T1
k1=6.3/121
k2=110/11
imp_reduction=lambda z,k:z*(k**2)
X_L2x=imp_reduction(X_L2,k1*k2)
X_L1x=imp_reduction(X_L1,k1)
X_T1x=imp_reduction(X_T1,k1)
X_T2x=imp_reduction(X_T2,k1*k2)
print("X_L2=%.3f"%X_L2x)
print("X_L1=%.3f"%X_L1x)
print("X_T1=%.3f"%X_T1x)
print("X_T2=%.3f"%X_T2x)
#10kv
k1=121/6.3
k2=11/110
X_L2x=imp_reduction(X_L2,1)
X_L1x=imp_reduction(X_L1,k2)
X_T1x=imp_reduction(X_T1,k2)
X_T2x=imp_reduction(X_T2,k2)
print("X_L2=%.3f"%X_L2x)
print("X_L1=%.3f"%X_L1x)
print("X_T1=%.3f"%X_T1x)
print("X_T2=%.3f"%X_T2x)
#110kv
k1=121/6.3
k2=110/11
X_L2x=imp_reduction(X_L2,k2)
X_L1x=imp_reduction(X_L1,1)
X_T1x=imp_reduction(X_T1,1)
X_T2x=imp_reduction(X_T2,1)
print("X_L2=%.3f"%X_L2x)
print("X_L1=%.3f"%X_L1x)
print("X_T1=%.3f"%X_T1x)
print("X_T2=%.3f"%X_T2x)
#ๆ ๅนบๅผ่ฎก็ฎ1
UB=110#kv
SB=100#MVA
XB=UB**2/SB
puv=lambda x,xb:x/xb
X_L2b=puv(X_L2x,XB)
X_L1b=puv(X_L1x,XB)
X_T1b=puv(X_T1x,XB)
X_T2b=puv(X_T2x,XB)
print("X_L2=%.3f"%X_L2b)
print("X_L1=%.3f"%X_L1b)
print("X_T1=%.3f"%X_T1b)
print("X_T2=%.3f"%X_T2b)
#ๆ ๅนบๅผ่ฎก็ฎ2
k1=6.3/121
k2=11/110
U_6B=UB*k1
U_11B=UB*k2
X_6B=U_6B**2/SB
X_11B=U_11B**2/SB
X_L2b=puv(X_L2,X_11B)
print("X_L2=%.3f"%X_L2b)
print("X_L1=%.3f"%X_L1b)
print("X_T1=%.3f"%X_T1b)
print("X_T2=%.3f"%X_T2b)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting data using requests
Step2: Here the response '200' indicates that the get request was successful. Now let's look at the actual text that was downloaded from the webpage.
Step3: Here you can see that we have downloaded all of the data from the PBS faculty page and that it is in the form of HTML.
Step4: Here we are going to find the unordered list tagged with the id 'faculty-container'. We are then going to look for any nested tag that use the 'h4' header tag. This should give us all of the lines with the faculty names as a list.
Step5: What if we wanted to get all of the faculty email addresses?
Step6: Parsing string data
Step7: One thing we might do with this data is create a dictionary with names and emails of all of the professors in the department. This could be useful if we wanted to send a bulk email to them.
Step8: You can see that every name also includes an initial. Let's try to just pull out the first and last name.
|
<ASSISTANT_TASK:>
Python Code:
try:
import requests
except:
!pip install requests
try:
from bs4 import BeautifulSoup
except:
!pip install bs4
page = requests.get("http://pbs.dartmouth.edu/people")
print(page)
print(page.content)
soup = BeautifulSoup(page.content, 'html.parser')
print(soup.prettify())
names_html = soup.find_all('ul',id='faculty-container')[0].find_all('h4')
names = [x.text for x in names_html]
print(names)
email_html = soup.find_all('ul',id='faculty-container')[0].find_all('span',{'class' : 'contact'})
email = [x.text for x in email_html]
print(email)
print([x.split('@')[0] for x in email])
email_dict = dict([(x.split('@')[0],x) for x in email])
print(email_dict)
for x in email_dict.keys():
old = x.split('.')
email_dict[" ".join([i for i in old if len(i) > 2])] = email_dict[x]
del email_dict[x]
print(email_dict)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: That looks pretty good, but the description looks nastier than we would generally prefer. Luckily, pattern provides functions to get rid of the HTML in a string.
Step2: While it's all well and good to have the title and description of a story this is often insufficient (some descriptions are just the title, which isn't particularly helpful). To get further information on the story, it is possible to combine the single-page scraping discussed previously and the results from the RSS scrape. The following code implements a function to scrape the NYT article pages, which can be done easily since the NYT is wonderfully consistent in their HTML, and then iterates over the results applying the scrape function to each result.
Step3: Scraping arbitrary websites
Step4: Getting back on topic, it is often more difficult than the above to iterate over numerous webpages within a site. This is where the Scrapy library comes in. Scrapy allows for the creation of web spiders that crawl over a webpage, following any links that it finds. This is often far more difficult to implement than a simple scraper since it requires the identification of rules for link following. The State Department offers a good example. I don't really have time to go into the depths of writing a Scrapy spider, but I thought I would put up some code to illustrate what it looks like.
|
<ASSISTANT_TASK:>
Python Code:
import pattern.web
url = 'http://rss.nytimes.com/services/xml/rss/nyt/World.xml'
results = pattern.web.Newsfeed().search(url, count=5)
results
print '%s \n\n %s \n\n %s \n\n' % (results[0].url, results[0].title, results[0].description)
print '%s \n\n %s \n\n %s \n\n' % (results[0].url, results[0].title, pattern.web.plaintext(results[0].description))
import codecs
outputFile = codecs.open('~/tutorialOutput.txt', encoding='utf-8', mode='a')
def scrape(url):
    """Fetch a NYT article page and return its body paragraphs as one string.

    Downloads `url`, parses the HTML, and concatenates the text of every
    <p itemprop="articleBody"> element.
    """
    response = requests.get(url)
    tree = lh.fromstring(response.content)
    paragraphs = tree.xpath('//p[@itemprop="articleBody"]')
    return ''.join(par.text_content() for par in paragraphs)
for result in results:
outputText = scrape(result.url)
outputFile.write(outputText)
outputFile.close()
url = 'http://164.100.47.132/LssNew/psearch/Result13.aspx?dbsl='
for i in xrange(5175,5973):
newUrl = url + str(i)
print 'Scraping: %s' % newUrl
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from scrapy.item import Item
from BeautifulSoup import BeautifulSoup
import re
import codecs
class MySpider(CrawlSpider):
    # Scrapy spider that crawls the State Department daily press briefing
    # index for a given year and visits each briefing page it finds.
    name = 'statespider' # unique spider identifier used by `scrapy crawl`
    start_urls = ['http://www.state.gov/r/pa/prs/dpb/2010/index.htm',
    ] # the URL the spider starts on; adjust the year as needed
    # Link-following rules for the crawl:
    rules = (Rule(SgmlLinkExtractor(allow=('/2010/'), restrict_xpaths=('//*[@id="local-nav"]'),)), # follow only navigation-panel links that contain /2010/
    Rule(SgmlLinkExtractor(restrict_xpaths=('//*[@id="dpb-calendar"]',), deny=('/video/')), callback='parse_item'), # follow calendar links on each year's index page, skipping any /video/ links
    )
    def parse_item(self, response):
        # Called for every page matched by the second rule above.
        self.log('Hi, this is an item page! %s' % response.url) # log the URL to help with debugging
        # Insert code to scrape page content.
        # NOTE(review): `filename` and `texts` are placeholders -- neither is
        # defined here, so this raises NameError until the scraping code that
        # produces them is filled in.
        with codecs.open(filename, 'w', encoding='utf-8') as output:
            output.write(texts)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we see the location of these 10 intersection
Step2: That the intersections with the most collions
Step3: Lets save them
Step4: Finding outliers
Step5: Look for some intersections that have a perticular high ration
|
<ASSISTANT_TASK:>
Python Code:
pd.DataFrame(series).plot(kind='bar',figsize=(15,10), subplots=True, layout=(5,2), legend=False, sharey=True)
lat = top10.LATITUDE.values
lon = top10.LONGITUDE.values
glp.dot({'lat': lat, 'lon': lon}, color="r")
glp.inline()
lat
top10Injuries = collisions.groupby(['LOCATION'])['NUMBER OF PERSONS INJURED'].sum().sort_values(ascending=False)[:10]
top10Injuries
top10Injuries
top10Killed = collisions.groupby(['LOCATION'])['NUMBER OF PERSONS KILLED'].sum().sort_values(ascending=False)[:11]
top10Killed
def splitLocation(df):
result = df.copy()
result['LAT'] = result.index.str.split(',').str.get(0).str.replace('(', '')
result['LON'] = result.index.str.split(',').str.get(1).str.replace(')', '')
return result
top10KilledDf = splitLocation(top10Killed.to_frame())
top10InjuredDf = splitLocation(top10Injuries.to_frame())
lon = top10KilledDf.LON.astype(float).values
lat = top10KilledDf.LAT.astype(float).values
glp.dot({'lat': lat, 'lon': lon}, color="r")
glp.dot({'lat': top10.LATITUDE.values, 'lon': top10.LONGITUDE.values}, color='g')
glp.dot({'lat': top10InjuredDf.LAT.astype(float).values, 'lon': top10InjuredDf.LON.astype(float).values}, color='b')
glp.inline()
def get_intersection_name(e):
    """Build an 'ON STREET / CROSS STREET' label for the intersection keyed
    by e.name (a LOCATION value), using the first matching collision row.

    Relies on the module-level `collisions` DataFrame.
    """
    matching = collisions[collisions.LOCATION == e.name]
    first_row = matching[['CROSS STREET NAME','ON STREET NAME']].iloc[0]
    return first_row['ON STREET NAME'] + ' / '+ first_row['CROSS STREET NAME']
top10InjuredDf['NAME'] = top10InjuredDf.apply(get_intersection_name, axis=1, raw=True)
top10KilledDf['NAME'] = top10KilledDf.apply(get_intersection_name, axis=1, raw=True)
top10InjuredDf
top10KilledDf
top10InjuredDf.to_csv('datasets/intersection_top10_injuries.csv', sep=',')
top10KilledDf.to_csv('datasets/intersection_top10_killed.csv', sep=',')
# Lets find the average collisions per intersection
means = intersection.mean()
means
print "For each collision %f number of persons gets injured" % (means['NUMBER OF PERSONS INJURED'] / means.LOCATION)
top10InjuredDf
top10Means = top10.mean()
top10InjuredMeans = top10InjuredDf.mean()
top10KilledMean = top10KilledDf.mean()
print "In the intersections with the most collisions, only %f amount of persons where injured\n" % top10Means['NUMBER OF PERSONS INJURED']
print "However looking at the top most intersection with the most injured,\n then we have %f persons injured per collison.\n"\
% (top10InjuredMeans['NUMBER OF PERSONS INJURED'] / top10InjuredMeans['COLLISIONS'])
#print "And lastly looking at the intersections with the most killed people, only %f persons get injured (not including killed) per collision"\
#% (top10KilledMean['NUMBER OF PERSONS INJURED'] / top10KilledMean['COLLISIONS'])
loc_count = collisions.LOCATION.value_counts()
def get_collision_count(e):
    """Look up the precomputed collision count for this row's LOCATION key
    (e.name) in the module-level `loc_count` value-counts Series."""
    return loc_count.loc[e.name]
top10InjuredDf['COLLISIONS'] = top10InjuredDf.copy().apply(get_collision_count, axis=1)
top10KilledDf['COLLISIONS'] = top10KilledDf.copy().apply(get_collision_count, axis=1)
collisions.groupby(['LOCATION']).value_counts()
def calc_collision_inju_ration(e):
    """Injured-per-collision ratio for one aggregated row.

    `e.LOCATION` holds the collision count for the intersection; the
    numerator is the total number of persons injured there.
    """
    injured = e['NUMBER OF PERSONS INJURED']
    n_collisions = e.LOCATION
    return injured / n_collisions
intersection[intersection.LOCATION > 20].apply(calc_collision_inju_ration, axis=1, raw=True).sort_values()
collisions[collisions.LOCATION == '(40.6790346, -73.9385077)']
top10['CONTRIBUTING FACTOR VEHICLE 1'].value_counts()
def causesTop(n):
    """Value counts of the primary contributing factor at the n-th most
    collision-prone location (1-based), using the module-level `collisions`."""
    nth_location = collisions.LOCATION.value_counts()[n-1:n]
    subset = collisions[collisions.LOCATION.isin(nth_location.index)]
    return subset['CONTRIBUTING FACTOR VEHICLE 1'].value_counts()
series = {}
for i in range(1,10):
series[i] = causesTop(i)
pd.DataFrame(series).plot(kind='bar', figsize=(15,10), subplots=True)
numberTop6 = collisions[collisions.LOCATION.isin(collisions.LOCATION.value_counts()[5:6].index)]
pd.to_datetime(numberTop6.DATE).value_counts().plot(figsize=(15,15))
#pd.Series(range(0,numberTop6['NUMBER OF PERSONS INJURED'].size), index=pd.to_datetime(numberTop6.DATE)).plot(kind='bar',figsize=(15,15))
numberTop6.size
top10
#top10['HOUR'] = top10.TIME.str.split(':').str.get(0).astype('int')
#top1['HOUR'] = top1.TIME.str.split(':').str.get(0).astype('int')
collisions['HOUR'] = collisions.TIME.str.split(':').str.get(0).astype('int')
top10.HOUR.value_counts().sort_index().plot(figsize=(15,10), kind='bar')
top1.HOUR.value_counts().sort_index().plot(figsize=(15,10), kind='bar')
top10.HOUR.value_counts().index
top1 = collisions[collisions.LOCATION.isin(collisions.LOCATION.value_counts()[0:1].index)]
series = {}
for r in collisions['CONTRIBUTING FACTOR VEHICLE 1'].unique():
series[r] = collisions[collisions['CONTRIBUTING FACTOR VEHICLE 1'] == r].HOUR.value_counts().sort_index()
series['Combined'] = collisions.HOUR.value_counts().sort_index()
pd.DataFrame(series).plot(figsize=(15,100), subplots=True)
collisions['CONTRIBUTING FACTOR VEHICLE 1'].unique()
collisions
collisions.['']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='section3b'></a>
Step2: <a id='section3c'></a>
Step3: <a id='section3d'></a>
Step4: <a id='section3e'></a>
Step5: <a id='section3f'></a>
Step6: Model selection
Step7: It looks like we were able to eke out some improvement in the performance. The Decision Tree Classifier seems to perform best with the min_samples_split set to 7. We will use this for our final model. Note that the default values for 'criterion' and 'splitter' seem to be part of the best performing set of parameters.
Step8: <a id='section3h'></a>
Step9: Some of the false negatives seem like they should have been marked as spam, so it is interesting that the model missed these. We may need to tune our vectorizer and/or attempt some other classifiers.
|
<ASSISTANT_TASK:>
Python Code:
# Import modules
import numpy as np
import pandas as pd
# Read the data set; print the first few rows
files = ['data\\Youtube01-Psy.csv', 'data\\Youtube02-KatyPerry.csv', 'data\\Youtube03-LMFAO.csv',
'data\\Youtube04-Eminem.csv', 'data\\Youtube05-Shakira.csv']
df = pd.DataFrame()
for file in files:
df = df.append(pd.read_csv(file))
df.head()
# Check for missing values
df.info()
# Looks like there are missing values in the DATE column, but it is not a column of interest. Let's proceed.
# Of the five columns, the only relevant columns for spam/ham classification are the CONTENT and CLASS columns.
# We will use just these two columns. But first, let's check the distribution of spam and ham
df.CLASS.value_counts()
# There is an almost equal distribution. Given that this is a small data set, this is probably good,
# because the algorithm has enough items it can learn from
# Now, let us set up our X and y
X = df.CONTENT
y = df.CLASS
# Let us now split the data set into train and test sets
# We will use an 80/20 split
test_size = 0.2
seed = 42
scoring = 'accuracy'
num_folds = 10
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=seed, test_size=test_size)
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
models = []
names = []
results = []
lr = ('LR', LogisticRegression())
knn = ('KNN', KNeighborsClassifier())
svc = ('SVC', SVC())
nb = ('NB', MultinomialNB())
cart = ('CART', DecisionTreeClassifier())
models.extend([lr, knn, svc, nb, cart])
# Set up a vectorizer, and create a Document-Term matrix
from sklearn.feature_extraction.text import CountVectorizer
vect = CountVectorizer()
X_train_dtm = vect.fit_transform(X_train)
# Check the layout of the Document-Term matrix
X_train_dtm
from sklearn.model_selection import KFold, cross_val_score
for name, model in models:
kfold = KFold(n_splits=num_folds, random_state=seed)
score = cross_val_score(model, X_train_dtm, y_train, scoring=scoring, cv=kfold)
names.append(name)
results.append(score)
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, \
RandomForestClassifier, ExtraTreesClassifier
ensembles = []
ensemble_names = []
ensemble_results = []
ensembles.append(('AB', AdaBoostClassifier()))
ensembles.append(('RF', RandomForestClassifier()))
ensembles.append(('ET', ExtraTreesClassifier()))
for name, model in ensembles:
kfold = KFold(n_splits=num_folds, random_state=seed)
score = cross_val_score(model, X_train_dtm, y_train, cv=kfold, scoring=scoring)
ensemble_names.append(name)
ensemble_results.append(score)
models_list = []
for i, name in enumerate(names):
d = {'model': name, 'mean': results[i].mean(), 'std': results[i].std()}
models_list.append(d)
for i, name in enumerate(ensemble_names):
d = {'model': name, 'mean': results[i].mean(), 'std': results[i].std()}
models_list.append(d)
models_df = pd.DataFrame(models_list).set_index('model')
models_df.sort_values('mean', ascending=False)
cart
from sklearn.model_selection import GridSearchCV
final_model = DecisionTreeClassifier()
criterion_values = ['gini', 'entropy']
splitter_values = ['best', 'random']
min_samples_split_values = np.arange(2, 11, 1)
param_grid = dict(criterion=criterion_values, splitter=splitter_values,
min_samples_split=min_samples_split_values)
kfold = KFold(n_splits=num_folds, random_state=seed)
grid = GridSearchCV(estimator=final_model, cv=kfold, scoring=scoring, param_grid=param_grid)
grid_result = grid.fit(X_train_dtm, y_train)
print(grid_result.best_params_, grid_result.best_score_)
final_model = DecisionTreeClassifier(min_samples_split=7, random_state=seed)
final_model.fit(X_train_dtm, y_train)
# Transform the test data to a DTM and predict
X_test_dtm = vect.transform(X_test)
y_pred = final_model.predict(X_test_dtm)
# Let us check the accuracy score
# It needs to better than 50%, which was the baseline
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
accuracy_score(y_test, y_pred)
# The accuracy score was 93.37%, which is lower than we may have anticipated
# Let us check the confusion matrix to get a sense of the prediction distribution
confusion_matrix(y_test, y_pred)
# The model predicted 366 out of 392 instances correctly
# We had 14 false positives and 12 false negatives
# What were the false positive comments? (That is, ham marked as spam)
X_test[y_pred > y_test]
# And what were the false negative comments? (That is, spam comments that went undetected)
X_test[y_pred < y_test]
roc_auc_score(y_test, final_model.predict_proba(X_test_dtm)[:, 1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Batch Gradient Descent
Step2: The Softmax Function
Step3: Non-linear Perceptron With SoftMax
Step4: Cross Entropy Error
Step11: Gradient of the Cross Entropy Error
Step12: Gradient of the Cross Entropy Error
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
def L(x):
    """Quadratic loss x^2 - 2x + 1 (i.e. (x - 1)^2), minimized at x = 1."""
    return x * x - 2 * x + 1
def L_prime(x):
    """Derivative of L: d/dx (x^2 - 2x + 1) = 2x - 2."""
    return x * 2 - 2
def converged(x_prev, x, epsilon):
    """Return True if every element of |x - x_prev| is <= epsilon.

    Works for scalars and numpy arrays alike.
    """
    absdiff = np.abs(x - x_prev)
    return np.all(absdiff <= epsilon)

def gradient_descent(f_prime, x_0, learning_rate=0.2, n_iters=100, epsilon=1E-8):
    """Minimize a differentiable function by stepping against its gradient.

    Parameters
    ----------
    f_prime : callable
        Gradient of the objective; must accept whatever type x_0 is
        (scalar or numpy array).
    x_0 : float or ndarray
        Starting point. Not mutated.
    learning_rate : float
        Step size.
    n_iters : int
        Maximum number of iterations.
    epsilon : float
        Convergence tolerance on the per-step change.

    Returns
    -------
    The (approximate) minimizer.
    """
    x = x_0
    for _ in range(n_iters):
        x_prev = x
        # Out-of-place update. The previous in-place `x -= ...` mutated
        # ndarray inputs, which aliased x_prev with x and made the
        # convergence test fire immediately after one step (and clobbered
        # the caller's array).
        x = x - learning_rate * f_prime(x)
        if converged(x_prev, x, epsilon):
            break
    return x
x_min = gradient_descent(L_prime, 2)
print('Minimum value of L(x) = x**2 - 2*x + 1.0 is [%.2f] at x = [%.2f]' % (L(x_min), x_min))
import seaborn as sns
import pandas as pd
iris_df = sns.load_dataset('iris')
print('Columns: %s' % (iris_df.columns.values, ))
print('Labels: %s' % (pd.unique(iris_df['species']), ))
iris_df.head(5)
def softmax(x):
    """Numerically stable softmax.

    Shifting by max(x) before exponentiating leaves the result unchanged
    mathematically but prevents overflow for large inputs (the naive
    np.exp(x) / np.sum(np.exp(x)) blows up).
    """
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
a = np.array([-500.9, 2000, 7, 11, 12, -15, 100])
sm_a = softmax(a)
print('Softmax(%s) = %s' % (a, sm_a))
def encode_1_of_n(ordered_labels, y):
    """One-hot encode each label in y against its position in ordered_labels.

    Returns an array of shape (len(y), len(ordered_labels)) whose rows are
    0.0/1.0 indicator vectors.
    """
    index_of = {label: pos for pos, label in enumerate(ordered_labels)}
    n_classes = len(ordered_labels)
    rows = []
    for label in y:
        row = np.zeros(n_classes)
        row[index_of[label]] = 1.0
        rows.append(row)
    return np.array(rows)
encode_1_of_n(['apple', 'banana', 'orange'],
['apple', 'banana', 'orange', 'apple', 'apple'])
def cross_entropy_loss(Y, Y_hat):
    """Mean cross-entropy between one-hot targets Y and predictions Y_hat.

    Each row of Y is a one-hot label vector and each row of Y_hat a
    probability vector; the per-row dot product with log(Y_hat) is
    accumulated and averaged over Y.shape[0] samples.
    """
    total = 0.0
    for target, log_pred in zip(Y, np.log(Y_hat)):
        total += np.dot(target, log_pred)
    return -total / Y.shape[0]
Y_tst = np.array([[1, 0, 0],
[0, 1, 0]])
# log(Y_hat_tst1) is the same as Y_tst, so we expect the x-entropy error to be the min (-1) in this case.
print(Y_tst)
Y_hat_tst1 = np.array([[np.e, 1, 1,],
[1, np.e, 1]])
print(Y_hat_tst1)
print(cross_entropy_loss(Y_tst, Y_hat_tst1))
print()
# expect it to be > -1
Y_hat_tst2 = np.array([[1, 1, 1,],
[1, np.e, 1]])
print(Y_hat_tst2)
print(cross_entropy_loss(Y_tst, Y_hat_tst2))
print()
import pandas as pd
class OneLayerNetworkWithSoftMax:
    """Single-layer softmax classifier (multinomial logistic regression)
    trained with minibatch SGD.

    Weights are stored as a pandas DataFrame of shape (n_labels, n_features).
    Relies on the module-level `softmax` and `encode_1_of_n` helpers.

    Note: the extraction had stripped the docstring quotes, leaving bare
    prose inside the method bodies (a SyntaxError); they are restored here.
    """

    def __init__(self):
        self.w, self.bias = None, 0.0
        self.optimiser = None
        self.output = None  # cached activations from the last forward pass

    def init_weights(self, X, Y):
        """Initialize a 2D weight matrix as a DataFrame with
        dim(n_labels * n_features), drawn from a standard normal.
        """
        self.labels = np.unique(Y)
        w_init = np.random.randn(len(self.labels), X.shape[1])
        self.w = pd.DataFrame(data=w_init)
        self.w.index.name = 'node_id'

    def predict(self, x):
        """Return the predicted label of x using the current weights."""
        output = self.forward(x, update=False)
        max_label_idx = np.argmax(output)
        return self.labels[max_label_idx]

    def forward(self, x, update=True):
        """Calculate softmax(Wx + b) for one sample x.

        When `update` is True the activations are cached in self.output
        for the subsequent backward pass.
        """
        output = np.dot(self.w, x)
        output += self.bias
        output = softmax(output)
        if update:
            self.output = output
        return output

    def backward(self, x, y, learning_rate):
        """Return the learning-rate-scaled cross-entropy gradient w.r.t. the
        weights for one sample (the caller applies the update).

        :param x: one sample vector.
        :param y: one-hot encoded label for x.
        """
        # [y_hat1 - y1, y_hat2 - y2, ...]
        y_hat_min_y = self.output - y
        # Turn the error vector into a column and multiply x into each
        # element, producing a 2D array (n_labels * n_features), same as w.
        error_grad = np.apply_along_axis(lambda z: z * x,
                                         1, np.atleast_2d(y_hat_min_y).T)
        dw = learning_rate * error_grad
        return dw

    def print_weight_diff(self, i, w_old, diff_only=True):
        """Print the current weights and the absolute per-entry change since
        w_old for iteration i (optionally also the pre-update weights)."""
        if not diff_only:
            print('Before Iteration [%s]: weights are: \n%s' %
                  (i+1, w_old))
        print('After Iteration [%s]: weights are: \n%s' %
              (i+1, self.w))
        w_diff = np.abs(w_old - self.w)
        print('After Iteration [%s]: weights diff: \n%s' %
              (i+1, w_diff))

    def _gen_minibatch(self, X, Y, mb_size):
        """Generate `mb_size` sized chunks from X and Y in shuffled order."""
        n_samples = X.shape[0]
        indices = np.arange(n_samples)
        np.random.shuffle(indices)
        for start in range(0, n_samples, mb_size):
            # Fix: the shuffled indices were previously computed but never
            # used, so minibatches came out in the original sample order.
            batch_idx = indices[start:start+mb_size]
            yield X[batch_idx, :], Y[batch_idx, :]

    def _update_batch(self, i, X_batch, Y_batch, learning_rate, print_every=100):
        """Run forward/backward over one minibatch and apply the averaged
        gradient step; periodically print the weight change."""
        w_old = self.w.copy()
        dw = []
        for x, y in zip(X_batch, Y_batch):
            self.forward(x)
            dw_item = self.backward(x, y, learning_rate)
            dw.append(dw_item)
        dw_batch = np.mean(dw, axis=0)
        self.w -= dw_batch
        if (i == 0) or ((i+1) % print_every == 0):
            self.print_weight_diff(i, w_old)

    def train(self, X, Y,
              n_iters=1000,
              learning_rate=0.2,
              minibatch_size=30,
              epsilon=1E-8):
        """Entry point for the minibatch SGD training loop.

        Calls forward + backward for each (x_i, y_i) pair and adjusts the
        weights accordingly. `epsilon` is currently unused (no convergence
        check is performed).
        """
        self.init_weights(X, Y)
        Y = encode_1_of_n(self.labels, Y)
        # MiniBatch SGD
        for i in range(n_iters):
            for X_batch, Y_batch in self._gen_minibatch(X, Y, minibatch_size):
                self._update_batch(i, X_batch, Y_batch, learning_rate)
# Set aside test data
label_grouper = iris_df.groupby('species')
test = label_grouper.head(10).set_index('species')
train = label_grouper.tail(100).set_index('species')
# Train the Network
X_train, Y_train = train.as_matrix(), train.index.values
nn = OneLayerNetworkWithSoftMax()
nn.train(X_train, Y_train)
# Test
results = test.apply(lambda row : nn.predict(row.as_matrix()), axis=1)
results.name = 'predicted_label'
results.index.name = 'expected_label'
results.reset_index()
import networkx as nx
from matplotlib import pylab
G = nx.DiGraph()
G.add_edges_from(
[('i', 'n1'),
('i', 'n2'),
('n1', 's1'),
('n2', 's1'),
('n1', 's2'),
('n2', 's2'),
('s1', 'y1'),
('s2', 'y2'),
])
pos = {'i': (1, 1),
'n1': (2, 0), 'n2': (2, 2),
's1': (3, 0), 's2': (3, 2),
'y1': (4, 0), 'y2': (4, 2),
}
labels = {'i': r'$x_i$',
'n1': r'$w_1$', 'n2': r'$w_2$',
's1': r'$s_1$', # r'$\frac {\exp(z_{i1})} {S_i}$',
's2': r'$s_2$', # r'$\frac {\exp(z_{i2})} {S_i}$'
}
edge_labels = {('i', 'n1'): r'$x_i$',
('i', 'n2'): r'$x_i$',
('n1', 's1'): r'$w_1^Tx_i$',
('n1', 's2'): r'$w_1^Tx_i$',
('n2', 's1'): r'$w_2^Tx_i$',
('n2', 's2'): r'$w_2^Tx_i$',
('n2', 's1'): r'$w_2^Tx_i$',
('s1', 'y1'): r'$\frac {\exp(z_{i1})} {S_i}$',
('s2', 'y2'): r'$\frac {\exp(z_{i2})} {S_i}$',
}
nx.draw(G, pos=pos, node_size=1000)
nx.draw_networkx_labels(G,pos,labels, font_size=15, color='white')
nx.draw_networkx_edge_labels(G, pos=pos,
edge_labels=edge_labels, font_size=15)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the database connection
Step3: Select data on the first hospital stay
Step4: Display the first few rows of the data
Step5: Create Table 1
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
from tableone import TableOne
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import getpass
%matplotlib inline
plt.style.use('ggplot')
# Create a database connection
user = 'postgres'
host = 'localhost'
dbname = 'mimic'
schema = 'mimiciii_demo'
# Connect to the database
con = psycopg2.connect(dbname=dbname, user=user, host=host,
password=getpass.getpass(prompt='Password:'.format(user)))
cur = con.cursor()
cur.execute('SET search_path to {}'.format(schema))
# Run query and assign the results to a Pandas DataFrame
# Get first admission for each patient
query = \
WITH admit AS (
SELECT p.gender,
ROUND( (CAST(EXTRACT(epoch FROM a.dischtime - a.admittime)/(60*60*24) AS numeric)), 4) AS los_hospital,
ROUND( (CAST(EXTRACT(epoch FROM a.admittime - p.dob)/(60*60*24*365.242) AS numeric)), 4) AS age,
DENSE_RANK() OVER (PARTITION BY a.subject_id ORDER BY a.admittime) AS admission_seq,
a.ethnicity, a.admission_type, a.insurance, a.religion, a.marital_status, a.hospital_expire_flag
FROM patients p
LEFT JOIN admissions a
ON p.subject_id = a.subject_id)
SELECT *
FROM admit
WHERE admission_seq = 1;
data = pd.read_sql_query(query,con)
data.head()
# Columns to include in the summary table
columns = ['gender', 'los_hospital', 'age', 'ethnicity','admission_type', 'insurance',
'religion', 'marital_status','hospital_expire_flag']
# List of categorical variables
categorical = ['gender', 'ethnicity', 'insurance', 'religion', 'marital_status',
'hospital_expire_flag']
# Group the data
groupby = 'admission_type'
# Display the top n number of categorical variables
limit = 3
# Compute p values
pval = False
# Display a count of null values
isnull = False
t = TableOne(data, columns=columns, categorical=categorical,
groupby=groupby, limit=limit, pval=pval, isnull=isnull)
t.tableone
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: A clear choice for modeling the distribution over fixed donation amounts is the multinomial distribution. For modeling custom donations amounts, we could consider a continuous distribution over the set of positive numbers such as the log-normal distribution. Then we could model the distribution of over both fixed and custom amounts as a mixture between the two. For now, I will focus on just modeling the distribution over fixed amounts as they make up the vast majority of donations.
Step4: As we run the banner longer and more users enter the experiment, we should expect the posterior distribution to concentrate in a tighter interval around the true return per user.
Step6: Introducing Credible Intervals
Step7: Now you know how to generate the posterior distribution $\mathcal P \left({R |Data}\right)$ over the expected revenue per user of a single banner and sample from it. This is the basic building block for the next section on comparing the performance of banners.
Step8: On this particular run, banner A did better than expected and banner B did worse than expected. Judging from the data we observe, we are 97% certain that banner A is better than banner B. Looking at one particular run is not particularly instructive. For a given true difference $R_a - R_b$ in expected rewards, the key factor that influences $\mathcal P \left({R_a > R_b | Data}\right)$ is the sample size of the test. The following plot characterizes the distribution over our estimates of $\mathcal P \left({R_a > R_b | Data}\right)$ for different sample sizes.
Step9: The plot illustrates an intuitive fact
Step10: To test the accuracy of the method, we can repeat the exercise from above of repeatedly generating confidence intervals and seeing if our x% confidence intervals cover the true percent difference x% of the time. You will see that they do
|
<ASSISTANT_TASK:>
Python Code:
def plot_donation_amounts(counts):
    """Draw a bar chart of donation-amount frequencies, labeling each bar.

    Parameters
    ----------
    counts : dict
        Maps a donation amount to the number of times it occurred.
    """
    amounts = list(counts.keys())
    frequencies = list(counts.values())
    plt.figure(figsize=(15, 6))
    # Space the bars 1.5 units apart along the x axis.
    positions = 1.5*np.arange(len(amounts))
    bars = plt.bar(positions, frequencies, align='center',
                   facecolor='yellow', edgecolor='gray')
    plt.xticks(positions)
    # Annotate each bar with its (integer) height just above the bar top.
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width()/2., 1.01*height,
                 '%d' % int(height), ha='center', va='bottom')
    plt.xlabel('Donation Amounts')
    plt.ylabel('Amount Frequencies')
amounts = {3.0: 419, 5.0: 307, 10.0: 246, 20.0: 163, 30.0: 89, 50.0: 38, 100.0: 23, 1.0: 9, 15.0: 7, 25.0: 3, 2.0: 2, 200.0: 1, 9.0: 1, 18.0: 1, 48.0: 1, 35.0: 1, 52.0: 1, 36.0: 1}
plot_donation_amounts(amounts)
import numpy as np
from numpy.random import dirichlet
from numpy.random import multinomial
import matplotlib.pyplot as plt
import seaborn as sns
# Set of reward values for banner A
values_a = np.array([0.0, 2.0, 3.0, 4.0])
# True probability of each reward for banner A
p_a = np.array([0.4, 0.3, 0.2, 0.1])
# True expected return per user for banner A
return_a = p_a.dot(values_a.transpose())
def run_banner(p, n):
    """Simulate running a banner for n users with true probability
    vector over rewards p.

    Returns a vector of counts for how often each reward occurred.
    """
    # NOTE: the original docstring had lost its triple quotes, leaving the
    # function body syntactically invalid; restored here.
    return multinomial(n, p, size=1)[0]
def get_posterior_expected_reward_sample(alpha, counts, values, n=40000):
    """Sample from the posterior distribution over expected reward per user.

    First draw n samples from the Dirichlet posterior over the vector of
    reward probabilities (prior alpha plus observed counts), then take the
    dot product of each sample with the vector of reward values.
    Returns an array of n expected-reward samples.
    """
    # NOTE: the original docstring had lost its triple quotes, leaving the
    # function body syntactically invalid; restored here.
    dirichlet_sample = dirichlet(counts + alpha, n)
    return dirichlet_sample.dot(values.transpose())
#lets assume we know nothing about our banners and take a uniform prior
alpha = np.ones(values_a.shape)
#simulate running the banner 1000 times
counts = run_banner(p_a, 1000)
# get a sample from the distribution over expected revenue per user
return_distribution = get_posterior_expected_reward_sample(alpha, counts, values_a)
# plot the posterior distribution agains the true value
fig, ax = plt.subplots()
ax.hist(return_distribution, bins = 40, alpha = 0.6, normed = True, label = 'n = 1000')
ax.axvline(x=return_a, color = 'b')
plt.xlabel('Expected Reward per User')
counts = run_banner(p_a, 10000)
return_distribution = get_posterior_expected_reward_sample(alpha, counts, values_a)
ax.hist(return_distribution, bins = 40, alpha = 0.6, normed = True, label = 'n = 10000')
ax.axvline(x=return_a, color = 'b')
ax.legend()
fig
def get_credible_interval(dist, confidence):
    """Return the (lower, upper) equal-tailed credible interval.

    `confidence` is a percentage (e.g. 95 for a 95% interval); the
    remaining probability mass is split evenly between the two tails.
    """
    tail = (100.0 - confidence) / 2.0
    return np.percentile(dist, tail), np.percentile(dist, 100.0 - tail)
def interval_covers(interval, true_value):
    """Check if the credible interval covers the true parameter.

    Returns 1 when true_value lies within [interval[0], interval[1]]
    (endpoints inclusive), otherwise 0.
    """
    # NOTE: the original docstring had lost its triple quotes, leaving the
    # function body syntactically invalid; restored here.
    if interval[1] < true_value:
        return 0
    if interval[0] > true_value:
        return 0
    return 1
# Simulate multiple runs and count what fraction of the time the interval covers
confidence = 95
iters = 10000
cover_count = 0.0
for i in range(iters):
#simulate running the banner
counts = run_banner(p_a, 1000)
# get the posterior distribution over rewards per user
return_distribution = get_posterior_expected_reward_sample(alpha, counts, values_a)
# form a credible interval over reward per user
return_interval = get_credible_interval(return_distribution, confidence)
# record if the interval covered the true reward per user
cover_count+= interval_covers(return_interval, return_a)
print ("%d%% credible interval covers true return %.3f%% of the time" %(confidence, 100*(cover_count/iters)))
# Set of returns for treatment B
values_b = np.array([0.0, 2.5, 3.0, 5.0])
# True probability of each reward for treatment B
p_b = np.array([0.60, 0.10, 0.12, 0.18])
# True expected reward per user for banner B
return_b = p_b.dot(values_b)
# simulate running both banners
counts_a = run_banner(p_a, 1000)
return_distribution_a = get_posterior_expected_reward_sample(alpha, counts_a, values_a)
counts_b = run_banner(p_b, 1000)
return_distribution_b = get_posterior_expected_reward_sample(alpha, counts_b, values_b)
#plot the posterior distributions
plt.figure()
plt.hist(return_distribution_a, bins = 40, alpha = 0.4, normed = True, label = 'A')
plt.axvline(x=return_a, color = 'b')
plt.hist(return_distribution_b, bins = 40, alpha = 0.4, normed = True, label = 'B')
plt.axvline(x=return_b, color = 'g')
plt.xlabel('Expected Revenue per User')
plt.legend()
#compute the probability that banner A is better than banner B
prob_a_better = (return_distribution_a > return_distribution_b).mean()
print ("P(R_A > R_B) = %0.4f" % prob_a_better)
results = []
for sample_size in range(500, 5000, 500):
for i in range(1000):
counts_a = run_banner(p_a, sample_size)
return_distribution_a = get_posterior_expected_reward_sample(alpha, counts_a, values_a)
counts_b = run_banner(p_b, sample_size)
return_distribution_b = get_posterior_expected_reward_sample(alpha, counts_b, values_b)
prob_a_better = (return_distribution_a > return_distribution_b).mean()
results.append({'n':sample_size, 'p': prob_a_better})
sns.boxplot(x='n', y = 'p', data = pd.DataFrame(results))
plt.xlabel('sample size')
plt.ylabel('P(R_A > R_B)')
true_percent_difference = 100 * ((return_a - return_b) / return_b)
# simulate running both banners
counts_a = run_banner(p_a, 4000)
return_distribution_a = get_posterior_expected_reward_sample(alpha, counts_a, values_a)
counts_b = run_banner(p_b, 4000)
return_distribution_b = get_posterior_expected_reward_sample(alpha, counts_b, values_b)
#compute distribution over percent differences
percent_difference_distribution = 100* ((return_distribution_a - return_distribution_b) / return_distribution_b)
#plot the posterior distributions
plt.figure()
plt.hist(percent_difference_distribution, bins = 40, alpha = 0.6, normed = True)
plt.axvline(x=true_percent_difference, color = 'b')
plt.xlabel('Percent Difference')
#compute the probability that banner A is better than banner B
lower, upper = get_credible_interval(percent_difference_distribution, 95)
print ("The percent lift that A has over B lies in the interval (%0.3f, %0.3f) with 95%% certainty" % (lower, upper))
# Simulate multiple runs and count what fraction of the time the interval covers
confidence = 95
iters = 10000
cover_count = 0.0
for i in range(iters):
#simulate running the banner
counts_a = run_banner(p_a, 4000)
counts_b = run_banner(p_b, 4000)
# get the posterior distribution over percent difference
return_distribution_a = get_posterior_expected_reward_sample(alpha, counts_a, values_a)
return_distribution_b = get_posterior_expected_reward_sample(alpha, counts_b, values_b)
percent_difference_distribution = 100* ((return_distribution_a - return_distribution_b) / return_distribution_b)
# get credible interval
interval = get_credible_interval(percent_difference_distribution, confidence)
# record if the interval covered the true reward per user
cover_count+= interval_covers(interval, true_percent_difference)
print ("%d%% credible interval covers true percent difference %.3f%% of the time" %(confidence, 100*(cover_count/iters)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read input tables
Step2: Block tables to get candidate set
Step3: From the plot we can see that 20003 is definitely an error. We will replace 20003 by 2003.
Step4: Debug blocking output
Step5: Match tuple pairs in candidate set
Step6: Selecting the best learning-based matcher using I
Step8: Debug X (Random Forest)
Step9: Compute accuracy of X (Decision Tree) on J
|
<ASSISTANT_TASK:>
Python Code:
import py_entitymatching as em
import profiler
import pandas as pd
## Read input tables
A = em.read_csv_metadata('dblp_demo.csv', key='id')
B = em.read_csv_metadata('acm_demo.csv', key='id')
len(A), len(B), len(A) * len(B)
A.head(2)
B.head(2)
# If the tables are large we can downsample the tables like this
A1, B1 = em.down_sample(A, B, 500, 1, show_progress=False)
len(A1), len(B1)
# But for the demo, we will use the entire table A and B
profiler.profile_table(A, 'paper year')
profiler.profile_table(B, 'paper year')
B.replace({'paper year':{
20003:2003
}}, inplace=True)
### Blocking plan
### A, B -- AttrEquivalence blocker [year]--------------------------| Candidate set
# Create attribute equivalence blocker
ab = em.AttrEquivalenceBlocker()
# Block tables using 'year' attribute : same year include in candidate set
C1 = ab.block_tables(A, B, 'paper year', 'paper year',
l_output_attrs=['title', 'authors', 'paper year'],
r_output_attrs=['title', 'authors', 'paper year']
)
len(C1)
C1.head(2)
# check whether the current blocking method has dropped a lot of potential matches
dbg = em.debug_blocker(C1, A, B)
dbg.head()
# em.view_table(dbg)
# Revised blocking plan
# A, B -- AttrEquivalence blocker [year] --------------------|
# |---> candidate set
# A, B -- Overlap blocker [title]---------------------------|
profiler.profile_table(A, 'title', plot=False)
profiler.profile_table(B, 'title', plot=False)
# Initialize overlap blocker
ob = em.OverlapBlocker()
# Block over title attribute
C2 = ob.block_tables(A, B, 'title', 'title', show_progress=False, overlap_size=1)
len(C2)
# Combine the outputs from attr. equivalence blocker and overlap blocker
C = em.combine_blocker_outputs_via_union([C1, C2])
len(C)
# Check again to see if we are dropping any potential matches
dbg = em.debug_blocker(C, A, B)
dbg.head()
# Sample candidate set
S = em.sample_table(C, 450)
# Label S
S = em.label_table(S, 'label')
# Load the pre-labeled data
S = em.read_csv_metadata('labeled_data_demo.csv',
key='_id',
ltable=A, rtable=B,
fk_ltable='ltable_id', fk_rtable='rtable_id')
len(S)
# Split S into I an J
IJ = em.split_train_test(S, train_proportion=0.5, random_state=0)
I = IJ['train']
J = IJ['test']
# Create a set of ML-matchers
dt = em.DTMatcher(name='DecisionTree', random_state=0)
svm = em.SVMMatcher(name='SVM', random_state=0)
rf = em.RFMatcher(name='RF', random_state=0)
lg = em.LogRegMatcher(name='LogReg', random_state=0)
ln = em.LinRegMatcher(name='LinReg')
# Generate a set of features
F = em.get_features_for_matching(A, B)
# List the feature names generated
F['feature_name']
# Convert the I into a set of feature vectors using F
H = em.extract_feature_vecs(I,
feature_table=F,
attrs_after='label',
show_progress=False)
# Select the best ML matcher using CV
result = em.select_matcher([dt, rf, svm, ln, lg], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
k=5,
target_attr='label', metric='f1', random_state=0)
result['cv_stats']
# Split H into P and Q
PQ = em.split_train_test(H, train_proportion=0.5, random_state=0)
P = PQ['train']
Q = PQ['test']
# Debug RF matcher using GUI
em.vis_debug_rf(rf, P, Q,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
target_attr='label')
# Add a feature to do Jaccard on title + authors and add it to F
# Create a feature declaratively
sim = em.get_sim_funs_for_matching()
tok = em.get_tokenizers_for_matching()
feature_string = jaccard(wspace((ltuple['title'] + ' ' + ltuple['authors']).lower()),
wspace((rtuple['title'] + ' ' + rtuple['authors']).lower()))
feature = em.get_feature_fn(feature_string, sim, tok)
# Add feature to F
em.add_feature(F, 'jac_ws_title_authors', feature)
# Print supported sim. functions
pd.DataFrame({'simfunctions':sorted(sim.keys())})
# Print supported tokenizers
pd.DataFrame({'tokenizers':sorted(tok.keys())})
F['feature_name']
# Convert I into feature vectors using updated F
H = em.extract_feature_vecs(I,
feature_table=F,
attrs_after='label',
show_progress=False)
# Check whether the updated F improves X (Random Forest)
result = em.select_matcher([rf], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
k=5,
target_attr='label', metric='f1', random_state=0)
result['cv_stats']
# Select the best matcher again using CV
result = em.select_matcher([dt, rf, svm, ln, lg], table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
k=5,
target_attr='label', metric='f1', random_state=0)
result['cv_stats']
# Train using feature vectors from I
dt.fit(table=H,
exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
target_attr='label')
# Convert J into a set of feature vectors using F
L = em.extract_feature_vecs(J, feature_table=F,
attrs_after='label', show_progress=False)
# Predict on L
predictions = dt.predict(table=L, exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
append=True, target_attr='predicted', inplace=False)
# Evaluate the predictions
eval_result = em.eval_matches(predictions, 'label', 'predicted')
em.print_eval_summary(eval_result)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Sketching the Visualization
Step3: Note that the keyword arguments are keys from the VISUALIZATION_CONFIG dictionary. If you use a keyword argument not present in the dictionary, an Exception will be raised.
Step4: Visualization Scaffolding
Step5: What this does is to create a file named scaffolded_barchart.js which contains a reusable visualization. All variables declared in the arguments dictionary are available as property methods. The values specified when defining the arguments or when scaffolding will serve as defaults, but everything is changeable. Note that we did not specify a DataFrame this time!
|
<ASSISTANT_TASK:>
Python Code:
import matta
# we do this to load the required libraries when viewing on NBViewer
matta.init_javascript(path='https://rawgit.com/carnby/matta/master/matta/libs')
import pandas as pd
df = pd.read_csv('http://bl.ocks.org/mbostock/raw/3885304/964f9100166627a89c7e6c23ce8128f5aefd5510/data.tsv', delimiter='\t')
df.head()
barchart = matta.import_visualization('skeleton')
barchart(dataframe=df, x='letter', y='frequency', rotate_label=False, bar_color='purple')
barchart(dataframe=df, x='letter', y='frequency', rotate_label=False,
bar_color={'value': 'letter', 'palette': 'cubehelix', 'n_colors': df.shape[0], 'scale': 'ordinal'})
barchart(x='letter', y='frequency').scaffold(filename='./scaffolded_barchart.js')
from matta import dump_data
dump_data(df, './data.json')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This file contains extra information beyond a normal instance catalog output. During the sprinkling process, as new id numbers are given to the sprinkled AGN, all of their ids are larger than 1e11, while those of unsprinkled AGN are not. These larger ids contain information on the OM10-Twinkles system used in the sprinkler, the image number in that system and the associated lens galaxy. We have previously added columns containing this information to the instance catalog information for the AGN.
Step2: Validating Image Positions
Step3: Overall, differences between the instance catalogs and the OM10 inputs seem to be within 0.01 arcseconds. It appears that the positions in the instance catalog are accurate reproductions of the OM10 data they are based upon.
Step4: Check distributions of Lensed AGN
Step5: Variability Validation
Step6: As the previous cell shows there are no AGN with the same magnitude from the first visit to the next showing that variability is being calculated for each system. Below we plot a few light curves on the set of three visits.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.io import fits
plt.style.use('ggplot')
%matplotlib inline
om10_cat = fits.open('../../data/twinkles_lenses_v2.fits')[1].data
sprinkled_lens_gals = pd.read_csv('../../data/sprinkled_lens_galaxies_230.txt')
sprinkled_agn = pd.read_csv('../../data/sprinkled_agn_230.txt')
sprinkled_agn[:20]
x_sep_inst = []
y_sep_inst = []
x_sep_om10 = []
y_sep_om10 = []
for agn_row in range(len(sprinkled_agn)):
lens_gal_current = sprinkled_agn['lens_gal_id'][agn_row]
twinkles_current = sprinkled_agn['twinkles_id'][agn_row]
im_num_current = sprinkled_agn['image_num'][agn_row]
om10_current = om10_cat[om10_cat['twinklesId'] == twinkles_current]
lens_ra = sprinkled_lens_gals.query('id == %i' % lens_gal_current)['ra'].values[0]
lens_dec = sprinkled_lens_gals.query('id == %i' % lens_gal_current)['dec'].values[0]
x_sep_inst.append(((sprinkled_agn['ra'][agn_row] - lens_ra)*3600.)*np.cos(np.radians(lens_dec)))
y_sep_inst.append((sprinkled_agn['dec'][agn_row] - lens_dec)*3600)
x_sep_om10.append(om10_current['XIMG'][0][im_num_current])
y_sep_om10.append(om10_current['YIMG'][0][im_num_current])
fig = plt.figure(figsize=(12, 6))
fig.add_subplot(1,2,1)
p1 = plt.hist(100*(np.array(x_sep_inst) - np.array(x_sep_om10))/np.array(x_sep_om10))
plt.xlabel('Percent Difference')
plt.title('XIMG (ra)')
fig.add_subplot(1,2,2)
p2 = plt.hist(100*(np.array(y_sep_inst) - np.array(y_sep_om10))/np.array(y_sep_om10))
plt.xlabel('Percent Difference')
plt.title('YIMG (dec)')
plt.suptitle('Difference between OM10 separations and Instance Catalog separations between images and lens galaxy')
fig = plt.figure(figsize=(12, 6))
fig.add_subplot(1,2,1)
p1 = plt.hist((np.array(x_sep_inst) - np.array(x_sep_om10)))
plt.xlabel('Difference (arcsec)')
plt.title('XIMG (ra)')
fig.add_subplot(1,2,2)
p2 = plt.hist((np.array(y_sep_inst) - np.array(y_sep_om10)))
plt.xlabel('Difference (arcsec)')
plt.title('YIMG (dec)')
plt.suptitle('Difference between OM10 separations and Instance Catalog separations between images and lens galaxy')
plt.tight_layout()
plt.subplots_adjust(top=.9)
ellip = np.sqrt(1-((sprinkled_lens_gals['minor_axis']**2)/(sprinkled_lens_gals['major_axis']**2)))
sprinkled_lens_gals['r_eff'] = sprinkled_lens_gals['major_axis']*np.sqrt(1-ellip)
fig = plt.figure(figsize=(18,6))
fig.add_subplot(1,3,1)
p1 = plt.hist(sprinkled_lens_gals['mag_norm'])
plt.xlabel('MagNorm (mags @ 500 nm)')
fig.add_subplot(1,3,2)
p2 = plt.hist(sprinkled_lens_gals['redshift'])
plt.xlabel('Redshift')
fig.add_subplot(1,3,3)
p3 = plt.hist(sprinkled_lens_gals['r_eff'])
plt.xlabel('R_Eff (arcsec)')
#Use only the brightest AGN image in a system for magnitudes and only record the redshift once for each system
agn_magnorm = {}
agn_redshifts = []
for agn_row in range(len(sprinkled_agn)):
lens_gal_current = sprinkled_agn['lens_gal_id'][agn_row]
im_num_current = sprinkled_agn['image_num'][agn_row]
om10_current = om10_cat[om10_cat['twinklesId'] == twinkles_current]
try:
current_val = agn_magnorm[str(lens_gal_current)]
if sprinkled_agn['mag_norm'][agn_row] < current_val:
agn_magnorm[str(lens_gal_current)] = sprinkled_agn['mag_norm'][agn_row]
except KeyError:
agn_magnorm[str(lens_gal_current)] = sprinkled_agn['mag_norm'][agn_row]
agn_redshifts.append(sprinkled_agn['redshift'][agn_row])
fig = plt.figure(figsize=(12,6))
fig.add_subplot(1,2,1)
plt.hist(agn_magnorm.values())
plt.title('Magnitude for brightest AGN image in a system')
plt.xlabel('MagNorm (mags @ 500 nm)')
fig.add_subplot(1,2,2)
plt.hist(agn_redshifts)
plt.title('Lensed AGN Redshifts')
plt.xlabel('Source Redshifts')
plt.suptitle('Lensed AGN Properties')
plt.tight_layout()
plt.subplots_adjust(top=.9)
sprinkled_agn_2 = pd.read_csv('../../data/sprinkled_agn_185614.txt')
sprinkled_agn_3 = pd.read_csv('../../data/sprinkled_agn_204392.txt')
sprinkled_agn['mag_norm_185614'] = sprinkled_agn_2['mag_norm']
sprinkled_agn['mag_norm_204392'] = sprinkled_agn_3['mag_norm']
#Check if the mag_norm value stays the same from the first visit to the second for any AGN image
for agn_row in range(len(sprinkled_agn)):
if sprinkled_agn['mag_norm'][agn_row] == sprinkled_agn['mag_norm_185614'][agn_row]:
print agn_row
fig = plt.figure(figsize=(12,12))
for agn_row in range(10):
plt.plot([59580.1, 59825.3, 59857.2], [sprinkled_agn['mag_norm'][agn_row],
sprinkled_agn['mag_norm_185614'][agn_row],
sprinkled_agn['mag_norm_204392'][agn_row]], marker='+')
plt.xlabel('Visit MJD')
plt.ylabel('MagNorm (mags @ 500 nm)')
plt.title('AGN Image MagNorms for the first three r-band visits for 5 doubly-lensed AGN systems')
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we make a query to the datacube to find out what datasets we have.
Step2: Landsat Ecosystem Disturbance Adaptive Processing System (LEDAPS) is a NASA-funded project to map North American forest disturbance since 1975. We have datasets in the same format for Australia.
Step3: For Landsat 5, Band 1-3 are Blue, Green, and Red visible light spectrum bands.
Step4: Now we have created references to the green and blue bands, we can do simple band maths.
Step5: geo_xarray.reproject reprojects northings and eastings to longitude and latitude units.
Step6: The boundaries in long-lat are as follows
|
<ASSISTANT_TASK:>
Python Code:
from pprint import pprint
from datetime import datetime
import xarray as xr
import matplotlib
import matplotlib.image
%matplotlib inline
import datacube
from datacube.api import API, geo_xarray
from datacube.analytics.analytics_engine import AnalyticsEngine
from datacube.execution.execution_engine import ExecutionEngine
from datacube.analytics.utils.analytics_utils import plot
print('This example runs on Data Cube v2/{}.'.format(datacube.__version__))
dc_a = AnalyticsEngine()
dc_e = ExecutionEngine()
dc_api = API()
print(dc_api.list_field_values('product')) # 'LEDAPS' should be in the list
print(dc_api.list_field_values('platform')) # 'LANDSAT_5' should be in the list
query = {
'product': 'LEDAPS',
'platform': 'LANDSAT_5',
}
descriptor = dc_api.get_descriptor(query, include_storage_units=False)
pprint(descriptor)
dimensions = {
'x': {
'range': (140, 141)
},
'y': {
'range': (-35.5, -36.5)
},
'time': {
'range': (datetime(2011, 10, 17), datetime(2011, 10, 18))
}
}
red = dc_a.create_array(('LANDSAT_5', 'LEDAPS'), ['band3'], dimensions, 'red')
green = dc_a.create_array(('LANDSAT_5', 'LEDAPS'), ['band2'], dimensions, 'green')
blue = dc_a.create_array(('LANDSAT_5', 'LEDAPS'), ['band1'], dimensions, 'blue')
blue_result = dc_a.apply_expression([blue], 'array1', 'blue')
dc_e.execute_plan(dc_a.plan)
plot(dc_e.cache['blue'])
turbidity = dc_a.apply_expression([blue, green, red], '(array1 + array2 - array3) / 2', 'turbidity')
dc_e.execute_plan(dc_a.plan)
plot(dc_e.cache['turbidity'])
result = dc_e.cache['turbidity']['array_result']['turbidity']
reprojected = datacube.api.geo_xarray.reproject(result.isel(time=0), 'EPSG:3577', 'WGS84')
pprint(reprojected)
reprojected.plot.imshow()
matplotlib.image.imsave('turbidity.png', reprojected)
map(float, (reprojected.x[0], reprojected.x[-1], reprojected.y[0], reprojected.y[-1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make code compatible with AI Platform Training Service
Step2: Move code into a python package
Step3: Paste existing code into model.py
Step4: Modify code to read data from and write checkpoint files to GCS
Step5: Run trainer module package locally
Step6: Run your training package on Cloud AI Platform
Step7: (Optional) Run your training package using Docker container
Step8: Remark
|
<ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
PROJECT = <YOUR PROJECT>
BUCKET = <YOUR PROJECT>
REGION = <YOUR REGION>
import os
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = "2.1"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
!gsutil ls gs://$BUCKET/taxifare/data
ls ./taxifare/trainer/
%%writefile ./taxifare/trainer/model.py
#TODO 1
import datetime
import logging
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow import feature_column as fc
logging.info(tf.version.VERSION)
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
def features_and_labels(row_data):
    """Split a CSV row dict into (features, label).

    Removes the 'key' pass-through column in place and pops the fare
    amount out as the label; the remaining columns are the features.
    """
    row_data.pop('key')
    label = row_data.pop(LABEL_COLUMN)
    return row_data, label
def load_dataset(pattern, batch_size, num_repeat):
    """Build a tf.data pipeline of (features, label) batches from CSV files.

    `pattern` is a file glob; `num_repeat` is the number of epochs
    (None repeats indefinitely).
    """
    return tf.data.experimental.make_csv_dataset(
        file_pattern=pattern,
        batch_size=batch_size,
        column_names=CSV_COLUMNS,
        column_defaults=DEFAULTS,
        num_epochs=num_repeat,
    ).map(features_and_labels)
def create_train_dataset(pattern, batch_size):
    """Training pipeline: repeats indefinitely and prefetches one batch."""
    return load_dataset(pattern, batch_size, num_repeat=None).prefetch(1)
def create_eval_dataset(pattern, batch_size):
    """Evaluation pipeline: a single pass over the data, prefetching one batch."""
    return load_dataset(pattern, batch_size, num_repeat=1).prefetch(1)
def parse_datetime(s):
    """Parse a '%Y-%m-%d %H:%M:%S %Z' timestamp into a datetime.

    Accepts either a Python str or a TF string tensor (decoded via .numpy()).
    """
    text = s if type(s) is str else s.numpy().decode('utf-8')
    return datetime.datetime.strptime(text, "%Y-%m-%d %H:%M:%S %Z")
def euclidean(params):
    """Euclidean distance between (lon1, lat1) and (lon2, lat2) tensors."""
    lon1, lat1, lon2, lat2 = params
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    return tf.sqrt(dlon*dlon + dlat*dlat)
def get_dayofweek(s):
    """Map a timestamp string to its day-of-week abbreviation (e.g. 'Mon')."""
    return DAYS[parse_datetime(s).weekday()]
@tf.function
def dayofweek(ts_in):
    """Element-wise day-of-week lookup over a tensor of timestamp strings.

    Wraps the Python-level get_dayofweek in tf.py_function so it can run
    inside the TF graph, and maps it across the batch with tf.map_fn.
    """
    return tf.map_fn(
        lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
        ts_in
    )
@tf.function
def fare_thresh(x):
    """Scale a ReLU activation by 60 (zeroes negatives, caps the lower bound at 0)."""
    return 60 * activations.relu(x)
def transform(inputs, NUMERIC_COLS, STRING_COLS, nbuckets):
    """Apply feature engineering to the raw Keras input layers.

    Returns (transformed, feature_columns): a dict of transformed input
    tensors and a matching dict of tf.feature_column definitions suitable
    for a DenseFeatures layer.
    """
    # Pass-through columns
    transformed = inputs.copy()
    del transformed['pickup_datetime']
    feature_columns = {
        colname: fc.numeric_column(colname)
        for colname in NUMERIC_COLS
    }
    # Scaling longitude from range [-78, -70] to [0, 1]
    for lon_col in ['pickup_longitude', 'dropoff_longitude']:
        transformed[lon_col] = layers.Lambda(
            lambda x: (x + 78)/8.0,
            name='scale_{}'.format(lon_col)
        )(inputs[lon_col])
    # Scaling latitude from range [37, 45] to [0, 1]
    for lat_col in ['pickup_latitude', 'dropoff_latitude']:
        transformed[lat_col] = layers.Lambda(
            lambda x: (x - 37)/8.0,
            name='scale_{}'.format(lat_col)
        )(inputs[lat_col])
    # Adding Euclidean dist (no need to be accurate: NN will calibrate it)
    # Note: computed from the *unscaled* lon/lat inputs.
    transformed['euclidean'] = layers.Lambda(euclidean, name='euclidean')([
        inputs['pickup_longitude'],
        inputs['pickup_latitude'],
        inputs['dropoff_longitude'],
        inputs['dropoff_latitude']
    ])
    feature_columns['euclidean'] = fc.numeric_column('euclidean')
    # hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
    # (characters 11-12 of the fixed-width string), one-hot encoded.
    transformed['hourofday'] = layers.Lambda(
        lambda x: tf.strings.to_number(
            tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
        name='hourofday'
    )(inputs['pickup_datetime'])
    feature_columns['hourofday'] = fc.indicator_column(
        fc.categorical_column_with_identity(
            'hourofday', num_buckets=24))
    # Bucketize the scaled lat/lon, cross the pickup and dropoff grid
    # cells, then embed the pickup-dropoff pair into 100 dimensions.
    latbuckets = np.linspace(0, 1, nbuckets).tolist()
    lonbuckets = np.linspace(0, 1, nbuckets).tolist()
    b_plat = fc.bucketized_column(
        feature_columns['pickup_latitude'], latbuckets)
    b_dlat = fc.bucketized_column(
        feature_columns['dropoff_latitude'], latbuckets)
    b_plon = fc.bucketized_column(
        feature_columns['pickup_longitude'], lonbuckets)
    b_dlon = fc.bucketized_column(
        feature_columns['dropoff_longitude'], lonbuckets)
    ploc = fc.crossed_column(
        [b_plat, b_plon], nbuckets * nbuckets)
    dloc = fc.crossed_column(
        [b_dlat, b_dlon], nbuckets * nbuckets)
    pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
    feature_columns['pickup_and_dropoff'] = fc.embedding_column(
        pd_pair, 100)
    return transformed, feature_columns
def rmse(y_true, y_pred):
    """Root-mean-squared-error metric for Keras."""
    squared_errors = tf.square(y_pred - y_true)
    return tf.sqrt(tf.reduce_mean(squared_errors))
def build_dnn_model(nbuckets, nnsize, lr):
    """Build and compile the Keras DNN taxifare model.

    Args:
        nbuckets: number of buckets used when discretizing lat/lon.
        nnsize: list of hidden-layer widths, e.g. [32, 8].
        lr: learning rate for the Adam optimizer.
    Returns:
        A compiled tf.keras Model mapping raw taxi features to a fare.
    """
    # input layer is all float except for pickup_datetime which is a string
    STRING_COLS = ['pickup_datetime']
    NUMERIC_COLS = (
        set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
    )
    inputs = {
        colname: layers.Input(name=colname, shape=(), dtype='float32')
        for colname in NUMERIC_COLS
    }
    inputs.update({
        colname: layers.Input(name=colname, shape=(), dtype='string')
        for colname in STRING_COLS
    })
    # transforms
    transformed, feature_columns = transform(
        inputs, NUMERIC_COLS, STRING_COLS, nbuckets=nbuckets)
    dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
    x = dnn_inputs
    for layer, nodes in enumerate(nnsize):
        x = layers.Dense(nodes, activation='relu', name='h{}'.format(layer))(x)
    output = layers.Dense(1, name='fare')(x)
    model = models.Model(inputs, output)
    # TODO placeholders resolved: Adam with the supplied learning rate,
    # MSE training loss, tracking RMSE and MSE as metrics.
    lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=lr_optimizer, loss='mse', metrics=[rmse, 'mse'])
    return model
def train_and_evaluate(hparams):
    """Train the taxifare model, checkpointing and exporting as it goes.

    `hparams` is a dict of hyperparameters and paths (batch_size, nbuckets,
    lr, nnsize, data paths, output_dir, ...). Any existing output_dir is
    deleted first. Returns the Keras training History.
    """
    # TODO placeholders resolved: read the tunable hyperparameters.
    batch_size = hparams['batch_size']
    nbuckets = hparams['nbuckets']
    lr = hparams['lr']
    nnsize = hparams['nnsize']
    eval_data_path = hparams['eval_data_path']
    num_evals = hparams['num_evals']
    num_examples_to_train_on = hparams['num_examples_to_train_on']
    output_dir = hparams['output_dir']
    train_data_path = hparams['train_data_path']
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
    savedmodel_dir = os.path.join(output_dir, 'export/savedmodel')
    model_export_path = os.path.join(savedmodel_dir, timestamp)
    checkpoint_path = os.path.join(output_dir, 'checkpoints')
    tensorboard_path = os.path.join(output_dir, 'tensorboard')
    # Start from a clean output directory (works for GCS paths too).
    if tf.io.gfile.exists(output_dir):
        tf.io.gfile.rmtree(output_dir)
    model = build_dnn_model(nbuckets, nnsize, lr)
    logging.info(model.summary())
    trainds = create_train_dataset(train_data_path, batch_size)
    evalds = create_eval_dataset(eval_data_path, batch_size)
    # The train dataset repeats forever, so spread the example budget
    # evenly across num_evals "epochs".
    steps_per_epoch = num_examples_to_train_on // (batch_size * num_evals)
    checkpoint_cb = callbacks.ModelCheckpoint(
        checkpoint_path,
        save_weights_only=True,
        verbose=1
    )
    tensorboard_cb = callbacks.TensorBoard(tensorboard_path)
    history = model.fit(
        trainds,
        validation_data=evalds,
        epochs=num_evals,
        steps_per_epoch=max(1, steps_per_epoch),
        verbose=2,  # 0=silent, 1=progress bar, 2=one line per epoch
        callbacks=[checkpoint_cb, tensorboard_cb]
    )
    # Exporting the model with default serving function.
    tf.saved_model.save(model, model_export_path)
    return history
%%writefile taxifare/trainer/task.py
# TODO 1
import argparse

from trainer import model

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # TODO placeholders resolved: the three tunable hyperparameter flags
    # used by the gcloud/bash invocations (--batch_size, --nbuckets, --lr).
    parser.add_argument(
        "--batch_size",
        help="Batch size for training steps",
        type=int,
        default=32
    )
    parser.add_argument(
        "--nbuckets",
        help="Number of buckets into which to discretize lats and lons",
        type=int,
        default=10
    )
    parser.add_argument(
        "--lr",
        help="Learning rate for the optimizer",
        type=float,
        default=0.001
    )
    parser.add_argument(
        "--eval_data_path",
        help="GCS location pattern of eval files",
        required=True
    )
    parser.add_argument(
        "--nnsize",
        help="Hidden layer sizes (provide space-separated sizes)",
        nargs="+",
        type=int,
        default=[32, 8]
    )
    parser.add_argument(
        "--num_evals",
        help="Number of times to evaluate model on eval data training.",
        type=int,
        default=5
    )
    parser.add_argument(
        "--num_examples_to_train_on",
        help="Number of examples to train on.",
        type=int,
        default=100
    )
    parser.add_argument(
        "--output_dir",
        help="GCS location to write checkpoints and export models",
        required=True
    )
    parser.add_argument(
        "--train_data_path",
        help="GCS location pattern of train files containing eval URLs",
        required=True
    )
    parser.add_argument(
        "--job-dir",
        help="this model ignores this field, but it is required by gcloud",
        default="junk"
    )
    args = parser.parse_args()
    hparams = args.__dict__
    # argparse normalizes '--job-dir' to the attribute name 'job_dir';
    # popping "job-dir" (as the original did) was a no-op.
    hparams.pop("job_dir", None)
    model.train_and_evaluate(hparams)
%%bash
# Smoke-test the trainer module locally on the tiny test CSVs before
# submitting a cloud job. The wildcards are expanded when the variables
# are used unquoted below, so a flag may receive several matching files.
EVAL_DATA_PATH=./taxifare/tests/data/taxi-valid*
TRAIN_DATA_PATH=./taxifare/tests/data/taxi-train*
OUTPUT_DIR=./taxifare-model
# Remove output from a previous run ('test' guards against an empty var).
test ${OUTPUT_DIR} && rm -rf ${OUTPUT_DIR}
# Make the taxifare package importable as 'trainer'.
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python3 -m trainer.task \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTPUT_DIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size 5 \
--num_examples_to_train_on 100 \
--num_evals 1 \
--nbuckets 10 \
--lr 0.001 \
--nnsize 32 8
%%bash
# TODO 2
# Submit a python-package training job to AI Platform Training. The
# TODO placeholders are filled in with the variables defined in this cell.
# Output directory and jobID
OUTDIR=gs://${BUCKET}/taxifare/trained_model_$(date -u +%y%m%d_%H%M%S)
JOBID=taxifare_$(date -u +%y%m%d_%H%M%S)
# REGION was referenced below but never set; default it here.
REGION=${REGION:-"us-central1"}
echo ${OUTDIR} ${REGION} ${JOBID}
gsutil -m rm -rf ${OUTDIR}
# Model and training hyperparameters
BATCH_SIZE=50
NUM_EXAMPLES_TO_TRAIN_ON=100
NUM_EVALS=100
NBUCKETS=10
LR=0.001
NNSIZE="32 8"
# GCS paths
GCS_PROJECT_PATH=gs://$BUCKET/taxifare
DATA_PATH=$GCS_PROJECT_PATH/data
TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
# NOTE(review): python/runtime versions must match an available AI Platform
# runtime — confirm against the current runtime-version list.
# NNSIZE is intentionally unquoted so "32 8" expands to two values for the
# trainer's nargs="+" flag.
gcloud ai-platform jobs submit training $JOBID \
    --module-name=trainer.task \
    --package-path=taxifare/trainer \
    --staging-bucket=gs://$BUCKET \
    --python-version=3.7 \
    --runtime-version=2.1 \
    --region=$REGION \
    -- \
    --eval_data_path $EVAL_DATA_PATH \
    --output_dir $OUTDIR \
    --train_data_path $TRAIN_DATA_PATH \
    --batch_size $BATCH_SIZE \
    --num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
    --num_evals $NUM_EVALS \
    --nbuckets $NBUCKETS \
    --lr $LR \
    --nnsize $NNSIZE
%%writefile ./taxifare/Dockerfile
# Training container: TF2 CPU deep-learning base image with the taxifare
# package installed; task.py is the container entry point.
FROM gcr.io/deeplearning-platform-release/tf2-cpu
COPY . /code
RUN apt-get update && \
apt-get install --yes python3-pip && \
pip3 install /code
# cloudml-hypertune lets the trainer report metrics to hyperparameter jobs.
RUN python3 -m pip install --upgrade --quiet cloudml-hypertune
ENTRYPOINT ["python3", "/code/trainer/task.py"]
# Allow the local docker client to push to Google Container Registry.
!gcloud auth configure-docker
%%bash
# Build the training container from the Dockerfile above and push it to GCR.
PROJECT_DIR=$(cd ./taxifare && pwd)
PROJECT_ID=$(gcloud config list project --format "value(core.project)")
IMAGE_NAME=taxifare_training_container
DOCKERFILE=$PROJECT_DIR/Dockerfile
IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_NAME
docker build $PROJECT_DIR -f $DOCKERFILE -t $IMAGE_URI
docker push $IMAGE_URI
%%bash
# Submit a custom-container training job: AI Platform pulls the image
# pushed above and runs its entry point with the flags after '--'.
PROJECT_ID=$(gcloud config list project --format "value(core.project)")
BUCKET=$PROJECT_ID
REGION="us-central1"
# Output directory and jobID
OUTDIR=gs://${BUCKET}/taxifare/trained_model
JOBID=taxifare_container_$(date -u +%y%m%d_%H%M%S)
echo ${OUTDIR} ${REGION} ${JOBID}
gsutil -m rm -rf ${OUTDIR}
# Model and training hyperparameters
BATCH_SIZE=50
NUM_EXAMPLES_TO_TRAIN_ON=100
NUM_EVALS=100
NBUCKETS=10
NNSIZE="32 8"
# AI-Platform machines to use for training
MACHINE_TYPE=n1-standard-4
SCALE_TIER=CUSTOM
# GCS paths.
GCS_PROJECT_PATH=gs://$BUCKET/taxifare
DATA_PATH=$GCS_PROJECT_PATH/data
TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
IMAGE_NAME=taxifare_training_container
IMAGE_URI=gcr.io/$PROJECT_ID/$IMAGE_NAME
# NNSIZE is intentionally unquoted so "32 8" expands to two values for the
# trainer's nargs="+" flag.
gcloud beta ai-platform jobs submit training $JOBID \
--staging-bucket=gs://$BUCKET \
--region=$REGION \
--master-image-uri=$IMAGE_URI \
--master-machine-type=$MACHINE_TYPE \
--scale-tier=$SCALE_TIER \
-- \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTDIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size $BATCH_SIZE \
--num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
--num_evals $NUM_EVALS \
--nbuckets $NBUCKETS \
--nnsize $NNSIZE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using plotly offline mode
Step2: Reading the final dataset
Step3: List of Seasons [Spring, Summer, Fall(Autum), Winter] with corresponding months
Step4: List with
Step5: Function that return dicitonary data to plot the data point
Step6: Adding Data to figure and setting x-axis, y-axis, title, background of layout
Step7: Plotting the figure
|
<ASSISTANT_TASK:>
Python Code:
import random
import pandas as pd
from plotly.graph_objs import *
from plotly.offline import init_notebook_mode, iplot, plot
init_notebook_mode(connected=True)
# Load the prepared movie dataset and peek at the first rows.
dataset = pd.read_csv('finalDataset.csv')
dataset.head(3)
# Regex alternations matching the release-month abbreviations of each season.
monthList = ['Mar|Apr|May', 'Jun|Jul|Aug', 'Sep|Oct|Nov', 'Dec|Jan|Feb']
# finalList rows: season names, mean adjusted box office (int), movie count,
# and mean rating (2 dp), matched on the RELEASED month substring.
finalList = [['Spring', 'Summer', 'Fall(Autumn)', 'Winter'],
             [int(dataset[dataset.RELEASED.str.contains(seasonMonths, na = False)]['ADJ. BOX OFFICE'].mean())
              for seasonMonths in monthList],
             [len(dataset[dataset.RELEASED.str.contains(seasonMonths, na = False)])
              for seasonMonths in monthList],
             [round(dataset[dataset.RELEASED.str.contains(seasonMonths, na = False)]['RATING'].mean(), 2)
              for seasonMonths in monthList]]
def data_(boxoffice, rating, movieCount, name):
    """Build the plotly scatter-trace dict for a single season's data point.

    The marker's area encodes the movie count, and the hover text reports
    the season name, mean rating, movie count and mean box office (in $M).
    """
    hover_text = (
        'Season: ' + name +
        '<br>Mean Rating: ' + str(rating) +
        '<br>Movie Count: ' + str(movieCount) +
        '<br>Mean BoxOffice: ' + str(round(boxoffice / 1000000, 2)) + 'M'
    )
    marker_style = {
        'size': [movieCount],
        'sizemode': 'area',
        'line': {
            'width': 1
        }
    }
    return {
        'x': [boxoffice],
        'y': [rating],
        'name': name,
        'mode': 'markers',
        'text': hover_text,
        'hoverinfo': 'text',
        'marker': marker_style
    }
# Assemble the figure: one bubble trace per season (index i walks the
# four parallel lists in finalList).
figure = {
    'data': [data_(finalList[1][i], finalList[3][i], finalList[2][i], finalList[0][i]) for i in range(4)],
    'layout': {}
}
figure['layout']['xaxis'] = {'title': 'Mean Box Office',
                             'titlefont': {
                                 'size': 16, 'family': 'Droid Sans'
                             },
                             'showline': True,
                             'ticks': 'outside',
                             'tickwidth': 2,
                             'gridcolor': '#FFFFFF'}
figure['layout']['yaxis'] = {'title': 'Mean Rating',
                             'titlefont': {
                                 'size': 16, 'family': 'Droid Sans'
                             },
                             'showline': True,
                             'ticks': 'outside',
                             'tickwidth': 2,
                             'gridcolor': '#FFFFFF'}
figure['layout']['title'] = 'Season Movie Analysis'
figure['layout']['titlefont'] = {'size': 20, 'family': 'Times New Roman'}
figure['layout']['plot_bgcolor'] = 'rgb(223, 232, 243)'
# 'plot' writes an HTML file and opens it; 'iplot' would render inline.
# iplot(figure)
plot(figure)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Channel library setup
Step2: Pipeline setup
Step3:
Step4: Initialize software demodulation parameters. If these are not properly configured than the Channelizer filter will report 'insufficient decimation' or other errors. The integration boxcar parameters are then defined.
Step5: Taking Fake Data
Step6: Plotting the Results in the Notebook
Step7: Loading the Data Files and Plot Manually
Step8: Plotting and the Plot Server & Client
Step9: Everything is in sync with the database. Now we modify some property
Step10: We see that things have changed that haven't been committed to the database. This can be rectified with another commit, or optionally a rollback!
|
<ASSISTANT_TASK:>
Python Code:
import auspex.config as config
config.auspex_dummy_mode = True
from QGL import *
from auspex.qubit import *
import matplotlib.pyplot as plt
%matplotlib inline
# Build an in-memory channel library describing the instruments and wiring.
cl = ChannelLibrary(":memory:")
pl = PipelineManager()
q1 = cl.new_qubit("q1")
aps2_1 = cl.new_APS2("BBNAPSa", address="192.168.2.4", trigger_interval=200e-6)
aps2_2 = cl.new_APS2("BBNAPSb", address="192.168.2.2")
dig_1 = cl.new_X6("Dig_1", address="1", sampling_rate=500e6, record_length=1024)
h1 = cl.new_source("Holz_1", "HolzworthHS9000", "HS9004A-009-1", reference='10MHz', power=-30)
h2 = cl.new_source("Holz_2", "HolzworthHS9000", "HS9004A-009-2", reference='10MHz', power=-30)
# Wire measurement and control channels for q1; aps2_1 provides the master trigger.
cl.set_measure(q1, aps2_1, dig_1.ch(1), trig_channel=aps2_1.ch("m2"), gate=False, generator=h1)
cl.set_control(q1, aps2_2, generator=h2)
cl.set_master(aps2_1, aps2_1.ch("m1"))
cl["q1"].measure_chan.frequency = 0e6
cl["q1"].measure_chan.autodyne_freq = 10e6
# Raw-stream measurement pipeline with live plotting taps.
pl.create_default_pipeline()
pl["q1"].stream_type = "raw"
pl.recreate_pipeline(buffers=False)
pl.show_pipeline()
pl["q1"]["Demodulate"]["Integrate"]["Average"].add(Display(label="Plot Average", plot_dims=1), connector_out="partial_average")
pl["q1"]["Demodulate"]["Integrate"].add(Display(label="Plot Integrate", plot_dims=1))
pl.show_pipeline()
# Software demodulation / integration parameters; misconfiguring these makes
# the Channelizer filter report 'insufficient decimation'.
demod = pl["q1"]["Demodulate"]
demod.frequency = cl["q1"].measure_chan.frequency
demod.decimation_factor = 16
integ = pl["q1"]["Demodulate"]["Integrate"]
integ.box_car_start = 0.2e-6
integ.box_car_stop= 1.9e-6
# Run a Rabi-amplitude sweep against synthetic (fake) digitizer data.
amps = np.linspace(-1,1,51)
exp = QubitExperiment(RabiAmp(q1,amps),averages=50)
exp.set_fake_data(dig_1, np.cos(np.linspace(0, 2*np.pi,51)))
exp.run_sweeps()
exp.get_final_plots()
# Pull the final data and plot its magnitude against the sweep axis.
data, desc = exp.outputs_by_qubit["q1"][0].get_data()
plt.plot(desc["amplitude"], np.abs(data))
plt.xlabel("Amplitude"); plt.ylabel("Data");
# Demonstrate the channel library's database session: commit, dirty-check,
# then roll back an uncommitted attribute change.
cl.session.commit()
cl.session.dirty
aps2_1.ch(1).amp_factor = 0.95
cl.session.dirty
cl.session.rollback()
aps2_1.ch(1).amp_factor
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure Display Parameters
Step2: Use Python Logging facilities
Step5: Define Functions
Step6: Define Notebook Parameters
Step7: Load Data
Step8: Sanity Check Data
Step9: Preliminary Data Wrangling
Step10: Data Analysis and Visualization
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
# Standard libraries
import logging
import os
import pathlib
import sys
# 3rd party libraries
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
import sqlalchemy as sa
# Local libraries
import pudl
# Global plotting and display defaults for the notebook.
sns.set()
%matplotlib inline
mpl.rcParams['figure.figsize'] = (10,4)
mpl.rcParams['figure.dpi'] = 150
pd.options.display.max_columns = 100
pd.options.display.max_rows = 100
# Route INFO-level log records to stdout with a bare-message format,
# replacing any handlers installed by imported libraries.
logger=logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.handlers = [handler]
def mcoe_by_fuel(mcoe_df, fuel_type=None):
    """Select only MCOE records pertaining to a particular fuel type.

    The docstring's triple quotes were lost in extraction, leaving bare
    text in the function body; they are restored here.

    Args:
        mcoe_df (pandas.DataFrame): A PUDL MCOE dataframe.
        fuel_type (str or None): A string indicating what value of
            fuel_type_code_pudl should be selected from the input
            dataframe. If None, all fuels are retained.

    Returns:
        pandas.DataFrame: A dataframe containing MCOE records for only a
        single PUDL fuel type code.
    """
    out_df = mcoe_df
    if fuel_type is not None:
        out_df = mcoe_df[mcoe_df.fuel_type_code_pudl == fuel_type]
    return out_df
def finite_distplot(df, data_col, wt_col=None, nbins=100, max_val=np.inf):
    """Plot weighted distribution of values less than a maximum value.

    Restores the docstring's lost triple quotes, and uses ``np.inf`` as
    the default bound: the ``np.infty`` alias was removed in NumPy 2.0 and
    the value is identical, so the interface is unchanged.

    Args:
        df (pandas.DataFrame): The dataframe containing the data and
            weights to plot.
        data_col (str): Label of the column containing the data.
        wt_col (str or None): Label of the column to use to weight the data.
            If None (the default) data is not weighted.
        nbins (int): Number of histogram bins to use.
        max_val (float): Maximum data value to allow in data visualized.

    Returns:
        None
    """
    df = df[df[data_col] < max_val]
    weights = None
    if wt_col is not None:
        weights = df[wt_col]
    # NOTE(review): sns.distplot is deprecated in recent seaborn releases;
    # consider migrating to sns.histplot when upgrading.
    _ = sns.distplot(df[data_col], bins=nbins, hist_kws={"weights": weights})
# Locate the PUDL workspace and open SQLAlchemy engines for both databases.
pudl_settings = pudl.workspace.setup.get_defaults()
display(pudl_settings)
ferc1_engine = sa.create_engine(pudl_settings['ferc1_db'])
display(ferc1_engine)
pudl_engine = sa.create_engine(pudl_settings['pudl_db'])
display(pudl_engine)
# What granularity should we aggregate MCOE data to?
mcoe_freq = "AS" # Annual
# What date range are we interested in here?
mcoe_start_date = "2015-01-01"
mcoe_end_date = "2018-12-31"
my_new_data_url = "https://mynewdata.website.gov/path/to/new/data.csv"
my_new_datadir = pathlib.Path(pudl_settings["data_dir"]) / "new_data_source"
# Store API keys and other secrets in environment variables
# and read them in here, if needed:
# EPA_API_KEY = os.environ["EIA_API_KEY"]
# BLS_API_KEY = os.environ["BLS_API_KEY"]
%%time
# Build the annual MCOE table for the chosen date range.
pudl_out = pudl.output.pudltabl.PudlTabl(
    freq=mcoe_freq,
    start_date=mcoe_start_date,
    end_date=mcoe_end_date,
    pudl_engine=pudl_engine,
)
mcoe_df = pudl_out.mcoe()
# Sanity checks: capacity factors in [0, 1.5] and a plausible mean heat rate.
assert mcoe_df.capacity_factor.min() >= 0.0
assert mcoe_df.capacity_factor.max() <= 1.5
mean_hr = mcoe_df[np.isfinite(mcoe_df.heat_rate_mmbtu_mwh)].heat_rate_mmbtu_mwh.mean()
assert mean_hr > 5
assert mean_hr < 20
# Heat-rate distributions by fuel (values above 20 mmBTU/MWh excluded).
mcoe_coal = mcoe_by_fuel(mcoe_df, fuel_type="coal")
mcoe_gas = mcoe_by_fuel(mcoe_df, fuel_type="gas")
coal_ax = finite_distplot(mcoe_coal, "heat_rate_mmbtu_mwh", max_val=20)
plt.title("Coal heat rate distribution");
gas_ax = finite_distplot(mcoe_gas, "heat_rate_mmbtu_mwh", max_val=20)
plt.title("Gas heat rate distribution");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ๅๅฝๆฐๅฏ่ฟๅ1 - 1/3 + 1/5 - 1/7...็ๅn้กน็ๅใๅจไธป็จๅบไธญ๏ผๅๅซไปคn=1000ๅ100000๏ผๆๅฐ4ๅ่ฏฅๅฝๆฐ็ๅใ
Step2: ๅฐtask3ไธญ็็ปไน 1ๅ็ปไน 4ๆนๅไธบๅฝๆฐ๏ผๅนถ่ฟ่ก่ฐ็จใ
Step3: ๆๆๆง็ปไน ๏ผๅ็จๅบ๏ผๅฏไปฅๆฑไปๆดๆฐmๅฐๆดๆฐn็ดฏๅ ็ๅ๏ผ้ด้ไธบk๏ผๆฑๅ้จๅ้็จๅฝๆฐๅฎ็ฐ๏ผไธป็จๅบไธญ็ฑ็จๆท่พๅ
ฅm๏ผn๏ผk่ฐ็จๅฝๆฐ้ช่ฏๆญฃ็กฎๆงใ
|
<ASSISTANT_TASK:>
Python Code:
def compute_sum(n):
    """Return the sum of the integers 1 through n (0 when n <= 0)."""
    return sum(range(1, n + 1))
# Read three integers and print the sum (1..m) + (1..n) + (1..k).
m=int(input('plz input m: '))
n=int(input('plz input n: '))
k=int(input('plz input k: '))
print(compute_sum(m) + compute_sum(n) + compute_sum(k))
def compute_sum(n):
    """Return the first n terms of the Leibniz series 1 - 1/3 + 1/5 - ...

    Multiplying the result by 4 approximates pi. Terms are accumulated in
    the same order as the original loop, so results match exactly.
    """
    total = 0
    for k in range(1, n + 1):
        term = 1 / (2 * k - 1)
        if k % 2 == 0:
            total -= term
        else:
            total += term
    return total
# Partial sums times 4 converge (slowly) to pi.
print(compute_sum(1000))
print('*4: ',4*compute_sum(1000))
print(compute_sum(10000))
print('*4: ',4*compute_sum(10000))
def Constellation(n, m, d):
    """Return (name, zodiac-sign message) for a birth date.

    Args:
        n: the person's name, echoed back in the result tuple.
        m: birth month (1-12).
        d: birth day of month.

    The original comparisons used ``m >= x``/``m <= y``, which matched far
    more months than intended (e.g. January 1 fell into the first, Aries,
    branch). The month tests below use equality and the day cutoffs follow
    the conventional Western zodiac boundaries, so every date matches
    exactly one sign.
    """
    if (m == 3 and d >= 21) or (m == 4 and d <= 19):
        return (n, 'ไฝ ๆฏ็ฝ็พๅบง')
    elif (m == 4 and d >= 20) or (m == 5 and d <= 20):
        return (n, 'ไฝ ๆฏ้็ๅบง')
    elif (m == 5 and d >= 21) or (m == 6 and d <= 21):
        return (n, 'ไฝ ๆฏๅๅญๅบง')
    elif (m == 6 and d >= 22) or (m == 7 and d <= 22):
        return (n, 'ไฝ ๆฏๅทจ่นๅบง')
    elif (m == 7 and d >= 23) or (m == 8 and d <= 22):
        return (n, 'ไฝ ๆฏ็ฎๅญๅบง')
    elif (m == 8 and d >= 23) or (m == 9 and d <= 22):
        return (n, 'ไฝ ๆฏๅคๅฅณๅบง')
    elif (m == 9 and d >= 23) or (m == 10 and d <= 23):
        return (n, 'ไฝ ๆฏๅคฉ็งคๅบง')
    elif (m == 10 and d >= 24) or (m == 11 and d <= 22):
        return (n, 'ไฝ ๆฏๅคฉ่ๅบง')
    elif (m == 11 and d >= 23) or (m == 12 and d <= 21):
        return (n, 'ไฝ ๆฏๅฐๆๅบง')
    elif (m == 12 and d >= 22) or (m == 1 and d <= 19):
        return (n, 'ไฝ ๆฏๆฉ็พฏๅบง')
    elif (m == 1 and d >= 20) or (m == 2 and d <= 18):
        return (n, 'ไฝ ๆฏๆฐด็ถๅบง')
    elif (m == 2 and d >= 19) or (m == 3 and d <= 20):
        return (n, 'ไฝ ๆฏๅ้ฑผๅบง')
# Prompt for name and birth date, then report the zodiac sign.
n=str(input('plz input name:'))
m=int(input('plz input birth_mon: '))
k=int(input('plz input birth_day: '))
print(Constellation(n,m,k))
def Plurality(word):
    """Print the plural of an English noun: 'es' after ch/sh/s/x, else 's'."""
    suffix = 'es' if word.endswith(('ch', 'sh', 's', 'x')) else 's'
    print(word + suffix)
# Read a word and print its plural form.
w=str(input('plz input a word'))
Plurality(w)
def count_sum(m, n, k):
    """Return the sum of the integers from m to n (inclusive) in steps of k.

    The task above asks for the sum from m to n with interval k, but the
    original implementation ignored ``m`` entirely (it always started its
    accumulator at 0) and summed a shifted sequence. This version sums
    m, m+k, m+2k, ... up to n. ``k`` must be a positive step.
    """
    return sum(range(m, n + 1, k))
# Read m, n, k and print the stepped sum from m to n.
m=int(input('plz input m:'))
n=int(input('plz input n:'))
k=int(input('plz input k:'))
print(count_sum(m,n,k))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Load the data
Step2: Replace the argument below with the unique ID of the dataset that you've chosen in the web UI.
Step3: 3. Examine the data
Step4: 4. Graph the data
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import urllib2
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10.0, 8.0)
def load_data(unique_id):
    """Download an OpenNEX dataset by its unique ID and prepare it.

    Model/Scenario/Variable become categoricals, Date becomes a datetime,
    and a Temperature column is derived by converting Value from Kelvin
    to Celsius.
    """
    url = "http://opennex/dataset/%s/data.csv" % (unique_id)
    frame = pd.read_csv(urllib2.urlopen(url))
    for categorical in ('Model', 'Scenario', 'Variable'):
        frame[categorical] = frame[categorical].astype('category')
    frame['Date'] = frame['Date'].astype('datetime64')
    frame['Temperature'] = frame['Value'] - 273.15
    return frame
# Fetch the dataset chosen in the web UI and inspect its shape and the
# unique values of each column.
data = load_data("Ky3KN")
data.shape
data.apply(lambda x: [x.unique()])
# One line colour per emissions scenario.
colors = {'historical':'black', 'rcp45':'green', 'rcp85':'red'}
def do_graph(df):
    """Plot, per scenario, the maximum monthly-mean temperature of each year."""
    # NOTE(review): assumes row label 1 exists in the index — verify for
    # filtered or reindexed inputs.
    model = df.loc[1,'Model']
    # Collapse daily values to monthly means, then take the warmest month
    # of each year for every scenario.
    df['Month'] = df['Date'].map(lambda d: "%d-%02d-01" % (d.year, d.month)).astype('datetime64')
    by_month = df.groupby(['Month', 'Scenario']).aggregate(np.mean).reset_index()
    by_month['Year'] = by_month['Month'].map(lambda d: "%d-01-01" % (d.year)).astype('datetime64')
    by_year = by_month.groupby(['Year', 'Scenario']).aggregate(max).loc[:,['Temperature']]
    groups = by_year.reset_index().set_index('Year').groupby('Scenario')
    # One line per scenario, coloured via the module-level `colors` map.
    for key, grp in groups:
        plt.plot(grp.index, grp['Temperature'], color=colors[key], label=key)
    plt.legend(loc='best')
    plt.title("Maximum mean temperature for warmest month using model %s" % (model))
    plt.xlabel("Year")
    plt.ylabel("Temperature (Celsius)")
    plt.show()
do_graph(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Show result
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
print(__doc__)
# Locate the sample subject's 3-layer BEM solution and load its surfaces
# (patch_stats=True adds per-surface statistics).
data_path = sample.data_path()
fname = data_path + '/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif'
surfaces = mne.read_bem_surfaces(fname, patch_stats=True)
print("Number of surfaces : %d" % len(surfaces))
# One colour per layer: head, skull, brain.
head_col = (0.95, 0.83, 0.83)  # light pink
skull_col = (0.91, 0.89, 0.67)  # light yellow
brain_col = (0.67, 0.89, 0.91)  # light blue
colors = [head_col, skull_col, brain_col]
# 3D source space
from mayavi import mlab  # noqa
mlab.figure(size=(600, 600), bgcolor=(0, 0, 0))
# Render each surface as a translucent triangular mesh so all three
# nested layers remain visible.
for c, surf in zip(colors, surfaces):
    points = surf['rr']
    faces = surf['tris']
    mlab.triangular_mesh(points[:, 0], points[:, 1], points[:, 2], faces,
                         color=c, opacity=0.3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Adiabatic batch reactor
Step2: Data Files
Step3: Reactor State Comparison
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 500
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
ls adiab/*.out
def read_file(fname):
    """Print every line of *fname*.

    Each line keeps its own trailing newline and print() appends another,
    so the output is double-spaced (matching the original behaviour).
    """
    with open(fname) as handle:
        for line in handle:
            print(line)
# Dump the kf.out rate-coefficient file, then list the available CSVs.
read_file("adiab/kf.out")
ls adiab/*.csv
# Species mole fractions vs. time for the adiabatic reactor; strip stray
# whitespace from the column headers and add a milliseconds time axis.
df = pd.read_csv(os.path.join('adiab', 'gas_mole_tr.csv'))
df.columns = df.columns.str.strip()
df["t_ms"] = df["t(s)"]*1e3
plt.clf()
ax1 = plt.subplot(1, 1, 1)
ax1.plot('t_ms', 'H', data=df, marker='^', markersize=0.5, label="H mole frac")
ax1.plot('t_ms', 'OH', data=df, marker='v', markersize=0.5, label="OH mole frac")
ax1.plot('t_ms', 'H2O', data=df, marker='*', markersize=0.5, label="H2O mole frac")
ax1.plot('t_ms', 'H2', data=df, marker='o', markersize=0.5, label="H2 mole frac")
ax1.plot('t_ms', 'O2', data=df, marker='<', markersize=0.5, label="O2 mole frac")
ax1.set_xlabel('Time (ms)')
ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
# NOTE(review): the plotted columns are mole fractions but the axis label
# says 'Mass Fraction' — confirm which is intended.
ax1.set_ylabel('Mass Fraction')
#ax1.set_xlim([0,1])
ax1.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('GRI30_Hdetonation_adiab_mole.png', dpi=500)
# Same plot zoomed to the ignition window (0.25-0.4 ms).
plt.clf()
ax1 = plt.subplot(1, 1, 1)
ax1.plot('t_ms', 'H', data=df, marker='^', markersize=0.5, label="H mole frac")
ax1.plot('t_ms', 'OH', data=df, marker='v', markersize=0.5, label="OH mole frac")
ax1.plot('t_ms', 'H2O', data=df, marker='*', markersize=0.5, label="H2O mole frac")
ax1.plot('t_ms', 'H2', data=df, marker='o', markersize=0.5, label="H2 mole frac")
ax1.plot('t_ms', 'O2', data=df, marker='<', markersize=0.5, label="O2 mole frac")
ax1.set_xlabel('Time (ms)')
ax1.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax1.set_ylabel('Mass Fraction')
#ax1.set_xlim([0,1])
ax1.legend(loc="upper left", bbox_to_anchor=(1,1))
ax1.set_xlim(0.25,0.4)
plt.tight_layout()
plt.savefig('GRI30_Hdetonation_adiab_mole_zoomed.png', dpi=500)
# Repeat the species plot for the isothermal run (zoomed to ignition).
df_isothermal = pd.read_csv(os.path.join('isother', 'gas_mole_tr.csv'))
df_isothermal.columns = df_isothermal.columns.str.strip()
df_isothermal["t_ms"] = df_isothermal["t(s)"]*1e3
plt.clf()
ax = plt.subplot(1, 1, 1)
# NOTE(review): these traces plot `df` (the adiabatic data), not
# `df_isothermal`, even though the figure is saved as "isother" — confirm
# which dataframe was intended.
ax.plot('t_ms', 'H', data=df, marker='^', markersize=0.5, label="H mole frac")
ax.plot('t_ms', 'OH', data=df, marker='v', markersize=0.5, label="OH mole frac")
ax.plot('t_ms', 'H2O', data=df, marker='*', markersize=0.5, label="H2O mole frac")
ax.plot('t_ms', 'H2', data=df, marker='o', markersize=0.5, label="H2 mole frac")
ax.plot('t_ms', 'O2', data=df, marker='<', markersize=0.5, label="O2 mole frac")
ax.set_xlabel('Time (ms)')
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax.set_ylabel('Mass Fraction')
ax.set_xlim(0.25,0.4)
#ax1.set_xlim([0,1])
ax.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('GRI30_H-detonation_isother_mole.png', dpi=500)
# Load the reactor-state (T, P) histories for both cases.
adiab_state_df = pd.read_csv(os.path.join('adiab','rctr_state_tr.csv'))
isotherm_state_df = pd.read_csv(os.path.join('isother','rctr_state_tr.csv'))
adiab_state_df.columns = adiab_state_df.columns.str.strip()
isotherm_state_df.columns = isotherm_state_df.columns.str.strip()
isotherm_state_df["t_ms"] = isotherm_state_df["t(s)"]*1e3
adiab_state_df["t_ms"] = adiab_state_df["t(s)"]*1e3
# Temperature comparison: adiabatic vs. isothermal.
plt.clf()
ax_comp = plt.subplot(1, 1, 1)
ax_comp.plot('t_ms', 'Temperature(K)', data=isotherm_state_df, marker='^', markersize=0.5, label="Isothermal T")
ax_comp.plot('t_ms', 'Temperature(K)', data=adiab_state_df, marker='v', markersize=0.5, label="Adiabatic T")
ax_comp.set_xlabel('Time (ms)')
ax_comp.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax_comp.set_ylabel('Temp (K)')
ax_comp.set_ylim([500,3000])
ax_comp.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('GRI30_H-detonation_T-comp.png', dpi=500)
# Pressure comparison.
# NOTE(review): the legend labels still say "... T" although pressure is
# plotted here.
plt.clf()
ax_comp = plt.subplot(1, 1, 1)
ax_comp.plot('t_ms', 'Pressure(Pa)', data=isotherm_state_df, marker='^', markersize=0.5, label="Isothermal T")
ax_comp.plot('t_ms', 'Pressure(Pa)', data=adiab_state_df, marker='v', markersize=0.5, label="Adiabatic T")
ax_comp.set_xlabel('Time (ms)')
ax_comp.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax_comp.set_ylabel('Press (Pa)')
#ax_comp.set_ylim([500,3000])
ax_comp.legend(loc="upper left", bbox_to_anchor=(1,1))
plt.tight_layout()
plt.savefig('GRI30_H-detonation_P-comp.png', dpi=500)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 18.1 Definition of B-trees
Step2: the number $n$ of keys statisfies the inequality
Step3: Creating an empty B-tree
Step4: Inserting a key into a B-tree
Step5: If $T$ is full tree, then create a new root node, and split them. (Height increase 1).
Step6: (c)
Step7: 18.3 Deleting a key from a B-tree
|
<ASSISTANT_TASK:>
Python Code:
# Display the textbook figures for the B-tree sections; the "#todo"
# markers are unimplemented exercise placeholders.
plt.imshow(plt.imread('./res/fig18_3.png'))
plt.figure(figsize=(15,10))
plt.imshow(plt.imread('./res/fig18_4.png'))
#todo: code
#todo: code
plt.imshow(plt.imread('./res/fig18_5.png'))
plt.imshow(plt.imread('./res/fig18_6.png'))
#todo: exercises
# Figures 18.8 and 18.9 stacked in one column for the deletion section.
plt.figure(figsize=(10,20))
plt.subplot(2,1,1)
plt.imshow(plt.imread('./res/fig18_8.png'))
plt.subplot(2,1,2)
plt.imshow(plt.imread('./res/fig18_9.png'))
#todo: exercise
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Animations
Step2: Clean
|
<ASSISTANT_TASK:>
Python Code:
import ipyvolume
import numpy as np
# Volume-render the Aquarius simulation snapshot.
ds = ipyvolume.datasets.aquariusA2.fetch()
ipyvolume.quickvolshow(ds.data, lighting=True)
# Animated quiver of the streaming dataset with a playback control.
stream = ipyvolume.datasets.animated_stream.fetch()
fig = ipyvolume.figure()
q = ipyvolume.quiver(*stream.data[:,0:50,:200], color="red", size=7)
ipyvolume.animation_control(q, interval=200)
ipyvolume.show()
# Build an animated ripple on a 50x50 grid: z, colour and size all vary
# with the time parameter t.
u = np.linspace(-10, 10, 50)
x, y = np.meshgrid(u, u)
r = np.sqrt(x**2+y**2)
x = x.flatten()
y = y.flatten()
r = r.flatten()
time = np.linspace(0, np.pi*2, 50)
z = np.array([(np.cos(r + t) * np.exp(-r/5)) for t in time])
color = np.array([[np.cos(r + t), 1-np.abs(z[i]), 0.1+z[i]*0] for i, t in enumerate(time)])
size = (z+1)
color = np.transpose(color, (0, 2, 1))
ipyvolume.figure()
s = ipyvolume.scatter(x, z, y, color=color, marker="sphere")
ipyvolume.animation_control(s, interval=200)
ipyvolume.ylim(-3,3)
ipyvolume.show()
# Live-update the scatter's style attributes on the running widget.
s.geo = "diamond"
s.size = 5
s.color = 1 - s.color
# Close every live widget to free front-end resources.
from ipywidgets import Widget
Widget.close_all()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. COUNTING BOBS
Step2: 3. Counting and Grouping
Step3: Problem Set 02
Step4: 2. PAYING DEBT OFF IN A YEAR
Step5: 3. USING BISECTION SEARCH TO MAKE THE PROGRAM FASTER
|
<ASSISTANT_TASK:>
Python Code:
# Count the vowels in a string (Python 2 print statement).
s= 'wordsmith'
vowels = {'a','e','i','o','u'}
count = 0
for char in s:
    if char in vowels:
        count+=1
print "Number of vowels: " + str(count)
# Count (possibly overlapping) occurrences of 'bob' in the string.
# NOTE(review): the slice length 3 assumes len(pattern) == 3.
s = 'azcbobobegghakl'
pattern = 'bob'
count =0
for position in range(0,len(s)):
    if s[position:position+3]==pattern:
        count+=1
print count
def item_order(order):
    """Summarise a space-separated order string.

    Counts occurrences of 'salad', 'hamburger' and 'water' (other words
    are ignored) and returns e.g. 'salad:0 hamburger:2 water:1'.
    """
    counts = {'salad': 0, 'hamburger': 0, 'water': 0}
    for item in order.split(' '):
        if item in counts:
            counts[item] += 1
    return ('salad:' + str(counts['salad']) +
            ' hamburger:' + str(counts['hamburger']) +
            ' water:' + str(counts['water']))
# Demonstrate item_order on a sample order.
order = "hamburger water hamburger"
item_order(order)
# NOTE(review): the next assignments reference monthlyPaymentRate and
# annualInterestRate before either is defined in this cell, so running it
# standalone raises NameError.
balance = 5000
months=12
payment_min = balance * monthlyPaymentRate
balance_remaining = balance - payment_min
interest = balance_remaining * (annualInterestRate/12.0)
def remainingBalance(balance, annualInterestRate, monthlyPaymentRate, months=12):
    """Return the balance (rounded to cents) left after `months` months.

    Each month the minimum payment (balance * monthlyPaymentRate) is
    deducted, then interest accrues on the remaining balance at one
    twelfth of the annual rate.
    """
    monthly_rate = annualInterestRate / 12.0
    remaining = balance
    for _ in range(months):
        payment = remaining * monthlyPaymentRate
        remaining = remaining - payment
        remaining += remaining * monthly_rate
    return round(remaining, 2)
remainingBalance(balance=5000,annualInterestRate=0.18,monthlyPaymentRate=0.02,months=12)
balance = 5000
months=12
payment_min = balance * monthlyPaymentRate
balance_remaining = balance - payment_min
interest = balance_remaining * (annualInterestRate/12.0)
def remainingBalance(balance,annualInterestRate,monthlyPaymentRate,months):
balance_remaining =balance
payment_min = 0
for month in range(0,months):
payment_min = monthlyPaymentRate
balance_remaining = balance_remaining - payment_min
interest = balance_remaining * ((annualInterestRate)/12.0)
balance_remaining += interest
return round(balance_remaining,2)
def payOffDebt(balance,annualInterestRate,monthlyPaymentRate=0.0,months=12):
monthlyPaymentRate+=10
balance_remaining = remainingBalance(balance,annualInterestRate,monthlyPaymentRate,months)
rate = monthlyPaymentRate
if balance_remaining<=0:
print 'Lowest Payment: ' + str(rate)
else:
#print balance_remaining
payOffDebt(balance,annualInterestRate,monthlyPaymentRate,months)
payOffDebt(3329,0.2)
payOffDebt(4773,0.2)
payOffDebt(3926,0.2)
range(12)
# Bisection search for the exact fixed monthly payment that clears the
# balance in 12 months, to within one cent (Python 2 print statement).
# NOTE(review): the `months` parameter is unused — the inner loop is
# hard-coded to range(12).
def payOffDebtBisection(balance,annualInterestRate,months=12):
    balance_remaining = balance
    monthlyInterestRate = annualInterestRate/12.0
    # Lower bound: zero interest; upper bound: fully compounded balance.
    lower_limit = balance/12.0
    upper_limit = balance * ((1+monthlyInterestRate)**12)/12.0
    tolerance =0.01
    while abs(balance_remaining) > tolerance:
        balance_remaining = balance
        payment = ( lower_limit + upper_limit )/2
        for month in range(12):
            balance_remaining = balance_remaining-payment
            balance_remaining = balance_remaining*(1+monthlyInterestRate)
        # Overshot -> lower the upper bound; undershot -> raise the lower.
        if balance_remaining<=0:
            upper_limit = payment
        else:
            lower_limit = payment
    print 'Lowest Payment: ' + str(round(payment, 2))
payOffDebtBisection(320000,0.2)
payOffDebtBisection(999999,0.18)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Split data into training and testing.
Step3: Learning a multiple regression model
Step4: Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows
Step5: Making Predictions
Step6: Compute RSS
Step7: Test your function by computing the RSS on TEST data for the example model
Step8: Create some new features
Step9: Next create the following 4 new features as column in both TEST and TRAIN data
Step10: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.
Step11: Learning Multiple Models
Step12: Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients
Step13: Quiz Question
Step14: Quiz Question
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
# King County house sales dataset.
sales = graphlab.SFrame('kc_house_data.gl/')
# 80/20 train/test split with a fixed seed for reproducibility.
train_data,test_data = sales.random_split(.8,seed=0)
# Baseline 3-feature linear regression on the training set.
example_features = ['sqft_living', 'bedrooms', 'bathrooms']
example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features,
                                                  validation_set = None)
example_weight_summary = example_model.get("coefficients")
print example_weight_summary
example_predictions = example_model.predict(train_data)
print example_predictions[0] # should be 271789.505878
def get_residual_sum_of_squares(model, data, outcome):
    """Return the residual sum of squares (RSS) of `model`'s predictions
    on `data` against the observed `outcome` values."""
    residuals = model.predict(data) - outcome
    return (residuals * residuals).sum()
rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])
print rss_example_train # should be 2.7376153833e+14
from math import log
# Engineered features, created in both TRAIN and TEST sets: squared
# bedrooms, bedrooms*bathrooms interaction, log living area, lat+long.
train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)
test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)
# create the remaining 3 features in both TEST and TRAIN data
train_data['bed_bath_rooms'] = train_data['bedrooms'] * train_data['bathrooms']
test_data['bed_bath_rooms'] = test_data['bedrooms'] * test_data['bathrooms']
train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x))
test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x))
train_data['lat_plus_long'] = train_data['lat'] + train_data['long']
test_data['lat_plus_long'] = test_data['lat'] + test_data['long']
# Quiz: mean of each new feature on the test set.
test_data['bedrooms_squared'].mean()
test_data['bed_bath_rooms'].mean()
test_data['log_sqft_living'].mean()
test_data['lat_plus_long'].mean()
# Three nested feature sets for model comparison.
model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']
# Learn the three models: (don't forget to set validation_set = None)
model_1 = graphlab.linear_regression.create(train_data, target='price', features=model_1_features, validation_set=None)
model_2 = graphlab.linear_regression.create(train_data, target='price', features=model_2_features, validation_set=None)
model_3 = graphlab.linear_regression.create(train_data, target='price', features=model_3_features, validation_set=None)
# Examine/extract each model's coefficients:
print model_1.get('coefficients')
print model_2.get('coefficients')
# Compute the RSS on TRAINING data for each of the three models and record the values:
print get_residual_sum_of_squares(model_1, train_data, train_data['price'])
print get_residual_sum_of_squares(model_2, train_data, train_data['price'])
print get_residual_sum_of_squares(model_3, train_data, train_data['price'])
# Compute the RSS on TESTING data for each of the three models and record the values:
print get_residual_sum_of_squares(model_1, test_data, test_data['price'])
print get_residual_sum_of_squares(model_2, test_data, test_data['price'])
print get_residual_sum_of_squares(model_3, test_data, test_data['price'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we will make a default NormalFault.
Step2: This fault has a strike of NE and dips to the SE. Thus the uplifted nodes (shown in yellow) are in the NW half of the domain.
Step3: We can add functionality to the NormalFault with other keyword arguments. We can change the fault strike and dip, as well as specify a time series of fault uplift through time.
Step4: By reversing the order of (x1, y1) and (x2, y2) we can reverse the location of the upthrown nodes (all else equal).
Step5: We can also specify complex time-rock uplift rate histories, but we'll explore that later in the tutorial.
Step6: As we can see, the upper left portion of the grid has been uplifted an a stream network has developed over the whole domain.
Step7: We can see that when the boundary nodes are not included, the faulted region is impacted by the edge boundary conditions differently. Depending on your application, one or the other of these boundary condition options may suite your problem better.
Step8: The default value for uplift rate is 0.001 (units unspecified as it will depend on the x and t units in a model, but in this example we assume time units of years and length units of meters).
Step9: A technical note
Step10: As you can see the resulting topography is very different than in the case with continuous uplift.
Step11: Next we create the grid and run the model.
Step12: We can also examine the soil thickness and soil production rate. Here in the soil depth, we see it is highest along the ridge crests.
Step13: The soil production rate is highest where the soil depth is low, as we would expect given the exponential form.
|
<ASSISTANT_TASK:>
Python Code:
# start by importing necessary modules
import matplotlib.pyplot as plt
import numpy as np
from landlab import HexModelGrid, RasterModelGrid
from landlab.components import (
FastscapeEroder,
FlowAccumulator,
NormalFault,
StreamPowerEroder,
)
from landlab.plot import imshow_grid
%matplotlib inline
# Build a small raster grid (6 x 6 nodes, 10 m spacing) with a flat
# topographic__elevation field so we can visualize which nodes a default
# NormalFault instance treats as upthrown.
grid = RasterModelGrid((6, 6), xy_spacing=10)
grid.add_zeros("topographic__elevation", at="node")

# With no arguments, NormalFault uses its default fault trace
# (strike to the NE, dipping to the SE).
nf = NormalFault(grid)

# Show the faulted (upthrown) nodes as 1s and all other nodes as 0s;
# the cyan dots mark the node locations themselves.
plt.figure()
imshow_grid(grid, nf.faulted_nodes.astype(int), cmap="viridis")
plt.plot(grid.x_of_node, grid.y_of_node, "c.")
plt.show()
# Rebuild the fault on the same 6 x 6 grid (created in the previous cell),
# this time letting boundary nodes participate in faulting as well.
nf = NormalFault(grid, include_boundaries=True)

plt.figure()
imshow_grid(grid, nf.faulted_nodes.astype(int), cmap="viridis")
plt.plot(grid.x_of_node, grid.y_of_node, "c.")
plt.show()
# A larger raster grid on which to specify an explicit fault trace.
grid = RasterModelGrid((60, 100), xy_spacing=10)
z = grid.add_zeros("topographic__elevation", at="node")

# The fault trace runs from (x1, y1) to (x2, y2); which side of that
# directed line is upthrown depends on the ordering of the endpoints.
nf = NormalFault(grid, fault_trace={"x1": 0, "y1": 200, "y2": 30, "x2": 600})
imshow_grid(grid, nf.faulted_nodes.astype(int), cmap="viridis")

# Reversing the order of the two trace endpoints flips which side of the
# fault is upthrown, all else being equal.
grid = RasterModelGrid((60, 100), xy_spacing=10)
z = grid.add_zeros("topographic__elevation", at="node")
nf = NormalFault(grid, fault_trace={"y1": 30, "x1": 600, "x2": 0, "y2": 200})
imshow_grid(grid, nf.faulted_nodes.astype(int), cmap="viridis")
# here are the parameters to change
K = 0.0005  # stream power coefficient, bigger = streams erode more quickly
U = 0.0001  # uplift rate in meters per year
dt = 1000  # time step in years
dx = 10  # space step in meters
nr = 60  # number of model rows
nc = 100  # number of model columns

# instantiate the grid
grid = HexModelGrid((nr, nc), dx, node_layout="rect")

# add a topographic__elevation field with noise so drainage can organize
z = grid.add_zeros("topographic__elevation", at="node")
z[grid.core_nodes] += 100.0 + np.random.randn(grid.core_nodes.size)

fr = FlowAccumulator(grid)
fs = FastscapeEroder(grid, K_sp=K)
nf = NormalFault(grid, fault_trace={"x1": 0, "x2": 800, "y1": 0, "y2": 500})

# Run this model for 300 1000-year timesteps (300,000 years).
for i in range(300):
    nf.run_one_step(dt)
    fr.run_one_step()
    fs.run_one_step(dt)
    # Background rock uplift of the model core. Use the U parameter
    # defined above (the original hard-coded 0.0001 here, which would
    # silently fall out of sync if U were changed).
    z[grid.core_nodes] += U * dt

# plot the final topography
imshow_grid(grid, z)
# instantiate the grid, reusing the node-spacing parameter dx (the
# original hard-coded 10 here, which matched dx but would drift if dx
# were ever changed)
grid = HexModelGrid((nr, nc), dx, node_layout="rect")

# add a topographic__elevation field with noise
z = grid.add_zeros("topographic__elevation", at="node")
z[grid.core_nodes] += 100.0 + np.random.randn(grid.core_nodes.size)

fr = FlowAccumulator(grid)
fs = FastscapeEroder(grid, K_sp=K)

# Same fault trace as before, but now boundary nodes are faulted too.
nf = NormalFault(
    grid, fault_trace={"x1": 0, "x2": 800, "y1": 0, "y2": 500}, include_boundaries=True
)

# Run this model for 300 1000-year timesteps (300,000 years).
for i in range(300):
    nf.run_one_step(dt)
    fr.run_one_step()
    fs.run_one_step(dt)
    z[grid.core_nodes] += U * dt

# plot the final topography
imshow_grid(grid, z)
# Times (in years) at which the fault throw rate changes. The raw values
# are multiples of 10*dt so the on/off pulses align with the model
# timesteps used below.
time = (
    np.array(
        [
            0.0,
            7.99,
            8.00,
            8.99,
            9.0,
            17.99,
            18.0,
            18.99,
            19.0,
            27.99,
            28.00,
            28.99,
            29.0,
        ]
    )
    * 10
    * dt
)

# Fault throw rate [m/yr] paired with each time above: three discrete
# pulses of 0.01 m/yr separated by quiescent (zero-rate) periods.
rate = np.array([0, 0, 0.01, 0.01, 0, 0, 0.01, 0.01, 0, 0, 0.01, 0.01, 0])

# Compare the pulsed history against a constant 0.001 m/yr history.
plt.figure()
plt.plot(time, rate)
plt.plot([0, 300 * dt], [0.001, 0.001])
plt.xlabel("Time [years]")
plt.ylabel("Fault Throw Rate [m/yr]")
plt.show()

# Linearly interpolate both histories onto the model timesteps, then
# integrate (cumulative sum times dt) to get total fault throw.
t = np.arange(0, 300 * dt, dt)

rate_constant = np.interp(t, [0, 300 * dt], [0.001, 0.001])
rate_variable = np.interp(t, time, rate)

cumulative_rock_uplift_constant = np.cumsum(rate_constant) * dt
cumulative_rock_uplift_variable = np.cumsum(rate_variable) * dt

plt.figure()
plt.plot(t, cumulative_rock_uplift_constant)
plt.plot(t, cumulative_rock_uplift_variable)
plt.xlabel("Time [years]")
plt.ylabel("Cumulative Fault Throw [m]")
plt.show()
# instantiate the grid (use dx rather than the original hard-coded 10,
# so node spacing stays tied to the parameter block defined earlier)
grid = HexModelGrid((nr, nc), dx, node_layout="rect")

# add a topographic__elevation field with noise
z = grid.add_zeros("topographic__elevation", at="node")
z[grid.core_nodes] += 100.0 + np.random.randn(grid.core_nodes.size)

fr = FlowAccumulator(grid)
fs = FastscapeEroder(grid, K_sp=K)

# This time the fault throw rate follows the pulsed time series
# constructed in the previous cell instead of a constant rate.
nf = NormalFault(
    grid,
    fault_throw_rate_through_time={"time": time, "rate": rate},
    fault_trace={"x1": 0, "x2": 800, "y1": 0, "y2": 500},
    include_boundaries=True,
)

# Run this model for 300 1000-year timesteps (300,000 years).
for i in range(300):
    nf.run_one_step(dt)
    fr.run_one_step()
    fs.run_one_step(dt)
    z[grid.core_nodes] += U * dt

# plot the final topography
imshow_grid(grid, z)
from landlab.components import DepthDependentDiffuser, ExponentialWeatherer

# here are the parameters to change
K = 0.0005  # stream power coefficient, bigger = streams erode more quickly
U = 0.0001  # uplift rate in meters per year
max_soil_production_rate = (
    0.001
)  # Maximum weathering rate for bare bedrock in meters per year
soil_production_decay_depth = 0.7  # Characteristic weathering depth in meters
linear_diffusivity = 0.01  # Hillslope diffusivity in m2 per year
soil_transport_decay_depth = 0.5  # Characteristic soil transport depth in meters

dt = 100  # time step in years
dx = 10  # space step in meters
nr = 60  # number of model rows
nc = 100  # number of model columns

# IPython-only help syntax (valid in a notebook, not in a plain .py
# file): displays the ExponentialWeatherer docstring.
?ExponentialWeatherer
# instantiate the grid (use dx, not a hard-coded 10, for node spacing)
grid = HexModelGrid((nr, nc), dx, node_layout="rect")

# add a topographic__elevation field with noise
z = grid.add_zeros("topographic__elevation", at="node")
z[grid.core_nodes] += 100.0 + np.random.randn(grid.core_nodes.size)

# create a field for soil depth
d = grid.add_zeros("soil__depth", at="node")

# create a bedrock elevation field; bedrock sits below the soil column
b = grid.add_zeros("bedrock__elevation", at="node")
b[:] = z - d

fr = FlowAccumulator(grid, depression_finder="DepressionFinderAndRouter", routing="D4")
fs = FastscapeEroder(grid, K_sp=K)
ew = ExponentialWeatherer(
    grid,
    soil_production__decay_depth=soil_production_decay_depth,
    soil_production__maximum_rate=max_soil_production_rate,
)
dd = DepthDependentDiffuser(
    grid,
    linear_diffusivity=linear_diffusivity,
    soil_transport_decay_depth=soil_transport_decay_depth,
)

nf = NormalFault(
    grid,
    fault_throw_rate_through_time={"time": [0, 30], "rate": [0.001, 0.001]},
    fault_trace={"x1": 0, "x2": 800, "y1": 0, "y2": 500},
    include_boundaries=False,
)

# Run this model for 300 100-year timesteps (30,000 years).
for i in range(300):

    # Move normal fault
    nf.run_one_step(dt)

    # Route flow
    fr.run_one_step()

    # Erode with water
    fs.run_one_step(dt)

    # We must also now erode the bedrock where relevant. If water erosion
    # into bedrock has occurred, the bedrock elevation will be higher than
    # the actual elevation, so we simply re-set bedrock elevation to the
    # lower of itself or the current elevation. (`b` already references
    # the grid's bedrock__elevation array, so the per-iteration re-fetch
    # from grid.at_node in the original was redundant and is removed.)
    b[:] = np.minimum(b, grid.at_node["topographic__elevation"])

    # Calculate regolith-production rate
    ew.calc_soil_prod_rate()

    # Generate and move soil around. This component will update both the
    # soil thickness and topographic elevation fields.
    dd.run_one_step(dt)

    # uplift the whole domain; apply to both bedrock and topography so
    # the two surfaces stay consistent
    z[grid.core_nodes] += U * dt
    b[grid.core_nodes] += U * dt

# plot the final topography
imshow_grid(grid, "topographic__elevation")

# and the soil depth
imshow_grid(grid, "soil__depth", cmap="viridis")

# and the soil production rate
imshow_grid(grid, "soil_production__rate", cmap="viridis")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# Bind this notebook to the CMIP6 "toplevel" realm document for the
# NERC "sandbox-1" source; the DOC.set_id / DOC.set_value calls below
# all write into this single document.
DOC = NotebookOutput('cmip6', 'nerc', 'sandbox-1', 'toplevel')

# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)

# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)

# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 그래프 및 tf.function 소개
Step2: ๊ทธ๋ํ ์ด์ฉํ๊ธฐ
Step3: ๊ฒ๋ณด๊ธฐ์ Function์ TensorFlow ์ฐ์ฐ์ ์ฌ์ฉํ์ฌ ์์ฑํ๋ ์ผ๋ฐ ํจ์์ฒ๋ผ ๋ณด์
๋๋ค. ๊ทธ๋ฌ๋ ๊ทธ ์์ ๋ค์ฌ๋ค ๋ณด๋ฉด ๋งค์ฐ ๋ค๋ฆ
๋๋ค. Function๋ ํ๋์ API ๋ค์์ ์ฌ๋ฌ tf.Graph๋ฅผ ์บก์ํํฉ๋๋ค. ๋ฐ๋ก ์ด๋ฐ ์ด์ ๋ก Function์ด ์๋ ๋ฐ ๋ฐฐํฌ ๊ฐ๋ฅ์ฑ๊ณผ ๊ฐ์ ๊ทธ๋ํ ์คํ์ ์ด์ ์ ์ ๊ณตํ๋ ๊ฒ์
๋๋ค.
Step4: TensorFlow 1.x๋ฅผ ์ฌ์ฉํ ๊ฒฝ์ฐ Placeholder ๋๋ tf.Sesssion์ ์ ์ํ ํ์๊ฐ ์์์์ ์ ์ ์์ต๋๋ค.
Step5: ๊ทธ๋ํ๋ฅผ ์ง์ ๋ณผ ํ์๋ ์๊ฒ ์ง๋ง ๊ฒฐ๊ณผ๋ฅผ ๊ฒ์ฌํ์ฌ ์ ํํ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํ ์ ์์ต๋๋ค. ์ฝ๊ธฐ๊ฐ ์ฝ์ง ์์ผ๋ฏ๋ก ๋๋ฌด ์ฃผ์ ๊น๊ฒ ๋ณผ ํ์๋ ์์ต๋๋ค!
Step6: ๋๋ถ๋ถ์ ๊ฒฝ์ฐ, tf.function์ ํน๋ณํ ๊ณ ๋ ค์์ด ์๋ํฉ๋๋ค. ๊ทธ๋ฌ๋ ๋ช ๊ฐ์ง ์ฃผ์ ์ฌํญ์ด ์์ผ๋ฉฐ <a>tf.function ์๋ด์</a>์ ์ ์ฒด AutoGraph ์ฐธ์กฐ์๊ฐ ๋์์ด ๋ ์ ์์ต๋๋ค.
Step7: Function์ด ์ด ์๋ช
์ผ๋ก ์ด๋ฏธ ํธ์ถ๋ ๊ฒฝ์ฐ, Function์ ์ tf.Graph๋ฅผ ์์ฑํ์ง ์์ต๋๋ค.
Step8: ์ฌ๋ฌ ๊ทธ๋ํ๋ก ๋ท๋ฐ์นจ๋๋ค๋ ์ ์์ Function๋ ๋คํ์ฑ์
๋๋ค. ๊ทธ ๊ฒฐ๊ณผ, ๋จ์ผ tf.Graph๋ก ๋ํ๋ผ ์ ์๋ ๊ฒ๋ณด๋ค ๋ ๋ง์ ์
๋ ฅ ์ ํ์ ์ง์ํ ์ ์์๋ฟ๋ง ์๋๋ผ tf.Graph๊ฐ ๋ ์ฐ์ํ ์ฑ๋ฅ์ ๊ฐ๋๋ก ์ต์ ํํ ์ ์์ต๋๋ค.
Step9: tf.function ์ฌ์ฉํ๊ธฐ
Step10: Function์ ๊ทธ๋ํ๊ฐ ๋๋ฑํ Python ํจ์์ ๊ฐ์ ๊ณ์ฐ์ ์ํํ๋์ง ํ์ธํ๊ธฐ ์ํด tf.config.run_functions_eagerly(True)๋ฅผ ์ด์ฉํด ์ฆ์ ์คํํ๋๋ก ํ ์ ์์ต๋๋ค. <strong data-md-type="raw_html">์ฝ๋๋ฅผ ์ ์์ ์ผ๋ก ์คํํ๋ ๋์ ๊ทธ๋ํ๋ฅผ ์์ฑํ๊ณ ์คํํ๋ Function์ ์ญํ ์ ํด์ </strong>์ํฌ ๋ ์ค์์น์ ๊ฐ์ด ์ด์ฉ๋๋ ์ฝ๋์
๋๋ค.
Step11: ๊ทธ๋ฌ๋ Function์ ๊ทธ๋ํ ๋ฐ ์ฆ์ ์คํ์์ ์๋ก ๋ค๋ฅด๊ฒ ๋์ํ ์ ์์ต๋๋ค. Python print ํจ์๋ ์ด ๋ ๋ชจ๋๊ฐ ์ด๋ป๊ฒ ๋ค๋ฅธ์ง ๋ณด์ฌ์ฃผ๋ ํ ๊ฐ์ง ์์ ์
๋๋ค. print ๋ฌธ์ ์ฝ์
ํ๊ณ ์ด๋ฅผ ๋ฐ๋ณต์ ์ผ๋ก ํธ์ถํ ๋ ์ด๋ค ์ผ์ด ๋ฐ์ํ๋์ง ์ดํด๋ณด๊ฒ ์ต๋๋ค.
Step12: ์ธ์๋ ๋ด์ฉ์ ์ ์ดํด๋ด
๋๋ค.
Step13: ์ถ๋ ฅ ๊ฒฐ๊ณผ๊ฐ ๋๋์ง ์๋์? get_MSE๋ ์ธ ๋ฒ ํธ์ถ๋์์ง๋ง ํ ๋ฒ๋ง ์ธ์๋์์ต๋๋ค.
Step14: print๋ Python์ ๋ถ์์ฉ์ด๋ฉฐ ํจ์๋ฅผ Function์ผ๋ก ๋ณํํ ๋ ์๊ณ ์์ด์ผ ํ๋ <a>๋ค๋ฅธ ์ฐจ์ด์ ๋ค</a>์ด ์์ต๋๋ค. tf.function์ผ๋ก ์ฑ๋ฅ ํฅ์ํ๊ธฐ ๊ฐ์ด๋์ ํ๊ณ ์น์
์์ ์์ธํ ์์๋ณด์ธ์.
Step15: tf.function ๋ชจ๋ฒ ์ฌ๋ก
Step16: tf.function์ ์ผ๋ฐ์ ์ผ๋ก ํ๋ จ ๋ฃจํ์ ์๋๋ฅผ ๋์ด๋ ๋ฐ ์ฌ์ฉ๋๋ฉฐ, Keras๋ฅผ ์ฌ์ฉํ์ฌ ์ฒ์๋ถํฐ ํ๋ จ ๋ฃจํ ์์ฑํ๊ธฐ์์ ์์ธํ ์์๋ณผ ์ ์์ต๋๋ค.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import timeit
from datetime import datetime
# Define a Python function.
def a_regular_function(x, y, b):
    """Matrix-multiply x by y and add the bias b using eager TensorFlow ops."""
    product = tf.matmul(x, y)
    return product + b
# `a_function_that_uses_a_graph` is a TensorFlow `Function`.
# tf.function wraps the eager Python function; the first call traces it into
# a tf.Graph, which is executed on subsequent calls.
a_function_that_uses_a_graph = tf.function(a_regular_function)
# Make some tensors.
x1 = tf.constant([[1.0, 2.0]])
y1 = tf.constant([[2.0], [3.0]])
b1 = tf.constant(4.0)
orig_value = a_regular_function(x1, y1, b1).numpy()
# Call a `Function` like a Python function.
tf_function_value = a_function_that_uses_a_graph(x1, y1, b1).numpy()
# Graph execution must produce the same value as eager execution.
assert(orig_value == tf_function_value)
def inner_function(x, y, b):
    """Return x @ y + b; traced as part of any tf.function that calls it."""
    x = tf.matmul(x, y)
    x = x + b
    return x
# Use the decorator to make `outer_function` a `Function`.
@tf.function
def outer_function(x):
    """Apply `inner_function` to x with fixed constants y and b."""
    y = tf.constant([[2.0], [3.0]])
    b = tf.constant(4.0)
    return inner_function(x, y, b)
# Note that the callable will create a graph that
# includes `inner_function` as well as `outer_function`.
outer_function(tf.constant([[1.0, 2.0]])).numpy()
def simple_relu(x):
    """Data-dependent Python `if`; AutoGraph converts it into graph ops."""
    if tf.greater(x, 0):
        return x
    else:
        return 0
# `tf_simple_relu` is a TensorFlow `Function` that wraps `simple_relu`.
tf_simple_relu = tf.function(simple_relu)
print("First branch, with graph:", tf_simple_relu(tf.constant(1)).numpy())
print("Second branch, with graph:", tf_simple_relu(tf.constant(-1)).numpy())
# This is the graph-generating output of AutoGraph.
print(tf.autograph.to_code(simple_relu))
# This is the graph itself.
print(tf_simple_relu.get_concrete_function(tf.constant(1)).graph.as_graph_def())
@tf.function
def my_relu(x):
    """ReLU; a new graph is traced for each new input signature observed."""
    return tf.maximum(0., x)
# `my_relu` creates new graphs as it observes more signatures.
print(my_relu(tf.constant(5.5)))
print(my_relu([1, -1]))
print(my_relu(tf.constant([3., -3.])))
# These two calls do *not* create new graphs.
print(my_relu(tf.constant(-2.5))) # Signature matches `tf.constant(5.5)`.
print(my_relu(tf.constant([-1., 1.]))) # Signature matches `tf.constant([3., -3.])`.
# There are three `ConcreteFunction`s (one for each graph) in `my_relu`.
# The `ConcreteFunction` also knows the return type and shape!
print(my_relu.pretty_printed_concrete_signatures())
@tf.function
def get_MSE(y_true, y_pred):
    """Mean squared error between two tensors, executed as a graph."""
    sq_diff = tf.pow(y_true - y_pred, 2)
    return tf.reduce_mean(sq_diff)
y_true = tf.random.uniform([5], maxval=10, dtype=tf.int32)
y_pred = tf.random.uniform([5], maxval=10, dtype=tf.int32)
print(y_true)
print(y_pred)
get_MSE(y_true, y_pred)
# Force all Functions to run eagerly (no graph) -- useful for debugging.
tf.config.run_functions_eagerly(True)
get_MSE(y_true, y_pred)
# Don't forget to set it back when you are done.
tf.config.run_functions_eagerly(False)
@tf.function
def get_MSE(y_true, y_pred):
    """Same MSE, but with a Python side effect (print) to expose tracing."""
    # This print runs only while the function is *traced*, not on every
    # graph execution.
    print("Calculating MSE!")
    sq_diff = tf.pow(y_true - y_pred, 2)
    return tf.reduce_mean(sq_diff)
# Three calls, but the message is printed only once (first trace).
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
# Now, globally set everything to run eagerly to force eager execution.
tf.config.run_functions_eagerly(True)
# Observe what is printed below: eagerly, the print fires on every call.
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
error = get_MSE(y_true, y_pred)
tf.config.run_functions_eagerly(False)
def unused_return_eager(x):
    """Eager: every op runs, so the unused gather still raises for len(x)==1."""
    # Get index 1 will fail when `len(x) == 1`
    tf.gather(x, [1]) # unused
    return x
try:
    print(unused_return_eager(tf.constant([0.0])))
except tf.errors.InvalidArgumentError as e:
    # All operations are run during eager execution so an error is raised.
    print(f'{type(e).__name__}: {e}')
@tf.function
def unused_return_graph(x):
    """Graph: dead ops are pruned, so the failing gather never executes."""
    tf.gather(x, [1]) # unused
    return x
# Only needed operations are run during graph exection. The error is not raised.
print(unused_return_graph(tf.constant([0.0])))
x = tf.random.uniform(shape=[10, 10], minval=-1, maxval=2, dtype=tf.dtypes.int32)
def power(x, y):
    """Return x**y via y repeated matrix multiplications."""
    result = tf.eye(10, dtype=tf.dtypes.int32)
    for _ in range(y):
        result = tf.matmul(x, result)
    return result
# Benchmark the same computation eagerly vs. as a traced graph.
print("Eager execution:", timeit.timeit(lambda: power(x, 100), number=1000))
power_as_graph = tf.function(power)
print("Graph execution:", timeit.timeit(lambda: power_as_graph(x, 100), number=1000))
@tf.function
def a_function_with_python_side_effect(x):
    """Demonstrates that Python side effects run only during tracing."""
    print("Tracing!") # An eager-only side effect.
    return x * x + tf.constant(2)
# This is traced the first time.
print(a_function_with_python_side_effect(tf.constant(2)))
# The second time through, you won't see the side effect.
print(a_function_with_python_side_effect(tf.constant(3)))
# This retraces each time the Python argument changes,
# as a Python argument could be an epoch count or other
# hyperparameter.
print(a_function_with_python_side_effect(2))
print(a_function_with_python_side_effect(3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download wikipedia dump files
Step2: Convert two wikipedia dump files
Step3: Initial training
Step4: Japanese new idol group, "Babymetal", weren't known worldwide in 2010, so that the word, "babymetal", is not in oldmodel vocaburary.
Step5: Online update
Step6: Model Comparison
Step7: After online training, the word, "babymetal", is added in model. This word is simillar with rock and metal bands.
Step8: The word, "Zootopia", become disney movie through the years.
|
<ASSISTANT_TASK:>
Python Code:
%%bash
# Fetch the experimental online-word2vec branch of gensim.
git clone -b online-w2v git@github.com:isohyt/gensim.git
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models.word2vec import Word2Vec, LineSentence
from pprint import pprint
from copy import deepcopy
from multiprocessing import cpu_count
%%bash
# Download the 2010 and 2016 English Wikipedia dumps.
wget https://dumps.wikimedia.org/archive/2010/2010-11/enwiki/20101011/enwiki-20101011-pages-articles.xml.bz2
wget https://dumps.wikimedia.org/enwiki/20160820/enwiki-20160820-pages-articles.xml.bz2
# Build streaming corpora over both dumps (old = 2010, new = 2016).
old, new = [WikiCorpus('enwiki-{}-pages-articles.xml.bz2'.format(ymd)) for ymd in ['20101011', '20160820']]
def write_wiki(wiki, name, titles=None):
    """Dump the text of each previously-unseen article in `wiki` to `<name>.wiki`.

    Args:
        wiki: a gensim WikiCorpus-like object; its `metadata` flag is switched
            on so that `get_texts()` yields `(tokens, (page_id, title))` pairs.
        name: basename of the output file; articles are written to
            `<name>.wiki`, one article per line, tokens joined by spaces.
        titles: optional list of article titles already written, which are
            skipped; the list is extended in place with every new title.
            Defaults to a fresh empty list.  (Bug fix: the original used a
            mutable default argument `titles=[]`, which is shared across
            calls and silently carries state between invocations.)

    Returns:
        The `titles` list, containing every title written so far.
    """
    if titles is None:
        titles = []
    with open('{}.wiki'.format(name), 'wb') as f:
        wiki.metadata = True
        for text, (page_id, title) in wiki.get_texts():
            if title not in titles:
                f.write(b' '.join(text) + b'\n')
                titles.append(title)
    return titles
# Write the 2010 dump first, then only the *new* articles from the 2016 dump
# (titles already written are skipped via the returned title list).
old_titles = write_wiki(old, 'old')
all_titles = write_wiki(new, 'new', old_titles)
oldwiki, newwiki = [LineSentence(f+'.wiki') for f in ['old', 'new']]
%%time
# Initial (offline) training on the 2010 corpus only.
model = Word2Vec(oldwiki, min_count = 0, workers=cpu_count())
# model = Word2Vec.load('oldmodel')
oldmodel = deepcopy(model)
oldmodel.save('oldmodel')
# "babymetal" is absent from the 2010 vocabulary, so this raises KeyError.
try:
    print(oldmodel.most_similar('babymetal'))
except KeyError as e:
    print(e)
%%time
# Online update: grow the vocabulary with the 2016 corpus, then keep training.
model.build_vocab(newwiki, update=True)
model.train(newwiki)
model.save('newmodel')
# model = Word2Vec.load('newmodel')
for m in ['oldmodel', 'model']:
    # NOTE(review): eval() over a fixed literal list is safe here, but a
    # plain dict lookup would be cleaner.
    print('The vocabulary size of the', m, 'is', len(eval(m).vocab))
try:
    pprint(model.most_similar('babymetal'))
except KeyError as e:
    print(e)
w = 'zootopia'
for m in ['oldmodel', 'model']:
    print('The count of the word,'+w+', is', eval(m).vocab[w].count, 'in', m)
    pprint(eval(m).most_similar(w))
    print('')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Eyes on the data!
Step4: Check out the colors at rapidtables.com/web/color/RGB_Color, but don't forget to flip order of the channels to BGR.
Step5: Do some sanity checks
Step7: Get labels
Step8: Create DataFrame
Step11: Basic Feature Engineering
Step13: These coarse features look pretty bad individually. Most of this is due to features capturing absolute pixel values. But photo lighting could vary significantly between different image shots. What we end up with is a lot of noise.
|
<ASSISTANT_TASK:>
Python Code:
# Enter your username:
YOUR_GMAIL_ACCOUNT = '******' # Whatever is before @gmail.com in your email address
# Libraries for this section:
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pandas as pd
import cv2
import warnings
warnings.filterwarnings('ignore')
# Grab the filenames:
TRAINING_DIR = os.path.join('/home', YOUR_GMAIL_ACCOUNT, 'data/training_small/')
files = os.listdir(TRAINING_DIR) # Grab all the files in the VM images directory
print(files[0:5]) # Let's see some filenames
def show_pictures(filelist, dir, img_rows=2, img_cols=3, figsize=(20, 10)):
    """Display the first few images in a grid.

    (Fix: the docstring's triple-quote delimiters were lost in extraction,
    leaving bare text inside the function body.)

    Args:
        filelist: list of filenames to pull from
        dir: directory where the files are stored (note: shadows the builtin
            `dir`; name kept for backward compatibility)
        img_rows: number of rows of images to display
        img_cols: number of columns of images to display
        figsize: sizing for inline plots

    Returns:
        None
    """
    plt.close('all')
    fig = plt.figure(figsize=figsize)
    for i in range(img_rows * img_cols):
        # Subplot indices are 1-based in matplotlib.
        a = fig.add_subplot(img_rows, img_cols, i + 1)
        img = mpimg.imread(os.path.join(dir, filelist[i]))
        plt.imshow(img)
    plt.show()
show_pictures(files, TRAINING_DIR)
# What does the actual image matrix look like? There are three channels:
img = cv2.imread(os.path.join(TRAINING_DIR, files[0]))
print('\n***Colors in the middle of the first image***\n')
# Slice a 4x4 patch from the middle of the image; OpenCV loads channels in
# BGR order (index 0 = blue, 1 = green, 2 = red).
print('Blue channel:')
print(img[63:67,63:67,0])
print('Green channel:')
print(img[63:67,63:67,1])
print('Red channel:')
print(img[63:67,63:67,2])
def show_bgr(filelist, dir, img_rows=2, img_cols=3, figsize=(20, 10)):
    """Plot per-channel (B, G, R) pixel-value histograms for the first few images.

    Args:
        filelist: list of filenames to pull from
        dir: directory where the files are stored
        img_rows: number of rows of plots to display
        img_cols: number of columns of plots to display
        figsize: sizing for inline plots

    Returns:
        None
    """
    plt.close('all')
    fig = plt.figure(figsize=figsize)
    color = ('b', 'g', 'r')  # matches OpenCV's BGR channel order
    for i in range(img_rows * img_cols):
        a = fig.add_subplot(img_rows, img_cols, i + 1)
        # Bug fix: the original read from the globals `TRAINING_DIR`/`files`
        # instead of the `dir`/`filelist` parameters, silently ignoring the
        # arguments passed by the caller.
        img = cv2.imread(os.path.join(dir, filelist[i]))
        for c, col in enumerate(color):
            histr = cv2.calcHist([img], [c], None, [256], [0, 256])
            plt.plot(histr, color=col)
            plt.xlim([0, 256])
            plt.ylim([0, 500])
    plt.show()
show_bgr(files, TRAINING_DIR)
# Pull in blue channel for each image, reshape to vector, count unique values:
unique_colors = []
landscape = []
for f in files:
img = np.array(cv2.imread(os.path.join(TRAINING_DIR, f)))[:,:,0]
# Determine if landscape is more likely than portrait by comparing
#amount of zero channel in 3rd row vs 3rd col:
landscape_likely = (np.count_nonzero(img[:,2]) > np.count_nonzero(img[2,:])) * 1
# Count number of unique blue values:
col_count = len(set(img.ravel()))
# Append to array:
unique_colors.append(col_count)
landscape.append(landscape_likely)
unique_colors = pd.DataFrame({'files': files, 'unique_colors': unique_colors,
'landscape': landscape})
unique_colors = unique_colors.sort_values(by=['unique_colors'])
print(unique_colors[0:10])
# Plot the pictures with the lowest diversity of unique color values:
suspicious = unique_colors['files'].tolist()
show_pictures(suspicious, TRAINING_DIR, 1)
def get_label(str):
    """Split out the label from the filename of the image, where we stored it.

    Filenames look like `<id>_<score>_<label>.png`; the label is the integer
    between the final underscore and the extension.

    Args:
        str: filename string (note: shadows the builtin `str`; name kept so
            existing keyword callers are not broken)

    Returns:
        label: an integer 1 or 0
    """
    split_filename = str.split('_')
    label = int(split_filename[-1].split('.')[0])
    return label
# Example:
get_label('12550_0.1574_1.png')
df = unique_colors[:]
# Extract the 0/1 label encoded in each filename into its own column.
df['label'] = df['files'].apply(lambda x: get_label(x))
df['landscape_likely'] = df['landscape']
df = df.drop(['landscape', 'unique_colors'], axis=1)
df[:10]
def general_img_features(band):
    """Compute summary statistics for one color band of an image.

    Args:
        band: array holding one channel (blue, green, or red)

    Returns:
        features: [unique value count, nonzero count, mean, standard
            deviation, min, max] of the channel's pixel values (6 values)
    """
    return [len(set(band.ravel())), np.count_nonzero(band),
            np.mean(band), np.std(band),
            band.min(), band.max()]
def concat_all_band_features(file, dir):
    """Extract per-band summary features from a single image.

    Args:
        file: single image filename
        dir: directory where the files are stored

    Returns:
        features: descriptive statistics for the pixels -- 6 values per band
            (see `general_img_features`), 18 in total, in B/G/R order
    """
    img = cv2.imread(os.path.join(dir, file))
    features = []
    blue = np.float32(img[:, :, 0])
    green = np.float32(img[:, :, 1])
    red = np.float32(img[:, :, 2])
    # Fix: the original comments claimed 5 features per band (0-4, 5-9,
    # 10-14); each band actually contributes 6 features.
    features.extend(general_img_features(blue))   # indices 0-5
    features.extend(general_img_features(green))  # indices 6-11
    features.extend(general_img_features(red))    # indices 12-17
    return features
# Let's see an example:
print(files[0] + '\n')
example = concat_all_band_features(files[0], TRAINING_DIR)
print(example)
# Apply it to our dataframe: 18 names, one per feature index (6 per band).
feature_names = ['blue_unique', 'blue_nonzero', 'blue_mean', 'blue_sd', 'blue_min', 'blue_max',
                 'green_unique', 'green_nonzero', 'green_mean', 'green_sd', 'green_min', 'green_max',
                 'red_unique', 'red_nonzero', 'red_mean', 'red_sd', 'red_min', 'red_max']
# Compute a series holding all band features as lists
band_features_series = df['files'].apply(lambda x: concat_all_band_features(x, TRAINING_DIR))
# Loop through lists and distribute them across new columns in the dataframe
for i in range(len(feature_names)):
    df[feature_names[i]] = band_features_series.apply(lambda x: x[i])
df[:10]
# Are these features good for finding cats?
# Let's look at some basic correlations.
df.corr().round(2)
# Fraction of the maximum Harris response above which a pixel is marked.
THRESHOLD = 0.05

def show_harris(filelist, dir, band=0, img_rows=4, img_cols=4, figsize=(20, 10)):
    """Display Harris corner detection for the first few images.

    Detected corners are painted red on a copy of each image.

    Args:
        filelist: list of filenames to pull from
        dir: directory where the files are stored
        band: channel to run the detector on (0 = 'blue', 1 = 'green', 2 = 'red')
        img_rows: number of rows of images to display
        img_cols: number of columns of images to display
        figsize: sizing for inline plots

    Returns:
        None
    """
    plt.close('all')
    fig = plt.figure(figsize=figsize)
    def plot_bands(src, band_img):
        # Closes over the loop variable `i` below; only called inside the loop.
        a = fig.add_subplot(img_rows, img_cols, i + 1)
        # Harris response with blockSize=2, ksize=3, k=0.04.
        dst = cv2.cornerHarris(band_img, 2, 3, 0.04)
        dst = cv2.dilate(dst, None)  # dilation makes the marks a little bigger
        # Threshold for an optimal value, it may vary depending on the image.
        new_img = src.copy()
        new_img[dst > THRESHOLD * dst.max()] = [0, 0, 255]
        # Note: openCV reverses the red-green-blue channels compared to matplotlib,
        # so we have to flip the image before showing it
        imgplot = plt.imshow(cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB))
    for i in range(img_rows * img_cols):
        img = cv2.imread(os.path.join(dir, filelist[i]))
        plot_bands(img, img[:, :, band])
    plt.show()

show_harris(files, TRAINING_DIR)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The equation of motion
Step2: For the case of a uniform magnetic field
Step3: Assuming $E_z = 0$ and $E_y = 0$
Step4: Motion is uniform along the $z$-axis
Step5: The constants of integration can be found from the initial conditions $z(0) = 0$ and $v_z(0) = v$
Step6: So that
Step7: Now, the equation for $y$ can be integrated
Step8: For initial conditions $x(0) = x_0, y'(0) = 0$
Step9: This equation can be substituted into the equation for $x$-coorditante
Step10: An expression for $E_x$ can be taken from the example on ribbon beam in free space $E_x = \dfrac{ 2 \pi I_0 }{v}$
Step11: This is an oscillator-type equation
Step12: It's solution is given by
Step13: From initial conditions $x(0) = x_0, v_0 = 0$
Step14: So that
Step15: Taking into account that
Step16: It is possible to rewrite the solution as
Step17: From the laws of motion for $x(t)$ and $z(t)$
Step18: it is possible to obtain a trajectory equation
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from sympy import *
init_printing()
# Field components, coordinates, velocities and particle constants as sympy
# symbols.  The q/(c*m) factors in the force terms below suggest Gaussian
# units -- TODO confirm against the source derivation.
Ex, Ey, Ez = symbols("E_x, E_y, E_z")
Bx, By, Bz, B = symbols("B_x, B_y, B_z, B")
x, y, z = symbols("x, y, z")
vx, vy, vz, v = symbols("v_x, v_y, v_z, v")
t = symbols("t")
q, m = symbols("q, m")
c, eps0 = symbols("c, epsilon_0")
# Newton-Lorentz equations of motion, one per Cartesian component.
eq_x = Eq( diff(x(t), t, 2), q / m * Ex + q / c / m * (vy * Bz - vz * By) )
eq_y = Eq( diff(y(t), t, 2), q / m * Ey + q / c / m * (-vx * Bz + vz * Bx) )
eq_z = Eq( diff(z(t), t, 2), q / m * Ez + q / c / m * (vx * By - vy * Bx) )
display( eq_x, eq_y, eq_z )
# Uniform magnetic field along the z-axis: B = (0, 0, B).
uni_mgn_subs = [ (Bx, 0), (By, 0), (Bz, B) ]
eq_x = eq_x.subs(uni_mgn_subs)
eq_y = eq_y.subs(uni_mgn_subs)
eq_z = eq_z.subs(uni_mgn_subs)
display( eq_x, eq_y, eq_z )
# Assume the electric field has only an x component (E_y = E_z = 0).
zero_EyEz_subs = [ (Ey, 0), (Ez, 0) ]
eq_x = eq_x.subs(zero_EyEz_subs)
eq_y = eq_y.subs(zero_EyEz_subs)
eq_z = eq_z.subs(zero_EyEz_subs)
display( eq_x, eq_y, eq_z )
# Solve the free z equation of motion (z'' = 0); vz_eq is its derivative.
z_eq = dsolve( eq_z, z(t) )
vz_eq = Eq( z_eq.lhs.diff(t), z_eq.rhs.diff(t) )
display( z_eq, vz_eq )
# Initial conditions z(0) = 0 and v_z(0) = v fix the integration constants.
z_0 = 0
v_0 = v
c1_c2_system = []
initial_cond_subs = [(t, 0), (z(0), z_0), (diff(z(t),t).subs(t,0), v_0) ]
c1_c2_system.append( z_eq.subs( initial_cond_subs ) )
c1_c2_system.append( vz_eq.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
# Substitute the constants back: uniform motion along z, z(t) = v*t.
z_sol = z_eq.subs( c1_c2 )
vz_sol = vz_eq.subs( c1_c2 )
display( z_sol, vz_sol )
# Express the velocities as time derivatives and integrate the y equation
# once from 0 to t.
v_as_diff = [ (vx, diff(x(t),t)), (vy, diff(y(t),t)), (vz, diff(z_sol.lhs,t)) ]
eq_y = eq_y.subs( v_as_diff )
eq_y = Eq( integrate( eq_y.lhs, (t, 0, t) ), integrate( eq_y.rhs, (t, 0, t) ) )
eq_y
# Initial conditions x(0) = x_0 and y'(0) = 0.
x_0 = Symbol('x_0')
vy_0 = 0
initial_cond_subs = [(x(0), x_0), (diff(y(t),t).subs(t,0), vy_0) ]
vy_sol = eq_y.subs( initial_cond_subs )
vy_sol
# Substitute y' into the x equation and group the magnetic-force terms.
eq_x = eq_x.subs( vy, vy_sol.rhs )
eq_x = Eq( eq_x.lhs, collect( expand( eq_x.rhs ), B *q / c / m ) )
eq_x
# Substitute the ribbon-beam field E_x = 2*pi*I_0 / v into the x equation.
I0 = symbols('I_0')
Ex_subs = [ (Ex, 2 * pi * I0 / v) ]
# Bug fix: the original called eq_x.subs(ex_subs) with the undefined
# lowercase name `ex_subs` (NameError); the list is called `Ex_subs`.
eq_x = eq_x.subs( Ex_subs )
eq_x
# Bug fix: the original defined `a, b, c = symbols(...)` *after* using `a`
# and `b` in eq_a/eq_b, which raises NameError when the cells run top to
# bottom; the definition is moved first.  (Re-binding the name `c` is
# harmless to earlier expressions: sympy symbols compare by name.)
a, b, c = symbols("a, b, c")
# Read off the oscillator coefficients from the x equation:
# x'' = a*x + b, with a the coefficient of x(t) and b the constant term.
eq_a = Eq(a, eq_x.rhs.expand().coeff(x(t), 1))
eq_b = Eq( b, eq_x.rhs.expand().coeff(x(t), 0) )
display( eq_a , eq_b )
# Generic oscillator-type equation x'' = -|a|*x + b and its general solution.
osc_eqn = Eq( diff(x(t),t,2), - abs(a)*x(t) + b)
display( osc_eqn )
osc_eqn_sol = dsolve( osc_eqn )
osc_eqn_sol
# Fix the oscillator constants from x(0) = x_0 and x'(0) = 0.
x_0 = symbols( 'x_0' )
v_0 = 0
c1_c2_system = []
initial_cond_subs = [(t, 0), (x(0), x_0), (diff(x(t),t).subs(t,0), v_0) ]
c1_c2_system.append( osc_eqn_sol.subs( initial_cond_subs ) )
osc_eqn_sol_diff = Eq( osc_eqn_sol.lhs.diff(t), osc_eqn_sol.rhs.diff(t) )
c1_c2_system.append( osc_eqn_sol_diff.subs( initial_cond_subs ) )
c1, c2 = symbols("C1, C2")
c1_c2 = solve( c1_c2_system, [c1, c2] )
c1_c2
x_sol = osc_eqn_sol.subs( c1_c2 )
x_sol
# NOTE(review): the substitution below assumes a < 0 so that |a| = -a;
# confirm against the sign of the x(t) coefficient in eq_x.
b_over_a = simplify( eq_b.rhs / abs( eq_a.rhs ).subs( abs( eq_a.rhs ), -eq_a.rhs ) )
Eq( b/abs(a), b_over_a )
# Introduce the gyrofrequency omega_g = q*B/(m*c) and amplitude A = b/|a| - x_0.
omega_g = symbols('omega_g')
eq_omega_g = Eq( omega_g, q * B / m / c )
A = symbols('A')
eq_A = Eq( A, b_over_a - x_0 )
subs_list = [ (b/abs(a), b_over_a), ( sqrt( abs(a) ), omega_g ), ( eq_A.rhs, eq_A.lhs) ]
x_sol = x_sol.subs( subs_list )
display( x_sol, eq_A, eq_omega_g )
display( x_sol, z_sol )
# Eliminate t using z(t) = v*t to obtain the trajectory x(z).
t_from_z = solve( z_sol.subs(z(t),z), t )[0]
x_z_traj = Eq( x_sol.lhs.subs( t, z ), x_sol.rhs.subs( [(t, t_from_z)] ) )
display( x_z_traj, eq_A, eq_omega_g )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We use OpenAIGym to create the environment.
Step2: To implement better exploration by the Actor network, we use noisy perturbations,
Step3: The Buffer class implements Experience Replay.
Step4: Here we define the Actor and Critic networks. These are basic Dense models
Step5: policy() returns an action sampled from our Actor network plus some noise for
Step6: Training hyperparameters
Step7: Now we implement our main training loop, and iterate over episodes.
Step8: If training proceeds correctly, the average episodic reward will increase with time.
|
<ASSISTANT_TASK:>
Python Code:
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
problem = "Pendulum-v0"
env = gym.make(problem)
num_states = env.observation_space.shape[0]
print("Size of State Space -> {}".format(num_states))
num_actions = env.action_space.shape[0]
print("Size of Action Space -> {}".format(num_actions))
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print("Max Value of Action -> {}".format(upper_bound))
print("Min Value of Action -> {}".format(lower_bound))
class OUActionNoise:
    """Ornstein-Uhlenbeck process noise.

    Produces temporally correlated samples, used to add exploration noise to
    the deterministic actor's actions.  Formula taken from
    https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
    """

    def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
        self.theta = theta
        self.mean = mean
        self.std_dev = std_deviation
        self.dt = dt
        self.x_initial = x_initial
        self.reset()

    def __call__(self):
        """Draw the next correlated noise sample."""
        drift = self.theta * (self.mean - self.x_prev) * self.dt
        diffusion = self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
        sample = self.x_prev + drift + diffusion
        # Remember this sample so the next draw depends on it.
        self.x_prev = sample
        return sample

    def reset(self):
        """Restart the process at x_initial (or zero when none was given)."""
        self.x_prev = self.x_initial if self.x_initial is not None else np.zeros_like(self.mean)
class Buffer:
    """Experience-replay buffer plus the DDPG update step.

    NOTE(review): `update`/`learn` read the module-level globals
    `actor_model`, `critic_model`, `target_actor`, `target_critic`,
    `actor_optimizer`, `critic_optimizer` and `gamma`, which are defined in
    later cells -- the buffer must only be used after those cells have run.
    """
    def __init__(self, buffer_capacity=100000, batch_size=64):
        # Number of "experiences" to store at max
        self.buffer_capacity = buffer_capacity
        # Num of tuples to train on.
        self.batch_size = batch_size
        # Tells us the number of times record() has been called.
        self.buffer_counter = 0
        # Instead of a list of tuples as in the classic experience-replay
        # formulation, we use a separate np.array per tuple element.
        self.state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))

    # Takes (s,a,r,s') observation tuple as input
    def record(self, obs_tuple):
        """Store one (state, action, reward, next_state) transition."""
        # Set index to zero if buffer_capacity is exceeded,
        # replacing old records (ring-buffer behaviour)
        index = self.buffer_counter % self.buffer_capacity
        self.state_buffer[index] = obs_tuple[0]
        self.action_buffer[index] = obs_tuple[1]
        self.reward_buffer[index] = obs_tuple[2]
        self.next_state_buffer[index] = obs_tuple[3]
        self.buffer_counter += 1

    # Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
    # TensorFlow to build a static graph out of the logic and computations in our function.
    # This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
    @tf.function
    def update(
        self, state_batch, action_batch, reward_batch, next_state_batch,
    ):
        """Run one gradient step on the critic, then one on the actor."""
        # Training and updating Actor & Critic networks.
        # See Pseudo Code.
        with tf.GradientTape() as tape:
            target_actions = target_actor(next_state_batch, training=True)
            # Bellman target: r + gamma * Q'(s', mu'(s'))
            y = reward_batch + gamma * target_critic(
                [next_state_batch, target_actions], training=True
            )
            critic_value = critic_model([state_batch, action_batch], training=True)
            critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
        critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
        critic_optimizer.apply_gradients(
            zip(critic_grad, critic_model.trainable_variables)
        )
        with tf.GradientTape() as tape:
            actions = actor_model(state_batch, training=True)
            critic_value = critic_model([state_batch, actions], training=True)
            # Used `-value` as we want to maximize the value given
            # by the critic for our actions
            actor_loss = -tf.math.reduce_mean(critic_value)
        actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
        actor_optimizer.apply_gradients(
            zip(actor_grad, actor_model.trainable_variables)
        )

    # We compute the loss and update parameters
    def learn(self):
        """Sample a random minibatch from the buffer and run `update`."""
        # Get sampling range
        record_range = min(self.buffer_counter, self.buffer_capacity)
        # Randomly sample indices
        batch_indices = np.random.choice(record_range, self.batch_size)
        # Convert to tensors
        state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
        action_batch = tf.convert_to_tensor(self.action_buffer[batch_indices])
        reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
        reward_batch = tf.cast(reward_batch, dtype=tf.float32)
        next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
        self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This updates the target parameters slowly,
# based on rate `tau`, which is much less than one (Polyak averaging).
@tf.function
def update_target(target_weights, weights, tau):
    """In-place soft update: target <- tau*online + (1 - tau)*target."""
    for (a, b) in zip(target_weights, weights):
        a.assign(b * tau + a * (1 - tau))

def get_actor():
    """Build the actor network mapping a state to a bounded action."""
    # Initialize weights between -3e-3 and 3e-3
    last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
    inputs = layers.Input(shape=(num_states,))
    out = layers.Dense(256, activation="relu")(inputs)
    out = layers.Dense(256, activation="relu")(out)
    outputs = layers.Dense(1, activation="tanh", kernel_initializer=last_init)(out)
    # Our upper bound is 2.0 for Pendulum.
    outputs = outputs * upper_bound
    model = tf.keras.Model(inputs, outputs)
    return model

def get_critic():
    """Build the critic network mapping (state, action) to a scalar Q-value."""
    # State as input
    state_input = layers.Input(shape=(num_states))
    state_out = layers.Dense(16, activation="relu")(state_input)
    state_out = layers.Dense(32, activation="relu")(state_out)
    # Action as input
    action_input = layers.Input(shape=(num_actions))
    action_out = layers.Dense(32, activation="relu")(action_input)
    # Both are passed through seperate layer before concatenating
    concat = layers.Concatenate()([state_out, action_out])
    out = layers.Dense(256, activation="relu")(concat)
    out = layers.Dense(256, activation="relu")(out)
    outputs = layers.Dense(1)(out)
    # Outputs single value for give state-action
    model = tf.keras.Model([state_input, action_input], outputs)
    return model

def policy(state, noise_object):
    """Sample an action: actor output plus exploration noise, clipped to bounds."""
    sampled_actions = tf.squeeze(actor_model(state))
    noise = noise_object()
    # Adding noise to action
    sampled_actions = sampled_actions.numpy() + noise
    # We make sure action is within bounds
    legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
    return [np.squeeze(legal_action)]
# Exploration-noise scale and process.
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
actor_model = get_actor()
critic_model = get_critic()
target_actor = get_actor()
target_critic = get_critic()
# Making the weights equal initially
target_actor.set_weights(actor_model.get_weights())
target_critic.set_weights(critic_model.get_weights())
# Learning rate for actor-critic models
critic_lr = 0.002
actor_lr = 0.001
critic_optimizer = tf.keras.optimizers.Adam(critic_lr)
actor_optimizer = tf.keras.optimizers.Adam(actor_lr)
total_episodes = 100
# Discount factor for future rewards
gamma = 0.99
# Used to update target networks
tau = 0.005
buffer = Buffer(50000, 64)
# To store reward history of each episode
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
# Takes about 4 min to train
for ep in range(total_episodes):
    prev_state = env.reset()
    episodic_reward = 0
    while True:
        # Uncomment this to see the Actor in action
        # But not in a python notebook.
        # env.render()
        tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
        action = policy(tf_prev_state, ou_noise)
        # Recieve state and reward from environment.
        state, reward, done, info = env.step(action)
        buffer.record((prev_state, action, reward, state))
        episodic_reward += reward
        # One learning step per environment step, then soft-update targets.
        buffer.learn()
        update_target(target_actor.variables, actor_model.variables, tau)
        update_target(target_critic.variables, critic_model.variables, tau)
        # End this episode when `done` is True
        if done:
            break
        prev_state = state
    ep_reward_list.append(episodic_reward)
    # Mean of last 40 episodes
    avg_reward = np.mean(ep_reward_list[-40:])
    print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
    avg_reward_list.append(avg_reward)
# Plotting graph
# Episodes versus Avg. Rewards
plt.plot(avg_reward_list)
plt.xlabel("Episode")
plt.ylabel("Avg. Epsiodic Reward")
plt.show()
# Save the weights
actor_model.save_weights("pendulum_actor.h5")
critic_model.save_weights("pendulum_critic.h5")
target_actor.save_weights("pendulum_target_actor.h5")
target_critic.save_weights("pendulum_target_critic.h5")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can solve the system by solving two back-substitution problems
Step2: Note that the numpy decomposition uses partial pivoting (matrix rows are permuted to use the largest pivot). This is because small pivots can lead to numerical instability. Another reason why one should use library functions whenever possible!
Step3: Cholesky decomposition is about twice as fast as LU decomposition (though both scale as $n^3$).
Step4: NB
Step5: Singular Values
Step6: Note that the tiny perturbations in the outcome vector $b$ cause large differences in the solution! When this happens, we say that the matrix $A$ ill-conditioned. This happens when a matrix is 'close' to being singular (i.e. non-invertible).
Step7: Preconditioning
Step8: 3. Write a function in Python to solve a system
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.linalg as la
np.set_printoptions(suppress=True)
# Hand-built LU factors of A (no pivoting): check that L @ U reproduces A.
A = np.array([[1,3,4],[2,1,3],[4,1,2]])
L = np.array([[1,0,0],[2,1,0],[4,11/5,1]])
U = np.array([[1,3,4],[0,-5,-5],[0,0,-3]])
print(L.dot(U))
print(L)
print(U)
import numpy as np
import scipy.linalg as la
np.set_printoptions(suppress=True)
A = np.array([[1,3,4],[2,1,3],[4,1,2]])
print(A)
# Library LU with partial pivoting: A = P @ L @ U, so P.T @ A == L @ U.
P, L, U = la.lu(A)
print(np.dot(P.T, A))
# Bug fix: a bare `print` (a Python 2 leftover) is just a reference to the
# function and prints nothing in Python 3; call it to emit a blank line.
print()
print(np.dot(L, U))
print(P)
print(L)
print(U)
# Cholesky factorization of a symmetric positive-definite matrix.
# NOTE: scipy.linalg.cholesky returns the *upper* triangular factor by
# default, hence the reconstruction below is L.T @ L (not L @ L.T).
A = np.array([[1,3,5],[3,13,23],[5,23,42]])
L = la.cholesky(A)
print(np.dot(L.T, L))
print(L)
print(A)
# Eigendecomposition: reconstruct A as V @ diag(u) @ inv(V).
A = np.array([[0,1,1],[2,1,0],[3,4,5]])
u, V = la.eig(A)
print(np.dot(V,np.dot(np.diag(u), la.inv(V))))
print(u)
# A rotation-like matrix whose eigenvalues are purely imaginary (+-i).
A = np.array([[0,1],[-1,0]])
print(A)
u, V = la.eig(A)
print(np.dot(V,np.dot(np.diag(u), la.inv(V))))
print(u)
# If you know the eigenvalues must be real
# because A is a positive definite (e.g. covariance) matrix
# use real_if_close
A = np.array([[0,1,1],[2,1,0],[3,4,5]])
u, V = la.eig(A)
print(u)
# Bug fix: the original used the Python 2 statement form
# `print np.real_if_close(u)`, which is a SyntaxError in Python 3.
print(np.real_if_close(u))
# An ill-conditioned system: tiny perturbations of b change x drastically.
A = np.array([[8,6,4,1],[1,4,5,1],[8,4,1,1],[1,4,3,6]])
b = np.array([19,11,14,14])
la.solve(A,b)
b = np.array([19.01,11.05,14.07,14.05])
la.solve(A,b)
# The condition number is the ratio of largest to smallest singular value.
U, s, V = np.linalg.svd(A)
print(s)
print(max(s)/min(s))
# Your code here
# Your code here
def svdsolver(A, b):
    """Solve the linear system A x = b through the SVD of A.

    Prints a warning and returns None when A is singular (some singular
    value is exactly zero); otherwise returns the solution vector.
    """
    left, sigma, right_h = np.linalg.svd(A)
    if np.prod(sigma) == 0:
        print("Matrix is singular")
        return None
    # x = V @ diag(1/sigma) @ U.T @ b; numpy returns V^H, so transpose back.
    inverse = right_h.T.dot(np.diag(1.0 / sigma)).dot(left.T)
    return inverse.dot(b)
# Sanity check: the SVD-based solver agrees with np.linalg.solve.
A = np.array([[1,1],[1,2]])
b = np.array([3,1])
print(np.linalg.solve(A,b))
print(svdsolver(A,b))
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.