| text_prompt (string, lengths 168–30.3k) | code_prompt (string, lengths 67–124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Encode and Decode Sample Drawings
Step2: Temperature Interpolation
Step3: Latent Space Interpolation
Stepping through latent space between two sample images ($z_0$ and $z_1$)
Step4: Unconditional (Decoder-Only) Generation
Step5: Generate sketches using random IID gaussian latent vectors
|
<ASSISTANT_TASK:>
Python Code:
# import the required libraries
import numpy as np
import time
import random
import cPickle
import codecs
import collections
import os
import math
import json
import tensorflow as tf
from six.moves import xrange
# libraries required for visualisation:
from IPython.display import SVG, display
import svgwrite # conda install -c omnia svgwrite=1.1.6
import PIL
from PIL import Image
import matplotlib.pyplot as plt
# set numpy output to something sensible
np.set_printoptions(precision=8, edgeitems=6, linewidth=200, suppress=True)
# tells which version of tensorflow is being used
tf.logging.info("TensorFlow Version: %s", tf.__version__)
# import command line tools
from magenta.models.sketch_rnn.sketch_rnn_train import *
from magenta.models.sketch_rnn.model import *
from magenta.models.sketch_rnn.utils import *
from magenta.models.sketch_rnn.rnn import *
# this function displays vector images, and saves them to .svg
# you can invoke the "draw_strokes" function anytime you want to render an image -
# specify source, destination filename, and random scale factor (defaults below)
def draw_strokes(data, svg_filename='sample.svg', factor=0.2):
    tf.gfile.MakeDirs(os.path.dirname(svg_filename))
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    lift_pen = 1
    abs_x = 25 - min_x
    abs_y = 25 - min_y
    p = "M%s,%s " % (abs_x, abs_y)
    command = "m"
    for i in xrange(len(data)):
        if (lift_pen == 1):
            command = "m"
        elif (command != "l"):
            command = "l"
        else:
            command = ""
        x = float(data[i, 0]) / factor
        y = float(data[i, 1]) / factor
        lift_pen = data[i, 2]
        p += command + str(x) + "," + str(y) + " "
    the_color = "black"
    stroke_width = 1
    dwg.add(dwg.path(p).stroke(the_color, stroke_width).fill("none"))
    dwg.save()
    display(SVG(dwg.tostring()))
# generate a 2D grid of many vector drawings
def make_grid_svg(s_list, grid_space=10.0, grid_space_x=16.0):
    def get_start_and_end(x):
        x = np.array(x)
        x = x[:, 0:2]
        x_start = x[0]
        x_end = x.sum(axis=0)
        x = x.cumsum(axis=0)
        x_max = x.max(axis=0)
        x_min = x.min(axis=0)
        center_loc = (x_max + x_min) * 0.5
        return x_start - center_loc, x_end
    x_pos = 0.0
    y_pos = 0.0
    result = [[x_pos, y_pos, 1]]
    for sample in s_list:
        s = sample[0]
        grid_loc = sample[1]
        grid_y = grid_loc[0] * grid_space + grid_space * 0.5
        grid_x = grid_loc[1] * grid_space_x + grid_space_x * 0.5
        start_loc, delta_pos = get_start_and_end(s)
        loc_x = start_loc[0]
        loc_y = start_loc[1]
        new_x_pos = grid_x + loc_x
        new_y_pos = grid_y + loc_y
        result.append([new_x_pos - x_pos, new_y_pos - y_pos, 0])
        result += s.tolist()
        result[-1][2] = 1
        x_pos = new_x_pos + delta_pos[0]
        y_pos = new_y_pos + delta_pos[1]
    return np.array(result)
# these global variables define the relative path to the pre-trained model
# and original dataset
data_dir = 'datasets/' # this is where your .npz file lives
models_root_dir = 'models/' # this is where trained models live
model_dir = 'models/sheep' # change "sheep" to whatever name you like
# note! you must create the "datasets/" and "models/" folders
# in the sketch_rnn directory before running this.
# you will also need to place model files (generated in training process)
# into the "models/sheep" (or whatever you've named it) folder -
# i.e. a checkpoint file, a model_config.json file, and vector data, index,
# and meta files too.
# note! model_dir value only handles two levels of recursion (i.e. models/sheep)
# subfolders break the next step (i.e. you can't do models/sheep/layer_norm)
# populates the above global variables throughout the sketch_rnn project files
[train_set, valid_set, test_set, hps_model, eval_hps_model, sample_hps_model] = load_env(data_dir, model_dir)
#construct the sketch-rnn model:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
def encode(input_strokes):
    strokes = to_big_strokes(input_strokes).tolist()
    strokes.insert(0, [0, 0, 1, 0, 0])
    seq_len = [len(input_strokes)]
    draw_strokes(to_normal_strokes(np.array(strokes)))
    return sess.run(eval_model.batch_z,
                    feed_dict={eval_model.input_data: [strokes],
                               eval_model.sequence_lengths: seq_len})[0]
def decode(z_input=None, temperature=0.1, factor=0.2):
    z = None
    if z_input is not None:
        z = [z_input]
    sample_strokes, m = sample(sess, sample_model, seq_len=eval_model.hps.max_seq_len,
                               temperature=temperature, z=z)
    strokes = to_normal_strokes(sample_strokes)
    return strokes
# get a sample drawing from the test set, and render it to .svg
example_drawing = test_set.random_sample()
draw_strokes(example_drawing)
#encode the sample drawing into latent vector z
z = encode(example_drawing)
# convert z back to drawing, using a "temperature" of 0.1
decoded_drawing = decode(z, temperature=0.1)
draw_strokes(decoded_drawing, 'sample3.svg', 0.2)
# specify the input source, the filename to save to (in the same directory as this notebook), and the random scale factor (default is 0.2)
# create a series of drawings stepping through various "temperatures" from 0.1 to 1.0
stroke_list = []
for i in range(10):
    stroke_list.append([decode(z, temperature=0.1 * i + 0.1), [0, i]])
stroke_grid = make_grid_svg(stroke_list)
draw_strokes(stroke_grid, 'sample-interp-temp.svg') #if two arguments are given to draw_strokes, they are input vector and output filename
#z0 is the first sample
z0 = z #use the random sample we'd already selected
decoded_drawing = decode(z0)
#each time it's decoded from the latent vector it's slightly different
draw_strokes(decoded_drawing)
#uses default file destination of 'sample.svg' and default random scale factor of 0.2
#z1 is the second sample
z1 = encode(test_set.random_sample()) #grab a new random sample and encode it
decoded_drawing2 = decode(z1) #then decode it
draw_strokes(decoded_drawing2)
#the top drawing is the encoded version, the bottom is the decoded version
z_list = [] # interpolate spherically between z0 and z1
N = 10 # change this number to add more steps
for t in np.linspace(0, 1, N):
    z_list.append(slerp(z0, z1, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
    reconstructions.append([decode(z_list[i]), [0, i]])
#draw the interpolation steps
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid, 'sample-interp1.svg')
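# For reference: the slerp() used above is imported from magenta.models.sketch_rnn.utils.
# The function below is only an illustrative sketch of spherical linear interpolation
# (not the library's exact implementation): it moves along the arc between the two
# latent vectors rather than averaging them linearly.
def slerp_example(p0, p1, t):
    # angle between the two latent vectors
    omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
    so = np.sin(omega)
    if so == 0:
        return (1.0 - t) * p0 + t * p1  # vectors are (anti)parallel: fall back to lerp
    return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1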
model_dir = '/tmp/sketch_rnn/models/flamingo/lstm_uncond'
[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
# randomly unconditionally generate 10 examples
N = 10
reconstructions = []
for i in range(N):
    reconstructions.append([decode(temperature=0.1), [0, i]])
#experiment with different temperature values to get more variety
#draw 10 examples
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid)
#other models available:
#model_dir = '/tmp/sketch_rnn/models/owl/lstm'
#model_dir = '/tmp/sketch_rnn/models/catbus/lstm'
model_dir = '/tmp/sketch_rnn/models/elephantpig/lstm'
[hps_model, eval_hps_model, sample_hps_model] = load_model(model_dir)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess, model_dir)
#randomly select a latent vector z_0
z_0 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_0)
draw_strokes(_)
#randomly select a second vector z_1
z_1 = np.random.randn(eval_model.hps.z_size)
_ = decode(z_1)
draw_strokes(_)
z_list = [] # interpolate spherically between z_0 and z_1
N = 10
for t in np.linspace(0, 1, N):
    z_list.append(slerp(z_0, z_1, t))
# for every latent vector in z_list, sample a vector image
reconstructions = []
for i in range(N):
    reconstructions.append([decode(z_list[i], temperature=0.1), [0, i]])
#draw the interpolation
stroke_grid = make_grid_svg(reconstructions)
draw_strokes(stroke_grid, 'sample-interp2.svg')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in the data into a data frame
Step2: How many players?
Step3: Total MLB payroll
Step4: Get a list of teams
Step5: Total payroll by team
Step6: Typical salary for DH
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('data/mlb.csv')
# use head to check it out
df.head()
df.count()
df.SALARY.sum()
df.TEAM.unique()
df[['TEAM', 'SALARY']].groupby('TEAM') \
.sum() \
.reset_index() \
.set_index('TEAM') \
.sort_values('SALARY', ascending=False)
# filter data for designated hitters
dh = df[df['POS'] == 'DH']
# get median
dh.SALARY.median()
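# Optional extension (not part of the original steps): the median is used above because
# salary distributions are heavily skewed. Computing the median for every position puts
# the DH figure in context; this assumes the same POS/SALARY columns loaded above.
df.groupby('POS').SALARY.median().sort_values(ascending=False)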
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The source dataset
Step3: Let's create a BigQuery client that we can use throughout the notebook.
Step4: Let's now examine the result of a BiqQuery call in a Pandas DataFrame using our newly created client.
Step6: First, let's get the set of all valid column names in the natality dataset. We can do this by accessing the INFORMATION_SCHEMA for the table from the dataset.
Step7: We can print our valid columns set to see all of the possible columns we have available in the dataset. Of course, you could also find this information by going to the Schema tab when selecting the table in the BigQuery UI.
Step10: Lab Task #1
Step12: Lab Task #2
Step13: Make a bar plot to see is_male with avg_wt linearly scaled and num_babies logarithmically scaled.
Step14: Make a bar plot to see mother_age with avg_wt linearly scaled and num_babies linearly scaled.
Step15: Make a bar plot to see plurality with avg_wt linearly scaled and num_babies logarithmically scaled.
Step16: Make a bar plot to see gestation_weeks with avg_wt linearly scaled and num_babies logarithmically scaled.
|
<ASSISTANT_TASK:>
Python Code:
%%bash
sudo pip freeze | grep google-cloud-bigquery==1.6.1 || \
sudo pip install google-cloud-bigquery==1.6.1
from google.cloud import bigquery
query = """
SELECT
  weight_pounds,
  is_male,
  mother_age,
  plurality,
  gestation_weeks,
  FARM_FINGERPRINT(
    CONCAT(
      CAST(YEAR AS STRING),
      CAST(month AS STRING)
    )
  ) AS hashmonth
FROM
  publicdata.samples.natality
WHERE
  year > 2000
"""
bq = bigquery.Client()
# Call BigQuery and examine in dataframe
df = bigquery.Client().query(query + " LIMIT 100").to_dataframe()
df.head()
# Query to get all column names within table schema
sql = """
SELECT
  column_name
FROM
  publicdata.samples.INFORMATION_SCHEMA.COLUMNS
WHERE
  table_name = "natality"
"""
# Send query through BigQuery client and store output to a dataframe
valid_columns_df = bq.query(sql).to_dataframe()
# Convert column names in dataframe to a set
valid_columns_set = valid_columns_df["column_name"].tolist()
print(valid_columns_set)
# TODO: Create function that gets distinct value statistics from BigQuery
def get_distinct_values(valid_columns_set, column_name):
    """Gets distinct value statistics of BigQuery data column.

    Args:
        valid_columns_set: set, the set of all possible valid column names in
            table.
        column_name: str, name of column in BigQuery.
    Returns:
        Dataframe of unique values, their counts, and averages.
    """
    assert column_name in valid_columns_set, (
        "{column_name} is not a valid column_name".format(
            column_name=column_name))
    sql = ""  # TODO: write the query here
    pass
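# Illustrative sketch only, not the official lab solution: one way the TODO above could
# be completed. get_distinct_values_example is a hypothetical helper; it groups the
# natality table by the chosen column and returns the num_babies / avg_wt statistics
# that the later plotting steps expect, reusing the bq client created earlier.
def get_distinct_values_example(column_name):
    sql = """
    SELECT
      {column_name},
      COUNT(1) AS num_babies,
      AVG(weight_pounds) AS avg_wt
    FROM
      publicdata.samples.natality
    WHERE
      year > 2000
    GROUP BY
      {column_name}
    """.format(column_name=column_name)
    return bq.query(sql).to_dataframe()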
# TODO: Create function that plots distinct value statistics from BigQuery
def plot_distinct_values(valid_columns_set, column_name, logy=False):
    """Plots distinct value statistics of BigQuery data column.

    Args:
        valid_columns_set: set, the set of all possible valid column names in
            table.
        column_name: str, name of column in BigQuery.
        logy: bool, if plotting counts in log scale or not.
    """
    pass
# TODO: Plot is_male
# TODO: Plot mother_age
# TODO: Plot plurality
# TODO: Plot gestation_weeks
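# Illustrative usage sketch (added; assumes the plotting TODO above has been completed):
# each of the four plot cells would call the helper on one column, for example
#   plot_distinct_values(valid_columns_set, "is_male", logy=True)
#   plot_distinct_values(valid_columns_set, "mother_age", logy=False)
#   plot_distinct_values(valid_columns_set, "plurality", logy=True)
#   plot_distinct_values(valid_columns_set, "gestation_weeks", logy=True)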
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utility functions
Step2: Read and clean SPINE data
Step3: Read and clean census data
Step4: Read and clean workforce data
Step5: Read and clean spending data
Step6: Read and clean deprivation data
Step7: Read and clean key stage 2 final data
Step8: Merge datasets
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
def is_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False
def is_float(value):
    try:
        float(value)
        return True
    except ValueError:
        return False
to_float = lambda x: float(x if is_float(x) else np.nan)
to_int = lambda x: int(x) if is_int(x) else np.nan
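# Quick illustration (added): these helpers coerce messy strings to numbers, e.g.
#   to_float('3.2') -> 3.2, to_float('n/a') -> nan, to_int('7') -> 7, to_int('') -> nan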
def clean_spine(file_path):
    def la_codes(file_path):
        la_codes = pd.read_csv(file_path, usecols=['LEA', 'LA Name'])
        la_codes.columns = ['la', 'la name']
        return la_codes
    la_codes = la_codes('/project/uk-schools-clustering/data/meta/la_and_region_codes_meta.csv')
    spine = pd.read_csv(
        file_path,
        usecols=['URN', 'LA', 'SCHNAME', 'LOCALITY', 'TOWN', 'POSTCODE'],
        dtype={
            'URN': 'object'
        }
    )
    spine['POSTCODE'] = spine['POSTCODE'].str.replace(' ', '')
    spine.columns = ['urn', 'la', 'name', 'locality', 'town', 'postcode']
    return pd.merge(spine, la_codes, on=['la']).drop_duplicates('urn')
clean_spine('/project/uk-schools-clustering/data/raw/2016-2017_england_spine.csv').sample(5)
def clean_census(file_path):
census = pd.read_csv(
file_path, usecols=['URN', 'NOR', 'NORG', 'NORB', 'NUMEAL', 'NUMENGFL', 'PNUMFSM'],
converters={
'NOR': to_int,
'NORG': to_int,
'NORB': to_int,
'NUMEAL': to_int,
'NUMENGFL': to_int,
'PNUMFSM': to_float
}
)
census['on free meal'] = (census['NORG']*census['PNUMFSM']) / 100
census['on free meal'] = round(census['on free meal'])
census.drop(inplace=True, columns=['PNUMFSM'])
census.columns = ['urn', 'total pupils on roll',
'girls on roll', 'boys on roll',
'english not first language', 'english first language', 'on free meal']
return census.drop_duplicates('urn')
clean_census('/project/uk-schools-clustering/data/raw/2016-2017_england_census.csv')
def clean_workforce(file_path):
clean_salary = lambda x : x.replace('£', '').replace(',','.')
workforce = pd.read_csv(
file_path,
usecols=['URN', 'Total Number of Teachers (Headcount)', 'Mean Gross FTE Salary of All Teachers'],
dtype={'URN': object},
converters={
'Total Number of Teachers (Headcount)': to_int,
'Mean Gross FTE Salary of All Teachers': lambda x: to_float(clean_salary(x))
}
)
workforce.columns = ['urn', 'teacher headcount', 'mean salary fte']
return workforce
clean_workforce('/project/uk-schools-clustering/data/raw/2016-2017_england_swf.csv')
def clean_spending(file_path):
clean_value = lambda x : x.replace(',','.')
to_float = lambda x: float(clean_value(x) if is_float(clean_value(x)) else np.nan)
spending = pd.read_csv(
file_path,
usecols=['URN', 'TOTALINCOME', 'TOTALEXPENDITURE'],
dtype={
'URN': 'object'
},
converters={
'TOTALINCOME': lambda x : to_float(x),
'TOTALEXPENDITURE': lambda x : to_float(x)
}
)
spending.columns = ['urn', 'total income pp', 'total expenditure pp']
return spending
clean_spending('/project/uk-schools-clustering/data/raw/2016-2017_england_cfr.csv')
def clean_deprivation(file_path):
deprivation = pd.read_csv(
file_path,
usecols=['Postcode', 'Income Score', 'Employment Score', 'IDACI Score'],
converters={
'Postcode' : lambda s : s.replace(' ', ''),
'Income Score': lambda x : to_float(x),
'Employment Score': lambda x : to_float(x),
'IDACI Score': lambda x : to_float(x)
}
)
deprivation.columns = ['postcode', 'income score', 'empl score', 'idaci score']
return deprivation
clean_deprivation('/project/uk-schools-clustering/data/raw/deprivation-by-postcode-2015.csv')
def clean_k2final(file_path):
def clean_percent(percent_str):
percent_candidate = percent_str.replace('%', '')
return to_float(percent_candidate) / 100
k2final = pd.read_csv(
file_path,
usecols=['URN', 'PTREAD_EXP',
'PTMAT_EXP', 'PTGPS_EXP', 'PTWRITTA_EXP',
'READ_AVERAGE', 'GPS_AVERAGE', 'MAT_AVERAGE'
],
converters={
'PTREAD_EXP' : clean_percent,
'PTMAT_EXP' : clean_percent,
'PTGPS_EXP' : clean_percent,
'PTWRITTA_EXP' : clean_percent,
'READ_AVERAGE' : to_int,
'GPS_AVERAGE' : to_int,
'MAT_AVERAGE' : to_int
}
)
k2final.rename(columns={
'URN':'urn',
'PTREAD_EXP': 'perc pupils meeting reading standard',
'PTMAT_EXP': 'perc pupils meeting math standard',
'PTGPS_EXP': 'perc pupils meeting grammar standard',
'PTWRITTA_EXP': 'perc pupils meeting writing standard',
'READ_AVERAGE': 'avg reading scaled score',
'GPS_AVERAGE': 'avg grammar scaled score',
'MAT_AVERAGE': 'avg math scaled score'
}, inplace=True)
return k2final
clean_k2final('/project/uk-schools-clustering/data/raw/2016-2017_england_ks2final.csv')
def get_data(save_to=None, columns=None):
    spine = clean_spine('/project/uk-schools-clustering/data/raw/2016-2017_england_spine.csv')
    census = clean_census('/project/uk-schools-clustering/data/raw/2016-2017_england_census.csv')
    workforce = clean_workforce('/project/uk-schools-clustering/data/raw/2016-2017_england_swf.csv')
    spending = clean_spending('/project/uk-schools-clustering/data/raw/2016-2017_england_cfr.csv')
    deprivation = clean_deprivation('/project/uk-schools-clustering/data/raw/deprivation-by-postcode-2015.csv')
    k2final = clean_k2final('/project/uk-schools-clustering/data/raw/2016-2017_england_ks2final.csv')
    result = pd.merge(spine, census, on=['urn'])
    result = pd.merge(result, deprivation, on=['postcode'])
    result = pd.merge(result, workforce, on=['urn'])
    result = pd.merge(result, spending, on=['urn'])
    result = pd.merge(result, k2final, on=['urn'])
    result.dropna(axis=0, subset=[
        'total income pp',
        'idaci score',
        'mean salary fte',
        'perc pupils meeting reading standard',
        'perc pupils meeting grammar standard',
        'perc pupils meeting math standard',
        'avg reading scaled score',
        'avg grammar scaled score',
        'avg math scaled score'
    ], how='any', inplace=True)
    # result.dropna(axis=0, how='all', inplace=True)
    if columns is None:
        columns_to_select = result.columns
    else:
        columns_to_select = columns
    if save_to is not None:
        result[columns_to_select].to_csv(save_to, index=False)
    return result[columns_to_select]
get_data(
'/project/uk-schools-clustering/data/derived/2016-2017_england.csv',
columns=['urn', 'name', 'english first language', 'girls on roll',
'english not first language','total income pp', 'total pupils on roll', 'on free meal',
'idaci score', 'teacher headcount','boys on roll', 'mean salary fte', 'total expenditure pp',
'income score', 'empl score', 'perc pupils meeting reading standard',
'perc pupils meeting math standard', 'perc pupils meeting grammar standard', 'perc pupils meeting writing standard',
'avg reading scaled score','avg grammar scaled score','avg math scaled score']
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Attribute Information
Step2: Find basic summary metrics for the train dataframe
Step3: *Where did the remaining columns go?*
Step4: Bi-variate plot
Step5: Multivariate plot
Step6: Multivariate plot
Step7: 4. Refine
Step8: 5. Model
Step9: Benchmark Model
Step10: First Model
Step11: Second Model
Step12: Third Model
Step13: Fourth Model
Step14: Fifth Model
Step15: Decision Trees are prone to overfitting!
Step16: Sixth Model
Step17: Let's change model parameters
Step18: Cross Validation
Step19: Find mean CV error
Step20: Run for different parameters, different models and find mean CV error and for different KFolds
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (12, 8)
# Load the train dataset
train = pd.read_csv("../Data/train.csv")
# Load the test dataset
test = pd.read_csv("../Data/test.csv")
# View the first 5 records of train
train.head()
# View the last 10 records of test
test.tail(10)
# List the attributes/feature names/columns in train dataset
train.columns
# List the attributes in test dataset.
test.columns
type(test.columns)
train.columns.values
test.columns.values
# Do they match with train?
[x in test.columns.values for x in train.columns.values]
train.dtypes
# Find unique values in deposit for train dataset
pd.unique(train.deposit)
# Find unique values in deposit for test dataset. Are they the same?
pd.unique(test.deposit)
pd.unique(test['month'])
# Find frequency of deposit in train dataset
train.deposit.value_counts()
# Find frequency of deposit in test dataset
test.deposit.value_counts()
type(train.deposit.value_counts())
# Is the distribution of deposit similar in train and test?
print("train:",train.deposit.value_counts()[1]/train.shape[0]*100)
print("test:",test.deposit.value_counts()[1]/test.shape[0]*100)
# Find number of rows and columns in train
train.shape
# Find number of rows and columns in test
test.shape
train.describe()
# Create labels: that has 0 for No and 1 for Yes in train dataset
labels = np.where(train.deposit=="no", 0, 1)
# Display number of 0 and 1 - check if it is the same as what we saw above?
np.unique(labels, return_counts=True)
train.head()
train.loc[:,['deposit','age']]
bivariate_plot_deposit_age = train.loc[:,["deposit", "age"]].copy()
bivariate_plot_deposit_age.head()
bivariate_plot_deposit_age.age.hist()
sns.stripplot(x="deposit", y = "age", data = bivariate_plot_deposit_age,
jitter = True, alpha = 0.1)
train.plot(kind="scatter", x = 'age', y = 'pdays', color = labels, alpha = 0.5, s=50)
train.plot(kind="scatter", x = 'day', y = 'duration', color = labels,
alpha = 0.5, s=50)
import sklearn
from sklearn import preprocessing
# Find the columns that are categorical
train.select_dtypes(include=['object'])
train_to_convert = train.select_dtypes(include=["object_"]).copy()
test_to_convert = test.select_dtypes(include=["object_"]).copy()
train_np = np.array(train_to_convert)
test_np = np.array(test_to_convert)
for i in range(train_np.shape[1]):
    lbl = preprocessing.LabelEncoder()
    lbl.fit(list(train_np[:, i]))
    train_np[:, i] = lbl.transform(train_np[:, i])
    test_np[:, i] = lbl.transform(test_np[:, i])
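# Note (added): each LabelEncoder is fit on the training categories only, so this step
# assumes every categorical value in the test set also appears in the training set;
# an unseen test category would make transform() raise an error.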
# Display train_np
train_np
# How would you transform test?
test_np
train_np
test_np
# Now, merge the numeric and encoded train variables into one single dataset
train_numeric = np.array(train.select_dtypes(exclude=["object_"]).copy())
train_numeric.shape
train_encoded = np.concatenate([train_numeric, train_np], axis=1)
# Now, merge the numeric and encoded test variables into one single dataset
test_numeric = np.array(test.select_dtypes(exclude=["object_"]).copy())
test_encoded = np.concatenate([test_numeric, test_np], axis=1)
# Create train X and train Y
xlen = train_encoded.shape[1]-1
train_encoded_X = train_encoded[:, :xlen]
train_encoded_Y = np.array(train_encoded[:, -1], dtype=float)
train_encoded_Y
# Create test X
test_encoded_X = test_encoded[:, :xlen]
# Create test Y
test_encoded_Y = np.array(test_encoded[:, -1], dtype=float)
model_allzero = test_encoded_Y.copy()
model_allzero = 0
# The mean square error on AllZero model
print("Mean Squared Error on all zero model: %.2f"
% (np.mean((model_allzero - test_encoded_Y) ** 2)*100))
from sklearn import linear_model
model_linear = linear_model.LinearRegression()
model_linear.fit(train_encoded_X, train_encoded_Y)
# The coefficients
print('Coefficients: \n', model_linear.coef_)
# Prediction
model_linear_prediction = model_linear.predict(test_encoded_X)
model_linear_prediction
model_linear_prediction = np.where(model_linear_prediction>0.5, 1, 0)
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_linear.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_linear.predict(test_encoded_X) - test_encoded_Y) ** 2)*100))
model_logistic_L2 = linear_model.LogisticRegression()
model_logistic_L2.fit(train_encoded_X, train_encoded_Y)
# The coefficients
print('Coefficients: \n', model_logistic_L2.coef_)
# Prediction
model_logistic_L2_prediction = model_logistic_L2.predict(test_encoded_X)
np.unique(model_logistic_L2_prediction)
np.sum(model_logistic_L2_prediction)
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_logistic_L2.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_logistic_L2_prediction - test_encoded_Y) ** 2)*100))
# Code here. Report evaulation
model_logistic_L1 = linear_model.LogisticRegression(penalty = 'l1')
model_logistic_L1.fit(train_encoded_X, train_encoded_Y)
# The coefficients
print('Coefficients: \n', model_logistic_L1.coef_)
# Prediction
model_logistic_L1_prediction = model_logistic_L1.predict(test_encoded_X)
np.unique(model_logistic_L1_prediction)
np.sum(model_logistic_L1_prediction)
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_logistic_L1.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_logistic_L1_prediction - test_encoded_Y) ** 2)*100))
model_logistic_L2C = linear_model.LogisticRegression(C = 2)
model_logistic_L2C.fit(train_encoded_X, train_encoded_Y)
# The coefficients
print('Coefficients: \n', model_logistic_L2C.coef_)
# Prediction
model_logistic_L2C_prediction = model_logistic_L2C.predict(test_encoded_X)
np.unique(model_logistic_L2C_prediction)
np.sum(model_logistic_L2C_prediction)
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_logistic_L2C.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_logistic_L2C_prediction - test_encoded_Y) ** 2)*100))
from sklearn import tree
from sklearn.externals.six import StringIO
# import pydot
model_DT = tree.DecisionTreeClassifier()
#Let's use only the first two columns as features for the model
model_DT.fit(train_encoded_X[:,1:3], train_encoded_Y)
# dot_data = StringIO()
# tree.export_graphviz(model_DT, out_file=dot_data)
# graph = pydot.graph_from_dot_data(dot_data.getvalue())
# graph.write_pdf("dt1.pdf")
# Prediction
model_DT_prediction = model_DT.predict(test_encoded_X[:,1:3])
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_DT.predict(train_encoded_X[:,1:3]) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_DT_prediction - test_encoded_Y) ** 2)*100))
model_DTAll = tree.DecisionTreeClassifier()
model_DTAll.fit(train_encoded_X, train_encoded_Y)
# Prediction
model_DTAll_prediction = model_DTAll.predict(test_encoded_X)
# The mean square error on train
print("Mean Squared Error on train: %.2f"
% (np.mean((model_DTAll.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Squared Error on test: %.2f"
% (np.mean((model_DTAll_prediction - test_encoded_Y) ** 2)*100))
from sklearn.ensemble import RandomForestClassifier
?RandomForestClassifier
model_RF = RandomForestClassifier()
model_RF.fit(train_encoded_X, train_encoded_Y)
# Prediction
model_RF_prediction = model_RF.predict(test_encoded_X)
# The mean square error on train
print("Mean Percentage Error on train: %.2f"
% (np.mean((model_RF.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Percentage Error on test: %.2f"
% (np.mean((model_RF_prediction - test_encoded_Y) ** 2)*100))
?RandomForestClassifier
model_RFMod = RandomForestClassifier(max_depth = 8, oob_score = True, n_estimators = 400 )
model_RFMod.fit(train_encoded_X, train_encoded_Y)
# Prediction
model_RFMod_prediction = model_RFMod.predict(test_encoded_X)
# The mean square error on train
print("Mean Percentage Error on train: %.2f"
% (np.mean((model_RFMod.predict(train_encoded_X) - train_encoded_Y) ** 2)*100))
# The mean square error on test
print("Mean Percentage Error on test: %.2f"
% (np.mean((model_RFMod_prediction - test_encoded_Y) ** 2)*100))
from sklearn.cross_validation import StratifiedKFold
?StratifiedKFold
skf = StratifiedKFold(train_encoded_Y, 5, random_state=1131, shuffle=True)
for train, test in skf:
    print("%s %s" % (train, test))
    print(train.shape, test.shape)
model_RF = RandomForestClassifier()
for k, (train, test) in enumerate(skf):
    model_RF.fit(train_encoded_X[train], train_encoded_Y[train])
    print("fold:", k + 1, model_RF.score(train_encoded_X[test], train_encoded_Y[test]))
cv_error = []
for k, (train, test) in enumerate(skf):
    model_RF.fit(train_encoded_X[train], train_encoded_Y[train])
    print(k)
    # score each fold's model on that fold's held-out rows
    cv_error.append(np.mean((model_RF.predict(train_encoded_X[test]) - train_encoded_Y[test]) ** 2) * 100)
cv_error
# code here
np.mean(cv_error)
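# Equivalent shortcut (illustrative, added): scikit-learn can compute per-fold scores
# directly. With this notebook's old sklearn.cross_validation API the default scoring is
# the classifier's accuracy rather than the squared-error percentage used above.
from sklearn.cross_validation import cross_val_score
cv_scores = cross_val_score(RandomForestClassifier(), train_encoded_X, train_encoded_Y, cv=skf)
print(cv_scores.mean())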
train = pd.read_csv("../Data/train.csv")
test = pd.read_csv("../Data/test.csv")
train_one_hot = pd.get_dummies(train)
train_one_hot.head()
test_one_hot = pd.get_dummies(test)
test_one_hot.head()
# Check if columns are the same
[x in test_one_hot.columns.values for x in train_one_hot.columns.values]
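# Optional safeguard (added, not in the original notebook): if the dummy-encoded train
# and test frames ever disagreed on columns, they could be aligned explicitly so both
# share exactly the training columns before modelling. With matching columns, as checked
# above, this is a no-op.
train_one_hot, test_one_hot = train_one_hot.align(test_one_hot, join='left', axis=1, fill_value=0)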
#Create train X , train Y, test X , test Y
train_X = train_one_hot.ix[:,:train_one_hot.shape[1]-2 ]
train_X.columns
train_Y = train_one_hot.ix[:, -1]
train_Y.head()
test_X = test_one_hot.ix[:,:test_one_hot.shape[1]-2 ]
test_Y = test_one_hot.ix[:, -1]
# Run Random Forest and check accuracy
model_RF = RandomForestClassifier(n_estimators=400, max_depth=8, oob_score=True, n_jobs=-1)
model_RF.fit(train_X, train_Y)
# Prediction
model_RF_prediction = model_RF.predict(test_X)
# The mean square error on train
print("Mean Percentage Error on train: %.2f"
% (np.mean((model_RF.predict(train_X) - train_Y) ** 2)*100))
# The mean square error on test
print("Mean Percentage Error on test: %.2f"
% (np.mean((model_RF_prediction - test_Y) ** 2)*100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cross validation
Step3: Valid options are ['accuracy', 'adjusted_rand_score', 'average_precision', 'f1', 'f1_macro', 'f1_micro', 'f1_samples', 'f1_weighted', 'log_loss', 'mean_absolute_error', 'mean_squared_error', 'median_absolute_error', 'precision', 'precision_macro', 'precision_micro', 'precision_samples', 'precision_weighted', 'r2', 'recall', 'recall_macro', 'recall_micro', 'recall_samples', 'recall_weighted', 'roc_auc']
Step4: Greedy selection of features
Step5: Madelon dataset
Step6: Univariate selection of features
Step7: Recursive feature selection
Step8: Regularization
Step9: Grid search for optimal parameters
Step10: Random Search
Step11: Lasso
Step12: Elasticnet
Step13: Stability selection
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn.datasets import load_boston
boston = load_boston()
dataset = pd.DataFrame(boston.data, columns=boston.feature_names)
dataset['target'] = boston.target
observations = len(dataset)
variables = dataset.columns[:-1]
X = dataset.ix[:,:-1]
y = dataset['target'].values
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=101)
print ("Train dataset sample size: %i" % len(X_train))
print ("Test dataset sample size: %i" % len(X_test))
X_train, X_out_sample, y_train, y_out_sample = train_test_split(X, y, test_size=0.40, random_state=101)
X_validation, X_test, y_validation, y_test = train_test_split(X_out_sample, y_out_sample, test_size=0.50, random_state=101)
print ("Train dataset sample size: %i" % len(X_train))
print ("Validation dataset sample size: %i" % len(X_validation))
print ("Test dataset sample size: %i" % len(X_test))
from sklearn.cross_validation import cross_val_score, KFold, StratifiedKFold
from sklearn.metrics import make_scorer
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
import numpy as np
def RMSE(y_true, y_pred):
    # note: despite the name, this returns the sum of squared errors, not a root mean square
    return np.sum((y_true - y_pred)**2)
lm = LinearRegression()
cv_iterator = KFold(n=len(X), n_folds=10, shuffle=True, random_state=101)
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
second_order=PolynomialFeatures(degree=2, interaction_only=False)
third_order=PolynomialFeatures(degree=3, interaction_only=True)
over_param_X = second_order.fit_transform(X)
extra_over_param_X = third_order.fit_transform(X)
cv_score = cross_val_score(lm, over_param_X, y, cv=cv_iterator, scoring='mean_squared_error', n_jobs=1)
print (cv_score)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
cv_score = cross_val_score(lm, over_param_X, y, cv=stratified_cv_iterator, scoring='mean_squared_error', n_jobs=1)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
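# Illustrative aside (added): the make_scorer imported above is what would turn the
# custom RMSE function into a scorer usable by cross_val_score, e.g.
#   custom_scorer = make_scorer(RMSE, greater_is_better=False)
#   cross_val_score(lm, over_param_X, y, cv=cv_iterator, scoring=custom_scorer)
# This is standard scikit-learn usage, not code from the original notebook.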
import random
def Bootstrap(n, n_iter=3, random_state=None):
    """
    Random sampling with replacement cross-validation generator.
    For each iter a sample bootstrap of the indexes [0, n) is
    generated and the function returns the obtained sample
    and a list of all the excluded indexes.
    """
    if random_state:
        random.seed(random_state)
    for j in range(n_iter):
        bs = [random.randint(0, n-1) for i in range(n)]
        out_bs = list({i for i in range(n)} - set(bs))
        yield bs, out_bs
boot = Bootstrap(n=10, n_iter=5, random_state=101)
for train_idx, validation_idx in boot:
print (train_idx, validation_idx)
import numpy as np
boot = Bootstrap(n=len(X), n_iter=10, random_state=101)
lm = LinearRegression()
bootstrapped_coef = np.zeros((10,13))
for k, (train_idx, validation_idx) in enumerate(boot):
    lm.fit(X.ix[train_idx, :], y[train_idx])
    bootstrapped_coef[k, :] = lm.coef_
print(bootstrapped_coef[:,10])
print(bootstrapped_coef[:,6])
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=3)
lm = LinearRegression()
lm.fit(X_train,y_train)
print ('Train (cases, features) = %s' % str(X_train.shape))
print ('Test (cases, features) = %s' % str(X_test.shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(X_train)))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(X_test)))
from sklearn.preprocessing import PolynomialFeatures
second_order=PolynomialFeatures(degree=2, interaction_only=False)
third_order=PolynomialFeatures(degree=3, interaction_only=True)
lm.fit(second_order.fit_transform(X_train),y_train)
print ('(cases, features) = %s' % str(second_order.fit_transform(X_train).shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(second_order.fit_transform(X_train))))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(second_order.fit_transform(X_test))))
lm.fit(third_order.fit_transform(X_train),y_train)
print ('(cases, features) = %s' % str(third_order.fit_transform(X_train).shape))
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(third_order.fit_transform(X_train))))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(third_order.fit_transform(X_test))))
try:
    import urllib.request as urllib2
except:
    import urllib2
import numpy as np
train_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.data'
validation_data = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_valid.data'
train_response = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/MADELON/madelon_train.labels'
validation_response = 'https://archive.ics.uci.edu/ml/machine-learning-databases/madelon/madelon_valid.labels'
try:
    Xt = np.loadtxt(urllib2.urlopen(train_data))
    yt = np.loadtxt(urllib2.urlopen(train_response))
    Xv = np.loadtxt(urllib2.urlopen(validation_data))
    yv = np.loadtxt(urllib2.urlopen(validation_response))
except:
    # In case downloading the data doesn't work,
    # just manually download the files into the working directory
    Xt = np.loadtxt('madelon_train.data')
    yt = np.loadtxt('madelon_train.labels')
    Xv = np.loadtxt('madelon_valid.data')
    yv = np.loadtxt('madelon_valid.labels')
print ('Training set: %i observations %i feature' % (Xt.shape))
print ('Validation set: %i observations %i feature' % (Xv.shape))
from scipy.stats import describe
print (describe(Xt))
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
def visualize_correlation_matrix(data, hurdle=0.0):
    R = np.corrcoef(data, rowvar=0)
    R[np.where(np.abs(R) < hurdle)] = 0.0
    heatmap = plt.pcolor(R, cmap=mpl.cm.coolwarm, alpha=0.8)
    heatmap.axes.set_frame_on(False)
    plt.xticks(rotation=90)
    plt.tick_params(axis='both', which='both', bottom='off', top='off', left='off',
                    right='off')
    plt.colorbar()
    plt.show()
visualize_correlation_matrix(Xt[:,100:150], hurdle=0.0)
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import LogisticRegression
logit = LogisticRegression()
logit.fit(Xt,yt)
from sklearn.metrics import roc_auc_score
print ('Training area under the curve: %0.3f' % roc_auc_score(yt,logit.predict_proba(Xt)[:,1]))
print ('Validation area under the curve: %0.3f' % roc_auc_score(yv,logit.predict_proba(Xv)[:,1]))
from sklearn.feature_selection import SelectPercentile, f_classif
selector = SelectPercentile(f_classif, percentile=50)
selector.fit(Xt,yt)
variable_filter = selector.get_support()
plt.hist(selector.scores_, bins=50, histtype='bar')
plt.grid()
plt.show()
variable_filter = selector.scores_ > 10
print ("Number of filtered variables: %i" % np.sum(variable_filter))
from sklearn.preprocessing import PolynomialFeatures
interactions = PolynomialFeatures(degree=2, interaction_only=True)
Xs = interactions.fit_transform(Xt[:,variable_filter])
print ("Number of variables and interactions: %i" % Xs.shape[1])
logit.fit(Xs,yt)
Xvs = interactions.fit_transform(Xv[:,variable_filter])
print ('Validation area Under the Curve before recursive selection: %0.3f' % roc_auc_score(yv,logit.predict_proba(Xvs)[:,1]))
# Execution time: 3.15 s
from sklearn.feature_selection import RFECV
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
lm = LinearRegression()
cv_iterator = KFold(n=len(X_train), n_folds=10, shuffle=True, random_state=101)
recursive_selector = RFECV(estimator=lm, step=1, cv=cv_iterator, scoring='mean_squared_error')
recursive_selector.fit(second_order.fit_transform(X_train),y_train)
print ('Initial number of features : %i' % second_order.fit_transform(X_train).shape[1])
print ('Optimal number of features : %i' % recursive_selector.n_features_)
a = second_order.fit_transform(X_train)
print (a)
essential_X_train = recursive_selector.transform(second_order.fit_transform(X_train))
essential_X_test = recursive_selector.transform(second_order.fit_transform(X_test))
lm.fit(essential_X_train, y_train)
print ('cases = %i features = %i' % essential_X_test.shape)
print ('In-sample mean squared error %0.3f' % mean_squared_error(y_train,lm.predict(essential_X_train)))
print ('Out-sample mean squared error %0.3f' % mean_squared_error(y_test,lm.predict(essential_X_test)))
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
essential_X = recursive_selector.transform(second_order.fit_transform(X))
cv_score = cross_val_score(lm, essential_X, y, cv=stratified_cv_iterator, scoring='mean_squared_error', n_jobs=1)
print ('Cv score: mean %0.3f std %0.3f' % (np.mean(np.abs(cv_score)), np.std(cv_score)))
from sklearn.linear_model import Ridge
ridge = Ridge(normalize=True)
# The following commented line is to show a logistic regression with L2 regularization
# lr_l2 = LogisticRegression(C=1.0, penalty='l2', tol=0.01)
ridge.fit(second_order.fit_transform(X), y)
lm.fit(second_order.fit_transform(X), y)
print ('Average coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.mean(lm.coef_), np.mean(ridge.coef_)))
print ('Min coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.min(lm.coef_), np.min(ridge.coef_)))
print ('Max coefficient: Non regularized = %0.3f Ridge = %0.3f' % (np.max(lm.coef_), np.max(ridge.coef_)))
from sklearn.grid_search import GridSearchCV
edges = np.histogram(y, bins=5)[1]
binning = np.digitize(y, edges)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
search = GridSearchCV(estimator=ridge, param_grid={'alpha':np.logspace(-4,2,7)}, scoring = 'mean_squared_error',
n_jobs=1, refit=True, cv=stratified_cv_iterator)
search.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search.best_score_))
search.grid_scores_
# Alternative: sklearn.linear_model.RidgeCV
from sklearn.linear_model import RidgeCV
auto_ridge = RidgeCV(alphas=np.logspace(-4,2,7), normalize=True, scoring = 'mean_squared_error', cv=None)
auto_ridge.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_ridge.alpha_)
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
search_func=RandomizedSearchCV(estimator=ridge, param_distributions={'alpha':np.logspace(-4,2,100)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
from sklearn.linear_model import Lasso
lasso = Lasso(alpha=1.0, normalize=True, max_iter=2*10**5)
#The following comment shows an example of L1 logistic regression
#lr_l1 = LogisticRegression(C=1.0, penalty='l1', tol=0.01)
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
stratified_cv_iterator = StratifiedKFold(binning, n_folds=10, shuffle=True, random_state=101)
search_func=RandomizedSearchCV(estimator=lasso, param_distributions={'alpha':np.logspace(-5,2,100)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
print ('Zero value coefficients: %i out of %i' % (np.sum(~(search_func.best_estimator_.coef_==0.0)),
len(search_func.best_estimator_.coef_)))
# Alternative: sklearn.linear_model.LassoCV
# Execution time: 54.9 s
from sklearn.linear_model import LassoCV
auto_lasso = LassoCV(alphas=np.logspace(-5,2,100), normalize=True, n_jobs=1, cv=None, max_iter=10**6)
auto_lasso.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_lasso.alpha_)
# Execution time: 1min 3s
from sklearn.linear_model import ElasticNet
elasticnet = ElasticNet(alpha=1.0, l1_ratio=0.15, normalize=True, max_iter=10**6, random_state=101)
from sklearn.grid_search import RandomizedSearchCV
from scipy.stats import expon
np.random.seed(101)
search_func=RandomizedSearchCV(estimator=elasticnet, param_distributions={'alpha':np.logspace(-5,2,100),
'l1_ratio':np.arange(0.0, 1.01, 0.05)}, n_iter=10,
scoring='mean_squared_error', n_jobs=1, iid=False, refit=True, cv=stratified_cv_iterator)
search_func.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % search_func.best_params_['alpha'])
print ('Best l1_ratio: %0.5f' % search_func.best_params_['l1_ratio'])
print ('Best CV mean squared error: %0.3f' % np.abs(search_func.best_score_))
print ('Zero value coefficients: %i out of %i' % (np.sum(~(search_func.best_estimator_.coef_==0.0)),
len(search_func.best_estimator_.coef_)))
# Alternative: sklearn.linear_model.ElasticNetCV
from sklearn.linear_model import ElasticNetCV
auto_elastic = ElasticNetCV(alphas=np.logspace(-5,2,100), normalize=True, n_jobs=1, cv=None, max_iter=10**6)
auto_elastic.fit(second_order.fit_transform(X), y)
print ('Best alpha: %0.5f' % auto_elastic.alpha_)
print ('Best l1_ratio: %0.5f' % auto_elastic.l1_ratio_)
from sklearn.cross_validation import cross_val_score
from sklearn.linear_model import RandomizedLogisticRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
threshold = 0.03
stability_selection = RandomizedLogisticRegression(n_resampling=300, n_jobs=1, random_state=101, scaling=0.15,
sample_fraction=0.50, selection_threshold=threshold)
interactions = PolynomialFeatures(degree=4, interaction_only=True)
model = make_pipeline(stability_selection, interactions, logit)
model.fit(Xt,yt)
print ('Number of features picked by stability selection: %i' % np.sum(model.steps[0][1].all_scores_ >= threshold))
from sklearn.metrics import roc_auc_score
print ('Area Under the Curve: %0.3f' % roc_auc_score(yv,model.predict_proba(Xv)[:,1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def single_text_to_ids(text, vocab_to_int, add_EOS):
    id_text = []
    for sentence in text.split('\n'):
        id_sentence = []
        for word in sentence.split():
            id_sentence.append(vocab_to_int[word])
        if add_EOS:
            id_sentence.append(vocab_to_int['<EOS>'])
        #print(sentence)
        #print(id_sentence)
        id_text.append(id_sentence)
    #print(id_text)
    return id_text
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """
    Convert source and target text to proper word ids
    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    # TODO: Implement Function
    #print(source_text)
    #print(target_text)
    #print(source_vocab_to_int)
    #print(target_vocab_to_int)
    source_id_text = single_text_to_ids(source_text, source_vocab_to_int, False)
    target_id_text = single_text_to_ids(target_text, target_vocab_to_int, True)
    return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate, keep probability)
    """
    # TODO: Implement Function
    input = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    learning_rate = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return input, targets, learning_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
    """
    Preprocess target data for decoding
    :param target_data: Target Placeholder
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :param batch_size: Batch Size
    :return: Preprocessed target data
    """
    # TODO: Implement Function
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
    return dec_input
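# Worked illustration (added, not part of the original notebook): for one target
# sentence [[10, 11, 12, <EOS id>]] and a <GO> id of, say, 1, the function above drops
# the last token of each row and prepends <GO>, giving [[1, 10, 11, 12]] as decoder input.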
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
    """
    Create encoding layer
    :param rnn_inputs: Inputs for the RNN
    :param rnn_size: RNN Size
    :param num_layers: Number of layers
    :param keep_prob: Dropout keep probability
    :return: RNN state
    """
    # TODO: Implement Function
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
    enc_cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
    _, enc_state = tf.nn.dynamic_rnn(enc_cell, rnn_inputs, dtype=tf.float32)
    return enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
#with tf.variable_scope("decoding") as decoding_scope:
# Training Decoder
train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
dec_cell, train_decoder_fn, dec_embed_input, sequence_length, scope=decoding_scope)
# Apply output function
train_logits = output_fn(train_pred)
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
# Inference Decoder
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size)
inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope)
return inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
# Decoder RNNs
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
lstm = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
dec_cell = tf.contrib.rnn.MultiRNNCell([lstm] * num_layers)
with tf.variable_scope("decoding") as decoding_scope:
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
#with tf.variable_scope("decoding") as decoding_scope:
train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
maximum_length = sequence_length - 1
inference_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
#Apply embedding to the input data for the encoder.
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
#Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
enc_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
#Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
#Apply embedding to the target data for the decoder.
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
#Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
train_logits, inference_logits = decoding_layer(dec_embed_input, dec_embeddings, enc_state, target_vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 20
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 200
decoding_embedding_size = 200
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.5
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
if batch_i % 10 == 0:
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
lower_sentence = sentence.lower()
id_seq = []
for word in lower_sentence.split():
id_seq.append(vocab_to_int.get(word, vocab_to_int['<UNK>']))
return id_seq
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Clean up
Step2: As is visible from the last column's (age_on_platform) data type, Pandas is not recognising it as a date type.
Step3: Validate that each email id is correctly formatted and that the email id really exists
Step4: Remove duplicates
Step5: Clustering using Mean shift
Step6: Binning based on age_on_platform
|
<ASSISTANT_TASK:>
Python Code:
%reset
# Import the required modules
import pandas as pd
import numpy as np
import scipy as sp
# simple function to read in the user data file.
# the argument parse_dates takes in a list of colums, which are to be parsed as date format
user_data_raw_csv = pd.read_csv("/home/eyebell/local_bin/janacare/janCC/datasets/Habits-Data_upto-7th-May.csv",\
parse_dates = [-3, -2, -1])
# import the pyexcel module
#import pyexcel as pe
#from pyexcel.ext import xls
# load the file
#records = pe.get_records(file_name="/home/eyebell/local_bin/janacare/datasets/Habits-Data_upto-7th-May.xls")
#len(records)
#for record in records:
#print record
# data metrics
user_data_raw_csv.shape # Rows , colums
# data metrics
user_data_raw_csv.dtypes # data type of colums
user_data_to_clean = user_data_raw_csv.copy()
# Some basic statistical information on the data
#user_data_to_clean.describe()
# Lets check the health of the data set
user_data_to_clean.info()
# Run a loop through the data frame and check each row for this anamoly, if found drop,
# this is being done ONLY for selected columns
import datetime
swapped_count = 0
first_login_count = 0
last_activity_count = 0
email_count = 0
userid_count = 0
for index, row in user_data_to_clean.iterrows():
if row.last_activity == pd.NaT or row.last_activity != row.last_activity:
last_activity_count = last_activity_count + 1
#print row.last_activity
user_data_to_clean.drop(index, inplace=True)
elif row.first_login > row.last_activity:
user_data_to_clean.drop(index, inplace=True)
swapped_count = swapped_count + 1
elif row.first_login != row.first_login or row.first_login == pd.NaT:
user_data_to_clean.drop(index, inplace=True)
first_login_count = first_login_count + 1
elif row.email != row.email: #or row.email == '' or row.email == ' ':
user_data_to_clean.drop(index, inplace=True)
email_count = email_count + 1
elif row.user_id != row.user_id:
user_data_to_clean.drop(index, inplace=True)
userid_count = userid_count + 1
print "last_activity_count=%d\tswapped_count=%d\tfirst_login_count=%d\temail_count=%d\tuserid_count=%d" \
% (last_activity_count, swapped_count, first_login_count, email_count, userid_count)
user_data_to_clean.shape
# Create new column 'age_on_platform' which has the corresponding value in date type format
user_data_to_clean["age_on_platform"] = user_data_to_clean["last_activity"] - user_data_to_clean["first_login"]
user_data_to_clean.info()
from validate_email import validate_email
email_count_invalid = 0
for index, row in user_data_to_clean.iterrows():
if not validate_email(row.email): # pass verify=True to also check that the email id actually exists
user_data_to_clean.drop(index, inplace=True)
email_count_invalid = email_count_invalid + 1
print "Number of email-id invalid: %d" % (email_count_invalid)
# Check the result of last operation
user_data_to_clean.info()
user_data_to_deDuplicate = user_data_to_clean.copy()
user_data_deDuplicateD = user_data_to_deDuplicate.loc[~user_data_to_deDuplicate.email.str.strip().duplicated()]
len(user_data_deDuplicateD)
user_data_deDuplicateD.info()
# Now its time to convert the timedelta64 data type column named age_on_platform to seconds
def convert_timedelta64_to_sec(td64):
ts = (td64 / np.timedelta64(1, 's'))
return ts
user_data_deDuplicateD_timedelta64_converted = user_data_deDuplicateD.copy()
temp_copy = user_data_deDuplicateD.copy()
user_data_deDuplicateD_timedelta64_converted.drop("age_on_platform", 1)
user_data_deDuplicateD_timedelta64_converted['age_on_platform'] = temp_copy['age_on_platform'].apply(convert_timedelta64_to_sec)
user_data_deDuplicateD_timedelta64_converted.info()
# Clustering using Kmeans, not working
'''
y = [1,1,5,6,1,5,10,22,23,23,50,51,51,52,100,112,130,500,512,600,12000,12230]
y_float = map(float, y)
x = range(len(y))
x_float = map(float, x)
m = np.matrix([x_float, y_float]).transpose()
from scipy.cluster.vq import kmeans
kclust = kmeans(m, 5)
kclust[0][:, 0]
assigned_clusters = [abs(cluster_indices - e).argmin() for e in x]
'''
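# The description above calls for Mean shift clustering; since the k-means attempt was
# abandoned, here is a minimal hedged sketch using scikit-learn's MeanShift on the
# age_on_platform values (still in seconds at this point). It assumes sklearn is installed,
# and the bandwidth quantile and sample size are illustrative choices, not part of the
# original analysis.
from sklearn.cluster import MeanShift, estimate_bandwidth
age_vals = user_data_deDuplicateD_timedelta64_converted['age_on_platform'].values.reshape(-1, 1)
bandwidth = estimate_bandwidth(age_vals, quantile=0.2, n_samples=500) # assumed parameters
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(age_vals)
print "Estimated number of Mean shift clusters: %d" % len(ms.cluster_centers_)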
user_data_binned = user_data_deDuplicateD_timedelta64_converted.copy()
# function to convert age_on_platform in seconds to hours
convert_sec_to_hr = lambda x: x/3600
user_data_binned["age_on_platform"] = user_data_binned['age_on_platform'].map(convert_sec_to_hr).copy()
# filter rows based on first_login value after 30th April
user_data_binned_post30thApril = user_data_binned[user_data_binned.first_login < datetime.datetime(2016, 4, 30)]
for index, row in user_data_binned_post30thApril.iterrows():
if row["age_on_platform"] < 25:
user_data_binned_post30thApril.set_value(index, 'bin', 1)
elif row["age_on_platform"] >= 25 and row["age_on_platform"] < 49:
user_data_binned_post30thApril.set_value(index, 'bin', 2)
elif row["age_on_platform"] >= 49 and row["age_on_platform"] < 169: #168 hrs = 1 week
user_data_binned_post30thApril.set_value(index, 'bin', 3)
elif row["age_on_platform"] >=169 and row["age_on_platform"] < 337: # 336 hrs = 2 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 4)
elif row["age_on_platform"] >=337 and row["age_on_platform"] < 505: # 504 hrs = 3 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 5)
elif row["age_on_platform"] >=505 and row["age_on_platform"] < 673: # 672 hrs = 4 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 6)
elif row["age_on_platform"] >=673 and row["age_on_platform"] < 1009: # 1008 hrs = 6 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 7)
elif row["age_on_platform"] >=1009 and row["age_on_platform"] < 1345: # 1344 hrs = 8 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 8)
elif row["age_on_platform"] >=1345 and row["age_on_platform"] < 2017: # 2016 hrs = 12 weeks
user_data_binned_post30thApril.set_value(index, 'bin', 9)
elif row["age_on_platform"] >=2017 and row["age_on_platform"] < 4381: # 4380 hrs = 6 months
user_data_binned_post30thApril.set_value(index, 'bin', 10)
elif row["age_on_platform"] >=4381 and row["age_on_platform"] < 8761: # 8760 hrs = 12 months
user_data_binned_post30thApril.set_value(index, 'bin', 11)
elif row["age_on_platform"] > 8761: # Rest, ie. beyond 1 year
user_data_binned_post30thApril.set_value(index, 'bin', 12)
else:
user_data_binned_post30thApril.set_value(index, 'bin', 0)
user_data_binned_post30thApril.info()
print "Number of users with age_on_platform equal to 1 day or less, aka 0th day = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 1])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 1].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_0day.csv", index=False)
print "Number of users with age_on_platform between 1st and 2nd days = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 2])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 2].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_1st-day.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 2 complete days and less than 1 week = %d" % \
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 3])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 3].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_1st-week.csv", index=False)
print "Number of users with age_on_platform between 2nd week = %d" % \
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 4])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 4].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_2nd-week.csv", index=False)
print "Number of users with age_on_platform between 3rd weeks = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 5])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 5].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_3rd-week.csv", index=False)
print "Number of users with age_on_platform between 4th weeks = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 6])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 6].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_4th-week.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 4 weeks and less than 6 weeks = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 7])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 7].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_4th-to-6th-week.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 6 weeks and less than 8 weeks = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 8])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 8].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_6th-to-8th-week.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 8 weeks and less than 12 weeks = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 9])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 9].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_8th-to-12th-week.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 12 weeks and less than 6 months = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 10])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 10].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_12thweek-to-6thmonth.csv", index=False)
print "Number of users with age_on_platform greater than or equal to 6 months and less than 1 year = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 11])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 11].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_6thmonth-to-1year.csv", index=False)
print "Number of users with age_on_platform greater than 1 year = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 12])
user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 12].to_csv\
("/home/eyebell/local_bin/janacare/janCC/datasets/user_retention_email-campaign/user_data_binned_post30thApril_beyond-1year.csv", index=False)
print "Number of users with age_on_platform is wierd = %d" %\
len(user_data_binned_post30thApril[user_data_binned_post30thApril.bin == 0])
# Save dataframe with binned values as CSV
#user_data_binned_post30thApril.to_csv('user_data_binned_post30thApril.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <b>Import dependencies. </b>
Step2: <b>Optionally, enable logging for debugging.</b>
Step3: <b> The previous step saved Sequence Examples into GCS. Let's examine the file size and location of the Sequence Examples we will use for the inference. </b>
Step4: <h2> 2. Deploy and Run ML Model on Cloud ML</h2>
Step5: <b>2b. List all the models deployed currently in the Cloud ML Engine</b>
Step6: <b>2c. Optionally run following cell to delete previously deployed model. </b>
Step7: <b>2d. Run following cell to create a new model if it does not exist </b>
Step8: <b> 2e. List versions of the Model</b>
Step9: <b> 2f. Run following cell to create a new version of the model. Increment the version number like v1, v2, v3 </b> <br />
Step10: <b> 2g. Run an inference job on CloudML engine </b>
Step11: <b>You can check the status of the job and other information on <a href="https
|
<ASSISTANT_TASK:>
Python Code:
import os
GCP_PROJECT = 'dp-workspace'
GCS_BUCKET = 'gs://cluster19-bkt'
GCS_REGION = 'us-central1'
os.putenv("REGION", GCS_REGION)
TF_RECORD_SEQEX = GCS_BUCKET+'/synthea/serv/seqex*'
os.putenv("SEQEX_IN_GCS", TF_RECORD_SEQEX)
MODEL_PATH = GCS_BUCKET+'/synthea/model/'
os.putenv("MODEL_IN_GCS", MODEL_PATH+"*")
SAVED_MODEL_PATH = MODEL_PATH + 'export'
os.putenv("SAVED_MODEL_IN_GCS", SAVED_MODEL_PATH+"*")
SERVING_DATASET = GCS_BUCKET+'/synthea/serv/seqex-00002-of-00003.tfrecords'
os.putenv("SERVING_DATASET", SERVING_DATASET)
INFERENCE_PATH = MODEL_PATH + 'infer'
os.putenv("INFERENCE_PATH", INFERENCE_PATH)
os.putenv("MODEL_NAME", "tf_fhir_los")
# from apache_beam.options.pipeline_options import PipelineOptions
# from apache_beam.options.pipeline_options import GoogleCloudOptions
# from apache_beam.options.pipeline_options import StandardOptions
# import apache_beam as beam
from tensorflow.core.example import example_pb2
import tensorflow as tf
import time
from proto import version_config_pb2
from proto.stu3 import fhirproto_extensions_pb2
from proto.stu3 import resources_pb2
from google.protobuf import text_format
from py.google.fhir.labels import label
from py.google.fhir.labels import bundle_to_label
from py.google.fhir.seqex import bundle_to_seqex
from py.google.fhir.models import model
from py.google.fhir.models.model import make_estimator
import logging
logger = logging.getLogger()
#logger.setLevel(logging.INFO)
logger.setLevel(logging.ERROR)
%bash
gsutil ls -l ${SEQEX_IN_GCS}
from py.google.fhir.models.model import get_serving_input_fn
hparams = model.create_hparams()
time_crossed_features = [
cross.split(':') for cross in hparams.time_crossed_features if cross
]
LABEL_VALUES = ['less_or_equal_3', '3_7', '7_14', 'above_14']
estimator = make_estimator(hparams, LABEL_VALUES, MODEL_PATH)
serving_input_fn = get_serving_input_fn(hparams.dedup, hparams.time_windows, hparams.include_age, hparams.categorical_context_features, hparams.sequence_features, time_crossed_features)
export_dir = estimator.export_savedmodel(SAVED_MODEL_PATH, serving_input_fn)
os.putenv("MODEL_BINARY", export_dir)
%%bash
gcloud ml-engine models list
%%bash
gcloud ml-engine versions delete v1 --model ${MODEL_NAME} -q
gcloud ml-engine models delete $MODEL_NAME -q
%%bash
gcloud ml-engine models create $MODEL_NAME --regions=$REGION
%%bash
gcloud ml-engine versions list --model ${MODEL_NAME}
%%bash
#gcloud ml-engine versions delete v1 --model ${MODEL_NAME} -q
gcloud ml-engine versions create v1 \
--model ${MODEL_NAME} \
--origin ${MODEL_BINARY} \
--runtime-version 1.12
%%bash
INFER_JOB_NAME="job_inf_$(date +%Y%m%d_%H%M%S)"
gcloud ml-engine jobs submit prediction $INFER_JOB_NAME --model $MODEL_NAME --version v1 --data-format tf-record --region $REGION --input-paths $SERVING_DATASET --output-path $INFERENCE_PATH
%%bash
gsutil cat ${INFERENCE_PATH}/prediction.results-00000-of-00001
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Prep
Step2: Exercise 1
Step3: Y-Axis Labels
Step4: Benefits and Liabilities
Step5: Findings
Step6: Findings
Step7: Exercise 6
|
<ASSISTANT_TASK:>
Python Code:
from k2datascience import movies
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
%matplotlib inline
mov = movies.BoxOffice()
print(f'Data Types:\n{mov.data.dtypes}\n\n')
print(f'Data Shape:\n{mov.data.shape}\n\n')
print(f'Missing Data:\n{mov.data.isnull().sum()}\n\n')
mov.data.head()
mov.data.tail()
mov.data.describe()
mov.distribution_plot()
mov.kde_plot()
mov.domestic_gross_vs_release_date_plot()
mov.domestic_gross_vs_runtime_plot()
mov.runtime_vs_release_plot()
mov.rating_plot()
mov.domestic_gross_rating_plot()
directors = mov.director_performance()
print('Top 10 Directors')
directors.head(10)
print('Top 10 Directors with more than one release')
directors.query('qty > 1').head(10)
mov.domestic_gross_vs_months()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data loading
Step2: Features
Step3: Labels
Step4: Data analysis
Step5: Histogram of #positive labels.
Step8: Dataset creation
Step9: Evaluation
Step11: Loss between a ground truth and a prediction.
Step12: Binary relevance baseline
Step13: Result analysis
Step15: Binary relevance with exponential loss
Step16: Check gradient.
Step18: Binary relevance with bipartite ranking
Step19: Check gradient.
Step22: Ranking loss
Step23: Check gradient
Step27: p-classification loss
Step28: Check gradient
Step29: Results for different hyper-parameter configurations
Step30: Top push loss
Step31: Speed test.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os, sys, time
import pickle as pkl
import numpy as np
import pandas as pd
import sklearn as sk
import cython
import itertools
from scipy.io import arff
from scipy.optimize import minimize
from scipy.optimize import check_grad
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt
import seaborn as sns
data_dir = 'data'
#yeast_ftrain = os.path.join(data_dir, 'yeast/yeast-train.arff')
#yeast_ftest = os.path.join(data_dir, 'yeast/yeast-test.arff')
#bibtex_ftrain = os.path.join(data_dir, 'bibtex/bibtex-train.arff')
#bibtex_ftest = os.path.join(data_dir, 'bibtex/bibtex-test.arff')
#fbookmarks = os.path.join(data_dir, 'bookmarks/bookmarks.arff')
mm_ftrain = os.path.join(data_dir, 'mediamill/mediamill-train.arff')
mm_ftest = os.path.join(data_dir, 'mediamill/mediamill-test.arff')
SEED = 123456789
#data_train, meta_train = arff.loadarff(yeast_ftrain)
#data_train, meta_train = arff.loadarff(open(bibtex_ftrain))
#data_bookmarks = arff.loadarff(open(fbookmarks))
data_train, meta_train = arff.loadarff(mm_ftrain)
#data_test, meta_test = arff.loadarff(yeast_ftest)
#data_test, meta_test = arff.loadarff(bibtex_ftest)
data_test, meta_test = arff.loadarff(mm_ftest)
type(data_train)
print(data_train[0])
len(list(data_train[0]))
len(list(data_train[0])[120:])
len(list(data_train[0])[:120])
#nFeatures = np.array(list(data_train[0])[:-14], dtype=np.float).shape[0]
nFeatures = np.array(list(data_train[0])[:120], dtype=np.float).shape[0]
print('#features:', nFeatures)
#np.array(list(data_train[0])[:-14], dtype=np.float)
#nLabels = np.array(list(data_train[0])[-14:], dtype=np.int).shape[0]
nLabels = np.array(list(data_train[0])[120:], dtype=np.int).shape[0]
print('#labels:', nLabels)
#np.array(list(data_train[0])[-14:], dtype=np.int)
print('#training examples:', len(data_train))
print('#test examples:', len(data_test))
#nPositives = [np.sum(np.array(list(data_train[ix])[-14:], dtype=np.int)) for ix in range(len(data_train))]
nPositives = [np.sum(np.array(list(data_train[ix])[120:], dtype=np.int)) for ix in range(len(data_train))]
pd.Series(nPositives).hist(bins=10)
def create_dataset(label_ix, data):
Create the labelled dataset for a given label index
Input:
- label_ix: label index, number in { 0, ..., # labels }
- data: original data with features + labels
Output:
- (Feature, Label) pair (X, y)
X comprises the features for each example
y comprises the labels of the corresponding example
assert(label_ix >= 0)
assert(label_ix < nLabels)
N = len(data)
d = nFeatures
#magic = -14
magic = 120
X = np.zeros((N, d), dtype = np.float)
y = np.zeros(N, dtype = np.int)
for i in range(N):
X[i, :] = list(data[i])[:magic]
y[i] = list(data[i])[magic:][label_ix]
return X, y
def create_dataset_v2(data):
Create the labelled dataset for a given label index
Input:
- data: original data with features + labels
Output:
- (Feature, Label) pair (X, y)
X comprises the features for each example
Y comprises the labels of the corresponding example
N = len(data)
D = nFeatures
L = nLabels
#magic = -14
magic = 120
X = np.zeros((N, D), dtype = np.float)
Y = np.zeros((N, L), dtype = np.int)
for i in range(N):
X[i, :] = list(data[i])[:magic]
Y[i, :] = list(data[i])[magic:]
return X, Y
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def evalPred(truth, pred, lossType = 'Hamming'):
Compute loss given ground truth and prediction
Input:
- truth: binary array of true labels
- pred: real-valued array of predictions
- lossType: can be subset 0-1, Hamming, ranking, and Precision@K where K = # positive labels.
assert(len(truth) == len(pred))
L = len(truth)
nPos = np.sum(truth)
predBin = np.array((pred > 0), dtype=np.int)
if lossType == 'Subset01':
return 1 - int(np.all(truth == predBin))
elif lossType == 'Hamming':
return np.sum(truth != predBin) / L
elif lossType == 'Ranking':
loss = 0
for i in range(L-1):
for j in range(i+1, L):
if truth[i] > truth[j]:
if pred[i] < pred[j]:
loss += 1
if pred[i] == pred[j]:
loss += 0.5
#return loss / (nPos * (L-nPos))
return loss
elif lossType == 'Precision@K':
# sorted indices of the labels most likely to be +'ve
idx = np.argsort(pred)[::-1]
# true labels according to the sorted order
y = truth[idx]
# fraction of +'ves in the top K predictions
return np.mean(y[:nPos]) if nPos > 0 else 0
elif lossType == 'Precision@3':
# sorted indices of the labels most likely to be +'ve
idx = np.argsort(pred)[::-1]
# true labels according to the sorted order
y = truth[idx]
# fraction of +'ves in the top K predictions
return np.mean(y[:3])
elif lossType == 'Precision@5':
# sorted indices of the labels most likely to be +'ve
idx = np.argsort(pred)[::-1]
# true labels according to the sorted order
y = truth[idx]
# fraction of +'ves in the top K predictions
return np.mean(y[:5])
else:
assert(False)
def avgPrecisionK(allTruths, allPreds):
losses = []
lossType = 'Precision@K'
for i in range(allPreds.shape[0]):
pred = allPreds[i, :]
truth = allTruths[i, :]
losses.append(evalPred(truth, pred, lossType))
return np.mean(losses)
def printEvaluation(allTruths, allPreds):
N = allTruths.shape[0]
print(N)
for lossType in ['Precision@K']:
# ['Subset01', 'Hamming', 'Ranking', 'Precision@K', 'Precision@3', 'Precision@5']:
losses = [ ]
for i in range(allPreds.shape[0]):
pred = allPreds[i, :]
truth = allTruths[i, :]
losses.append(evalPred(truth, pred, lossType))
#print(allPreds[i])
#print(pred)
#print(truth)
#break
#print('%24s: %1.4f' % ('Average %s Loss' % lossType, np.mean(losses)))
print('%s: %1.4f, %.3f' % ('Average %s' % lossType, np.mean(losses), np.std(losses) / np.sqrt(N)))
#plt.hist(aucs, bins = 10);
classifiers = [ LogisticRegression(class_weight = 'balanced', C = 10**0) for i in range(nLabels) ]
#classifiers = [ LogisticRegression(class_weight = 'balanced', C = 10) for i in range(nLabels) ]
allPreds_train = [ ]
allPreds_test = [ ]
allTruths_train = [ ]
allTruths_test = [ ]
coefMat = [ ]
labelIndices = [ ]
for label_ix in range(nLabels):
print('Training for Label %d' % (label_ix+1))
X_train, y_train = create_dataset(label_ix, data = data_train)
X_test, y_test = create_dataset(label_ix, data = data_test)
allTruths_train.append(y_train)
allTruths_test.append(y_test)
assert( (not np.all(y_train == 0)) and (not np.all(y_train == 1)) )
classifiers[label_ix].fit(X_train, y_train)
allPreds_train.append(classifiers[label_ix].decision_function(X_train))
allPreds_test.append(classifiers[label_ix].decision_function(X_test))
coefMat.append(classifiers[label_ix].coef_.reshape(-1))
#labelIndices.append(label_ix)
#print(classifiers[label_ix].coef_)
#print(classifiers[label_ix].intercept_)
allTruths_train = np.array(allTruths_train).T
allTruths_test = np.array(allTruths_test).T
allPreds_train = np.array(allPreds_train).T
allPreds_test = np.array(allPreds_test).T
print(allPreds_test.shape)
print(allTruths_test.shape)
#allPreds[0]
print('Training set:')
printEvaluation(allTruths_train, allPreds_train)
print('Test set:')
printEvaluation(allTruths_test, allPreds_test)
coefMat = np.array(coefMat).T
coefMat.shape
#sns.heatmap(coefMat[:, :30])
def obj_exp(w, X, y, C):
Objective with L2 regularisation and exponential loss
Input:
- w: current weight vector
- X: feature matrix, N x D
- y: label vector, N x 1
- C: regularisation constant
assert(len(y) == X.shape[0])
assert(len(w) == X.shape[1])
assert(C >= 0)
N, D = X.shape
J = 0.0 # cost
g = np.zeros_like(w) # gradient
for n in range(N):
x = X[n, :]
prod = np.dot(w, x)
# negative label
if y[n] == 0:
t1 = np.exp(prod)
J += t1
g = g + t1 * x
# positive label
else:
t2 = np.exp(-prod)
J += t2
g = g - t2 * x
J = 0.5 * C * np.dot(w, w) + J / N
g = C * w + g / N
return (J, g)
#%%script false
X_train_, y_train_ = create_dataset(3, data = data_train)
w0 = np.random.rand(X_train_.shape[1])
C = 1
check_grad(lambda w: obj_exp(w, X_train_, y_train_, C)[0], \
lambda w: obj_exp(w, X_train_, y_train_, C)[1], w0)
params = [ ]
allPreds_train = [ ]
allPreds_test = [ ]
allTruths_train = [ ]
allTruths_test = [ ]
np.random.seed(SEED)
C = 1
for label_ix in range(nLabels):
#sys.stdout.write('\r%d / %d' % (label_ix + 1, nLabels))
#sys.stdout.flush()
print('\r%d / %d ' % (label_ix + 1, nLabels))
X_train, y_train = create_dataset(label_ix, data = data_train)
X_test, y_test = create_dataset(label_ix, data = data_test)
allTruths_train.append(y_train)
allTruths_test.append(y_test)
assert( (not np.all(y_train == 0)) and (not np.all(y_train == 1)) )
opt_method = 'BFGS' #'Newton-CG'
#opt_method = 'nelder-mead'
options = {'disp': True}
w0 = np.random.rand(X_train.shape[1]) # initial guess
opt = minimize(obj_exp, w0, args=(X_train, y_train, C), method=opt_method, jac=True, options=options)
if opt.success == True:
w = opt.x
params.append(w)
#allPreds.append(sigmoid(np.dot(X_test, w)))
allPreds_train.append(np.dot(X_train, w))
allPreds_test.append(np.dot(X_test, w))
else:
sys.stderr.write('Optimisation failed, label_ix=%d\n' % label_ix)
w = np.zeros(X_train.shape[1])
params.append(w)
#allPreds_test.append(np.dot(X_test, w))
allTruths_train = np.array(allTruths_train).T
allTruths_test = np.array(allTruths_test).T
allPreds_train = np.array(allPreds_train).T
allPreds_test = np.array(allPreds_test).T
print(allPreds_test.shape)
print(allTruths_test.shape)
#allPreds[0]
print('Training set:')
printEvaluation(allTruths_train, allPreds_train)
print('Test set:')
printEvaluation(allTruths_test, allPreds_test)
#%load_ext Cython
#%%cython -a
import numpy as np
#cimport numpy as np
#cpdef obj_biranking(w, X, y):
def obj_biranking(w, X, y, C):
Objective with L2 regularisation and bipartite ranking loss
Input:
- w: current weight vector
- X: feature matrix, N x D
- y: label vector, N x 1
- C: regularisation constant
assert(len(y) == X.shape[0])
assert(len(w) == X.shape[1])
assert(C >= 0)
#cdef int nPos, nNeg, i, j
#cdef double J, term, denom
nPos = np.sum(y) # num of positive examples
nNeg = len(y) - nPos # num of negative examples
ixPos = np.nonzero(y)[0].tolist() # indices positive examples
ixNeg = list(set(np.arange(len(y))) - set(ixPos)) # indices negative examples
J = 0.0 # cost
g = np.zeros_like(w) # gradient
scorePos = X[ixPos, :].dot(w)[:,np.newaxis] # nPos x 1
scoreNeg = X[ixNeg, :].dot(w)[:,np.newaxis] # nNeg x 1
scoreDif = scorePos - scoreNeg.T # nPos x nNeg
#J = np.mean(np.log(1 + np.exp(-scoreDif)))
J = 0.5 * C * np.dot(w, w) + np.mean(np.log1p(np.exp(-scoreDif)))
A = -1/(1 + np.exp(scoreDif))
T1 = X[ixPos, :].T.dot(A.sum(axis = 1))
T2 = X[ixNeg, :].T.dot(A.sum(axis = 0))
g = C * w + 1/(nPos * nNeg) * (T1 - T2)
return (J, g)
X_train_, y_train_ = create_dataset(6, data = data_train)
#%%script false
w0 = w = np.random.rand(X_train_.shape[1])
C = 1
check_grad(lambda w: obj_biranking(w, X_train_, y_train_, C)[0], \
lambda w: obj_biranking(w, X_train_, y_train_, C)[1], w0)
#1.1331503772158218e-06 * np.sqrt(nLabels)
params = [ ]
allPreds_train = [ ]
allPreds_test = [ ]
allTruths_train = [ ]
allTruths_test = [ ]
np.random.seed(SEED)
C = 1
for label_ix in range(nLabels):
#sys.stdout.write('\r%d / %d' % (label_ix + 1, nLabels))
#sys.stdout.flush()
print('\r%d / %d ' % (label_ix + 1, nLabels))
X_train, y_train = create_dataset(label_ix, data = data_train)
X_test, y_test = create_dataset(label_ix, data = data_test)
allTruths_train.append(y_train)
allTruths_test.append(y_test)
assert( (not np.all(y_train == 0)) and (not np.all(y_train == 1)) )
opt_method = 'BFGS' #'Newton-CG'
#opt_method = 'nelder-mead'
options = {'disp': True}
w0 = np.random.rand(X_train.shape[1]) # initial guess
opt = minimize(obj_biranking, w0, args=(X_train, y_train, C), method=opt_method, jac=True, options=options)
if opt.success == True:
w = opt.x
params.append(w)
#allPreds.append(sigmoid(np.dot(X_test, w)))
allPreds_train.append(np.dot(X_train, w))
allPreds_test.append(np.dot(X_test, w))
else:
sys.stderr.write('Optimisation failed, label_ix=%d\n' % label_ix)
w = np.zeros(X_train.shape[1])
params.append(w)
allPreds_test.append(np.dot(X_test, w))
allTruths_train = np.array(allTruths_train).T
allTruths_test = np.array(allTruths_test).T
allPreds_train = np.array(allPreds_train).T
allPreds_test = np.array(allPreds_test).T
print(allPreds_test.shape)
print(allTruths_test.shape)
#allPreds[0]
print('Training set:')
printEvaluation(allTruths_train, allPreds_train)
print('Test set:')
printEvaluation(allTruths_test, allPreds_test)
def obj_ranking_loop(w, X, Y, C):
Objective with L2 regularisation and ranking loss
Input:
- w: current weight vector, flattened L x D
- X: feature matrix, N x D
- Y: label matrix, N x L
- C: regularisation constant
N, D = X.shape
L = Y.shape[1]
assert(w.shape[0] == L * D)
W = w.reshape(L, D) # reshape weight matrix
J = 0.0 # cost
G = np.zeros_like(W) # gradient matrix
for n in range(N):
Jn = 0.0
Gn = np.zeros_like(W)
x = X[n, :]
y = Y[n, :]
nPos = np.sum(y) # num of positive examples
nNeg = L - nPos # num of negative examples
denom = nPos * nNeg
ixPos = np.nonzero(y)[0].tolist() # indices positive examples
ixNeg = list(set(np.arange(L)) - set(ixPos)) # indices negative examples
for i in ixPos:
for j in ixNeg:
wDiff = W[i, :] - W[j, :]
sDiff = np.dot(wDiff, x)
term = np.exp(sDiff)
Jn += np.log1p(1.0 / term)
Gn[i, :] = Gn[i, :] - x / (1 + term)
#for j in ixNeg:
# for i in ixPos:
# wDiff = W[i, :] - W[j, :]
# sDiff = np.dot(wDiff, x)
# term = np.exp(sDiff)
Gn[j, :] = Gn[j, :] + x / (1 + term)
J += Jn / denom
G = G + Gn / denom
J = 0.5 * C * np.dot(w, w) + J / N
G = C * W + G / N
return (J, G.ravel())
#np.tile([1,2,3], (3,1)) * np.array([0.1, 0.2, 0.3])[:, None]
#np.tile([1,2,3], (3,1)) / np.array([0.1, 0.2, 0.3])[:, None]
#np.tile([1,2,3], (3,1)) * np.array([0.1, 0.2, 0.3])[:,]
#np.tile([1,2,3], (3,1)) / np.array([0.1, 0.2, 0.3])[:,]
def obj_ranking(w, X, Y, C):
Objective with L2 regularisation and ranking loss
Input:
- w: current weight vector, flattened L x D
- X: feature matrix, N x D
- Y: label matrix, N x L
- C: regularisation constant
N, D = X.shape
L = Y.shape[1]
assert(w.shape[0] == L * D)
W = w.reshape(L, D) # reshape weight matrix
J = 0.0 # cost
G = np.zeros_like(W) # gradient matrix
for n in range(N):
Jn = 0.0
Gn = np.zeros_like(W)
x = X[n, :]
y = Y[n, :]
nPos = np.sum(y) # num of positive examples
nNeg = L - nPos # num of negative examples
denom = nPos * nNeg
ixPos = np.nonzero(y)[0].tolist() # indices positive examples
ixNeg = list(set(np.arange(L)) - set(ixPos)) # indices negative examples
ixmat = np.array(list(itertools.product(ixPos, ixNeg))) # shape: ixPos*ixNeg by 2
dW = W[ixmat[:, 0], :] - W[ixmat[:, 1], :]
sVec = np.dot(dW, x)
Jn = np.sum(np.log1p(np.exp(-sVec)))
coeffVec = np.divide(1, 1 + np.exp(sVec))
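# coeffVec[r] = 1 / (1 + exp(s_pos - s_neg)) is the logistic-loss gradient weight of the
# r-th (positive, negative) label pair; the groupby sums below accumulate these weights
# per positive and per negative label, so each label's gradient row reduces to the
# feature vector x scaled by a signed coefficient (negative for positive labels).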
coeffPos = pd.DataFrame(coeffVec)
coeffPos['gid'] = ixmat[:, 0]
coeffPos = coeffPos.groupby('gid', sort=False).sum()
coeffNeg = pd.DataFrame(coeffVec)
coeffNeg['gid'] = ixmat[:, 1]
coeffNeg = coeffNeg.groupby('gid', sort=False).sum()
#print(coeffPos)
#print(coeffNeg)
coeffs = np.ones(L)
coeffs[ixPos] = -coeffPos.loc[ixPos].values.squeeze()
coeffs[ixNeg] = coeffNeg.loc[ixNeg].values.squeeze()
#print(coeffs)
Gn = np.tile(x, (L, 1)) * coeffs[:, None]
J += Jn / denom
G = G + Gn / denom
J = 0.5 * C * np.dot(w, w) + J / N
G = C * W + G / N
return (J, G.ravel())
X_train, Y_train = create_dataset_v2(data = data_train)
X_test, Y_test = create_dataset_v2(data = data_test)
#%%script false
C = 1
w0 = np.random.rand(nFeatures * nLabels)
check_grad(lambda w: obj_ranking(w, X_train[:10], Y_train[:10], C)[0], \
lambda w: obj_ranking(w, X_train[:10], Y_train[:10], C)[1], w0)
allTruths_train = Y_train
allTruths_test = Y_test
allPreds_train = None
allPreds_test = None
np.random.seed(SEED)
opt_method = 'BFGS' #'Newton-CG'
#opt_method = 'nelder-mead'
options = {'disp': True}
C = 1
w0 = np.random.rand(nFeatures * nLabels) # initial guess
opt = minimize(obj_ranking, w0, args=(X_train, Y_train, C), method=opt_method, jac=True, options=options)
if opt.success == True:
w = opt.x
#allPreds = sigmoid(np.dot(X_test, w.reshape(nLabels, nFeatures).T))
allPreds_train = np.dot(X_train, w.reshape(nLabels, nFeatures).T)
allPreds_test = np.dot(X_test, w.reshape(nLabels, nFeatures).T)
else:
sys.stderr.write('Optimisation failed')
print(allPreds_test.shape)
print(allTruths_test.shape)
print('Training set:')
printEvaluation(allTruths_train, allPreds_train)
print('Test set:')
printEvaluation(allTruths_test, allPreds_test)
def obj_pnorm_push_loop(w, X, Y, p, C):
Objective with L2 regularisation and p-norm push loss
Input:
- w: current weight vector, flattened L x D
- X: feature matrix, N x D
- Y: label matrix, N x L
- p: constant for p-norm push loss
- C: regularisation constant
N, D = X.shape
L = Y.shape[1]
assert(w.shape[0] == L * D)
assert(p >= 1)
assert(C >= 0)
W = w.reshape(L, D) # reshape weight matrix
J = 0.0 # cost
G = np.zeros_like(W) # gradient matrix
for n in range(N):
Gn = np.zeros_like(W)
x = X[n, :]
y = Y[n, :]
nPos = np.sum(y) # num of positive examples
nNeg = L - nPos # num of negative examples
for k in range(L):
wk = W[k, :]
term = np.dot(wk, x)
if y[k] == 1:
term2 = np.exp(-term) / nPos
J += term2
Gn[k, :] = -x * term2
else:
term2 = np.exp(p * term) / nNeg
J += term2 / p
Gn[k, :] = x * term2
G = G + Gn
J = 0.5 * C * np.dot(w, w) + J / N
G = C * W + G / N
return (J, G.ravel())
def obj_pnorm_push_loopn(w, X, Y, p, C):
Objective with L2 regularisation and p-norm push loss
Input:
- w: current weight vector, flattened L x D
- X: feature matrix, N x D
- Y: label matrix, N x L
- p: constant for p-norm push loss
- C: regularisation constant
N, D = X.shape
L = Y.shape[1]
assert(w.shape[0] == L * D)
assert(p >= 1)
assert(C >= 0)
W = w.reshape(L, D) # reshape weight matrix
J = 0.0 # cost
G = np.zeros_like(W) # gradient matrix
for n in range(N):
Gn = np.zeros_like(W)
x = X[n, :]
y = Y[n, :]
nPos = np.sum(y) # num of positive examples
nNeg = L - nPos # num of negative examples
ixPos = np.nonzero(y)[0].tolist() # indices positive examples
ixNeg = list(set(np.arange(L)) - set(ixPos)) # indices negative examples
scalingPos = np.exp( -np.dot(W[ixPos, :], x)) / nPos
scalingNeg = np.exp(p * np.dot(W[ixNeg, :], x)) / nNeg
Gn[ixPos, :] = np.tile(-x, (nPos,1)) * scalingPos[:, None] # scaling each row of a matrix
Gn[ixNeg, :] = np.tile( x, (nNeg,1)) * scalingNeg[:, None] # with a different scalar
J += np.sum(scalingPos) + np.sum(scalingNeg) / p
G = G + Gn
J = 0.5 * C * np.dot(w, w) + J / N
G = C * W + G / N
return (J, G.ravel())
def obj_pnorm_push(w, X, Y, p, C):
Objective with L2 regularisation and p-norm push loss
Input:
- w: current weight vector, flattened L x D
- X: feature matrix, N x D
- Y: label matrix, N x L
- p: constant for p-norm push loss
- C: regularisation constant
N, D = X.shape
L = Y.shape[1]
assert(w.shape[0] == L * D)
assert(p >= 1)
assert(C >= 0)
W = w.reshape(L, D) # reshape weight matrix
J = 0.0 # cost
G = np.zeros_like(W) # gradient matrix
for k in range(nLabels):
wk = W[k, :]
Yk = Y[:, k]
sPosVec = np.dot(X[Yk == 1, :], wk) # Nk+ by 1
sNegVec = np.dot(X[Yk == 0, :], wk) # NK- by 1
#nPosVec = np.sum(Y[Yk == 1, :], axis=1) # Nk+ by 1
#nNegVec = np.sum(Y[Yk == 0, :], axis=1) # NK- by 1
nPosVec = np.sum(Y[Yk == 1, :], axis=1) + 0.1 # Nk+ by 1 with smoothing
nNegVec = np.sum(Y[Yk == 0, :], axis=1) + 0.1 # NK- by 1 with smoothing
#nPosVec = np.ones_like(sPosVec) * N
#nNegVec = np.ones_like(sNegVec) * N
lossPos = np.divide(np.exp(-sPosVec), nPosVec) # NK+ by 1
lossNeg = np.divide(np.exp(p * sNegVec), nNegVec) # NK- by 1
J += np.sum(lossPos) + np.sum(lossNeg / p)
GradPos = -X[Yk == 1, :] * lossPos[:, None]
GradNeg = X[Yk == 0, :] * lossNeg[:, None]
G[k, :] = np.sum(GradPos, axis=0) + np.sum(GradNeg, axis=0)
J = 0.5 * C * np.dot(w, w) + J / N
G = C * W + G / N
return (J, G.ravel())
X_train, Y_train = create_dataset_v2(data = data_train)
X_test, Y_test = create_dataset_v2(data = data_test)
%%script false
p = 1
C = 1
w0 = np.random.rand(nFeatures * nLabels)
check_grad(lambda w: obj_pnorm_push(w, X_train, Y_train, p, C)[0], \
lambda w: obj_pnorm_push(w, X_train, Y_train, p, C)[1], w0)
allTruths_train = Y_train
allTruths_test = Y_test
allPreds_train = None
allPreds_test = None
np.random.seed(SEED)
p = 1 # [1, 10]
C = 1 # [0, 1]
opt_method = 'BFGS' #'Newton-CG'
#opt_method = 'nelder-mead'
options = {'disp': True}
w0 = np.random.rand(nFeatures * nLabels) # initial guess
opt = minimize(obj_pnorm_push, w0, args=(X_train, Y_train, p, C), method=opt_method, jac=True, options=options)
if opt.success == True:
w = opt.x
allPreds_train = np.dot(X_train, w.reshape(nLabels, nFeatures).T)
allPreds_test = np.dot(X_test, w.reshape(nLabels, nFeatures).T)
else:
sys.stderr.write('Optimisation failed')
print(allPreds_test.shape)
print(allTruths_test.shape)
print('Training set:')
printEvaluation(allTruths_train, allPreds_train)
print('Test set:')
printEvaluation(allTruths_test, allPreds_test)
#make_pipeline(preprocessing.StandardScaler(), svm.SVC(C=1))
#cross_val_score(clf, iris.data, iris.target, cv=cv)
#%%script false
precisions_train = dict()
precisions_test = dict()
allTruths_test = Y_test
allTruths_train = Y_train
p_set = [1, 3, 10, 30]
C_set = [0.1, 0.3, 1, 3, 10, 30, 100, 300, 1000, 3000]
opt_method = 'BFGS' #'Newton-CG'
for p in p_set:
for C in C_set:
print('-------------------------------------')
print('p in loss: {}, C for regularisation: {}'.format(p, C))
allPreds = None
allPreds_train = None
w0 = np.random.rand(nFeatures * nLabels) # initial guess
opt = minimize(obj_pnorm_push, w0, args=(X_train, Y_train, p, C), method=opt_method, jac=True)
if opt.success == True:
w = opt.x
allPreds_test = np.dot(X_test, w.reshape(nLabels, nFeatures).T)
allPreds_train = np.dot(X_train, w.reshape(nLabels, nFeatures).T)
precisions_train[(p,C)] = avgPrecisionK(allTruths_train, allPreds_train)
precisions_test[(p,C)] = avgPrecisionK(allTruths_test, allPreds_test)
else:
sys.stderr.write('Optimisation failed')
precisions_train[(p,C)] = 0
precisions_test[(p,C)] = 0
print('%20s %.4f' % ('Average Precision@K on training set: ', precisions_train[(p,C)]))
print('%20s %.4f\n' % ('Average Precision@K on test set: ', precisions_test[(p,C)]))
#%%script false
fig = plt.figure(figsize=[10, 8])
ax = plt.subplot(1,1,1)
#colors = itertools.cycle(['r', 'g'])
styles = itertools.cycle(['-', '--', ':', '-.'])
for p in p_set:
ls = styles.__next__()
plt.plot(np.arange(len(C_set)), [precisions_train[(p,C)] for C in C_set], \
ls=ls, c='r', label='p=%d'%p + ', train')
plt.plot(np.arange(len(C_set)), [precisions_test[(p,C)] for C in C_set], \
ls=ls, c='g', label='p=%d'%p + ', test')
plt.plot(np.arange(len(C_set)), [0.5149 for C in C_set], ls='-', c='b', label='Logistic Regression, test')
plt.legend(loc='best')
plt.xticks(np.arange(len(C_set)), C_set, fontsize=10, rotation=0, horizontalalignment='center')
plt.xlabel('Regularisation Constant')
plt.ylabel('Average Precision@K')
plt.title('Performance on Yeast dataset, multi-label learning with p-norm push loss', fontsize=15)
fig.savefig('pnorm.svg')
A = np.random.rand(15).reshape(3,5)
A
x = np.sum(A, axis=0)
#x
C = np.dot(A, A.T)
one = np.ones(A.shape[0])
np.dot(x, x)
np.sum(np.sum(C))
np.dot(np.dot(one, C), one)
D = np.dot(A.T, A)
np.sum(np.sum(D))
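# Note: with x = A.sum(axis=0) (the column sums), np.dot(x, x) equals the sum of all
# entries of C = A.dot(A.T), i.e. 1'AA'1, so the first three quantities above agree;
# summing D = A.T.dot(A) instead gives the sum of squared row totals, which is generally
# different. The %%timeit cells below compare the cheap column-sum shortcut against
# materialising the full Gram matrix first.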
A = np.random.rand(20000000).reshape(10000,2000)
#A = np.random.rand(10000000).reshape(2000,10000)
%%timeit
x = np.sum(A, axis=0)
np.dot(x, x)
%%timeit
C = np.dot(A, A.T)
np.sum(np.sum(C))
%%timeit
C = np.dot(A, A.T)
one = np.ones(A.shape[0])
np.dot(np.dot(one, C), one)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Case studies
Step2: Select tables[1], which is the second table on the page.
Step3: Not all researchers provide estimates for the same dates.
Step4: Some of the estimates are in a form Pandas doesn't recognize as numbers, but we can coerce them to be numeric.
Step5: Here are the results. Notice that we are working in millions now, not billions.
Step6: We can use xlim to zoom in on everything after Year 0.
Step7: See if you can find a model that fits these data well from Year 0 to 1950.
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
import os
filename = 'World_population_estimates.html'
if not os.path.exists(filename):
!wget https://raw.githubusercontent.com/AllenDowney/ModSimPy/master/data/World_population_estimates.html
from pandas import read_html
filename = 'World_population_estimates.html'
tables = read_html(filename, header=0, index_col=0, decimal='M')
len(tables)
table1 = tables[1]
table1.head()
table1.columns = ['PRB', 'UN', 'Maddison', 'HYDE', 'Tanton',
'Biraben', 'McEvedy & Jones', 'Thomlinson', 'Durand', 'Clark']
for col in table1.columns:
table1[col] = pd.to_numeric(table1[col], errors='coerce')
table1.plot()
decorate(xlim=[-10000, 2000], xlabel='Year',
ylabel='World population (millions)',
title='Prehistoric population estimates')
plt.legend(fontsize='small');
table1.plot()
decorate(xlim=[0, 2000], xlabel='Year',
ylabel='World population (millions)',
title='CE population estimates')
# Solution
# The function I found that best matches the data has the form
# a + b / (c - x)
# This function is hard to explain physically; that is, it doesn't
# correspond to a growth model that makes sense in terms of human
# behavior.
# And it implies that the population goes to infinity in 2040.
from numpy import linspace
from matplotlib.pyplot import plot
xs = linspace(100, 1950)
ys = 110 + 200000 / (2040 - xs)
table1.plot()
plot(xs, ys, color='gray', alpha=0.5, label='model')
decorate(xlim=[0, 2000], xlabel='Year',
ylabel='World population (millions)',
title='CE population estimates')
# Solution
# And it doesn't do a particularly good job of predicting
# actual growth from 1940 to the present.
# TODO: load these series from table2
#plot(census, ':', label='US Census')
#plot(un, '--', label='UN DESA')
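# Hedged sketch for the TODO above: tables[2] holds the post-1950 estimates. The column
# positions used here (0 = US Census Bureau, 2 = UN DESA) are assumptions -- check
# table2.head() before relying on them. Dividing by 1e9 converts persons to billions,
# matching the y-axis label below.
table2 = tables[2]
census = pd.to_numeric(table2.iloc[:, 0], errors='coerce') / 1e9
un = pd.to_numeric(table2.iloc[:, 2], errors='coerce') / 1e9
plot(census.index, census, ':', label='US Census')
plot(un.index, un, '--', label='UN DESA')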
xs = linspace(1940, 2020)
ys = 110 + 200000 / (2040 - xs)
plot(xs, ys/1000, color='gray', label='model')
decorate(xlim=[1950, 2016], xlabel='Year',
ylabel='World population (billions)',
title='Recent population estimates')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we'll use MDTraj's atom selection language to split out the protein and the ligand, which has residue name YYG in the input files. We're only interested in contacts between the protein and the ligand (not contacts within the protein). We'll also only look at heavy atom contacts.
Step2: Making and accessing a contact trajectory
Step3: Once the ContactTrajectory has been made, contacts for individual frames can be accessed either by taking the index of the ContactTrajectory itself, or by getting the list of contacts (e.g., all the residue contacts frame-by-frame) and selecting the frame of interest.
Step4: Advanced Python indexing is also allowed. In this example, note how the most common partners for YYG change! This is also what we see in the contact concurrences example.
Step5: We can easily turn the ContactTrajectory into a ContactFrequency
Step6: Rolling Contact Frequencies
Step7: Now we'll plot each windowed frequency, and we will see the transition as some contacts fade out and others grow in.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from contact_map import ContactTrajectory, RollingContactFrequency
import mdtraj as md
traj = md.load("data/gsk3b_example.h5")
print(traj) # to see number of frames; size of system
topology = traj.topology
yyg = topology.select('resname YYG and element != "H"')
protein = topology.select('protein and element != "H"')
contacts = ContactTrajectory(traj, query=yyg, haystack=protein)
contacts[0].residue_contacts.most_common()
contacts.residue_contacts[0].most_common()
for contact in contacts[50:80:4]:
print(contact.residue_contacts.most_common()[:3])
freq = contacts.contact_frequency()
fig, ax = plt.subplots(figsize=(5.5,5))
freq.residue_contacts.plot_axes(ax=ax)
RollingContactFrequency(contacts, width=30, step=14)
rolling_frequencies = contacts.rolling_frequency(window_size=30, step=14)
rolling_frequencies
fig, axs = plt.subplots(3, 2, figsize=(12, 10))
for ax, freq in zip(axs.flatten(), rolling_frequencies):
freq.residue_contacts.plot_axes(ax=ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
x = np.array([0, 1, 1, 1, 3, 1, 5, 5, 5])
y = np.array([0, 2, 3, 4, 2, 4, 3, 4, 5])
a = 1
b = 4
idx_list = ((x == a) & (y == b))
result = idx_list.nonzero()[0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Other Useful Packages (not used today)
Step2: Prepare another URL on your own
Step3: Where did the ? go?
Step4: View Returned Data
Step5: Ethics
Step6: Collect Our Data
Step7: Fix the zip codes
Step8: Pivot the data so age groups are columns
Step9: Now we have some zip codes!
Step10: Scraping
Step11: Use Find Feature to Narrow Your Search
Step12: 1. List the contents
Step13: Get All Links in the History Section
Step14: Use a for loop and scrape the first paragraph from a bunch of wikipedia articles
Step15: Back To Our Data
Step16: Subset down to the data we have
Step17: Create a dataframe from the new AQI data
Step18: Combine The Data
Step19: Look At The Data
Step20: Look At The Data
Step21: Look at particulates
Step22: Scatter Plot
Step23: Run a regression
Step24: Evaluate the model with some regression plots
Step25: Population confound
Step26: Fix zipcode column
Step27: Re-run Regression
Step28: Poisson Regression
Step29: Influence plot for outsized-effect of any observations
Step30: Diagnostic plots
Step31: SciKitLearn
Step32: Create and train the model
Step33: Look At Clusters
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import pickle
import statsmodels.api as sm
from sklearn import cluster
import matplotlib.pyplot as plt
%matplotlib inline
from bs4 import BeautifulSoup as bs
import requests
import time
# from ggplot import *
base_url = "http://www.mywebsite.com/data/api?"
attributes = ["key1=value1",
"key2=value2",
"API_KEY=39DC3727-09BD-XXXX-XXXX-XXXXXXXXXXXX"
]
post_url = '&'.join(attributes)
print(base_url+post_url)
base_url = "http://www.airnowapi.org/aq/observation/zipCode/historical/"
attributes = ["format=application/json",
"zipCode=20007",
"date=2017-09-05T00-0000",
"distance=25",
"API_KEY=39DC3727-09BD-48C4-BBD8-XXXXXXXXXXXX"
]
post_url = '&'.join(attributes)
print(base_url+post_url)
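# Answering "where did the ? go?": requests.get() takes the query string as its second
# (params) argument and inserts the '?' separator itself, which is why base_url is left
# without one.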
ingredients=requests.get(base_url, post_url)
ingredients = ingredients.json()
print(ingredients[0])
for item in ingredients:
AQIType = item['ParameterName']
City=item['ReportingArea']
AQIValue=item['AQI']
print("For Location ", City, " the AQI for ", AQIType, "is ", AQIValue)
time.sleep(1)
asthma_data = pd.read_csv('asthma-emergency-department-visit-rates-by-zip-code.csv')
asthma_data.head(2)
asthma_data[['zip','coordinates']] = asthma_data.loc[:,'ZIP code'].str.split(
pat='\n',expand=True)
asthma_data.drop('ZIP code', axis=1,inplace=True)
asthma_data.head(2)
asthma_unstacked = asthma_data.pivot_table(index = ['Year',
'zip',
'County',
'coordinates',
'County Fips code'],
columns = 'Age Group',
values = 'Number of Visits')
asthma_unstacked.reset_index(drop=False,inplace=True)
asthma_unstacked.head(2)
base_url = "http://www.airnowapi.org/aq/observation/zipCode/historical/"
zips = asthma_unstacked.zip.unique()
zips = zips[:450]
date ="date=2015-09-01T00-0000"
api_key = "API_KEY=39DC3727-09BD-48C4-BBD8-XXXXXXXXXXXX"
return_format = "format=application/json"
zip_str = "zipCode="
post_url = "&".join([date,api_key,return_format,zip_str])
data_dict = {}
for zipcode in zips:
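# sleep between requests so we do not hammer the AirNow API (the "Ethics" step above)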
time.sleep(1)
zip_post = post_url + str(zipcode)
ingredients = requests.get(base_url, zip_post)
ingredients = ingredients.json()
zip_data = {}
for data_point in ingredients:
AQIType = data_point['ParameterName']
AQIVal = data_point['AQI']
zip_data[AQIType] = AQIVal
data_dict[zipcode]= zip_data
base_url = "http://www.airnowapi.org/aq/observation/zipCode/historical/"
zips = asthma_unstacked.zip.unique()
date ="date=2015-09-{}T00-0000"
api_key = "API_KEY=XXX"
return_format = "format=application/json"
zip_str = "zipCode="
post_url = "&".join([date,api_key,return_format,zip_str])
# data_dict = {}
time.sleep(3600)
for d in ['01','05','10','15','20','25','30',]:
if d =='01':
ziplist = zips[449:]
else:
ziplist = zips
for zipcode in ziplist:
time.sleep(1)
zip_post = post_url.format(d) + str(zipcode)
ingredients = requests.get(base_url, zip_post)
try:
ingredients = ingredients.json()
except:
if ingredients.text[0:26]=='<WebServiceError><Message>':
time.sleep(3605)
else:
print(zipcode)
zip_data = {}
for data_point in ingredients:
AQIType = data_point['ParameterName']
AQIVal = data_point['AQI']
zip_data[AQIType] = AQIVal
zip_data['day'] = int(d)
data_dict[zipcode]= zip_data
pickle.dump(data_dict,open('ca_aqi_data.p','wb'))
for i, z in enumerate(zips):
if z==zipcode:
print(i)
len(data_dict)
pickle.dump(data_dict,open('AQI_data_raw.p','wb'))
ingredients = requests.get("https://en.wikipedia.org/wiki/Data_science")
soup = bs(ingredients.text)
print(soup.body.p)
parser_div = soup.find("div", class_="mw-parser-output")
wiki_content = parser_div.find_all('p')
print(wiki_content[1])
print('*****************************************')
print(wiki_content[1].text)
parser_div = soup.find("div", id="toc")
wiki_content = parser_div.find_all('ul')
for item in wiki_content:
print(item.text)
wiki_content = soup.find_all('a',href=True)
in_hist = False
links = []
for l in wiki_content:
link = l['href']
    if link == '/w/index.php?title=Data_science&action=edit&section=2':
in_hist = False
if in_hist:
links.append(link)
if link =="/w/index.php?title=Data_science&action=edit§ion=1":
in_hist = True
print(links)
topics = ['Data_scraping','Machine_learning','Statistics','Linear_algebra',
'Cluster_analysis','Scientific_modelling','Analysis','Linear_regression']
base_url = 'https://en.wikipedia.org/wiki/'
paragraphs = []
for topic in topics:
url = base_url.format(topic)
ingredients = requests.get(base_url+topic)
soup = bs(ingredients.text)
parser_div = soup.find("div", class_="mw-parser-output")
wiki_content = parser_div.find_all('p')
for p in range(10):
if len(wiki_content[p].text)>10:
paragraphs.append(wiki_content[p].text)
break
time.sleep(1)
print(dict(zip(topics,paragraphs)))
pickle.dump(data_dict,open('AQI_data_raw.p','wb'))
collected = list(data_dict.keys())
asthma_2015_sub = asthma_unstacked.loc[(asthma_unstacked.zip.isin(collected))&
(asthma_unstacked.Year == 2015),:]
aqi_data = pd.DataFrame.from_dict(data_dict, orient='index')
aqi_data.reset_index(drop=False,inplace=True)
aqi_data.rename(columns={'index':'zip'},inplace=True)
aqi_data.head()
asthma_aqi = asthma_2015_sub.merge(aqi_data,how='outer',on='zip')
asthma_aqi.rename(columns = {'Adults (18+)':'Adults',
'All Ages':'Incidents',
'Children (0-17)':'Children'},inplace=True)
asthma_aqi.head(2)
asthma_aqi.Incidents.plot.hist(20)
asthma_aqi.loc[:,['Incidents','OZONE']].plot.density()
asthma_aqi.loc[:,['PM2.5','PM10']].plot.hist()
asthma_aqi.plot.scatter('OZONE','PM2.5')
y =asthma_aqi.loc[:,'Incidents']
x =asthma_aqi.loc[:,['OZONE','PM2.5']]
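# statsmodels does not add an intercept automatically, so append a constant column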
x['c'] = 1
ols_model1 = sm.OLS(y,x,missing='drop')
results = ols_model1.fit()
print(results.summary())
pickle.dump([results,ols_model1],open('ols_model_results.p','wb'))
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(results, fig=fig)
ingredients = requests.get('https://www.california-demographics.com/zip_codes_by_population')
soup = bs(ingredients.text)
table = soup.find("table")
population = pd.read_html(str(table),flavor='html5lib')[0]
population.rename(columns=population.iloc[0],inplace=True)
population.drop(index=0,inplace=True)
population.head(2)
population[['zip','zip2']]=population.loc[:,'Zip Code'].str.split(
pat =' and ',
expand=True)
population.Population = population.Population.astype(np.float)
population.loc[population.zip2.notnull(),'Population']=population.loc[population.zip2.notnull(),'Population']/2
temp_pop = population.loc[population.zip2.notnull(),['Population','zip2']].copy()
temp_pop.rename(columns={'zip2':'zip'},inplace=True)
population = pd.concat([population.loc[:,['Population','zip']],
temp_pop],axis=0)
population.head(2)
asthma_aqi = asthma_aqi.merge(population,how='left',on='zip')
y =asthma_aqi.loc[:,'Adults']
x =asthma_aqi.loc[:,['OZONE','Population']]
x['c'] = 1
glm_model = sm.GLM(y,x,missing='drop',family=sm.families.Poisson())
ols_model2 = sm.OLS(y,x,missing='drop')
glm_results = glm_model.fit()
results = ols_model2.fit()
print(glm_results.summary())
pickle.dump([glm_results,glm_model],open('glm_model_pop_results.p','wb'))
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(results, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(results, ax=ax, criterion="cooks")
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(results, "OZONE", fig=fig)
model_df = asthma_aqi.loc[:,['OZONE','PM2.5','Incidents',]]
model_df.dropna(axis=0,inplace=True)
model_df = (model_df - model_df.mean()) / (model_df.max() - model_df.min())
asthma_air_clusters=cluster.KMeans(n_clusters = 3)
asthma_air_clusters.fit(model_df)
model_df['clusters3']=asthma_air_clusters.labels_
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
labels = asthma_air_clusters.labels_
ax.scatter(model_df.loc[:, 'PM2.5'], model_df.loc[:, 'OZONE'], model_df.loc[:, 'Incidents'],
c=labels.astype(np.float), edgecolor='k')
ax.set_xlabel('Particulates')
ax.set_ylabel('Ozone')
ax.set_zlabel('Incidents')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extend for the k-Winners simulation
Step2: Model accuracy with 1% of active neurons, with and without boosting
Step3: Extend to CNNs
Step4: Evaluate the role of boosting
Step5: Plot loss and accuracy for different betas and answer whether boosting helps or hurts
Step6: Exploration on how torch.expand is working in the background
Step7: Exploration on how to select kth winners in torch
|
<ASSISTANT_TASK:>
Python Code:
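# Assumed imports for this snippet (not shown in the original notebook); Dataset and
# KWinners come from the project's own modules, the rest are standard libraries.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from matplotlib import rcParams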
dataset = Dataset(config=dict(dataset_name='MNIST', data_dir='~/nta/datasets',
batch_size_train=256, batch_size_test=1024))
# torch cross_entropy is log softmax activation + negative log likelihood
loss_func = F.cross_entropy
# a custom Lambda module
class Lambda(nn.Module):
def __init__(self, func):
super().__init__()
self.func = func
def forward(self, x):
return self.func(x)
# simple feedforward model
# use a lambda layer to resize
model = nn.Sequential(
Lambda(lambda x: x.view(-1,28*28)),
nn.Linear(784,100),
nn.ReLU(),
nn.Linear(100,10),
)
# calculate accuracy
def accuracy(loader, num_batches=3):
len_dataset = loader.dataset.data.size()[0]
running_acc = 0
running_count = 0
    # do not cover the entire dataset. training is shuffled, testing is not
iter_loader = iter(loader)
for _ in range(num_batches):
x,y = next(iter_loader)
out = model(x)
preds = torch.argmax(out, dim=1)
running_acc += (preds == y).float().sum()
running_count += x.size()[0]
return running_acc.item() / running_count
# baseline
def fit(model, dataset, verbose=True, epochs=1, epoch_eval=True):
test_accuracies = []
losses = []
# dataset
train_loader = dataset.train_loader
test_loader = dataset.test_loader
# hyperparams
opt = optim.SGD(model.parameters(), lr=.01, momentum=0.9)
num_batches = 60
# training loop
print("Training Accuracy before training: {:.4f}".format(accuracy(train_loader)))
for epoch in range(epochs):
model.train()
iter_loader = iter(train_loader)
for i in range(num_batches):
x,y = next(iter_loader)
# calculate loss
loss = loss_func(model(x), y)
losses.append(loss.item())
# backpropagate
loss.backward()
# learn
opt.step()
opt.zero_grad()
if verbose:
if i % 20 == 0:
print("Loss: {:.8f}".format(loss.item()*1000/len(x)))
if epoch_eval:
model.eval()
test_acc = accuracy(test_loader)
test_accuracies.append(test_acc)
model.eval()
print("Training Accuracy after training: {:.4f}".format(accuracy(train_loader)))
print("Test Accuracy after training: {:.4f}".format(accuracy(test_loader)))
print("---------------------------")
return losses, test_accuracies
fit(model, dataset)
# from functions import KWinnersBatch as KWinners
from functions import KWinners
model_gen = lambda k: nn.Sequential(
Lambda(lambda x: x.view(-1,28*28)),
nn.Linear(784,100),
KWinners(k_perc=k),
nn.Linear(100,10),
)
model = model_gen(.1)
fit(model, dataset, epochs=1)
for k in np.arange(0.01,1,0.1):
print("K: %f" % k)
model = model_gen(k)
fit(model, dataset, verbose=False)
# no non-linearity required to get a low accuracy
model = nn.Sequential(
Lambda(lambda x: x.view(-1,28*28)),
nn.Linear(784,100),
nn.Linear(100,10),
)
fit(model, dataset)
# simple CNN Model
non_linearity = nn.ReLU
model = nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
non_linearity(),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
non_linearity(),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
non_linearity(),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
losses, accs = fit(model, dataset, epochs=20)
# kWinners
from functions import KWinners
model_gen = lambda k: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
model = model_gen(k=0.25)
kw_losses, kw_accs = fit(model, dataset, epochs=20)
# kWinners
from functions import KWinners
model_gen = lambda k: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k, use_absolute=False),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k, use_absolute=False),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k, use_absolute=False),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
model = model_gen(k=0.25)
kwp_losses, kwp_accs = fit(model, dataset, epochs=20)
plt.plot(kw_losses, label='kw_abs_losses')
plt.plot(kwp_losses, label='kw_pos_losses')
plt.plot(losses, label='losses')
plt.legend();
# kWinners without boosting
from functions import KWinners
model_gen = lambda k: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k, use_boosting=False),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k, use_boosting=False),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k, use_boosting=False),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
model = model_gen(k=0.1)
fit(model, dataset, epochs=1, epoch_eval=False);
# kWinners with boosting
from functions import KWinners
model_gen = lambda k: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k, use_boosting=True),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k, use_boosting=True),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k, use_boosting=True),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
model = model_gen(k=0.1)
fit(model, dataset, epochs=1, epoch_eval=False);
# Exploring several values for beta
from functions import KWinners
model_gen = lambda k,b: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k, use_boosting=True, beta=b),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k, use_boosting=True, beta=b),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k, use_boosting=True, beta=b),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
for b in [0, 0.01, 0.02, 0.05, 0.1]:
model = model_gen(k=0.1, b=b)
print("Beta: %f" % b)
fit(model, dataset, epochs=1, epoch_eval=False);
# Exploring several values for beta
from functions import KWinners
model_gen = lambda k,b: nn.Sequential(
nn.Conv2d(1,32, kernel_size=3, stride=2, padding=1), # 14x14
KWinners(k_perc=k, use_boosting=True, beta=b),
nn.Conv2d(32,64, kernel_size=3, stride=2, padding=1), # 7x7
KWinners(k_perc=k, use_boosting=True, beta=b),
nn.Conv2d(64,128, kernel_size=3, stride=2, padding=1), # 4x4
KWinners(k_perc=k, use_boosting=True, beta=b),
Lambda(lambda x: x.view(x.size(0), -1)), # 128
nn.Linear(128*4*4,10) # 10
)
betas, losses, accs = [], [] ,[]
for b in [0, 0.002, 0.01]:
model = model_gen(k=0.1, b=b)
print("Beta: %f" % b)
loss, acc = fit(model, dataset, epochs=50, epoch_eval=True, verbose=False);
betas.append(b)
losses.append(loss)
accs.append(acc)
rcParams['figure.figsize'] = (16,8)
for beta, loss in zip(betas, losses):
plt.plot(loss, label=str(beta))
plt.legend();
for beta, acc in zip(betas, accs):
plt.plot(acc, label=str(beta))
plt.legend();
# torch.expand / broadcasting experimentation
t = torch.randn((4,3,2))
t
b = torch.ones((3,2)) * 2
b
t.shape, b.shape
b.expand((4,3,2))
t * b.expand((4,3,2))
# topk experimentation
t = torch.randn((4,3,2))
t
tx = t.view(t.size()[0], -1)
print(tx.size())
val, _ = torch.kthvalue(tx, 1, dim=-1)
val
[t.size()[0]] + [1 for _ in range(len(t.size())-1)]
t.shape
(t > val.view(4,1,1)).sum(dim=0).shape
(t > val.view(4,1,1)).sum(dim=0)
t > val.view(4,1,1)
t.shape
val.view(4,1,1).shape
val.view(4,1,1)
val, ind = torch.topk(t, k=1, dim=2)
ind
ind.shape
t.shape
t[ind].shape
t.shape
# get indices
# apply indices
mask = torch.zeros_like(t)
mask.scatter(1, ind, 1.)
ind
t = torch.randn(4,4,3)
t
t.topk(2, dim=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Indexing
Step2: Play
Step3: Dimensions
Step4: Indexing
Step5: Play
Step6: ... have a look at y and predict what the next expression will output ...
Step7: Array functions
Step9: Play
Step10: Linear algebra
Step11: Other linear algebra functions
Step12: CSV data file example
Step13: HDF5 data file example
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.arange(5)
a
a.size
a.shape
a[0]
a[-1]
a[2:4]
a[2:]
a[:4]
a[:]
a[1::2]
a.max()
a.min()
a.sum()
a + 3
a * 5
a + 3 * 5
a + 15
(a + 3) * 5
a * 5 + 3
a * (5 + 3)
%matplotlib inline
import matplotlib.pyplot as plt
a = np.linspace(-np.pi, np.pi, 100)
plt.plot(a)
plt.plot(a, np.cos(a))
plt.plot(a, np.arctan(a))
a = np.random.random(15).reshape(3,5)
a
a.size
a.shape
a[0]
a[0,1]
a[0][1]
a[:,1]
a[1,1:3]
a[:,::2]
y = np.arange(35).reshape(5,7)
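# rows 1 and 3 (start 1, stop 5, step 2), every third column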
y[1:5:2,::3]
a
a.sum()
a.sum(axis=1)
a.sum(axis=0)
a = np.random.random(24).reshape(2,3,4)
a
a.shape
a.sum(axis=2)
a.sum()
a *= 7
a, a.sum()
a += 3
a, a.sum()
a.sum(axis=(1,2))
a.sum(axis=(0,1))
a.sum(axis=(0,1)).sum()
# from numpy.org
def my_func(arr):
    """Average first and last element of a 1-D array"""
return (arr[0] + arr[-1]) * 0.5
b = np.array([[1,2,3], [4,5,6], [7,8,9]])
np.apply_along_axis(my_func, 0, b)
np.apply_along_axis(my_func, 1, b)
a = np.arange(15).reshape(3,5)
a
a*a
a.transpose()
a.dot(a.transpose())
a.transpose().dot(a)
# from numpy.org
a = np.array([[1, 2, 3], [3, 4, 6.7], [5, 9.0, 5]])
b = np.array([3, 2, 1])
np.linalg.solve(a, b) # solve the equation ax = b
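# load the whitespace-delimited level-2 data product into a 2-D array (one row per record)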
data = np.genfromtxt('SCI_SICR_lv2_20031119_09000_0000_v1.0.dat')
data.shape
cols = 'lon0 lat0 lon1 lat1 lon2 lat2 lon3 lat3 lon_cent lat_cent dsr_time int_time sza lza H2O HDO N2O CO CH4 H2O_err HDO_err N2O_err CO_err CH4_err red.chisq dof eflag albedo cl_fr mean_elev st px bs'.split()
for i in range(len(cols)):
print(i, cols[i])
col_ix = cols.index('CH4')
col_ix
col_data = data.transpose()[col_ix]
print(col_data.shape)
for d in col_data[:10]:
print(type(d), d)
plt.plot(col_data)
plt.plot(col_data[:100])
plt.plot([d for d in col_data if d < 4e19])
import h5py
f = h5py.File('SCI_RPRO_L2__CO____20031119T191154_20031119T194955_09000_01_070200_20030115T002650.nc', 'r')
for group_name, group in f.items():
print(group_name, group)
if len(group) < 20:
for col in group:
print('+', col, group[col].shape)
print(f['/main_product/CO_column'].shape)
for d in f['/main_product/CO_column'][:10]:
print(d)
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(f['/main_product/CO_column'], color='green')
print(f['/instrument/time'].shape)
for d in f['/instrument/time'][0:9]:
print(d)
from datetime import datetime
times = [datetime(*d) for d in f['/instrument/time']]
for t in times[0:9]:
print(t)
plt.plot(times, f['/main_product/CO_column'], color='green')
plt.show()
ax1 = plt.axes()
ax1.plot(times, f['/main_product/CO_column'], color='green')
ax1.set_ylabel('CO')
ax2 = ax1.twinx()
ax2.plot(times, f['/side_product/H2O_column'], color='blue')
ax2.set_ylabel('H2O')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's choose PUM2 PWM (RBP in Human)
Step2: Visualization - PWM class
Step3: We can select the PWM with id 129.
Step4: Initialize the conv filters with PWM values
Step5: ci.PSSMKernelInitializer will set the filters of the first convolutional layer to the values of the position-specific scoring matrix (PSSM)
Step6: Test-set performance
Step7: Filter visualization
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
# RBP PWM's
from concise.data import attract
dfa = attract.get_metadata()
dfa
# TF PWM's
from concise.data import encode
dfe = encode.get_metadata()
dfe
# TF PWM's
from concise.data import hocomoco
dfh = hocomoco.get_metadata()
dfh
dfa_pum2 = dfa[dfa.Gene_name.str.match("PUM2") & \
dfa.Organism.str.match("Homo_sapiens")]
dfa_pum2
# Visualize the PUM2 Motifs from different experiments
from concise.utils.pwm import PWM
dfa_pum2_uniq = dfa_pum2[["Experiment_description", "PWM_id"]].drop_duplicates()
pwm_list = attract.get_pwm_list(dfa_pum2_uniq.PWM_id)
for i, pwm in enumerate(pwm_list):
print("PWM_id:", pwm.name, "; Experiment_description:", dfa_pum2_uniq.Experiment_description.iloc[i])
pwm.plotPWM(figsize=(3,1))
pwm_list = [pwm for pwm in pwm_list if pwm.name == "129"]
pwm_list
import concise.layers as cl
import keras.layers as kl
import concise.initializers as ci
import concise.regularizers as cr
from keras.callbacks import EarlyStopping
from concise.preprocessing import encodeDNA
from keras.models import Model, load_model
from keras.optimizers import Adam
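import pandas as pd  # assumed import: pd.read_csv is used below but pandas is not imported in this snippet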
# get the data
def load(split="train", st=None):
dt = pd.read_csv("../data/RBP/PUM2_{0}.csv".format(split))
# DNA/RNA sequence
xseq = encodeDNA(dt.seq) # list of sequences -> np.ndarray
# response variable
y = dt.binding_site.as_matrix().reshape((-1, 1)).astype("float")
return {"seq": xseq}, y
train, valid, test = load("train"), load("valid"), load("test")
# deduce sequence length
seq_length = train[0]["seq"].shape[1]
# define the model
def model(train, filters=1, kernel_size=9, pwm_list=None, lr=0.001):
seq_length = train[0]["seq"].shape[1]
if pwm_list is None:
kinit = "glorot_uniform"
binit = "zeros"
else:
kinit = ci.PSSMKernelInitializer(pwm_list, add_noise_before_Pwm2Pssm=True)
binit = "zeros"
# sequence
in_dna = cl.InputDNA(seq_length=seq_length, name="seq")
x = cl.ConvDNA(filters=filters,
kernel_size=kernel_size,
activation="relu",
kernel_initializer=kinit,
bias_initializer=binit,
name="conv1")(in_dna)
x = kl.AveragePooling1D(pool_size=4)(x)
x = kl.Flatten()(x)
x = kl.Dense(units=1)(x)
m = Model(in_dna, x)
m.compile(Adam(lr=lr), loss="binary_crossentropy", metrics=["acc"])
return m
# create two models: with and without PWM initialization
m_rand_init = model(train, filters=3, pwm_list=None) # random initialization
m_pwm_init = model(train, filters=3, pwm_list=pwm_list) # motif initialization
print("Random initialization:")
m_rand_init.get_layer("conv1").plot_weights(figsize=(3, 5));
print("Known PWM initialization:")
m_pwm_init.get_layer("conv1").plot_weights(figsize=(3, 5));
# train the models
m_rand_init.fit(train[0], train[1], epochs=50, validation_data=valid,
verbose=0,
callbacks=[EarlyStopping(patience=5)])
m_pwm_init.fit(train[0], train[1], epochs=50, validation_data=valid,
verbose=0,
callbacks=[EarlyStopping(patience=5)]);
import concise.eval_metrics as cem
# performance on the test-set
# Random initialization
print("Random intiailzation auPR:", cem.auprc(test[1], m_rand_init.predict(test[0])))
# PWM initialization
print("Known PWM initialization auPR:", cem.auprc(test[1], m_pwm_init.predict(test[0])))
m_rand_init.get_layer("conv1").plot_weights(plot_type="motif_pwm_info", figsize=(3, 5));
m_pwm_init.get_layer("conv1").plot_weights(plot_type="motif_pwm_info", figsize=(3, 5));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll create an instance of RadarServer to point to the appropriate radar server access URL.
Step2: Next, we'll create a new query object to help request the data. Using the chaining methods, let's ask for the latest data at the radar KLVX (Louisville, KY). We see that when the query is represented as a string, it shows the encoded URL.
Step3: We can use the RadarServer instance to check our query, to make sure we have required parameters and that we have chosen valid station(s) and variable(s)
Step4: Make the request, which returns an instance of TDSCatalog; this handles parsing the returned XML information.
Step5: We can look at the datasets on the catalog to see what data we found by the query. We find one volume in the return, since we asked for the volume nearest to a single time.
Step6: We can pull that dataset out of the dictionary and look at the available access URLs. We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download).
Step7: We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL.
Step8: We define some helper functions to make working with the data easier. One takes the raw data and converts it to floating point values with the missing data points appropriately marked. The other helps with converting the polar coordinates (azimuth and range) to Cartesian (x and y).
Step9: The CDMRemote reader provides an interface that is almost identical to the usual python NetCDF interface. We pull out the variables we need for azimuth and range, as well as the data itself.
Step10: Then convert the raw data to floating point values and the polar coordinates to Cartesian.
Step11: MetPy is a Python package for meteorology (Documentation
Step12: Finally, we plot them up using matplotlib and cartopy. We create a helper function for making a map to keep things simpler later.
Step13: Download a collection of historical data
Step14: The specified longitude, latitude are in NY and the TDS helpfully finds the closest station to that point. We can see that for this time range we obtained multiple datasets.
Step15: Grab the first dataset so that we can get the longitude and latitude of the station and make a map for plotting. We'll go ahead and specify some longitude and latitude bounds for the map.
Step16: Use the function to make a new map and plot a colormapped view of the data
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import warnings
warnings.filterwarnings("ignore", category=matplotlib.cbook.MatplotlibDeprecationWarning)
%matplotlib inline
# The archive of data on S3 URL did not work for me, despite .edu domain
#url = 'http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/'
#Trying motherlode URL
url = 'http://thredds.ucar.edu/thredds/radarServer/nexrad/level2/IDD/'
from siphon.radarserver import RadarServer
rs = RadarServer(url)
from datetime import datetime, timedelta
query = rs.query()
query.stations('KLVX').time(datetime.utcnow())
rs.validate_query(query)
catalog = rs.get_catalog(query)
catalog.datasets
ds = list(catalog.datasets.values())[0]
ds.access_urls
from siphon.cdmr import Dataset
data = Dataset(ds.access_urls['CdmRemote'])
import numpy as np
def raw_to_masked_float(var, data):
# Values come back signed. If the _Unsigned attribute is set, we need to convert
# from the range [-127, 128] to [0, 255].
if var._Unsigned:
data = data & 255
# Mask missing points
data = np.ma.array(data, mask=data==0)
# Convert to float using the scale and offset
return data * var.scale_factor + var.add_offset
def polar_to_cartesian(az, rng):
az_rad = np.deg2rad(az)[:, None]
x = rng * np.sin(az_rad)
y = rng * np.cos(az_rad)
return x, y
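# use the first (lowest-elevation) sweep of the volume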
sweep = 0
ref_var = data.variables['Reflectivity_HI']
ref_data = ref_var[sweep]
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref = raw_to_masked_float(ref_var, ref_data)
x, y = polar_to_cartesian(az, rng)
from metpy.plots import ctables # For NWS colortable
ref_norm, ref_cmap = ctables.registry.get_with_steps('NWSReflectivity', 5, 5)
import matplotlib.pyplot as plt
import cartopy
def new_map(fig, lon, lat):
# Create projection centered on the radar. This allows us to use x
# and y relative to the radar.
proj = cartopy.crs.LambertConformal(central_longitude=lon, central_latitude=lat)
# New axes with the specified projection
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add coastlines
ax.coastlines('50m', 'black', linewidth=2, zorder=2)
# Grab state borders
state_borders = cartopy.feature.NaturalEarthFeature(
category='cultural', name='admin_1_states_provinces_lines',
scale='50m', facecolor='none')
ax.add_feature(state_borders, edgecolor='black', linewidth=1, zorder=3)
return ax
# Our specified time
#dt = datetime(2012, 10, 29, 15) # Superstorm Sandy
#dt = datetime(2016, 6, 18, 1)
dt = datetime(2016, 6, 8, 18)
query = rs.query()
query.lonlat_point(-73.687, 41.175).time_range(dt, dt + timedelta(hours=1))
cat = rs.get_catalog(query)
cat.datasets
ds = list(cat.datasets.values())[0]
data = Dataset(ds.access_urls['CdmRemote'])
# Pull out the data of interest
sweep = 0
rng = data.variables['distanceR_HI'][:]
az = data.variables['azimuthR_HI'][sweep]
ref_var = data.variables['Reflectivity_HI']
# Convert data to float and coordinates to Cartesian
ref = raw_to_masked_float(ref_var, ref_var[sweep])
x, y = polar_to_cartesian(az, rng)
fig = plt.figure(figsize=(10, 10))
ax = new_map(fig, data.StationLongitude, data.StationLatitude)
# Set limits in lat/lon space
ax.set_extent([-77, -70, 38, 43])
# Add ocean and land background
ocean = cartopy.feature.NaturalEarthFeature('physical', 'ocean', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['water'])
land = cartopy.feature.NaturalEarthFeature('physical', 'land', scale='50m',
edgecolor='face',
facecolor=cartopy.feature.COLORS['land'])
ax.add_feature(ocean, zorder=-1)
ax.add_feature(land, zorder=-1)
ax.pcolormesh(x, y, ref, cmap=ref_cmap, norm=ref_norm, zorder=0);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The output of each iteration of the for-loop is a string which contains the entire line of the file.
Step2: Notice that all of the file contents that were printed before ("First line of file", "Second line of file", etc...) were deleted because we used w when we wrote. Notice what happens when we simply append to our file.
Step3: Notice that when we used a as our permissions that it simply added new text to the end of the file when we wrote. We will now return the file to its original state so that we can run this file again.
|
<ASSISTANT_TASK:>
Python Code:
f = open('filespython.txt', 'r')
for line in f:
print(line)
f.close()
f = open('filespython.txt', 'w')
f.write('This is another line\n')
f.close()
f = open('filespython.txt', 'r')
print(f.read())
f.close()
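# 'a' opens the file for appending, so the write below goes to the end of the existing contents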
f = open('filespython.txt', 'a')
f.write('This is another line\n')
f.close()
f = open('filespython.txt', 'r')
print(f.read())
f.close()
# Open file
f = open('filespython.txt', 'w')
# Will use this string in each line so create it first
lof = " line of file\n"
for currline in ["First", "Second", "Third", "Last"]:
f.write(currline + lof)
f.close()
f = open('filespython.txt', 'r')
print(f.read())
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [pooling.AveragePooling2D.1] input 6x6x3, pool_size=(2, 2), strides=(1, 1), padding='valid', data_format='channels_last'
Step2: [pooling.AveragePooling2D.2] input 6x7x3, pool_size=(2, 2), strides=(2, 1), padding='valid', data_format='channels_last'
Step3: [pooling.AveragePooling2D.3] input 6x6x3, pool_size=(3, 3), strides=None, padding='valid', data_format='channels_last'
Step4: [pooling.AveragePooling2D.4] input 6x6x3, pool_size=(3, 3), strides=(3, 3), padding='valid', data_format='channels_last'
Step5: [pooling.AveragePooling2D.5] input 6x6x3, pool_size=(2, 2), strides=None, padding='same', data_format='channels_last'
Step6: [pooling.AveragePooling2D.6] input 6x6x3, pool_size=(2, 2), strides=(1, 1), padding='same', data_format='channels_last'
Step7: [pooling.AveragePooling2D.7] input 6x7x3, pool_size=(2, 2), strides=(2, 1), padding='same', data_format='channels_last'
Step8: [pooling.AveragePooling2D.8] input 6x6x3, pool_size=(3, 3), strides=None, padding='same', data_format='channels_last'
Step9: [pooling.AveragePooling2D.9] input 6x6x3, pool_size=(3, 3), strides=(3, 3), padding='same', data_format='channels_last'
Step10: [pooling.AveragePooling2D.10] input 5x6x3, pool_size=(3, 3), strides=(2, 2), padding='valid', data_format='channels_first'
Step11: [pooling.AveragePooling2D.11] input 5x6x3, pool_size=(3, 3), strides=(1, 1), padding='same', data_format='channels_first'
Step12: [pooling.AveragePooling2D.12] input 4x6x4, pool_size=(2, 2), strides=None, padding='valid', data_format='channels_first'
Step13: export for Keras.js tests
|
<ASSISTANT_TASK:>
Python Code:
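# Assumed notebook preamble (not shown in this snippet): format_decimal is the
# Keras.js test helper that rounds values for JSON export, and DATA collects the
# generated fixtures. The imports below are standard Keras/NumPy.
import json
import numpy as np
from keras.layers import Input, AveragePooling2D
from keras.models import Model
DATA = {}  # assumed container; the original notebook may use an OrderedDict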
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(270)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(271)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 7, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(2, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(272)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(273)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(274)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(275)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(276)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 7, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(2, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(277)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(278)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(279)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.9'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(280)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.10'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (5, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(281)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.11'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (4, 6, 4)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(282)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.12'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
import os
filename = '../../../test/data/layers/pooling/AveragePooling2D.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Input
Step3: Workflow
Step4: About stemmers and lemmatisation
Step5: Count & POS tag of each stemmed/non-stop word
Step6: Proportion of POS tags
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
from collections import Counter
bloboftext = """
This little piggy went to market,
This little piggy stayed home,
This little piggy had roast beef,
This little piggy had none,
And this little piggy went wee wee wee all the way home.
"""
## Tokenization
bagofwords = nltk.word_tokenize(bloboftext.lower())
print len(bagofwords)
## Stop word removal
stop = stopwords.words('english')
bagofwords = [i for i in bagofwords if i not in stop]
print len(bagofwords)
snowball_stemmer = SnowballStemmer("english")
## What words was stemmed?
_original = set(bagofwords)
_stemmed = set([snowball_stemmer.stem(i) for i in bagofwords])
print 'BEFORE:\t%s' % ', '.join(map(lambda x:'"%s"'%x, _original-_stemmed))
print ' AFTER:\t%s' % ', '.join(map(lambda x:'"%s"'%x, _stemmed-_original))
del _original, _stemmed
## Proceed with stemming
bagofwords = [snowball_stemmer.stem(i) for i in bagofwords]
for token, count in Counter(bagofwords).most_common():
print '%d\t%s\t%s' % (count, nltk.pos_tag([token])[0][1], token)
record = {}
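# aggregate token counts by part-of-speech tag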
for token, count in Counter(bagofwords).most_common():
postag = nltk.pos_tag([token])[0][1]
if record.has_key(postag):
record[postag] += count
else:
record[postag] = count
recordpd = pd.DataFrame.from_dict([record]).T
recordpd.columns = ['count']
N = sum(recordpd['count'])
recordpd['percent'] = recordpd['count']/N*100
recordpd
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Llama Drama Low (1920x1080)
Step2: Llama Drama Medium (1920x1080)
Step3: Llama Drama High (1920x1080)
Step4: Playlist
Step5: StreamEngine
Step6: Real-world example
|
<ASSISTANT_TASK:>
Python Code:
!ffmpeg -i LlamaDrama.mp4 -movflags faststart -b:v 256000 -maxrate 256000 -x264opts "fps=24:keyint=48:min-keyint=48:no-scenecut" -hls_list_size 0 -hls_time 4 -hls_base_url http://192.168.3.14:8000/low/ low/LlamaDrama.m3u8
!ffmpeg -i LlamaDrama.mp4 -movflags faststart -b:v 512000 -maxrate 512000 -x264opts "fps=24:keyint=48:min-keyint=48:no-scenecut" -hls_list_size 0 -hls_time 4 -hls_base_url http://192.168.3.14:8000/medium/ medium/LlamaDrama.m3u8
!ffmpeg -i LlamaDrama.mp4 -movflags faststart -b:v 1024000 -maxrate 1024000 -x264opts "fps=24:keyint=48:min-keyint=48:no-scenecut" -hls_list_size 0 -hls_time 4 -hls_base_url http://192.168.3.14:8000/high/ high/LlamaDrama.m3u8
from collections import namedtuple
from io import BytesIO
from requests import get
import m3u8
from time import time
from io import BytesIO
from subprocess import call
Stream = namedtuple('Stream',['bandwidth', 'uri'])
class HLS:
speed = 0 # Bits / second
i = 0
def __init__(self, uri):
self.selector = StreamEngine(uri)
def __iter__(self):
return self
def __next__(self):
stream = self.selector.selectStream(self.speed)
if self.i < len(stream.segments):
startTime = time()
buf = getSegment(stream.segments[self.i])
self.speed = round((buf.getbuffer().nbytes*8) / (time() - startTime))
print('%d bits/s' %self.speed)
self.i += 1
return buf
else:
raise StopIteration
class StreamEngine:
currentStream = None
streamM3 = None
streams = None
def __init__(self, uri):
self.streams = sorted([Stream(playlist.stream_info.bandwidth, playlist.uri)
for playlist in m3u8.load(uri).playlists])
self.currentStream = self.streams[0]
self.streamM3 = m3u8.load(self.currentStream.uri)
def selectStream(self, speed):
newStream = self.currentStream
for stream in self.streams:
if stream.bandwidth < speed:
newStream = stream
else:
break
if newStream != self.currentStream:
self.currentStream = newStream
self.streamM3 = m3u8.load(newStream.uri)
print('Changing Streams: New BitRate %d' %newStream.bandwidth)
return self.streamM3
def getSegment(segment):
buf = BytesIO()
r = get(segment.uri, stream=True)
for chunk in r.iter_content(chunk_size=2048):
if chunk:
buf.write(chunk)
return buf
from subprocess import Popen, PIPE, STDOUT
hls = HLS('http://192.168.3.14:8000/LlamaDrama.m3u8')
player = Popen("/usr/local/bin/gst-play-1.0 fd://0".split(), stdout=PIPE, stdin=PIPE)
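# feed each downloaded MPEG-TS segment into the GStreamer player's stdin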
for segment in hls:
player.stdin.write(segment.getvalue())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: $...$ or a membership test and the like.
Step2: Assertions, preconditions, postconditions
Step3: If an assertion is true, nothing happens. All is well and execution proceeds without interuption.
Step4: The assertion statement has an optional additional expression that is printed by the exception.
Step6: Let's write a function that could use some assertions. Here's a function that converts a temperature in degrees Kelvin to Celsius, so it requires that its input temperature not be a negative number. I advocate adding the assertions to the documentation string so the function's users know the terms of the contract.
Step7: Here's what the documentation string gives.
Step8: Let's try it with an invalid input.
Step9: The input argument needs to be numeric, so we should perform type checking on it. There are several types of numbers that are allowed
Step11: We'll add in the type checking. It needs to go before the assertion about tK's value because that assertion itself would fail if tK isn't numeric!
Step13: A $\bf postcondition$ is a system state that is guaranteed to be true after a function finishes executing. Postconditions should be placed after all state-changing expressions. They give the user peace of mind that the function has, in fact, fufilled its promises and things may proceed.
Step14: Functions as arguments to other functions
Step15: The issue here is that scipy is ginormous and it would take forever to load the whole thing -- and we only need the one routine. So, we need to load it specifically. Well, actually we load the optimize package in which the needed function resides.
Step16: Let's try it on $(x-1)^2$. We don't want to bother to write a formal, named function like for this. We don't need a function with a name to compute (x - 1)**2, so this is a perfect place for a lambda function.
Step17: The value that minimizes the function is returned as the x attribute. It is 1.0, as expected.
Step18: The function itself is passed into weirdFunc. Here are other examples.
Step19: This might bake your noodle, but we can pass a function to itself.
Step20: We can execute the function we passed in. Inside the weirdFunc function, the argument function is named "f" so we execute it in the usual way
Step21: Parametric functions
Step22: Let's plot a parametric function of two functions
Step24: Here's a function that implements two-dimensional parametric functions
Step25: The beautiful thing about parametric functions is that they are functions (mathematically speaking) of the parameter, but not of the spatial coordinates. So we can have curves of any shape.
Step27: Final example
Step28: Recursion
Step30: Easy enough. To solve this problem recursively, let's write it as an iterated function, as above. Suppose we have a magick function named TOTAL(n) that returns the sum of $1...n$. Clearly
Step31: At first, recursion seems like complete magick. It's as if the program DOESN'T DO ANYTHING yet it computes the answer. There are no iteration variables, no intermediate results (it seems) -- just the answer. But a little reflection on how python executes this function might shed some light. There are intermediate results that are held on the execution stack. Those results are the values the variable n is given each time TOTAL is called. (Remember that n is $\it local$ to the instance of its invocation, so every invocation of TOTAL gets its own private copy of n. For TOTAL(3) the execution stack looks like this
Step32: Example
Step33: This list comprehension produces the Fibonacci sequence.
Step34: The print statements reveal that the algorithm is wicked inefficient. For f(4), f(2) is called twice, and f(1) is called three times. Try f(5) and you'll start to see that the redundancy problem is growing out of control. A little algebra shows how the problem arises.
Step35: The function returns a tuple instead of the one single number, which is slightly annoying. One quick fix is to just tack on a [0] to the function invocation to pick out only the first number.
Step36: This algorithm is wicked fast. If you try to compute f(1000) with the first-try implementation, python's stack goes too deep and throws an overflow exception -- it can't even solve the problem! Also notice how python quietly changed to arbitrary precision arithmetic when the size of the number grew larger than what can be represented with machine arithmetic. Slicker than frog's fur.
Step37: Base cases
Step38: Here's what the zero-order approximation looks like.
Step39: The length of the approximation is $\sqrt{(2-0)^2 + (4-0)^2}$
Step40: It happens that a closed-form solution exists for this problem
Step41: The next iteration step's approximation is...
Step43: This approximation is better than what we got from the first, as expected. To generalize,
Step44: The arclength function takes an array of parametric lambda functions, t0, t1, and a convergence tolerance. Here's how we use a lambda function inside our routine.
Step45: The following function is executed in our base case. It calculates the length of a segment.
Step46: Let's try it
Step47: Same code works for curves of any dimension
Step48: This agrees to the exact answer to at least five significant digits. Here's a three-dimensional helical spiral.
|
<ASSISTANT_TASK:>
Python Code:
'eggs' < 'spam'
8 in (1,8,7,3,9,'spam')
assert(8>5)
assert(8<5)
assert(8<5), 'wicked helpful assertion string'
def K2C(tK):
    """Convert degrees K to C.
    Argument: tK is a temperature in degrees K.
    Precondition: (tK > 0)
    """
assert tK > 0, 'tK is negative'
return tK - 273.15
help(K2C)
K2C(-4)
NumberTypes = (int, long, float)
isinstance(8, NumberTypes)
isinstance('spam!', NumberTypes)
NumberTypes = (int, long, float)
def K2C(tK):
    """Convert degrees K to C.
    Argument: tK is a temperature in degrees K.
    Preconditions: (tK is an int or long int or float) and (tK > 0)
    """
assert isinstance(tK, NumberTypes), 'tK must be numeric'
assert tK > 0, 'tK is negative, which is not allowed'
return tK - 273.15
K2C('hello world!')
NumberTypes = (int, long, float)
def K2C(tK):
    """Convert degrees K to C.
    Argument: tK is a temperature in degrees K.
    Preconditions: (tK is int or long int or float) and (tK > 0)
    Postconditions: (result is a float in Celsius)
    """
assert isinstance(tK, NumberTypes), 'tK must be numeric'
assert tK > 0, 'K2C: tK is negative, which is not allowed'
result = tK - 273.15 # conversion to Celsius
assert isinstance(result, float), 'the result must be a float'
return result
K2C(288)
import scipy
help(scipy.optimize.minimize_scalar)
'WAT '* 12
from scipy import optimize
help(scipy.optimize.minimize_scalar)
scipy.optimize.minimize_scalar(lambda x: (x-1)**2)
def justAnotherFunction(x):
print 'yo. in justAnotherFunction'
return x
def weirdFunc(f):
print f
return None
weirdFunc(justAnotherFunction)
weirdFunc(lambda x: (x-1)**2)
weirdFunc(weirdFunc)
def weirdFunc(f):
return f(4)
weirdFunc(justAnotherFunction)
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
def myPlot(yf, npts = 50):
T = np.linspace(0,1,npts)
y = [yf(t) for t in T]
return plt.plot(T, y)
myPlot(lambda t: t**2)
def myPlot(xf, yf, npts = 50):
T = np.linspace(0,1,npts)
x = [xf(t) for t in T]
y = [yf(t) for t in T]
return plt.plot(x, y)
myPlot(np.sin, np.cos)
def parametricFunc(xf, yf, t):
    """Evaluate a parametric function {x(t), y(t)}
    Arguments:
    xf is a function of one argument that will be evaluated at t.
    yf is a function of one argument that will be evaluated at t.
    t is a float
    Preconditions: (t is a float)
    """
assert isinstance(t, float), 't must be a float'
return np.array([xf(t), yf(t)])
parametricFunc(np.sin, np.cos, 0.5)
T = np.linspace(0, 2*np.pi, 200)
pnts = np.array([parametricFunc(np.sin, np.cos, t) for t in T])
x=pnts[:,0]
y=pnts[:,1]
plt.plot(x,y)
def parametricFuncND(listOfFunctions, t):
    """Evaluate a parametric function of any (finite) dimension
    Arguments:
    listOfFunctions is a list of functions of one value that will be evaluated at t.
    t is a float in [0,1]
    Preconditions: (t is a float)
    """
assert isinstance(t, float), 't must be a float'
assert len(listOfFunctions) > 0, 'the list of functions must have at least one function'
return np.array([f(t) for f in listOfFunctions])
parametricFuncND([np.sin, np.cos, lambda x: x**2 - 3.0 * x], 0.5)
total = 0 # initialize the summand
n = 5
for i in range(1,n+1):
total += i
total
def TOTAL(n):
    """compute the summation of [1...n]"""
if n == 0:
return 0
else:
return TOTAL(n-1) + n
TOTAL(5)
TOTAL(3)
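# naive recursive Fibonacci -- recomputes the same subproblems over and over (see the verbose call trace below)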
def f0(n, verbose=False):
if n == 0:
return 0
if n == 1:
return 1
else:
f1 = f0(n-1, verbose)
if verbose: print 'f(n-1) = f(', n-1, ') =', f1
f2 = f0(n-2, verbose)
if verbose: print 'f(n-2) = f(', n-2, ') =', f2
return f1 + f2
f0(4, True)
%timeit f0(10)
[f0(n) for n in range(6)]
def f1(n, verbose=False):
if verbose: print 'f1(', n, ')'
if n == 1 or n == 0:
return 1,0 # return a tuple. Notice that parentheses are not needed here
else:
# note the multiple assignment. the LHS is also a tuple, appearances notwithstanding
f2, f3 = f1(n-2, verbose)
return 2*f2 + f3, f2
f1(4, True)
f1(1000)[0]
%timeit f1(10)
36.2/1.58
x = np.linspace(0,2,50,endpoint=True)
plt.plot(x,x**2)
plt.xlabel('x')
plt.ylabel('y = $x^2$')
plt.title('A Parabola')
x = np.linspace(0,2,50,endpoint=True)
x0 = 0
x1 = 2
plt.plot(x,x**2)
plt.plot([x0,x1],[x0,x1**2],'ro')
plt.plot([x0,x1],[x0,x1**2],'r-')
plt.xlabel('x')
plt.ylabel('y = $x^2$')
plt.title('A Parabola Approximated with a Line Segment')
np.sqrt(2**2 + 4**2)
x = np.linspace(0,2,50,endpoint=True)
x0 = 0
xm = 1
x1 = 2
plt.plot(x,x**2)
plt.plot([x0,xm,x1],[x0,xm**2,x1**2],'ro')
plt.plot([x0,xm,x1],[x0,xm**2,x1**2],'r-')
plt.xlabel('x')
plt.ylabel('y = $x^2$')
plt.title('A Parabola Approximated with a Two Line Segments')
s1 = np.sqrt((1-0)**2 + (1-0)**2)
s2 = np.sqrt((1-0)**2 + (4-1)**2)
print s1, '+', s2, '=', s1+s2
NumberTypes = (int, long, float)
nptype = type(np.array([])) # let python tell me the name of the numpy array
def pythag(p1, p2):
    """Compute the straight-line distance from p1 to p2.
    Arguments:
    p1 is a numpy array of any dimension. Its elements are numeric.
    p2 is a numpy array of the same dimension as p1. Its elements are numeric.
    Preconditions:
    (p1 is not empty) and (p1's elements are numeric)
    (p2 is not empty) and (p2's elements are numeric) and (p2 has the same length as p1)
    p1 and p2 are one-dimensional (vectors, not matrices)
    Postcondition:
    (result is non-negative) and (result is a float)
    """
assert(isinstance(p1, nptype))
assert(isinstance(p2, nptype))
assert(len(p1) > 0)
assert(len(p1) == len(p2))
for i in range(len(p1)):
assert(isinstance(p1[i], NumberTypes))
assert(isinstance(p2[i], NumberTypes))
result = np.sqrt(sum((p2 - p1)**2))
assert(result >= 0) # this does both post conditions in one check
return result
pythag(np.array([0,0]), np.array([2,4]))
x = lambda t: t**2
x(8)
def arclenSegment(curves, t0, t1):
p0 = np.array([c(t0) for c in curves])
p1 = np.array([c(t1) for c in curves])
length = pythag(p0, p1)
return length
arclenSegment([lambda t: t, lambda t: t**2], 0.0, 2.0)
arclenSegment([lambda t: t, lambda t: t**2, lambda t: t**3], 0.0, 2.0)
def arclen(curves, t0, t1, tol=0.001):
assert(t0 < t1), 't0 must be less than t1'
assert(tol > 0), 'tolerance must be positive'
assert(len(curves) > 0), 'there must be at least one curve'
dt = (t1 - t0)/2.0 # divide and conquer
tm = t0 + dt
if dt <= tol:
return arclenSegment(curves, t0, t1)
else:
return arclen(curves, t0, tm, tol) + arclen(curves, tm, t1, tol)
arclen([lambda t: t, lambda t: t**2], 0.0, 2.0)
arclen([lambda t: np.sin(2.0 * np.pi * t), lambda t: np.cos(2.0 * np.pi * t),lambda t: t,], 0.0, 2.0)
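# Added sanity check (an assumption, not part of the original notebook): for the
# unit circle x(t) = sin(2*pi*t), y(t) = cos(2*pi*t) over one period t in [0, 1],
# the chord-sum computed by arclen should approach the circumference 2*pi as the
# tolerance shrinks.
circle = [lambda t: np.sin(2.0 * np.pi * t), lambda t: np.cos(2.0 * np.pi * t)]
print(abs(arclen(circle, 0.0, 1.0, tol=0.0001) - 2.0 * np.pi))  # should be close to zero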
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the previous chapter we used Bayes's Theorem to solve a cookie problem; then we solved it again using a Bayes table.
Step2: If that doesn't work, you might have to install empiricaldist; try running
Step3: Pmf creates an empty Pmf with no outcomes.
Step4: In this example, all outcomes in the sequence appear once, so they all have the same probability, $1/6$.
Step5: The letter M appears once out of 11 characters, so its probability is $1/11$.
Step6: In the word "Mississippi", about 36% of the letters are "s".
Step7: You can also call a Pmf as if it were a function, with a letter in parentheses.
Step8: If the quantity is in the distribution the results are the same.
Step9: With parentheses, you can also provide a sequence of quantities and get a sequence of probabilities.
Step10: The quantities in a Pmf can be strings, numbers, or any other type that can be stored in the index of a Pandas Series.
Step11: This distribution, which contains the prior probability for each hypothesis, is called (wait for it) the prior distribution.
Step12: The result is the unnormalized posteriors; that is, they don't add up to 1.
Step13: The return value from normalize is the total probability of the data, which is $5/8$.
Step14: From the posterior distribution we can select the posterior probability for Bowl 1
Step15: And the answer is 0.6.
Step16: Now the posterior probability for Bowl 1 is almost 70%.
Step17: And here's the update.
Step18: Now the posterior probability for Bowl 1 is about 53%.
Step19: We can use this array to make the prior distribution
Step20: As this example shows, we can initialize a Pmf with two parameters.
Step21: The likelihood of the data is the fraction of vanilla cookies in each bowl, which we can calculate using hypos
Step22: Now we can compute the posterior distribution in the usual way
Step23: The following figure shows the prior distribution and the posterior distribution after one vanilla cookie.
Step24: The posterior probability of Bowl 0 is 0 because it contains no vanilla cookies.
Step25: And here's what the posterior distribution looks like.
Step26: After two vanilla cookies, the high-numbered bowls have the highest posterior probabilities because they contain the most vanilla cookies; the low-numbered bowls have the lowest probabilities.
Step27: And here's the posterior distribution.
Step28: Now Bowl 100 has been eliminated because it contains no chocolate cookies.
Step29: Or Pmf provides a more memorable name for the same thing
Step30: As you might suspect, this example isn't really about bowls; it's about estimating proportions.
Step31: We can make the prior distribution like this
Step32: As in the previous example, the prior probability gets broadcast across the hypotheses.
Step33: Now we're ready to do the update.
Step34: And here's the update.
Step35: The posterior probability for the 6-sided die is $4/9$.
Step36: The likelihood for the 6-sided die is 0 because it is not possible to get a 7 on a 6-sided die.
Step38: After rolling a 1 and a 7, the posterior probability of the 8-sided die is about 69%.
Step39: The first parameter is a Pmf that represents the possible dice and their probabilities.
Step40: And use update_dice to do the updates.
Step41: The result is the same. We will see a version of this function in the next chapter.
Step42: Exercise
Step43: Exercise
Step44: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py')
from utils import set_pyplot_params
set_pyplot_params()
from empiricaldist import Pmf
coin = Pmf()
coin['heads'] = 1/2
coin['tails'] = 1/2
coin
die = Pmf.from_seq([1,2,3,4,5,6])
die
letters = Pmf.from_seq(list('Mississippi'))
letters
letters['s']
try:
letters['t']
except KeyError as e:
print(type(e))
letters('s')
letters('t')
die([1,4,7])
prior = Pmf.from_seq(['Bowl 1', 'Bowl 2'])
prior
likelihood_vanilla = [0.75, 0.5]
posterior = prior * likelihood_vanilla
posterior
posterior.normalize()
posterior
posterior('Bowl 1')
posterior *= likelihood_vanilla
posterior.normalize()
posterior
likelihood_chocolate = [0.25, 0.5]
posterior *= likelihood_chocolate
posterior.normalize()
posterior
import numpy as np
hypos = np.arange(101)
prior = Pmf(1, hypos)
prior.normalize()
prior.head()
likelihood_vanilla = hypos/100
likelihood_vanilla[:5]
posterior1 = prior * likelihood_vanilla
posterior1.normalize()
posterior1.head()
from utils import decorate
def decorate_bowls(title):
decorate(xlabel='Bowl #',
ylabel='PMF',
title=title)
prior.plot(label='prior', color='C5')
posterior1.plot(label='posterior', color='C4')
decorate_bowls('Posterior after one vanilla cookie')
posterior2 = posterior1 * likelihood_vanilla
posterior2.normalize()
posterior2.plot(label='posterior', color='C4')
decorate_bowls('Posterior after two vanilla cookies')
likelihood_chocolate = 1 - hypos/100
posterior3 = posterior2 * likelihood_chocolate
posterior3.normalize()
posterior3.plot(label='posterior', color='C4')
decorate_bowls('Posterior after 2 vanilla, 1 chocolate')
posterior3.idxmax()
posterior3.max_prob()
hypos = [6, 8, 12]
prior = Pmf(1/3, hypos)
prior
prior.qs
prior.ps
likelihood1 = 1/6, 1/8, 1/12
posterior = prior * likelihood1
posterior.normalize()
posterior
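# Added cross-check (an assumption, not in the original notebook): with equal
# priors the posterior is just the normalized likelihood, so the 6-sided die gets
# (1/6) / (1/6 + 1/8 + 1/12) = (4/24) / (9/24) = 4/9, which should match posterior[6].
print((1/6) / (1/6 + 1/8 + 1/12))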
likelihood2 = 0, 1/8, 1/12
posterior *= likelihood2
posterior.normalize()
posterior
def update_dice(pmf, data):
"""Update pmf based on new data."""
hypos = pmf.qs
likelihood = 1 / hypos
impossible = (data > hypos)
likelihood[impossible] = 0
pmf *= likelihood
pmf.normalize()
pmf = prior.copy()
pmf
update_dice(pmf, 1)
update_dice(pmf, 7)
pmf
# Solution
pmf = prior.copy()
for data in [1, 3, 5, 7]:
update_dice(pmf, data)
pmf
# Solution
# Notice that I don't bother to normalize the prior.
# The `Pmf` gets normalized during the update, so we
# don't have to normalize it before.
ps = [1,2,3,4,5]
qs = [4,6,8,12,20]
pmf = Pmf(ps, qs)
update_dice(pmf, 7)
pmf
# Solution
# In the BlackWhite drawer, the probability of getting a match is 1/2
# In the RedGreenBlue drawer, the probability of a match is 1/3
hypos = ['BlackWhite', 'RedGreenBlue']
prior = Pmf(1/2, hypos)
likelihood = 1/2, 1/3
posterior = prior * likelihood
posterior.normalize()
posterior
# Solution
# If I drew from the BlackWhite drawer, the probability the
# socks are white is 1/2
posterior['BlackWhite'] / 2
# Solution
# The trick to this question is to notice that Elvis's twin was a brother.
# If they were identical twins, it is certain they would be the same sex.
# If they were fraternal twins, the likelihood is only 50%.
# Here's a solution using a Bayes table
import pandas as pd
table = pd.DataFrame(index=['identical', 'fraternal'])
table['prior'] = 1/3, 2/3
table['likelihood'] = 1, 1/2
table['unnorm'] = table['prior'] * table['likelihood']
prob_data = table['unnorm'].sum()
table['posterior'] = table['unnorm'] / prob_data
table
# Solution
# Here's a solution using a Pmf
hypos = ['identical', 'fraternal']
prior = Pmf([1/3, 2/3], hypos)
prior
# Solution
likelihood = 1, 1/2
posterior = prior * likelihood
posterior.normalize()
posterior
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 3
Step2: This exercise shows a curious way of moving through an array: we start at the first position, then go to the position indicated by the first element of the array, and so on. We stop when we land on the value zero.
Step3: Part 5
Step4: Part 6
Step5: Part 7
Step6: The program displays None, which was the default value of the variable position. The loop did not change the contents of the variable. So, when position == -1, it means that the result was not found.
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
l = [ 4, 3, 0, 2, 1 ]
i = 0
while l[i] != 0 :
i = l[i]
print (i) # what does l[i] equal at the end?
from IPython.display import Image
Image("td2_1.png")
l = [ 3, 6, 2 , 7, 9 ]
x = 7
for i,v in enumerate(l) :
if v == x :
position = i
print ( position )
l = [2, 3, 6, 7, 9]
# if the list is not sorted, you have to write:
l.sort ()
x = 7
a = 0
b = len(l)-1
while a <= b :
m = (a+b)//2 # don't forget //, otherwise the division is a float division
if l[m] == x :
position = m # line A
break # no need to continue, we leave the loop
elif l[m] < x :
a = m+1
else :
b = m-1
print ( position )
l = [2, 3, 6, 7, 9]
l.sort ()
x = 5
position = -1
a = 0
b = len(l)-1
while a <= b :
m = (a+b)//2
if l[m] == x :
position = m
break
elif l[m] < x :
a = m+1
else :
b = m-1
print ( position )
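# Added comparison (an assumption, not part of the original exercise): the same
# "not found" convention (-1) can be obtained with the standard-library bisect
# module, which implements this binary search for you.
import bisect
idx = bisect.bisect_left(l, x)
print(idx if idx < len(l) and l[idx] == x else -1)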
import random, math
l = list(range(0,1000000))
for k in range(0,10):
x = random.randint(0,l[-1])
iter = 0
a = 0
b = len(l)-1
while a <= b :
iter += 1
m = (a+b)//2
if l[m] == x :
position = m
break
elif l[m] < x :
a = m+1
else :
b = m-1
print ("k=",k, "x=", x, "itération=", iter, " log2(len(l))=", math.log(len(l))/math.log(2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3> Simulate some time-series data </h3>
Step2: <h3> Train model locally </h3>
Step3: <h3> Cloud ML Engine </h3>
Step4: Monitor training with TensorBoard
|
<ASSISTANT_TASK:>
Python Code:
# Change these to try this notebook out
BUCKET = "cloud-training-demos-ml"
PROJECT = "cloud-training-demos"
REGION = "us-central1"
SEQ_LEN = 50
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['SEQ_LEN'] = str(SEQ_LEN)
os.environ['TFVERSION'] = "1.13"
import tensorflow as tf
print(tf.__version__)
import numpy as np
import seaborn as sns
def create_time_series():
freq = (np.random.random()*0.5) + 0.1 # 0.1 to 0.6
ampl = np.random.random() + 0.5 # 0.5 to 1.5
noise = [np.random.random()*0.3 for i in range(SEQ_LEN)] # 0 to +0.3 uniformly distributed
x = np.sin(np.arange(0,SEQ_LEN) * freq) * ampl + noise
return x
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
for i in range(0, 5):
sns.tsplot( create_time_series(), color=flatui[i%len(flatui)] ); # 5 series
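# Added fallback (an assumption about your seaborn version): sns.tsplot was
# deprecated and later removed, so a plain matplotlib loop is a safe substitute.
for i in range(0, 5):
    plt.plot(create_time_series(), color=flatui[i % len(flatui)])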
def to_csv(filename, N):
with open(filename, 'w') as ofp:
for lineno in range(0, N):
seq = create_time_series()
line = ",".join(map(str, seq))
ofp.write(line + '\n')
import os
try:
os.makedirs("data/sines/"")
except OSError:
pass
to_csv("data/sines/train-1.csv", 1000) # 1000 sequences
to_csv("data/sines/valid-1.csv", 250)
!head -5 data/sines/*-1.csv
%%bash
DATADIR=$(pwd)/data/sines
OUTDIR=$(pwd)/trained/sines
rm -rf $OUTDIR
gcloud ml-engine local train \
--module-name=sinemodel.task \
--package-path=${PWD}/sinemodel \
-- \
--train_data_path="${DATADIR}/train-1.csv" \
--eval_data_path="${DATADIR}/valid-1.csv" \
--output_dir=${OUTDIR} \
--model=linear --train_steps=10 --sequence_length=$SEQ_LEN
import shutil
shutil.rmtree(path = "data/sines", ignore_errors = True)
os.makedirs("data/sines/")
for i in range(0,10):
to_csv("data/sines/train-{}.csv".format(i), 1000) # 1000 sequences
to_csv("data/sines/valid-{}.csv".format(i), 250)
%%bash
gsutil -m rm -rf gs://${BUCKET}/sines/*
gsutil -m cp data/sines/*.csv gs://${BUCKET}/sines
%%bash
for MODEL in linear dnn cnn rnn rnn2; do
OUTDIR=gs://${BUCKET}/sinewaves/${MODEL}
JOBNAME=sines_${MODEL}_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=sinemodel.task \
--package-path=${PWD}/sinemodel \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--train_data_path="gs://${BUCKET}/sines/train*.csv" \
--eval_data_path="gs://${BUCKET}/sines/valid*.csv" \
--output_dir=$OUTDIR \
--train_steps=3000 --sequence_length=$SEQ_LEN --model=$MODEL
done
from google.datalab.ml import TensorBoard
TensorBoard().start("gs://{}/sinewaves".format(BUCKET))
for pid in TensorBoard.list()["pid"]:
TensorBoard().stop(pid)
print("Stopped TensorBoard with pid {}".format(pid))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Source
Step2: 2. numpy.genfromtxt()
Step3: Note
Step4: Formatting
Step5: Try looking at the data from the 1960's to the present day. You can do this by just changing the 'x' and 'y' axis
Step6: 4. Save the figure
Step7: Now let's see if we can make some subplots
Step8: Now you can see the x axis has gone wrong!
Step9: Task
Step10: Example of what else you can do
Step11: Example - Contour plots
Step12: Example 1 - Quiver Plots
Step13: Example 2 - Quiver Plots
|
<ASSISTANT_TASK:>
Python Code:
# This will plot a simple scatter graph of points.
# The points will have all different sizes just for visual appearance, as well as varied colours
%matplotlib inline
# Import the required libraries
import numpy as np
import matplotlib.pyplot as plt
# Lets say we want to plot 50 points
N = 50
# Generate some random data
x = np.random.rand(N)
y = np.random.rand(N)
# Add a random colour field
colors = np.random.rand(N)
# Alter the size of the particles randomly
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
# Here we create a figure in an object orientated fashion, and call is 'ax'
fig = plt.figure()
ax = fig.add_subplot(111)
# Let's plot it! Simple right? Plot x, y. Alpha = the transparency of the points - very useful!
ax.scatter(x, y, s=area, c=colors, alpha=0.5)
# Now for some labels....and a title (they always help)
ax.set_xlabel('Random x')
ax.set_ylabel('Random y')
ax.set_title('Awesome! (but kind of pointless...)')
# First of all, lets read some data from a CSV file.
#"Figure 1. Average Global Sea Surface Temperature, 1880-2015",,,
#Source: EPA's Climate Change Indicators in the United States: www.epa.gov/climate-indicators,,,"Data source: NOAA, 2016",,,
# Units: temperature anomaly (°F),,,
# https://www.epa.gov/sites/production/files/2016-08/sea-surface-temp_fig-1.csv
# Import our libraries
import csv
import matplotlib.pyplot as plt
import os
# We first need to find our file.
file_name = os.path.join(os.path.pardir, 'data', 'sea-surface-temp_fig-1.csv')
## Header is 6 lines and we are going to read in the data
skip_rows = 6
## Read 'file_name' and call it 'data'
## This is a simple .csv file. Go into the folder 'Data' and open it in Excel if you don't believe us!
## Set delimiter for csv, we will skip the headers, and names=True means that columns have names we will use.
data = np.genfromtxt(file_name, delimiter=',', skip_header=skip_rows, names=True, dtype=None)
## Name the data based on the column names
years = data['Year']
temperature = data['Annual_anomaly']
### Time to create our plot
### fig sets up a blank figure for us.
fig = plt.figure()
### We are now going to use object orientated apporoach and call it 'ax'
ax = fig.add_subplot(111)
### Plot X, Y using 'ax' our blank figure. Colour (c) = 'red'...try changing this!
#ax.plot(x, y, c='red')
### How about adding some labels?
#ax.set_xlabel('Year')
#ax.set_ylabel('Temperature (oF)')
#ax.set_title('Annual Anomaly')
## This one sets the limits of the x and y axis.
#ax.set_xlim(..., ...)
#ax.set_ylim(..., ...)
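## Example limits (hypothetical values -- adjust to your data) for zooming in on
## the 1960s onwards, using the template lines above:
# ax.set_xlim(1960, 2015)
# ax.set_ylim(-1.0, 1.5)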
# fig.savefig('awesome_figure.png', dpi=300, bbox_inches='tight')
# Here we are going to loop over the same dataset twice and product a subplot with a different colour.
# Two data arrays are created - uncomment 'data' to see what the output looks like.
data = [temperature for i in range(2)]
# Create a colour scheme up to the value of 12. A simple array of numbers.
colours = np.linspace(0.2,6,12)
### Try playing with the value '6'. What does it do and how does it affect the output?
### Why not try the other values too... remember to save your original code! Use comments '#' to save it for later.
# Set a figure of 2 Vertical, by 1 Horizontal
fig, ax = plt.subplots(2, 1)
# Loop over the two data arrays,
for ix, arr, col in zip(ax.flat, data, colours):
ix.plot(arr, color=str(col))
file_name = os.path.join(os.path.pardir, 'data', 'Shelf_sea_nuts.csv')
### Header is 1 line and we are going to read in the data
skip_rows = 1
### Read 'file_name' and call it 'data'
### This is a simple .csv file. Go into the folder 'Data' and open it in Excel if you don't believe us!
data = np.genfromtxt(file_name, delimiter=',', names=True, dtype=None)
print(data.dtype)
print("What is that? We have the headers printed out for us!")
print("Maybe we should do something with these.... can you guess where they go?")
### Name the data based on the column names
#ICES_year = data['']
#ICES_temperature = data['']
#ICES_Oxygen = data['']
#ICES_Nitrate = data['']
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.???(???, ???, color=???, alpha=???) # hint how about we try a scatter plot?
#ax.set_xlabel('???')
#ax.set_ylabel('???')
#ax.set_title('???')
### Create some random numbers
n = 100000
x = np.random.randn(n)
y = (1.5 * x) + np.random.randn(n)
### Plot the data without colouring the points by frequency
fig1 = plt.figure()
ax = fig1.add_subplot(111)
### Formatting
ax.plot(x,y,'.r')
ax.set_xlabel('Random X')
ax.set_ylabel('Random Y')
### Estimate the 2D histogram we are using a numpy function and binning the data.
### What happends if you play around with the value 200???
#nbins = 200
H, xedges, yedges = np.histogram2d(x,y,bins=nbins)
# H needs to be rotated and flipped
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
Hmasked = np.ma.masked_where(H==0,H) # Mask pixels with a value of zero
# Plot 2D histogram using pcolor
fig2 = plt.figure()
ax = fig2.add_subplot(111)
ax.pcolormesh(xedges,yedges,Hmasked)
ax.set_xlabel('Random X distribution')
ax.set_ylabel('Random Y distribution')
#### Create a function for generating visual data
def f(x,y):
return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
### Creates data
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
### Object orientated
fig4 = plt.figure()
ax = fig4.add_subplot(111)
### Plot a contour plot
ax.contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=plt.cm.hot)
C = ax.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
### Do you want values inline with the contours? If so, uncomment me below!
#ax.clabel(C, inline=1, fontsize=30)
print("Did you label the values inline?")
### Creates data and puts it onto a grid
n = 8
X,Y = np.mgrid[0:n,0:n]
### Plot quiver
fig5 = plt.figure()
ax = fig5.add_subplot(111)
ax.quiver(X,Y)
### Creates random data 'X' 'Y' 'U' 'V'
n = 8
X,Y = np.mgrid[0:n,0:n]
T = np.arctan2(Y-n/2.0, X-n/2.0)
### Creates some data that we will use for the colouring of the arrows.
R = 10+np.sqrt((Y-n/2.0)**2+(X-n/2.0)**2)
U,V = R*np.cos(T), R*np.sin(T)
# Plots using object orientated way
fig6 = plt.figure()
ax = fig6.add_subplot(111)
# Make a quiver plot. and colour is using 'R'
ax.quiver(X,Y,U,V,R, alpha=.5)
### If you'd like to make it look a little more perfected and better contrast, try this one:
#ax.quiver(X,Y,U,V, facecolor='None', linewidth=.5)
# Set the limits.
ax.set_xlim(-1,n)
ax.set_ylim(-1,n)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. NUMPY
Step2: Define a new 3x2 array named my_array2 with [1, 2, 3] in the first row and [4,5,6] in the second.
Step3: There are a number of operations you can do with numpy arrays similar to the ones you can do with matrices in Matlab. One of the most important is slicing. We saw it when we talked about lists; it consists of extracting some subarray of the array.
Step4: One important thing to consider when you do slicing are the dimensions of the output array. Check the shape of my_array3. Check also its dimension with function ndim
Step5: If you have correctly computed it you will see that my_array3 is one-dimensional. Sometimes this can be a problem when you are working with 2D matrices (and vectors can be considered as 2D matrices with one of the sizes equal to 1). To solve this, numpy provides the newaxis constant.
Step6: Check again the shape and dimension of my_array3
Step7: It is possible to extract a single row or column from a 2D numpy array so that the result is still 2D, without explicitly resorting to np.newaxis. Compare the outputs of the following print commands.
Step8: Another important array manipulation method is array concatenation or stacking. It is useful to always state explicitly in which direction we want to stack the arrays. For example in the following example we are stacking the arrays vertically.
Step9: EXERCISE
Step10: You can create numpy arrays in several ways, not only from lists. For example numpy provides a number of functions to create special types of matrices.
Step11: Finally numpy provides all the basic matrix operations
Step12: 3. Classification example
Step13: In the previous code we have saved the features in matrix X and the class labels in the vector labels. Both are 2D numpy arrays.
Step14: According to this plot, which classes seem more difficult to distinguish?
Step15: Take the columns (5,6,17) of the data and save them in a matrix X_com. This will be our input data. Convert this array into a float array. The shape should be (1994,3)
Step16: Plot each variable in X_com versus y_com to have a first (partial) view of the data.
Step17: 4.2 Train/Test splitting
Step18: 4.3 Normalization
Step19: 4.4 Training
Step20: 4.5 Prediction and evaluation
Step21: 4.6 Saving the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Needed to include the figures in this notebook, you can remove it
# to work with a normal script
import numpy as np
import csv
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
my_array = np.array([[1, 2],[3, 4]])
print(my_array)
print(np.shape(my_array))
#<SOL>
#</SOL>
my_array3 = my_array[:,1]
print(my_array3)
print(my_array[1,0:2])
#<SOL>
#</SOL>
my_array3 = my_array3[:,np.newaxis]
#<SOL>
#</SOL>
print(my_array[:,1])
print(my_array[:,1].shape)
print(my_array[:,1:2])
print(my_array[:,1:2].shape)
print(my_array)
print(my_array2)
print(np.concatenate( (my_array, my_array2) , axis=1)) # columnwise concatenation
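# Added example (an assumption, not part of the original notebook): numpy also has
# explicit helpers for the two stacking directions, which can read more clearly
# than concatenate with an axis argument.
print(np.vstack((my_array, my_array)))  # vertical stack   -> shape (4, 2)
print(np.hstack((my_array, my_array)))  # horizontal stack -> shape (2, 4)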
#<SOL>
#</SOL>
#<SOL>
#</SOL>
t = np.arange(0.0, 1.0, 0.05)
a1 = np.sin(2*np.pi*t)
a2 = np.sin(4*np.pi*t)
#s = sin(2*3.14159*t)
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(t,a1)
plt.xlabel('t')
plt.ylabel('a_1(t)')
ax2 = plt.subplot(212)
ax2.plot(t,a2, 'r.')
plt.xlabel('t')
plt.ylabel('a_2(t)')
plt.show()
# Open up the csv file in to a Python object
csv_file_object = csv.reader(open('iris_data.csv', 'r'))
datalist = [] # Create a variable called 'data'.
for row in csv_file_object: # Run through each row in the csv file,
datalist.append(row) # adding each row to the data variable
data = np.array(datalist) # Then convert from a list to an array
# Be aware that each item is currently
# a string in this format
print(np.shape(data))
X = data[:,0:-1]
label = data[:,-1,np.newaxis]
print(X.shape)
print(label.shape)
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a quadratic curve
Step2: First, generate a dataset using this model using these parameters and the following characteristics
Step3: Now fit the model to the dataset to recover estimates for the model's parameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
a_true = 0.5
b_true = 2.0
c_true = -4.0
# YOUR CODE HERE
x = np.linspace(-5, 5, 30)
y = a_true*(x**2) + b_true*(x) + [c_true]*30 + 2*np.random.randn(30)
plt.scatter(x, y)
assert True # leave this cell for grading the raw data generation and plot
# YOUR CODE HERE
def model(x, a, b, c):
return a*x**2 + b*x + c
theta_best, theta_cov = opt.curve_fit(model, x, y, sigma=2)
print("a = ", theta_best[0], " +- ", theta_cov[0,0])
print("b = ", theta_best[1], " +- ", theta_cov[1,1])
print("c = ", theta_best[2], " +- ", theta_cov[2,2])
fitline = theta_best[0]*x**2 + theta_best[1]*x + theta_best[2]
plt.plot(x, fitline, color="r")
plt.scatter(x, y)
plt.xlabel("x")
plt.ylabel("y")
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Take a first look at the data
Step2: Use the code cell below to print the first five rows of the sf_permits DataFrame.
Step3: Does the dataset have any missing values? Once you have an answer, run the code cell below to get credit for your work.
Step4: 2) How many missing data points do we have?
Step5: 3) Figure out why the data is missing
Step6: 4) Drop missing values
Step7: Once you have an answer, run the code cell below.
Step8: 5) Drop missing values
Step9: 6) Fill in missing values automatically
|
<ASSISTANT_TASK:>
Python Code:
from learntools.core import binder
binder.bind(globals())
from learntools.data_cleaning.ex1 import *
print("Setup Complete")
# modules we'll use
import pandas as pd
import numpy as np
# read in all our data
sf_permits = pd.read_csv("../input/building-permit-applications-data/Building_Permits.csv")
# set seed for reproducibility
np.random.seed(0)
# TODO: Your code here!
#%%RM_IF(PROD)%%
sf_permits.head()
# Check your answer (Run this code cell to receive credit!)
q1.check()
# Line below will give you a hint
#_COMMENT_IF(PROD)_
q1.hint()
# TODO: Your code here!
percent_missing = ____
# Check your answer
q2.check()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q2.hint()
#_COMMENT_IF(PROD)_
q2.solution()
#%%RM_IF(PROD)%%
# get the number of missing data points per column
percent_missing = sf_permits.isnull().sum().sum()
q2.assert_check_failed()
#%%RM_IF(PROD)%%
# get the number of missing data points per column
missing_values_count = sf_permits.isnull().sum()
# how many total missing values do we have?
total_cells = np.product(sf_permits.shape)
total_missing = missing_values_count.sum()
# percent of data that is missing
percent_missing = (total_missing/total_cells) * 100
q2.assert_check_passed()
# Check your answer (Run this code cell to receive credit!)
q3.check()
# Line below will give you a hint
#_COMMENT_IF(PROD)_
q3.hint()
# TODO: Your code here!
#%%RM_IF(PROD)%%
sf_permits.dropna()
# Check your answer (Run this code cell to receive credit!)
q4.check()
# Line below will give you a hint
#_COMMENT_IF(PROD)_
q4.hint()
# TODO: Your code here
sf_permits_with_na_dropped = ____
dropped_columns = ____
# Check your answer
q5.check()
#%%RM_IF(PROD)%%
# remove all columns with at least one missing value
sf_permits_with_na_dropped = sf_permits.dropna(axis=1)
# calculate number of dropped columns
cols_in_original_dataset = sf_permits.shape[1]
cols_in_na_dropped = sf_permits_with_na_dropped.shape[1]
dropped_columns = cols_in_original_dataset - cols_in_na_dropped
q5.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q5.hint()
#_COMMENT_IF(PROD)_
q5.solution()
# TODO: Your code here
sf_permits_with_na_imputed = ____
# Check your answer
q6.check()
#%%RM_IF(PROD)%%
sf_permits_with_na_imputed = sf_permits_with_na_dropped.fillna(method='bfill', axis=0).fillna(0)
q6.assert_check_failed()
#%%RM_IF(PROD)%%
sf_permits_with_na_imputed = sf_permits.fillna(method='bfill', axis=0).fillna(0)
q6.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q6.hint()
#_COMMENT_IF(PROD)_
q6.solution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Compute inverse solution
Step3: Decoding in sensor space using a logistic regression
Step4: To investigate weights, we need to retrieve the patterns of a fitted model
|
<ASSISTANT_TASK:>
Python Code:
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
import mne
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.decoding import (cross_val_multiscore, LinearModel, SlidingEstimator,
get_coef)
print(__doc__)
data_path = mne.datasets.sample.data_path()
fname_fwd = data_path + 'MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.8
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(None, 10., fir_design='firwin')
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
snr = 3.0
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
stcs = apply_inverse_epochs(epochs, inverse_operator,
lambda2=1.0 / snr ** 2, verbose=False,
method="dSPM", pick_ori="normal")
# Retrieve source space data into an array
X = np.array([stc.lh_data for stc in stcs]) # only keep left hemisphere
y = epochs.events[:, 2]
# prepare a series of classifier applied at each time sample
clf = make_pipeline(StandardScaler(), # z-score normalization
SelectKBest(f_classif, k=500), # select features for speed
LinearModel(LogisticRegression(C=1, solver='liblinear')))
time_decod = SlidingEstimator(clf, scoring='roc_auc')
# Run cross-validated decoding analyses:
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=1)
# Plot average decoding scores of 5 splits
fig, ax = plt.subplots(1)
ax.plot(epochs.times, scores.mean(0), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.axvline(0, color='k')
plt.legend()
# The fitting needs not be cross validated because the weights are based on
# the training sets
time_decod.fit(X, y)
# Retrieve patterns after inversing the z-score normalization step:
patterns = get_coef(time_decod, 'patterns_', inverse_transform=True)
stc = stcs[0] # for convenience, lookup parameters from first stc
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(np.abs(patterns), vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep, subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
initial_time=0.1, time_unit='s',
subjects_dir=subjects_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In logistic regression, we let (see the formulation sketched after this list)
Step2: The default settings work pretty well and give us a 7% misclassification rate. Let us try various transformations.
Step3: Let's plot these results.
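A standard formulation of the logistic regression model (stated here as an assumption about the intended formula; the notebook's own notation may differ) is
$$P(y_i = 1 \mid x_i) = \sigma(w^\top x_i + b) = \frac{1}{1 + \exp\!\left(-(w^\top x_i + b)\right)},$$
and the `regularization` argument used below presumably adds an L2 penalty, i.e. the fit minimizes the negative log-likelihood plus $\frac{\lambda}{2}\lVert w \rVert^2$.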
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from classifiers import read_spam_data, transform_log, transform_binary
#from sklearn.linear_model import LogisticRegression # reference sklearn implementation
from classifiers import LogisticRegression
train_data, test_data = read_spam_data()
train_data.head()
#logReg = LogisticRegression(solver='newton-cg', C=1) # sklearn implementation, only newton-cg converges
logReg = LogisticRegression(regularization = 1)
logReg.fit(train_data.drop('spam', axis = 1).as_matrix(), train_data['spam'].as_matrix())
print(logReg.intercept_)
print(logReg.coef_)
print(1-logReg.score(train_data.drop('spam', axis = 1).as_matrix(), train_data['spam'].as_matrix()))
print(1-logReg.score(test_data.drop('spam', axis = 1).as_matrix(), test_data['spam'].as_matrix()))
from sklearn import preprocessing
# transform the data
ytrain = train_data['spam'].as_matrix()
ytest = test_data['spam'].as_matrix()
Xtrain_raw = train_data.drop('spam', axis = 1).as_matrix()
Xtest_raw = test_data.drop('spam', axis = 1).as_matrix()
Xtrain_standard = preprocessing.scale(Xtrain_raw, axis=0)
Xtest_standard = preprocessing.scale(Xtest_raw, axis=0)
Xtrain_log = np.apply_along_axis(transform_log, axis = 0, arr=Xtrain_raw)
Xtest_log = np.apply_along_axis(transform_log, axis = 0, arr=Xtest_raw)
Xtrain_binary = np.apply_along_axis(transform_binary, axis = 0, arr=Xtrain_raw)
Xtest_binary = np.apply_along_axis(transform_binary, axis = 0, arr=Xtest_raw)
data_transform = ['Raw', 'Standard', 'Log', 'Binary']
Xtrain = [Xtrain_raw, Xtrain_standard, Xtrain_log, Xtrain_binary]
Xtest = [Xtest_raw, Xtest_standard, Xtest_log, Xtest_binary]
## now run lots of models to find regularization parameter
regularization = np.linspace(0, 20, num=41)
misclassification_rates = pd.DataFrame(dtype=np.float64,
index = np.arange(len(regularization)),
columns = ['Regularization'] +
list(map(lambda x : x + ' Train', data_transform)) +
list(map(lambda x : x + ' Test', data_transform)))
for i in range(len(regularization)):
misclassification_rates.iloc[i]['Regularization'] = regularization[i]
if regularization[i] == 0:
regularization[i] += 0.01 # hack when there's no convergence.
logReg = LogisticRegression(regularization = regularization[i])
for j in range(len(data_transform)):
logReg.fit(Xtrain[j], ytrain)
misclassification_rates.iloc[i][data_transform[j] + ' Train'] = 1 - logReg.score(Xtrain[j], ytrain)
misclassification_rates.iloc[i][data_transform[j] + ' Test'] = 1 - logReg.score(Xtest[j], ytest)
misclassification_rates
import matplotlib.pyplot as plt
colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3']
plt.figure(figsize=(12,8))
for i in range(len(data_transform)):
plt.plot(misclassification_rates['Regularization'], misclassification_rates[data_transform[i] + ' Train'],
color=colors[i], linestyle='--', linewidth=2, marker='.')
plt.plot(misclassification_rates['Regularization'], misclassification_rates[data_transform[i] + ' Test'],
color=colors[i], linestyle='-', linewidth=2, marker='.')
plt.grid(True)
plt.legend(loc='center left', bbox_to_anchor=(1,0.5), title="Dataset")
plt.ylabel("Misclassification Rate")
plt.xlabel("Regularization Parameter ($\lambda$)")
plt.title("Misclassification Rates for Various Transforms and Regularizations")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get all the info for a movie so it can be put into a dictionary and used in ElasticSearch, indexing it (all-in-one method)
Step2: Tests
Step3: ElasticSearch (example header)
Step4: Actual initialization of ElasticSearch (run this)
Step5: We save the top 250 inside ElasticSearch (old)
Step6: We search the saved data (old)
Step7: Get the hits and info for a few of them
Step8: Query without fuzziness
Step9: Query with added fuzziness
Step10: Query 2 with highlighting of different fields and how to display it
Step11: Delete data
|
<ASSISTANT_TASK:>
Python Code:
from imdb import IMDb
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
ia = IMDb()
listaPelis = ia.get_top250_movies()
listaPelis
for i in range(10,250):
peli = listaPelis[i]
peli2 = ia.get_movie(peli.movieID)
string = peli2.summary()
separado = string.split('\n')
solucion = {}
for i in range(2,len(separado)):
sep2 = separado[i].split(':')
#Way to avoid a failure when converting the split into a dictionary
#Handles the failure case shown in the 2 cells below
sep2[1:len(sep2)] = [''.join(sep2[1:len(sep2)])]
solucion.update(dict([sep2]))
es.index(index='prueba-index', doc_type='text', body=solucion)
separado
sep2[1]
import pandas as pd
lista=[]
for i in range(0400000,0400010,1):
peli = ia.get_movie(i)
lista.append(peli.summary())
datos = pd.DataFrame(lista)
print datos.values
import pandas as pd
lista=[]
datos = pd.DataFrame([])
for i in range(0005000,0005003):
lista.append(ia.get_movie(i))
lista.append(ia.get_movie_plot(i))
datos = datos.append(lista)
print datos.values
from datetime import datetime
from elasticsearch import Elasticsearch
es = Elasticsearch()
'''
doc = {
'prueba': 'Holi',
'text': 'A man throws away an old top hat and a tramp uses it to sole his boots.',
}
res = es.index(index="movies-index", doc_type='text', id=1, body=doc)
print(res['created'])
'''
res = es.get(index="movies-index", doc_type='text', id=6)
print(res['_source'])
es.indices.refresh(index="movies-index")
res = es.search(index="movies-index", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(text)s" % hit["_source"])
# make sure ES is up and running
import requests
res = requests.get('http://localhost:9200')
print(res.content)
from elasticsearch import Elasticsearch
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
#Lista con el top 250 de peliculas
top = ia.get_top250_movies()
#Recorro la lista y saco los datos para indexarlos en elastic search, el id es el orden en la lista
for i in range(0,250):
es.index(index='films-index', doc_type='text', id=i, body=top[i].data)
res = es.search(index="films-index", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
#Modify this so that it works
for hit in res['hits']['hits']:
print("%(kind)s %(title)s %(year)s %(rating)s" % hit["_source"])
res = es.search(index="prueba-index", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(Title)s %(Genres)s %(Director)s %(Cast)s %(Writer)s %(Country)s %(Language)s %(Rating)s %(Plot)s" % hit["_source"])
res = es.search(index="prueba-index", body={"query": {"match_all": {}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(Title)s" % hit["_source"])
res = es.search(index="prueba-index", body={"query": {"match_all": {}}})
res
res = es.search(index="prueba-index", body={
"query":
{"match" : {'Director': 'Christopher Nolan'}
},
{
"highlight" : {
"fields" : {
"Language" : {}
}
}
}
})
res
res = es.search(index="prueba-index", body={"query": {"match" : {'Director': 'Christophe Nola'}}})
print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(Title)s" % hit["_source"])
bodyQuery = {
"query": {
"multi_match" : {
"query" : "Interes",
"fields": ["Plot", "Title"],
"fuzziness": "2",
"type": "phrase",
}
}
}
res = es.search(index="prueba-index", body=bodyQuery)
#print res
#print("Got %d Hits:" % res['hits']['total'])
for hit in res['hits']['hits']:
print("%(Title)s" % hit["_source"])
bodyQuery2 = {
"query": {
"match": {
"_all":"Inter"
}
},
"highlight" : {
"fields" : {
"Title" : {},
"Plot" : {"fragment_size" : 150, "number_of_fragments" : 3}
},
#Allows highlighting on fields that were not part of the query,
#such as Plot in this example
"require_field_match" : False
}
}
res = es.search(index="prueba-index", body=bodyQuery2)
print("Got %d Hits:" % res['hits']['total'])
# I use [0] because there is only 1 hit; if there were more, there would be more
# entries in the list, and you would have to use the for loop above to get the
# highlight of each one in the list
#print res['hits']['hits'][0]['highlight']
for hit in res['hits']['hits']:
print(hit)
es.delete(index='prueba-index', doc_type='text', id=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph regularization for image classification using synthesized graphs
Step2: Dependencies and imports
Step3: Flowers dataset
Step4: After downloading the dataset and splitting it, we can visualize a few samples from it.
Step5: In the next cell, we determine the number of samples present in the splits.
Step7: Graph construction
Step15: We encourage you to try out other pre-trained models available in the tf.keras.applications module and also on TensorFlow Hub. We'll now write a couple of utility functions to create the sample embeddings for graph construction.
Step16: Graph building
Step17: Each bi-directional edge is represented by two directed edges in the output TSV
Step21: Sample features
Step22: A note on create_records()
Step23: Base model
Step25: Hyperparameters
Step28: Prepare the data
Step29: Visualization
Step31: In the figure above, weight denotes the similarity strength of the examples.
Step32: After building and initializing the model in Keras, we can compile it and finally train it.
Step33: Plot training metrics
Step34: Graph regularization
Step35: Plot training metrics of the graph-regularized model
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --quiet neural-structured-learning
!pip install --quiet tensorflow-hub
import matplotlib.pyplot as plt
import numpy as np
import neural_structured_learning as nsl
import tensorflow as tf
import tensorflow_datasets as tfds
# Resets notebook state
tf.keras.backend.clear_session()
tfds.disable_progress_bar()
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print(
"GPU is",
"available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE")
train_ds, validation_ds = tfds.load(
"tf_flowers",
split=["train[:85%]", "train[85%:]"],
as_supervised=True
)
plt.figure(figsize=(10, 10))
for i, (image, label) in enumerate(train_ds.take(9)):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(image)
plt.title(int(label))
plt.axis("off")
num_train_examples = tf.data.experimental.cardinality(train_ds)
num_val_examples = tf.data.experimental.cardinality(validation_ds)
print(f"Total training examples: {num_train_examples}")
print(f"Total validation examples: {num_val_examples}")
IMG_SIZE = 224 #@param ["128", "224"] {type:"raw"}
PROJECTED_DIM = 128 #@param {type:"slider", min:128, max:1024, step:128}
#@markdown `IMG_SIZE` of 224 denotes the 224 $\times$ 224 resolution.
def create_feature_extractor_model():
"""Creates a feature extractor model with DenseNet121."""
inputs = tf.keras.layers.Input((IMG_SIZE, IMG_SIZE, 3))
densenet_model = tf.keras.applications.DenseNet121(weights="imagenet",
input_shape=(IMG_SIZE, IMG_SIZE, 3),
pooling="avg", include_top=False
)
densenet_model.trainable = False
x = tf.keras.applications.densenet.preprocess_input(inputs)
outputs = densenet_model(x, training=False)
return tf.keras.Model(inputs, outputs, name="densenet_feature_extractor")
feature_extractor = create_feature_extractor_model()
feature_extractor.summary()
def resize(image, label):
"""Resizes the images to (IMG_SIZE x IMG_SIZE) size."""
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
# Resize all the images to uniform shape so that they can
# be batched.
train_ds = train_ds.map(resize)
validation_ds = validation_ds.map(resize)
def _int64_feature(value):
"""Returns int64 tf.train.Feature."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value.tolist()))
def _bytes_feature(value):
"""Returns bytes tf.train.Feature."""
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[value.encode('utf-8')]))
def _float_feature(value):
"""Returns float tf.train.Feature."""
return tf.train.Feature(float_list=tf.train.FloatList(value=value.tolist()))
def create_embedding_example(feature_extractor, image,
projection_matrix, record_id):
"""Create tf.Example containing the sample's embedding and its ID."""
image_features = feature_extractor(image[None, ...])
image_features_numpy = image_features.numpy().squeeze()
compressed_image_features = image_features_numpy.dot(projection_matrix)
features = {
"id": _bytes_feature(str(record_id)),
"embedding": _float_feature(compressed_image_features)
}
return tf.train.Example(features=tf.train.Features(feature=features))
def generate_random_projection_weights(original_dim=1024,
projected_dim=PROJECTED_DIM):
"""Generates a random projection matrix."""
random_projection_matrix = np.random.randn(
projected_dim, original_dim).T
return random_projection_matrix
def create_embeddings(feature_extractor, dataset, output_path,
starting_record_id):
"""Creates TFRecords with embeddings of the images."""
projection_matrix = generate_random_projection_weights()
record_id = int(starting_record_id)
with tf.io.TFRecordWriter(output_path) as writer:
for image, _ in dataset:
example = create_embedding_example(feature_extractor,
image,
projection_matrix,
record_id)
record_id = record_id + 1
writer.write(example.SerializeToString())
return record_id
# Persist TF.Example features containing embeddings for training data in
# TFRecord format.
create_embeddings(feature_extractor, train_ds, "flowers_embeddings.tfr", 0)
similarity_threshold = 0.7
graph_builder_config = nsl.configs.GraphBuilderConfig(
similarity_threshold=similarity_threshold,
lsh_splits=10, lsh_rounds=15, random_seed=12345)
nsl.tools.build_graph_from_config(["flowers_embeddings.tfr"],
"flowers_graph_70.tsv",
graph_builder_config)
!wc -l flowers_graph_70.tsv
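# Added note (an assumption based on NSL's documented graph format; verify against
# your output): each TSV line is "source_id<TAB>target_id<TAB>edge_weight", and
# every bi-directional edge shows up as two directed lines. A quick peek:
!head -3 flowers_graph_70.tsv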
def _bytes_feature_image(value):
"""Returns bytes tf.train.Feature."""
return tf.train.Feature(
bytes_list=tf.train.BytesList(value=[value]))
def create_example(image, label, record_id):
"""Create tf.Example containing the image, label, and ID."""
features = {
"id": _bytes_feature(str(record_id)),
"image": _bytes_feature_image(image.numpy()),
"label": _int64_feature(np.asarray([label])),
}
return tf.train.Example(features=tf.train.Features(feature=features))
def create_records(dataset, record_path, starting_record_id):
"""Generates TFRecords from a tf.data.Dataset object."""
record_id = int(starting_record_id)
with tf.io.TFRecordWriter(record_path) as writer:
for image, label in dataset:
image = tf.cast(image, tf.uint8)
image = tf.image.encode_jpeg(image, optimize_size=True,
chroma_downsampling=False)
example = create_example(image, label, record_id)
record_id = record_id + 1
writer.write(example.SerializeToString())
return record_id
# Persist TF.Example features (images and labels) for training and validation
# data in TFRecord format.
next_record_id = create_records(train_ds,
"train_data.tfr", 0)
create_records(validation_ds, "validation_data.tfr",
next_record_id)
nsl.tools.pack_nbrs(
"train_data.tfr",
"",
"flowers_graph_70.tsv",
"nsl_train_data.tfr",
add_undirected_edges=True,
max_nbrs=3
)
!ls -lh *_data.tfr
NBR_FEATURE_PREFIX = "NL_nbr_"
NBR_WEIGHT_SUFFIX = "_weight"
class HParams(object):
"""Hyperparameters used for training."""
def __init__(self):
### dataset parameters
self.num_classes = 5
self.num_train_examples = num_train_examples
self.num_val_examples = num_val_examples
### neural graph learning parameters
self.distance_type = nsl.configs.DistanceType.L2
self.graph_regularization_multiplier = 0.3
self.num_neighbors = 2
### network architecture parameters
self.num_channels = 32
self.kernel_size = 3
### training parameters
self.train_epochs = 30
self.batch_size = 64
### eval parameters
self.eval_steps = None # All instances in the test set are evaluated.
HPARAMS = HParams()
default_jpeg_value = tf.ones((IMG_SIZE, IMG_SIZE, 3), dtype=tf.uint8)
default_jpeg_value *= 255
default_jpeg_value = tf.image.encode_jpeg(default_jpeg_value, optimize_size=True,
chroma_downsampling=False)
def make_dataset(file_path, training=False):
"""Creates a `tf.data.TFRecordDataset`.
Args:
  file_path: Name of the file in the `.tfrecord` format containing
    `tf.train.Example` objects.
  training: Boolean indicating if we are in training mode.
Returns:
  An instance of `tf.data.TFRecordDataset` containing the `tf.train.Example`
  objects.
"""
def parse_example(example_proto):
"""Extracts relevant fields from the `example_proto`.
Args:
  example_proto: An instance of `tf.train.Example`.
Returns:
  A pair whose first value is a dictionary containing relevant features
  and whose second value contains the ground truth labels.
"""
feature_spec = {
'image': tf.io.FixedLenFeature([], tf.string,
default_value=default_jpeg_value),
'label': tf.io.FixedLenFeature((), tf.int64, default_value=-1),
}
# We also extract corresponding neighbor features in a similar manner to
# the features above during training.
if training:
for i in range(HPARAMS.num_neighbors):
nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'image')
nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i,
NBR_WEIGHT_SUFFIX)
feature_spec[nbr_feature_key] = tf.io.FixedLenFeature([], tf.string,
default_value=default_jpeg_value)
# We assign a default value of 0.0 for the neighbor weight so that
# graph regularization is done on samples based on their exact number
# of neighbors. In other words, non-existent neighbors are discounted.
feature_spec[nbr_weight_key] = tf.io.FixedLenFeature(
[1], tf.float32, default_value=tf.constant([0.0]))
features = tf.io.parse_single_example(example_proto, feature_spec)
labels = features.pop('label')
# We need to convert the byte-strings back to images.
features['image'] = tf.image.decode_jpeg(features['image'], channels=3)
if training:
for i in range(HPARAMS.num_neighbors):
nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'image')
features[nbr_feature_key] = tf.image.decode_jpeg(features[nbr_feature_key],
channels=3)
return features, labels
dataset = tf.data.TFRecordDataset([file_path])
if training:
dataset = dataset.shuffle(HPARAMS.batch_size * 10)
dataset = dataset.map(parse_example)
dataset = dataset.batch(HPARAMS.batch_size)
return dataset
train_dataset = make_dataset('nsl_train_data.tfr', True)
validation_dataset = make_dataset('validation_data.tfr')
sample = next(iter(train_dataset))
sample[0].keys()
plt.figure(figsize=(20, 20))
plt.subplot(1, 3, 1)
plt.imshow(sample[0]["NL_nbr_0_image"][0])
neighbor_one_weight = float(sample[0]["NL_nbr_0_weight"][0].numpy())
plt.title(f"Neighbor 1 with weight: {neighbor_one_weight:.3f}", fontsize=14)
plt.axis("off")
plt.subplot(1, 3, 2)
plt.imshow(sample[0]["NL_nbr_1_image"][0])
neighbor_two_weight = float(sample[0]["NL_nbr_1_weight"][0].numpy())
plt.title(f"Neighbor 2 with weight: {neighbor_two_weight:.3f}", fontsize=14)
plt.axis("off")
plt.subplot(1, 3, 3)
plt.imshow(sample[0]["image"][0])
plt.title(f"Original image with label: {int(sample[1][0])}", fontsize=14)
plt.axis("off")
plt.show()
def make_cnn_model():
"""Creates a simple CNN model."""
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(
input_shape=(IMG_SIZE, IMG_SIZE, 3), name='image'),
tf.keras.layers.experimental.preprocessing.Rescaling(scale=1. / 255),
tf.keras.layers.Conv2D(
HPARAMS.num_channels, HPARAMS.kernel_size, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(
HPARAMS.num_channels, HPARAMS.kernel_size, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.GlobalAvgPool2D(),
tf.keras.layers.Dense(HPARAMS.num_classes)
])
return model
model = make_cnn_model()
model.summary()
model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=HPARAMS.train_epochs,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=5)])
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "-r^" is for solid red line with triangle markers.
plt.plot(epochs, loss, '-r^', label='Training loss')
# "-b0" is for solid blue line with circle markers.
plt.plot(epochs, val_loss, '-bo', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, '-r^', label='Training acc')
plt.plot(epochs, val_acc, '-bo', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='best')
plt.show()
# Build a new base CNN model.
base_reg_model = make_cnn_model()
# Wrap the base model with graph regularization.
graph_reg_config = nsl.configs.make_graph_reg_config(
max_neighbors=HPARAMS.num_neighbors,
multiplier=HPARAMS.graph_regularization_multiplier,
distance_type=HPARAMS.distance_type,
sum_over_axis=-1)
graph_reg_model = nsl.keras.GraphRegularization(base_reg_model,
graph_reg_config)
graph_reg_model.compile(
optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
graph_reg_history = graph_reg_model.fit(
train_dataset,
validation_data=validation_dataset,
epochs=HPARAMS.train_epochs,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=5)])
graph_reg_history_dict = graph_reg_history.history
acc = graph_reg_history_dict['accuracy']
val_acc = graph_reg_history_dict['val_accuracy']
loss = graph_reg_history_dict['loss']
graph_loss = graph_reg_history_dict['scaled_graph_loss']
val_loss = graph_reg_history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.clf() # clear figure
# "-r^" is for solid red line with triangle markers.
plt.plot(epochs, loss, '-r^', label='Training loss')
# "-gD" is for solid green line with diamond markers.
plt.plot(epochs, graph_loss, '-gD', label='Training graph loss')
# "-b0" is for solid blue line with circle markers.
plt.plot(epochs, val_loss, '-bo', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend(loc='best')
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, '-r^', label='Training acc')
plt.plot(epochs, val_acc, '-bo', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='best')
plt.show()
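# Optional side-by-side comparison (a sketch, not part of the original notebook
# flow): evaluate both models on held-out data. This assumes a `test_dataset`
# built the same way as `train_dataset`/`validation_dataset` earlier on.
print('Base CNN:', model.evaluate(test_dataset))
print('Graph-regularized CNN:', graph_reg_model.evaluate(test_dataset))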
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bogotá, November 20, 2015
Step2: SIMULATION WITH LEVEL=1
Step3: SIMULATION WITH LEVEL=3
Step4: Simulation of exercise 4.1.13
Step5: SIMULATION WITH LEVEL=3
Step6: SIMULATION WITH LEVEL=3
Step7: SIMULATION WITH LEVEL=1
Step8: Simulation of exercise 4.1.14
Step9: SIMULATION Level=1
Step10: Simulation of exercise 4.3-6
Step11: SIMULATION WITH LEVEL=3
Step12: Circuit design in Electric
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/fig-4-1-9.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/fig-4-1-12.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/fig-4-1-13.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/sim-fig-4-1-12-level1.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/sim-fig-4-1-12-level3.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/4-1-13.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/VCL-slow.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/VCL-fast.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/VCL-level1.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/circuito4.1.14.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/simulacion4.1.14.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/4-3-6.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/sim-4-3-6.png'))
from IPython.core.display import Image, display
display(Image(url='images/taller-sept-27/electric.png'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
x = [[source_vocab_to_int.get(word, 0) for word in sentence.split()] \
for sentence in source_text.split('\n')]
y = [[target_vocab_to_int.get(word, 0) for word in sentence.split()] \
for sentence in target_text.split('\n')]
source_id_text = []
target_id_text = []
for i in range(len(x)):
n1 = len(x[i])
n2 = len(y[i])
source_id_text.append(x[i])
target_id_text.append(y[i] + [target_vocab_to_int['<EOS>']])
return (source_id_text, target_id_text)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
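# A tiny illustration with hypothetical vocabularies (added only for clarity):
# every target sentence gets an <EOS> id appended, source sentences do not.
demo_source_vocab = {'<PAD>': 0, 'hello': 1, 'world': 2}
demo_target_vocab = {'<PAD>': 0, '<EOS>': 1, 'bonjour': 2, 'monde': 3}
demo_src, demo_tgt = text_to_ids('hello world', 'bonjour monde', demo_source_vocab, demo_target_vocab)
print(demo_src)  # [[1, 2]]
print(demo_tgt)  # [[2, 3, 1]]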
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
# TODO: Implement Function
input_text = tf.placeholder(tf.int32,[None, None], name="input")
target_text = tf.placeholder(tf.int32,[None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return input_text, target_text, learning_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for dencoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
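# Quick sanity check (an illustrative sketch, not required by the project): the
# last token of each target sequence is dropped and a <GO> id is prepended.
with tf.Graph().as_default():
    demo_targets = tf.constant([[4, 5, 6], [7, 8, 9]], dtype=tf.int32)
    demo_dec_input = process_decoding_input(demo_targets, {'<GO>': 1}, 2)
    with tf.Session() as demo_sess:
        print(demo_sess.run(demo_dec_input))  # expected: [[1 4 5] [1 7 8]]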
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
# TODO: Implement Function
enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
enc_cell_drop = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)
_, enc_state = tf.nn.dynamic_rnn(enc_cell_drop, rnn_inputs, dtype=tf.float32)
return enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
train_dec_fm = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
train_logits_drop, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, train_dec_fm, \
dec_embed_input, sequence_length, scope=decoding_scope)
train_logits = output_fn(train_logits_drop)
#I'm missing the keep_prob! don't know where to put it
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: Maximum length of
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size)
inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope)
#Again, don't know where to put the keep_drop param
return inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
dec_cell_drop = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size,\
None, scope=decoding_scope)
with tf.variable_scope("decoding") as decoding_scope:
train_logits = decoding_layer_train(encoder_state, dec_cell_drop, dec_embed_input,\
sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
infer_logits = decoding_layer_infer(encoder_state, dec_cell_drop, dec_embeddings,\
target_vocab_to_int['<GO>'],target_vocab_to_int['<EOS>'], sequence_length,\
vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
encoder_state = encoding_layer(embed_input, rnn_size, num_layers, keep_prob)
processed_target_data = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, processed_target_data)
train_logits, infer_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, target_vocab_size,\
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 20
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 512
# Number of Layers
num_layers = 1
# Embedding Size
encoding_embedding_size = 512
decoding_embedding_size = 512
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.6
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1]), (0,0)],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
words = sentence.split(" ")
word_ids = []
for word in words:
word = word.lower()
if word in vocab_to_int:
word_id = vocab_to_int[word]
else:
word_id = vocab_to_int['<UNK>']
word_ids.append(word_id)
return word_ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The dependency parse shows the coarse POS tag for each token, as well as the dependency tag if given
Step2: Creating Visualizations Outside of Jupyter
Step3: <font color=blue>After running the cell above, click the link below to view the dependency parse
Step4: Click this link to view the dependency
|
<ASSISTANT_TASK:>
Python Code:
# Perform standard imports
import spacy
nlp = spacy.load('en_core_web_sm')
# Import the displaCy library
from spacy import displacy
# Create a simple Doc object
doc = nlp(u"The quick brown fox jumped over the lazy dog's back.")
# Render the dependency parse immediately inside Jupyter:
displacy.render(doc, style='dep', jupyter=True, options={'distance': 110})
for token in doc:
print(f'{token.text:{10}} {token.pos_:{7}} {token.dep_:{7}} {spacy.explain(token.dep_)}')
displacy.serve(doc, style='dep', options={'distance': 110})
doc2 = nlp(u"This is a sentence. This is another, possibly longer sentence.")
# Create spans from Doc.sents:
spans = list(doc2.sents)
displacy.serve(spans, style='dep', options={'distance': 110})
options = {'distance': 110, 'compact': 'True', 'color': 'yellow', 'bg': '#09a3d5', 'font': 'Times'}
displacy.serve(doc, style='dep', options=options)
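# displaCy can also highlight named entities with style='ent'
# (a short extra example beyond the original dependency-parse walkthrough):
doc3 = nlp(u"Apple is looking at buying a U.K. startup for $1 billion.")
displacy.render(doc3, style='ent', jupyter=True)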
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def sumOfDigit(K ) :
sod = 0
while(K ) :
sod = sod + K % 10
K = K // 10
return sod
def totalNumbersWithSpecificDifference(N , diff ) :
low = 1
high = N
while(low <= high ) :
mid =(low + high ) // 2
if(mid - sumOfDigit(mid ) < diff ) :
low = mid + 1
else :
high = mid - 1
return(N - high )
N = 13
diff = 2
print(totalNumbersWithSpecificDifference(N , diff ) )
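# Brute-force verification (an illustrative check, not part of the original
# solution): count K in 1..N with K - sumOfDigit(K) >= diff directly.
print(sum(1 for k in range(1, N + 1) if k - sumOfDigit(k) >= diff))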
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we will import the questions from the database and clean those data. The cleaning includes the following steps
|
<ASSISTANT_TASK:>
Python Code:
from database import Database
database = Database(
'<host name>',
'<database name>',
'<user name>',
'<password>',
'utf8mb4'
)
connection = database.connect_with_pymysql()
from preprocessor import Decoder, Cleaner
# decoder instance
decoder = Decoder()
if connection:
try:
with connection.cursor() as cursor:
# example: decode all questions
for data in decoder.decode_in_range(cursor, 'questions', 'body', 1, 99478):
if data:
if all(data):
try:
# example: punctuation remove
cleaned_data = Cleaner.punctuation_remover(data[1])
# example: whitespace reomve
cleaned_data = Cleaner.whitespace_remover(cleaned_data)
sql = "UPDATE questions SET body='" + cleaned_data + "' WHERE id= "+str(data[0])
cursor.execute(sql)
connection.commit()
except Exception:
print "Exception in updating id " + str(data[0])
finally:
connection.close()
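# Aside (a suggested hardening, not part of the original flow): building SQL by
# string concatenation breaks on quotes and is open to injection. pymysql can
# bind parameters safely instead, e.g. in a helper like this:
def update_question_body(cursor, question_id, cleaned_body):
    # pymysql escapes the bound values itself
    cursor.execute("UPDATE questions SET body=%s WHERE id=%s", (cleaned_body, question_id))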
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Correlation
Step2: Calculating $r$
Step3: Based on the scatter diagram, we expect that $r$ will be positive but not equal to 1.
Step4: Step 1. Convert each variable to standard units.
Step5: Step 2. Multiply each pair of standard units.
Step6: Step 3. $r$ is the average of the products computed in Step 2.
Step7: As expected, $r$ is positive but not equal to 1.
Step8: The correlation function
Step9: Let's call the function on the x and y columns of t. The function returns the same answer to the correlation between $x$ and $y$ as we got by direct application of the formula for $r$.
Step10: As we noticed, the order in which the variables are specified doesn't matter.
Step11: Calling correlation on columns of the table suv gives us the correlation between price and mileage as well as the correlation between price and acceleration.
|
<ASSISTANT_TASK:>
Python Code:
z = np.random.normal(0, 1, 500)
def r_scatter(xs, r):
Generate y-values for a scatter plot with correlation approximately r
return r*xs + (np.sqrt(1-r**2))*z
corr_opts = {
'aspect_ratio': 1,
'xlim': (-3.5, 3.5),
'ylim': (-3.5, 3.5),
}
nbi.scatter(np.random.normal(size=500), r_scatter, options=corr_opts, r=(-1, 1, 0.05))
x = np.arange(1, 7, 1)
y = make_array(2, 3, 1, 5, 2, 7)
t = Table().with_columns(
'x', x,
'y', y
)
t
nbi.scatter(t.column(0), t.column(1), options={'aspect_ratio': 1})
def standard_units(nums):
return (nums - np.mean(nums)) / np.std(nums)
t_su = t.with_columns(
'x (standard units)', standard_units(x),
'y (standard units)', standard_units(y)
)
t_su
t_product = t_su.with_column('product of standard units', t_su.column(2) * t_su.column(3))
t_product
# r is the average of the products of standard units
r = np.mean(t_product.column(4))
r
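# Optional check (an aside, not in the original text): numpy's built-in
# correlation coefficient agrees with the value of r computed step by step.
np.corrcoef(x, y)[0, 1]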
nbi.scatter(t.column(1), t.column(0), options={'aspect_ratio': 1})
def correlation(t, x, y):
return np.mean(standard_units(t.column(x))*standard_units(t.column(y)))
interact(correlation, t=fixed(t),
x=widgets.ToggleButtons(options=['x', 'y'], description='x-axis'),
y=widgets.ToggleButtons(options=['x', 'y'], description='y-axis'))
correlation(t, 'x', 'y')
correlation(t, 'y', 'x')
suv = (Table.read_table('https://www.inferentialthinking.com/notebooks/hybrid.csv')
.where('class', 'SUV'))
interact(correlation, t=fixed(suv),
x=widgets.ToggleButtons(options=['mpg', 'msrp', 'acceleration'],
description='x-axis'),
y=widgets.ToggleButtons(options=['mpg', 'msrp', 'acceleration'],
description='y-axis'))
correlation(suv, 'mpg', 'msrp')
correlation(suv, 'acceleration', 'msrp')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure database connectivity
Step2: Load data from table
Step3: Explore the loaded data using PixieDust
|
<ASSISTANT_TASK:>
Python Code:
import pixiedust
pixiedust.enableJobMonitor()
# @hidden_cell
# Enter your DashDB JDBC URL (e.g. 'jdbc:db2://dashdb-entry-yp-dal00-00.services.dal.bluemix.net:50000/BLUDB')
jdbcurl = 'jdbc:db2://...'
# Enter your DashDB user name (e.g. 'dash0815')
user = '...'
# Enter your DashDB password (e.g. 'myvoiceismypassword')
password = '...'
# Enter your source table or view name (e.g. 'mytable')
table = '...'
# no changes are required to this cell
# obtain Spark SQL Context
sqlContext = SQLContext(sc)
# load data
props = {}
props['user'] = user
props['password'] = password
dashdb_data = sqlContext.read.jdbc(jdbcurl, table, properties=props)
display(dashdb_data)
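# A couple of quick follow-ups (illustrative only; the table contents are
# whatever you configured above): inspect the schema and row count of the
# loaded Spark DataFrame.
dashdb_data.printSchema()
print(dashdb_data.count())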
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vanilla use of print
Step2: The second print uses a “formatter” (%s, %r and %d)
Step3: Python adds a newline character (\n) after every print statement by default.
Step4: %s is intended for string substitution whereas %d is for integers. r% converts using the repr() function rather than str() or int() and is useful when you want to return something in valid Python syntax (see next)
Step5: Don’t worry about datetime, it’s just to illustrate the difference between repr() and str() or %s and %d
Step6: Recap on maths
Step7: Small mistakes can be difficult to spot. You will avoid and spot these mistakes with practice.
Step8: Two common ways to access elements in an list
Step9: The opposite of list.pop(index) is list.insert(index,value)
Step10: A list needs to be created (a.k.a. initialised) before you can apply methods to it.
Step11: Dictionary advanced
Step12: Sometimes you’ll find you need a data structure more complex than a dictionary
Step13: The two print statements illustrate alternatives
Step14: The keys() method returns a view of the keys which can be looped through like a list or turned into a list
Step15: Here we set up a temporary key (called name) and then loop through every key, retrieving the corresponding value for each key
Step16: Here we set up a temporary key (called name) and then loop through every key, retrieving the corresponding value for each key
Step17: The index method can be used to find the position of specific substrings
Step18: Remember – the numbering starts from 0!
Step19: Remember – the numbering starts from 0!
Step20: Don’t worry about the random bit. It’s just an example of some useful code that’s already been written. It’s more important to understand what the rest of the code is doing than how random.choice() works at this stage.
|
<ASSISTANT_TASK:>
Python Code:
message = "Hello world"
print ("My message is:", message)
message = "Hello world"
print ("My 1st message is:", message)
print ("My 2nd message is: %s" % message)
message = "Hello world"
print ("My 1st message is:", message, end=(". "))
print ("My 2nd message is: %s " % message)
Name = "Romissa"
Donut_number = 30
print("%s has %d donuts" % (Name, Donut_number))
import datetime
date = datetime.date.today()
a = str(date)
b = repr(date)
print(a, end=" ")
print(b)
print("%s %r" % (date, date))
seq = input("Enter a DNA sequence:").upper()
print(seq + seq)
x = 5
y = 10
print("Result 1 = " , x * y )
print("Result 2 = " , x ** y ) # ** - meaning “to the power of”
print("Result 3 = , x ** y ")
values = [5, 7, 4, 6, 1, 2]
print (values)
print(values[3]) #By the position
for temp_value in values: #Using a loop
print (temp_value)
names = ["Andy", "Bob", "Chris"]
removed_name = names.pop(0)
print (removed_name)
print(names)
numbers = []
for i in range(1,24,3):
numbers.append(i)
print(numbers)
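# The same list can be built in a single line with a list comprehension
# (an equivalent alternative, shown only for comparison):
numbers = [i for i in range(1, 24, 3)]
print(numbers)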
student_records = {20071213: "Alistair Darby"} # declaring a dictionary
student_records[20081423] = "John Smith" # adding some more records
student_records[20096137] = "Jane Doe"
student_records[20109334] = "Fred Blogs"
print(student_records[20081423]) # printing a specific record
student_records = {20071213: ["Alistair Darby", "A.C. Darby"]}
student_records[20081423] = ["John Smith", "J. L. Smith"]
student_records[20096137] = ["Jane Doe", "J. P. Doe"]
student_records[20109334] = ["Fred Blogs","Frederick Blogs","F. J. Blogs"]
print(student_records[20109334]) # print the list for this specific key
for name in student_records[20109334]: # iterate through the list and print
print(name, end=" ") # each value
codons = {
"ATT" : "Ile",
"ATC" : "Ile",
"ATA" : "Ile",
"CTT" : "Leu",
"CTC" : "Leu",
"CTA" : "Leu",
"CTG" : "Leu",
#... And so on...
}
Codon_1 = "CTC"
print("%s encodes %s" % (Codon_1, codons[Codon_1]))
print(Codon_1, "encodes", codons[Codon_1])
student_records = {20071213: "Andy Jones"}
student_records[20081423] = "John Smith"
student_records[20096137] = "Jane Doe"
print("Total number of students =" , len(student_records.keys()))
print(student_records.keys())
list(student_records.keys())
student_records = {20071213: "Andy Jones",
20081423 : "John Smith",
20096137 :"Jane Doe"}
del student_records[20081423]
print("Total number of students = " , len(student_records.keys()))
student_records = {20071213: "Andy Jones",
20081423 : "John Smith",
20096137 :"Jane Doe"}
for name in student_records.keys():
print(name, ":", student_records[name])
student_records = {20071213: "Andy Jones",
20081423 : "John Smith",
20096137 :"Jane Doe"}
for name in student_records.keys():
print(name, ":", student_records[name])
DNA = "ACTGATCGACTGATCGATCGA"
for index in range(0, len(DNA), 3): # Remember - range requires the arguments (start, stop, step). Step has a default value of 1
chunk = DNA[index:index+3] # Remember - you can slice a string by giving [start:stop] indexes
print(chunk,end=" ")
DNA = "ACTGATCGACTGATCGATCGA"
print("Index for CGA : ", DNA.index( "CGA" ))
print("Index for GAT : ", DNA.index( "GAT" ))
codons = ["ATG", "GAC", "TTG"]
print("Index for TTG : ", codons.index( "TTG" ))
print("Index for ATG : ", codons.index( "ATG" ))
import random #Import the random module
dna = "GCTAGCTACGTACGATCGT" #Starting string
for i in range(0,10): #Loop to 10 starting from 0
dna += random.choice("CGTA") #Choose a random base and add it
print(dna)
# Output will be different every time
s = "-"
seq = ["a", "b", "c"] # This is sequence of strings.
print (seq)
print (s.join( seq ))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Least squares and linear basis functions models
Step3: Load the data
Step6: Least squares with a linear basis function model
Step8: Let us play with polynomial regression. Note that we will use your implemented function compute_mse. Please copy and paste your implementation from exercise02.
Step11: Evaluating model predication performance
Step13: Then, test your split_data function below.
Step17: Ridge Regression
|
<ASSISTANT_TASK:>
Python Code:
def compute_cost_MSE(y, tx, beta):
compute the loss by mse.
e = y - tx.dot(beta)
mse = e.dot(e) / (2 * len(e))
return mse
def compute_cost_MAE(y, tx, w):
y = np.array(y)
return np.sum(abs(y - np.dot(tx, w))) / y.shape[0]
def least_squares(y, tx):
calculate the least squares solution.
# ***************************************************
# INSERT YOUR CODE HERE
# least squares: TODO
# returns mse, and optimal weights
# ***************************************************
weight = np.linalg.solve(np.dot(tx.T,tx), np.dot(tx.T,y))
return least_square_mse(y,tx, weight),weight
def least_square_mse(y, tx, w):
return compute_cost_MSE(y, tx, w)
def rmse(y, tx, w):
return np.sqrt(2 * compute_cost_MSE(y, tx, w))
from helpers import *
def test_your_least_squares():
height, weight, gender = load_data_from_ex02(sub_sample=False, add_outlier=False)
x, mean_x, std_x = standardize(height)
y, tx = build_model_data(x, weight)
# ***************************************************
# INSERT YOUR CODE HERE
# least square or grid search: TODO
# this code should compare the optimal weights obtained
# by least squares vs. grid search
# ***************************************************
mse, lsq_w = least_squares(y,tx)
print(lsq_w)
test_your_least_squares()
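# Optional cross-check (an illustrative aside): on a tiny synthetic problem the
# normal-equation solution should match numpy's least-squares routine.
demo_tx = np.c_[np.ones(5), np.arange(5, dtype=float)]
demo_y = 3 + 2 * np.arange(5, dtype=float)
_, demo_w = least_squares(demo_y, demo_tx)
print(demo_w)  # ~ [3. 2.]
print(np.linalg.lstsq(demo_tx, demo_y)[0])  # same solution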
# load dataset
x, y = load_data()
print("shape of x {}".format(x.shape))
print("shape of y {}".format(y.shape))
def build_poly(x, degree):
polynomial basis functions for input data x, for j=0 up to j=degree.
# ***************************************************
# INSERT YOUR CODE HERE
# polynomial basis function: TODO
# this function should return the matrix formed
# by applying the polynomial basis to the input data
# ***************************************************
x = np.array(x)
res = x
for d in range(2, degree + 1):
res = np.concatenate((res, x ** d), axis=-1)
# print(len(x),degree)
# print(res)
res = np.reshape(res, (degree, len(x)))
res = np.c_[np.ones((len(res.T), 1)),res.T]
return res
def build_poly2(x, degree):
polynomial basis function.
X = np.ones((x.shape[0], degree + 1))
for i in range(degree):
X[:, i + 1:degree + 1] *= x[:, np.newaxis]
return X
test = np.array(range(10))
build_poly2(test,2)
from plots import *
# from .build_polynomial import *
def polynomial_regression():
Constructing the polynomial basis function expansion of the data,
and then running least squares regression.
# define parameters
degrees = [1, 3, 7, 12]
# define the structure of figure
num_row = 2
num_col = 2
f, axs = plt.subplots(num_row, num_col)
for ind, degree in enumerate(degrees):
# ***************************************************
# INSERT YOUR CODE HERE
# form the data to do polynomial regression.: TODO
# ***************************************************
x_degree = build_poly(x,degree)
# ***************************************************
# INSERT YOUR CODE HERE
# least square and calculate rmse: TODO
# ***************************************************
lsq_degree, weight = least_squares(y,x_degree)
# print(weight)
rmse = np.sqrt(2*lsq_degree)
print("Processing {i}th experiment, degree={d}, rmse={loss}".format(
i=ind + 1, d=degree, loss=rmse))
# plot fit
plot_fitted_curve(
y, x, weight, degree, axs[ind // num_col][ind % num_col])
plt.tight_layout()
plt.savefig("visualize_polynomial_regression")
plt.show()
polynomial_regression()
def split_data(x, y, ratio, seed=1):
split the dataset based on the split ratio.
# set seed
np.random.seed(seed)
# ***************************************************
# INSERT YOUR CODE HERE
# split the data based on the given ratio: TODO
# ***************************************************
# Random shuffle the index by enumerate.
pair = np.c_[x,y]
np.random.shuffle(pair)
index = np.round(x.size * ratio,0).astype('int16')
p1, p2 = np.split(pair,[index])
x1,y1 = zip(*p1)
x2,y2 = zip(*p2)
return x1,y1,x2,y2
def split_data2(x, y, ratio, seed=1):
split the dataset based on the split ratio.
# set seed
np.random.seed(seed)
ntr = round(y.shape[0] * ratio)
ind = np.random.permutation(range(y.shape[0]))
x_tr = x[ind[:ntr]]
x_te = x[ind[ntr:]]
y_tr = y[ind[:ntr]]
y_te = y[ind[ntr:]]
return (x_tr, y_tr , x_te , y_te)
test_x = np.array( range(0,10))
test_y = np.array(range(0,10))
print(split_data(test_x, test_y, 0.5))
print(split_data2(test_x, test_y, 0.5))
def train_test_split_demo(x, y, degree, ratio, seed):
polynomial regression with different split ratios and different degrees.
# ***************************************************
# INSERT YOUR CODE HERE
# split the data, and return train and test data: TODO
# ***************************************************
trainX,trainY,testX,testY = split_data(x,y,ratio,seed)
# ***************************************************
# INSERT YOUR CODE HERE
# form train and test data with polynomial basis function: TODO
# ***************************************************
# print(len(trainX))
# trainX = np.c_[np.ones((len(trainX),1)), build_poly(trainX,degree)]
# testX = np.c_[np.ones((len(testX),1)), build_poly(testX,degree)]
trainX = build_poly(trainX, degree)
testX = build_poly(testX, degree)
# ***************************************************
# INSERT YOUR CODE HERE
# calcualte weight through least square.: TODO
# ***************************************************
mse, weight = least_squares(trainY,trainX)
# ***************************************************
# INSERT YOUR CODE HERE
# calculate RMSE for train and test data,
# and store them in rmse_tr and rmse_te respectively: TODO
# ***************************************************
mse_test = np.sum((testY-np.dot(testX,weight))**2)/len(testY)
rmse_tr = np.sqrt(2*mse)
rmse_te = np.sqrt(2*mse_test)
print("proportion={p}, degree={d}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
p=ratio, d=degree, tr=rmse_tr, te=rmse_te))
seed = 6
degrees = [1,3, 7, 12]
split_ratios = [0.9, 0.5, 0.1]
for split_ratio in split_ratios:
for degree in degrees:
train_test_split_demo(x, y, degree, split_ratio, seed)
def ridge_regression(y, tx, lamb):
implement ridge regression.
# ***************************************************
# INSERT YOUR CODE HERE
# ridge regression: TODO
# ***************************************************
# Penalized normal equations: (tx.T tx + lambda' * G) w = tx.T y, where lambda'
# absorbs the 2*N factor and G leaves the bias term unpenalized.
G = np.eye(tx.shape[1])
G[0,0] = 0
hes = np.dot(tx.T,tx) + lamb * G
weight = np.linalg.solve(hes,np.dot(tx.T,y))
mse = compute_cost_MSE(y, tx, weight)
return mse,weight
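# A small illustration (added for intuition, not part of the required exercise):
# as lambda grows, the ridge weights are shrunk towards zero.
demo_x = np.linspace(0, 1, 20)
demo_tx = build_poly(demo_x, 3)
demo_y = np.sin(2 * np.pi * demo_x)
for demo_lambda in [0, 1, 100]:
    _, demo_w = ridge_regression(demo_y, demo_tx, demo_lambda)
    print(demo_lambda, np.round(demo_w, 3))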
def ridge_regression_demo(x, y, degree, ratio, seed):
ridge regression demo.
# define parameter
lambdas = np.logspace(-3, 1, 10)
trainX,trainY,testX,testY = split_data(x,y,ratio,seed)
trainX = build_poly(trainX,degree)
testX = build_poly(testX,degree)
_rmse_te = []
_rmse_tr = []
# define the structure of figure
# num_row = 6
# num_col = 2
# f, axs = plt.subplots(num_row, num_col)
for ind, lamb in enumerate(lambdas):
mse, weight = ridge_regression(trainY,trainX,lamb)
# ***************************************************
# INSERT YOUR CODE HERE
# calculate RMSE for train and test data,
# and store them in rmse_tr and rmse_te respectively: TODO
# ***************************************************
mse_test = compute_cost_MSE(testY, testX, weight)
rmse_tr = np.sqrt(2*mse)
rmse_te = np.sqrt(2*mse_test)
_rmse_te.append(rmse_te)
_rmse_tr.append(rmse_tr)
print("lambda={l}, proportion={p}, degree={d}, weight={w}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
l=ind, p=ratio, d=degree, w=len(weight), tr=rmse_tr, te=rmse_te))
# plot fit
# plot_fitted_curve(
# y, x, weight, degree, axs[ind // num_col][ind % num_col])
print(_rmse_te, _rmse_tr)
# plt.hold(False)
# rmse_tr_plt, = plt.plot(lambdas, _rmse_tr, 's-b', label="train error")
# plt.semilogx()
# plt.hold(True)
# rmse_te_plt, = plt.plot(lambdas, _rmse_te, 's-r', label="test error")
# plt.xlabel('lambda')
# plt.ylabel('rmse')
# plt.title('ridge regression for polynomial degree {deg}'.format(deg=degree))
# plt.legend(handles=[rmse_tr_plt, rmse_te_plt])
# plt.show()
plot_train_test(_rmse_tr, _rmse_te, lambdas, degree)
seed = 11
degree = 7
split_ratio = 0.5
ridge_regression_demo(x, y, degree, split_ratio, seed)
def polynomial_regression():
Constructing the polynomial basis function expansion of the data,
and then running least squares regression.
# define parameters
degrees = [7]
# define the structure of figure
num_row = 2
num_col = 2
f, axs = plt.subplots(num_row, num_col)
for ind, degree in enumerate(degrees):
# ***************************************************
# INSERT YOUR CODE HERE
# form the data to do polynomial regression.: TODO
# ***************************************************
x_degree = build_poly(x,degree)
# ***************************************************
# INSERT YOUR CODE HERE
# least square and calculate rmse: TODO
# ***************************************************
lsq_degree, weight = least_squares(y,x_degree)
# print(weight)
rmse = np.sqrt(2*lsq_degree)
print("Processing {i}th experiment, degree={d}, rmse={loss}".format(
i=ind + 1, d=degree, loss=rmse))
# plot fit
plot_fitted_curve(
y, x, weight, degree, axs[ind // num_col][ind % num_col])
plt.tight_layout()
plt.savefig("visualize_polynomial_regression")
plt.show()
polynomial_regression()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1 -- Mostly defaults
Step2: Example 2
Step3: Example 3
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.sankey import Sankey
Sankey(flows=[0.25, 0.15, 0.60, -0.20, -0.15, -0.05, -0.50, -0.10],
labels=['', '', '', 'First', 'Second', 'Third', 'Fourth', 'Fifth'],
orientations=[-1, 1, 0, 1, 1, 1, 0, -1]).finish()
plt.title("The default settings produce a diagram like this.");
# Notice:
# 1. Axes weren't provided when Sankey() was instantiated, so they were
# created automatically.
# 2. The scale argument wasn't necessary since the data was already
# normalized.
# 3. By default, the lengths of the paths are justified.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[],
title="Flow Diagram of a Widget")
sankey = Sankey(ax=ax, scale=0.01, offset=0.2, head_angle=180,
format='%.0f', unit='%')
sankey.add(flows=[25, 0, 60, -10, -20, -5, -15, -10, -40],
labels=['', '', '', 'First', 'Second', 'Third', 'Fourth',
'Fifth', 'Hurray!'],
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0],
pathlengths=[0.25, 0.25, 0.25, 0.25, 0.25, 0.8, 0.25, 0.25,
0.25],
patchlabel="Widget\nA",
alpha=0.2, lw=2.0) # Arguments to matplotlib.patches.PathPatch()
diagrams = sankey.finish()
diagrams[0].patch.set_facecolor('#37c959')
diagrams[0].texts[-1].set_color('r')
diagrams[0].text.set_fontweight('bold')
# Notice:
# 1. Since the sum of the flows is nonzero, the width of the trunk isn't
# uniform. If verbose.level is helpful (in matplotlibrc), a message is
# given in the terminal window.
# 2. The second flow doesn't appear because its value is zero. Again, if
# verbose.level is helpful, a message is given in the terminal window.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[], title="Two Systems")
sankey = Sankey(ax=ax, unit=None)
flows = [0.25, 0.15, 0.60, -0.10, -0.05, -0.25, -0.15, -0.10, -0.35]
sankey.add(flows=flows, label='one',
orientations=[-1, 1, 0, 1, 1, 1, -1, -1, 0])
sankey.add(flows=[-0.25, 0.15, 0.1], fc='#37c959', label='two',
orientations=[-1, -1, -1], prior=0, connect=(0, 0))
diagrams = sankey.finish()
diagrams[-1].patch.set_hatch('/')
ax.legend(loc='best');
# Notice that only one connection is specified, but the systems form a
# circuit since: (1) the lengths of the paths are justified and (2) the
# orientation and ordering of the flows is mirrored.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Yes, it is
Step2: Find the mistakes 1
Step3: Correct solution
Step4: Find the mistake 2
Step5: Happy - rich
Step6: Solution 2
Step7: Solution 3
|
<ASSISTANT_TASK:>
Python Code:
tah_cloveka = 'kámen'
tah_pocitace = 'papír'
if tah_cloveka == 'kámen' and tah_pocitace == 'kámen'or tah_cloveka == 'nůžky' and tah_pocitace == 'nůžky' or tah_cloveka == 'papír' and tah_pocitace == 'papír':
print('Plichta.')
elif tah_cloveka == 'kámen' and tah_pocitace == 'nůžky' or tah_cloveka == 'nůžky'and tah_pocitace == 'papír' or tah_cloveka == 'papír' and tah_pocitace == 'kámen':
print('Vyhrála jsi!')
elif tah_cloveka == 'kámen' and tah_pocitace == 'papír'or tah_cloveka == 'papír' and tah_pocitace == 'nůžky' or tah_cloveka == 'nůžky' and tah_pocitace == 'kámen':
print('Počítač vyhrál.')
tah_cloveka = 'kámen'
tah_pocitace = 'papír'
if tah_cloveka == tah_pocitace:
print('Plichta.')
elif tah_cloveka == 'kámen' and tah_pocitace == 'nůžky' or tah_cloveka == 'nůžky'and tah_pocitace == 'papír' or tah_cloveka == 'papír' and tah_pocitace == 'kámen':
print('Vyhrála jsi!')
else:
print('Počítač vyhrál.')
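# An even shorter variant (an extra sketch, not part of the original solution):
# store which move beats which in a dictionary and look it up.
beats = {'kámen': 'nůžky', 'nůžky': 'papír', 'papír': 'kámen'}
if tah_cloveka == tah_pocitace:
    print('Plichta.')
elif beats[tah_cloveka] == tah_pocitace:
    print('Vyhrála jsi!')
else:
    print('Počítač vyhrál.')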
from random import randrange
cislo = randrange(2)
if cislo == 0:
tah_pocitace = "kámen"
print("Počítač vybral kámen.")
if cislo == 1:
print("Počítač vybral nůžky.")
tah_pocitace = "nůžky"
else:
tah_pocitace = "papír"
print("Počítač vybral papír.")
from random import randrange
cislo = randrange(2)
if cislo == 0:
tah_pocitace = "kámen"
print("Počítač vybral kámen.")
elif cislo == 1:
print("Počítač vybral nůžky.")
tah_pocitace = "nůžky"
else:
tah_pocitace = "papír"
print("Počítač vybral papír.")
strana = int(input('Zadej velikost strany v cm: '))
strana = 2852
print('Objem krychle o straně',strana,'cm je', strana**3,'cm3')
print('Obsah krychle o straně',strana,'cm je', 6*strana**2,'cm2')
print('Odpovídej "ano" nebo "ne".')
stastna_retezec = input('Jsi šťastná?')
bohata_retezec = input('Jsi bohatá?')
if stastna_retezec == 'ano':
if bohata_retezec == 'ano':
print ("ty se máš")
elif bohata_retezec == 'ne':
print ("zkus mín utrácet")
elif stastna_retezec == 'ne':
if bohata_retezec == 'ano':
print ("zkus se víc usmívat")
elif bohata_retezec == 'ne':
print ("to je mi líto")
else:
print ("Nerozumím.")
print('Odpovídej "ano" nebo "ne".')
stastna_retezec = input('Jsi šťastná?')
bohata_retezec = input('Jsi bohatá?')
if stastna_retezec == 'ano' and bohata_retezec == 'ano':
print ("Grauluji")
elif stastna_retezec == 'ano' and bohata_retezec == 'ne':
print('Zkus míň utrácet.')
elif stastna_retezec == 'ne' and bohata_retezec == 'ano':
print ("zkus se víc usmívat")
elif stastna_retezec == 'ne' and bohata_retezec == 'ne':
print ("to je mi líto")
else:
print ("Nerozumim")
print('Odpovídej "ano" nebo "ne".')
stastna_retezec = input('Jsi šťastná? ')
if stastna_retezec == 'ano':
stastna = True
elif stastna_retezec == 'ne':
stastna = False
else:
print('Nerozumím!')
bohata_retezec = input('Jsi bohatá? ')
if bohata_retezec == 'ano':
bohata = True
elif bohata_retezec == 'ne':
bohata = False
else:
print('Nerozumím!')
if bohata and stastna:
print('Gratuluji!')
elif bohata:
print('Zkus se víc usmívat.')
elif stastna:
print('Zkus míň utrácet.')
else:
print('To je mi líto.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To reduce memory consumption and running time, some of the steps are
Step2: The data was collected with a CTF 275 system at 2400 Hz and low-pass
Step3: In the memory saving mode we use preload=False and use the memory
Step4: Data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference
Step5: For noise reduction, a set of bad segments have been identified and stored
Step6: Here we compute the saccade and EOG projectors for magnetometers and add
Step7: Visually inspect the effects of projections. Click on 'proj' button at the
Step8: Typical preprocessing step is the removal of power line artifact (50 Hz or
Step9: We also lowpass filter the data at 100 Hz to remove the hf components.
Step10: Epoching and averaging.
Step11: The event timing is adjusted by comparing the trigger times on detected
Step12: We mark a set of bad channels that seem noisier than others. This can also
Step13: The epochs (trials) are created for MEG channels. First we find the picks
Step14: We only use first 40 good epochs from each run. Since we first drop the bad
Step15: The averages for each conditions are computed.
Step16: Typical preprocessing step is the removal of power line artifact (50 Hz or
Step17: Here we plot the ERF of standard and deviant conditions. In both conditions
Step18: Show activations as topography figures.
Step19: We can see the MMN effect more clearly by looking at the difference between
Step20: Source estimation.
Step21: The transformation is read from a file. More information about coregistering
Step22: To save time and memory, the forward solution is read from a file. Set
Step23: The sources are computed using dSPM method and plotted on an inflated brain
Step24: Deviant condition.
Step25: Difference.
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
print(__doc__)
use_precomputed = True
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',
'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', 'bst_auditory',
'S01_Noise_20131218_01.ds')
preload = not use_precomputed
raw = read_raw_ctf(raw_fname1, preload=preload)
n_times_run1 = raw.n_times
mne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])
raw_erm = read_raw_ctf(erm_fname, preload=preload)
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
# Leave out the two EEG channels for easier computation of forward.
raw.pick_types(meg=True, eeg=False, stim=True, misc=True, eog=True,
ecg=True)
annotations_df = pd.DataFrame()
offset = n_times_run1
for idx in [1, 2]:
csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
'events_bad_0%s.csv' % idx)
df = pd.read_csv(csv_fname, header=None,
names=['onset', 'duration', 'id', 'label'])
print('Events from run {0}:'.format(idx))
print(df)
df['onset'] += offset * (idx - 1)
annotations_df = pd.concat([annotations_df, df], axis=0)
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = annotations_df['label'].values
annotations = mne.Annotations(onsets, durations, descriptions)
raw.set_annotations(annotations)
del onsets, durations, descriptions
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
desc_prefix='saccade')
if use_precomputed:
proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-eog-proj.fif')
projs_eog = mne.read_proj(proj_fname)[0]
else:
projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
n_mag=1, n_eeg=0)
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade # To save memory
raw.plot(block=True)
if not use_precomputed:
meg_picks = mne.pick_types(raw.info, meg=True, eeg=False)
raw.plot_psd(tmax=np.inf, picks=meg_picks)
notches = np.arange(60, 181, 60)
raw.notch_filter(notches, phase='zero-double', fir_design='firwin2')
raw.plot_psd(tmax=np.inf, picks=meg_picks)
if not use_precomputed:
raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
phase='zero-double', fir_design='firwin2')
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
% (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=False,
proj=True)
epochs.drop_bad()
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
epochs['standard'][182:222]])
epochs_standard.load_data() # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs, picks
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
for evoked in (evoked_std, evoked_dev):
evoked.filter(l_freq=None, h_freq=40., fir_design='firwin')
evoked_std.plot(window_title='Standard', gfp=True, time_unit='s')
evoked_dev.plot(window_title='Deviant', gfp=True, time_unit='s')
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard', time_unit='s')
evoked_dev.plot_topomap(times=times, title='Deviant', time_unit='s')
evoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')
evoked_difference.plot(window_title='Difference', gfp=True, time_unit='s')
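# (Added illustration) latency of the largest standard-vs-deviant difference,
# computed directly from the evoked data array with plain numpy.
diff_amplitude = np.abs(evoked_difference.data).max(axis=0)
print('Largest |deviant - standard| difference at %0.3f s'
      % evoked_difference.times[np.argmax(diff_amplitude)])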
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
if use_precomputed:
fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
'bst_auditory-meg-oct-6-fwd.fif')
fwd = mne.read_forward_solution(fwd_fname)
else:
src = mne.setup_source_space(subject, spacing='ico4',
subjects_dir=subjects_dir, overwrite=True)
model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
bem=bem)
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
del fwd
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_standard, brain
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.1, time_unit='s')
del stc_deviant, brain
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
surface='inflated', time_viewer=False, hemi='lh',
initial_time=0.15, time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To run this Notebook, you must have an environment variable pointed at the PYPIT Development suite
Step2: Checking edges
Step3: PYPIT Script
Step4: Show
Step5: Add a Slit
Step6: PYPIT script (coming someday, especially if requested)
Step7: Re-run Tracing (only recommended for Developers)
Step8: Load
Step9: Run
|
<ASSISTANT_TASK:>
Python Code:
# imports
import os
from pypit import traceslits
print(os.getenv('PYPIT_DEV'))
mstrace_root = os.getenv('PYPIT_DEV')+'Cooked/Trace/MasterTrace_KeckLRISr_150420_402'
Tslits = traceslits.TraceSlits.from_master_files(mstrace_root)
Tslits.show('edges')
# left edge, right edge, row on image
add_user_slits = [[489,563,1024]]
# run_to_finish resets things in a proper manner
Tslits.add_user_slits(add_user_slits, run_to_finish=True)
# check -- S11 is the new one
Tslits.show('edges')
# write to disk (this will over-write so be careful)
new_root = os.path.basename(mstrace_root)+'_new'
Tslits.save_master(new_root)
# Frame with extra slit
mstrace_root2 = os.getenv('PYPIT_DEV')+'Cooked/Trace/MasterTrace_KeckLRISr_20160110_A'
# Load
Tslits2 = traceslits.TraceSlits.from_master_files(mstrace_root2)
# Show
Tslits2.show('edges')
# Setup slit to remove -- xleft, yleft at yrow=nrow/2
rm_slits = [[229, 380]]
# Remove
Tslits2.remove_slit(rm_slits)
# Check
Tslits2.show('edges')
# write to disk (this will over-write so be careful)
new_root2 = os.path.basename(mstrace_root2)+'_new'
Tslits2.save_master(new_root2)
mstrace_root3 = os.getenv('PYPIT_DEV')+'Cooked/Trace/MasterTrace_KeckLRISr_20160110_A'
Tslits2 = traceslits.TraceSlits.from_master_files(mstrace_root3)
tslit_dict = Tslits2.run()
tslit_dict
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (inline image attachment)
Step2: In the above code snippet we have three lines of error-prone code; we can't add ints to strings, we can't divide by zero, and we can't add objects that have yet to be defined. And yet, Python ran just fine. The reason is one I have already alluded to; Python ignores those three lines of code because 'A' does not equal 11.
Step3: So this code goes through the numbers 1-to-10 and calculates the square of the number. Now in this case only one number gets printed (100), which happens to be 10x10.
Step4: So we changed the indentation of the print statement and we get a completely different behaviour. In the first case we print one number and in the second case we print ten numbers.
Step5: Unlike before, we get an error message this time. In this case the problem is the b=2 line, followed by an indented print statement. The fix here is to either indent b=2 or dedent the print statement. The correct approach will be dependent upon what your code is supposed to do. And lastly
Step6: Once again, we have two bits of code that will act very differently from one another and the only difference is indentation. In version (A) what happens is we check if 'A' is True and if it is we check if 'B' is True. In the second case, we check if 'B' is True independently of whether 'A' is True.
Step7: So our code was called on the numbers 2, 5, 33 and 100. It then printed out a few statements; you should notice the last two lines contradict each other: 100 cannot be both even and odd!
Step8: Homework Assignment
|
<ASSISTANT_TASK:>
Python Code:
if 1:
    used_tab = True
if used_tab:
    used_spaces = True
    print("Something Something")
a = 10
if a == 11:
    a += "abc" # a (an int) + "abc" should be a type error!
    10/0 # dividing by zero should raise a zero division error!
    dave + hat + seven_turnips # should be a name error, none of these things are defined!
else:
    print(a*a)
for i in range(1, 11):
    square = i*i
print(square)
for i in range(1, 11):
    square = i*i
    print(square) # <--- now indented...
for i in range(10):
    a = 3
b = 2
    print(i)
a = b = ""
# Version (A):
if a:
    pass
    if b:
        pass
# Version (B):
if a:
    pass
if b:
    pass
# Takes a number and prints if it is even, odd AND >10, or less than 10
# Take note of the indentation guys!
for number in [2, 5, 33, 100]:
    if number % 2 == 0: # number is even
        print("{} is even".format(number))
    if number > 10:
        print("{} is odd AND greater than 10".format(number))
    else:
        pass # pass is a way to make python do nothing.
for number in [2, 5, 33, 100]:
    if number % 2 == 0:
        print("{} is even".format(number))
    elif number > 10: # <--- This is the ONLY LINE that has changed.
        print("{} is odd AND greater than 10".format(number))
    else:
        pass
## HOUSES AND KITTENS, A D&D KNOCK-OFF ###
#### THIS CODE IS A MESS, FIX THE INDENTATION
quest = "KILL KITTEN"
current_xp = 20
next_level_xp = 100
weapon_of_choice = "sponge mallet"
is_dead = False
attack_dmg = 10
enemy_health = 10
print('\nGAMEMASTER says: "{} does {} and the enemy has {} health do you want to attack?"\n'.format(weapon_of_choice, attack_dmg, enemy_health))
atk = input('Enter "Y" to attack, else "N"')
do_attack = True if atk == "Y" else False
if do_attack:
enemy_health -= attack_dmg
if enemy_health == 0:
is_dead = True
if is_dead:
print('INKEEPER says: "OMG WHAT DID YOU DO!!!!, WHY DID YOU KILL THAT POOR LITTLE KITTEN WITH YOUR {}? ..YOU..MONSTER!!!"'.format(weapon_of_choice.upper()))
if quest == "KILL KITTEN":
print('\nGAMEMASTER says: "Our brave hero completes his quest. + 80 xp"\n')
current_xp += 80
print('INKEEPER says: "I have a baby seal in the swimming pool if you want to club that too?"'.format(weapon_of_choice))
new_quest_accept = input('GAMEMASTER says: "DO YOU ACCEPT THE CLUB POOR SEAL QUEST? Type Y or N"\n')
if new_quest_accept == "Y":
quest = "CLUB BABY SEAL"
print('INKEEPER says: "YES!? I was being sarcastic you twit, why on earth would I give you a quest to harm by baby seal?"')
else:
print('INKEEPER says: "NO? Well THANKYOU for not killing every living thing in my house. Speaking of which, Why are you even in my house? GET OUT GET OUT YOU FOUL BEAST!!"')
elif do_attack and not is_dead:
print('INKEEPER says: "SNUFFLES!!! WHAT DID HE DO TO YOU!!! SOMEONE PLEASE FETCH A CAT DOCTOR"')
if current_xp == next_level_xp:
print('\nGAMEMASTER says: "LEVEL UP!!, {} now does +2 attack"\n'.format(weapon_of_choice))
attack_dmg += 2
else:
print('INKEEPER says: "Thankyou kind sir for not maiming my little kitten with your {}"'.format(weapon_of_choice))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fully-Connected Neural Nets
Step4: Affine layer
Step5: Affine layer
Step6: ReLU layer
Step7: ReLU layer
Step8: "Sandwich" layers
Step9: Loss layers
Step10: Two-layer network
Step11: Solver
Step12: Multilayer network
Step13: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.
Step14: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.
Step15: Inline question
Step16: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
Step17: RMSProp and Adam
Step18: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules
Step19: Train a good model!
Step20: Test your model
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """ returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
[ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
# Test the affine_backward function
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
[ 0., 0., 0.04545455, 0.13636364,],
[ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print 'Testing affine_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-2
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)
print 'Testing initialization ... '
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print 'Testing test-time forward pass ... '
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print 'Testing training loss (no regularization)'
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
for reg in [0.0, 0.7]:
print 'Running numeric gradient check with reg = ', reg
model.reg = reg
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
model = TwoLayerNet()
solver = None
##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 50% accuracy on the validation set. #
##############################################################################
# data = {
# 'X_train': X_train,# training data
# 'y_train': y_train,# training labels
# 'X_val': X_val,# validation data
# 'y_val': y_val,# validation labels
# }
model = TwoLayerNet(input_dim=data['X_train'].size/data['X_train'].shape[0],
hidden_dim=160,
num_classes=len(np.unique(data['y_train'])),
reg=0.1)
solver = Solver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=1000)
solver.train()
pass
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print 'Running check with reg = ', reg
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
# TODO: Use a three-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 1e-2
learning_rate = 1e-2
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
# TODO: Use a five-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
learning_rate = 1e-3
weight_scale = 1e-1
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
from cs231n.optim import sgd_momentum
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
[ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
[ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
[ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
[ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])
expected_velocity = np.asarray([
[ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
[ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
[ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
[ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])
print 'next_w error: ', rel_error(next_w, expected_next_w)
print 'velocity error: ', rel_error(expected_velocity, config['velocity'])
num_train = 4000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': 1e-2,
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
[-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
[-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
[ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
[ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
[ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
[ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
[ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
[ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'cache error: ', rel_error(expected_cache, config['cache'])
# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
[-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
[-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
[ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
[ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
[ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],
[ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],
[ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
[ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
[ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
[ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
[ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
[ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'v error: ', rel_error(expected_v, config['v'])
print 'm error: ', rel_error(expected_m, config['m'])
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
print 'running with ', update_rule
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': learning_rates[update_rule]
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in solvers.iteritems():
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
## Tune hyperparameters
## Goal: Reach 50% validation accuracy
import sys
results = {}
best_val = -1
best_model = None
# random search for hyperparameter optimization
max_count = 3
learning_rates = sorted(10**np.random.uniform(-4, -3, max_count))
weight_scales = sorted(10**np.random.uniform(-2, -1, max_count))
i = 0
for lr in learning_rates:
for ws in weight_scales:
print('set %d, learning rate: %f, weight_scale: %f' % (i+1, lr, ws))
i += 1
sys.stdout.flush()
model = FullyConnectedNet(
[100, 100, 100, 100, 100],
weight_scale=ws, dtype=np.float64,use_batchnorm=False, reg=1e-2)
solver = Solver(model, data,
print_every=1000, num_epochs=1, batch_size=100,
update_rule='adam',
optim_config={
'learning_rate': lr,
},
lr_decay = 0.9,
verbose = True
)
solver.train()
train_acc = solver.train_acc_history[-1]
val_acc = solver.val_acc_history[-1]
results[(lr,ws)] = train_acc, val_acc
# Print out results.
for lr, ws in sorted(results):
train_acc, val_acc = results[(lr, ws)]
print 'lr %e ws %e train accuracy: %f, validation accuracy: %f' % (
lr, ws, train_acc, val_acc)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('weight scale')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('weight scale/log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Notify when finished
best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might  #
# find batch normalization and dropout useful. Store your best model in the   #
# best_model variable. #
################################################################################
learning_rate = 1.184318e-04
model = FullyConnectedNet([100, 100, 100, 100, 100],
weight_scale=5.608636e-02, reg=1e-2)
solver = Solver(model, data,
num_epochs=10, batch_size=100,
update_rule='adam',
optim_config={
'learning_rate': learning_rate
},
verbose=True,
print_every=1000)
solvers[update_rule] = solver
solver.train()
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history)
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, label='train')
plt.plot(solver.val_acc_history, label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Clasification accuracy')
plt.show()
best_model = model
X_val = data['X_val']
y_val = data['y_val']
X_test = data['X_test']
y_test = data['y_test']
pass
################################################################################
# END OF YOUR CODE #
################################################################################
y_test_pred = np.argmax(best_model.loss(X_test), axis=1)
y_val_pred = np.argmax(best_model.loss(X_val), axis=1)
print 'Validation set accuracy: ', (y_val_pred == y_val).mean()
print 'Test set accuracy: ', (y_test_pred == y_test).mean()
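# (Added illustration) per-class test accuracy for the best model, computed from the
# predictions above with plain numpy; classes are indexed 0-9 as in CIFAR-10.
for c in range(10):
    class_mask = (y_test == c)
    print 'class %d test accuracy: %f' % (c, (y_test_pred[class_mask] == y_test[class_mask]).mean())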
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create sites, devices and sensors
Step2: Add everything together
Step3: Save Houseprint to file
Step4: Share this file with your friends!
|
<ASSISTANT_TASK:>
Python Code:
# use the `empty_init` flag to create an empty object
hp = houseprint.Houseprint(empty_init=True)
hp
site1 = houseprint.Site(key="Your house name")
# consult the docs to see what fields are available!
# houseprint.Site??
# houseprint.Fluksometer??
# houseprint.Fluksosensor??
device1 = houseprint.Fluksometer(key='fluksoname')
sensor1 = houseprint.Fluksosensor(key='keykeykey', token="blabla", type='electricity')
device1.add_sensor(sensor1)
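# (Added illustration; the key, token and sensor type below are made-up example values)
# further sensors can be attached to the same device in exactly the same way.
sensor2 = houseprint.Fluksosensor(key='anotherkeykey', token="blabla2", type='gas')
device1.add_sensor(sensor2)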
site1.add_device(device1)
hp.add_site(site1)
hp
hp.save('sharable_houseprint.pkl')
hp2 = houseprint.load_houseprint_from_file('sharable_houseprint.pkl')
hp2
hp2.init_tmpo()
hp2.get_data()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: 2. Statistical functions
Step4: 3. Perform comparison
Step5: And below is the output from the Excel macro for comparison.
Step6: My code gives near-identical results to those from the Excel macro, although there are a few edge cases that might be worth investigating further. For example, if there are fewer than 10 non-null values, my code currently prints a warning. I'm not sure exactly what the Excel macro does yet, but in general it seems that for fewer than 10 values it's necessary to use a lookup table (see e.g. the Instructions sheet of the file here).
Step7: Looking at the ICPW_STATISTICS table in RESA2, it seems as though trends have been assessed for 14 parameters and several different time periods for each site of interest. The length and number of time periods vary from site to site, so I'll need to check with Heleen regarding how these variables should be chosen. The 14 parameters are as follows
Step8: 4.1.2. Sea-salt corrected values
Step9: 4.3. Extract time series
Step10: 4.4. Aggregate to annual
Step11: 4.4. Convert units and apply sea-salt correction
Step13: 4.5. Calculate trends
Step14: 5. Compare to previous trends analysis
Step15: For e.g. site 23499, I can now re-run my code for the period from 1990 to 2004 and compare my results to those above.
|
<ASSISTANT_TASK:>
Python Code:
import imp
import numpy as np
import pandas as pd

# Read data and results from the Excel macro
in_xlsx = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\TOC_Trends_Analysis_2015'
r'\Data\mk_sen_test_data.xlsx')
raw_df = pd.read_excel(in_xlsx, sheetname='input')
res_df = pd.read_excel(in_xlsx, sheetname='results')
raw_df
res_df
def mk_test(x, stn_id, par, alpha=0.05):
    """
    Adapted from http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
    by Sat Kumar Tomer.

    Perform the MK test for monotonic trends. Uses the "normal
    approximation" to determine significance and therefore should
    only be used if the number of values is >= 10.

    Args:
        x:      1D array of data
        stn_id: Station ID (used only in the warning message)
        par:    Parameter name for the data series (string)
        alpha:  Significance level

    Returns:
        var_s: Variance of test statistic
        s:     M-K test statistic
        z:     Normalised test statistic
        p:     p-value of the significance test
        trend: Whether to reject the null hypothesis (no trend) at
               the specified significance level. One of:
               'increasing', 'decreasing' or 'no trend'
    """
import numpy as np
from scipy.stats import norm
n = len(x)
if n < 10:
print (' Data series for %s at site %s has fewer than 10 non-null values. '
'Significance estimates may be unreliable.' % (par, int(stn_id)))
# calculate S
s = 0
for k in xrange(n-1):
for j in xrange(k+1,n):
s += np.sign(x[j] - x[k])
# calculate the unique data
unique_x = np.unique(x)
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n*(n-1)*(2*n+5))/18.
else: # there are some ties in data
tp = np.zeros(unique_x.shape)
for i in xrange(len(unique_x)):
tp[i] = sum(unique_x[i] == x)
# Sat Kumar's code has "+ np.sum", which is incorrect
var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18.
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
else:
z = np.nan
# calculate the p_value
p = 2*(1-norm.cdf(abs(z))) # two tail test
h = abs(z) > norm.ppf(1-alpha/2.)
if (z<0) and h:
trend = 'decreasing'
elif (z>0) and h:
trend = 'increasing'
elif np.isnan(z):
trend = np.nan
else:
trend = 'no trend'
return var_s, s, z, p, trend
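# (Added illustration) a quick smoke test of mk_test on a strictly increasing dummy
# series; the station id (0) and parameter name ('demo') are placeholder values.
print(mk_test([float(v) for v in range(15)], stn_id=0, par='demo'))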
def wc_stats(raw_df, st_yr=None, end_yr=None):
    """
    Calculate key statistics for the TOC trends analysis:

        'station_id'
        'par_id'
        'non_missing'
        'median'
        'mean'
        'std_dev'
        'period'
        'mk_std_dev'
        'mk_stat'
        'norm_mk_stat'
        'mk_p_val'
        'trend'
        'sen_slp'

    Args:
        raw_df: Dataframe with annual data for a single station. Columns must
                be: [station_id, year, par1, par2, ... parn]
        st_yr:  First year to include in analysis. Pass None to start
                at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to include
                data up to the end of the series

    Returns:
        Dataframe of key statistics.
    """
import numpy as np, pandas as pd
from scipy.stats import theilslopes
# Checking
df = raw_df.copy()
assert list(df.columns[:2]) == ['STATION_ID', 'YEAR'], 'Columns must be: [STATION_ID, YEAR, par1, par2, ... parn]'
assert len(df['STATION_ID'].unique()) == 1, 'You can only process data for one site at a time'
# Get just the period of interest
if st_yr:
df = df.query('YEAR >= @st_yr')
if end_yr:
df = df.query('YEAR <= @end_yr')
# Get stn_id
stn_id = df['STATION_ID'].iloc[0]
# Tidy up df
df.index = df['YEAR']
df.sort_index(inplace=True)
del df['STATION_ID'], df['YEAR']
# Container for results
data_dict = {'station_id':[],
'par_id':[],
'non_missing':[],
'median':[],
'mean':[],
'std_dev':[],
'period':[],
'mk_std_dev':[],
'mk_stat':[],
'norm_mk_stat':[],
'mk_p_val':[],
'trend':[],
'sen_slp':[]}
# Loop over pars
for col in df.columns:
# 1. Station ID
data_dict['station_id'].append(stn_id)
# 2. Par ID
data_dict['par_id'].append(col)
# 3. Non-missing
data_dict['non_missing'].append(pd.notnull(df[col]).sum())
# 4. Median
data_dict['median'].append(df[col].median())
# 5. Mean
data_dict['mean'].append(df[col].mean())
# 6. Std dev
data_dict['std_dev'].append(df[col].std())
# 7. Period
st_yr = df.index.min()
end_yr = df.index.max()
per = '%s-%s' % (st_yr, end_yr)
data_dict['period'].append(per)
# 8. M-K test
# Drop missing values
mk_df = df[[col]].dropna(how='any')
# Only run stats if more than 1 valid value
if len(mk_df) > 1:
var_s, s, z, p, trend = mk_test(mk_df[col].values, stn_id, col)
data_dict['mk_std_dev'].append(np.sqrt(var_s))
data_dict['mk_stat'].append(s)
data_dict['norm_mk_stat'].append(z)
data_dict['mk_p_val'].append(p)
data_dict['trend'].append(trend)
# 8. Sen's slope
# First element of output gives median slope. Other results could
# also be useful - see docs
sslp = theilslopes(mk_df[col].values, mk_df.index, 0.95)[0]
data_dict['sen_slp'].append(sslp)
# Otherwise all NaN
else:
for par in ['mk_std_dev', 'mk_stat', 'norm_mk_stat',
'mk_p_val', 'trend', 'sen_slp']:
data_dict[par].append(np.nan)
# Build to df
res_df = pd.DataFrame(data_dict)
res_df = res_df[['station_id', 'par_id', 'period', 'non_missing',
'mean', 'median', 'std_dev', 'mk_stat', 'norm_mk_stat',
'mk_p_val', 'mk_std_dev', 'trend', 'sen_slp']]
return res_df
# Run analysis on test data and print results
out_df = wc_stats(raw_df)
del out_df['station_id']
out_df
res_df
# Use custom RESA2 function to connect to db
r2_func_path = r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Upload_Template\useful_resa2_code.py'
resa2 = imp.load_source('useful_resa2_code', r2_func_path)
engine, conn = resa2.connect_to_resa2()
# Tabulate chemical properties
chem_dict = {'molar_mass':[96, 35, 40, 24, 14],
'valency':[2, 1, 2, 2, 1],
'resa2_ref_ratio':[0.103, 1., 0.037, 0.196, 'N/A']}
chem_df = pd.DataFrame(chem_dict, index=['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N'])
chem_df = chem_df[['molar_mass', 'valency', 'resa2_ref_ratio']]
chem_df
# Get stations for a specified list of projects
proj_list = ['ICPW_TOCTRENDS_2015_CZ', 'ICPW_TOCTRENDS_2015_IT']
sql = ('SELECT station_id, station_code '
'FROM resa2.stations '
'WHERE station_id IN (SELECT UNIQUE(station_id) '
'FROM resa2.projects_stations '
'WHERE project_id IN (SELECT project_id '
'FROM resa2.projects '
'WHERE project_name IN %s))'
% str(tuple(proj_list)))
stn_df = pd.read_sql(sql, engine)
stn_df
# Specify parameters of interest
par_list = ['SO4', 'Cl', 'Ca', 'Mg', 'NO3-N', 'TOC', 'Al']
if 'DOC' in par_list:
print ('The database treats DOC and TOC similarly.\n'
'You should probably enter "TOC" instead')
# Check pars are valid
if len(par_list)==1:
sql = ("SELECT * FROM resa2.parameter_definitions "
"WHERE name = '%s'" % par_list[0])
else:
sql = ('SELECT * FROM resa2.parameter_definitions '
'WHERE name in %s' % str(tuple(par_list)))
par_df = pd.read_sql_query(sql, engine)
assert len(par_df) == len(par_list), 'One or more parameters not valid.'
# Get results for ALL pars for sites and period of interest
if len(stn_df)==1:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id = %s)"
% stn_df['station_id'].iloc[0])
else:
sql = ("SELECT * FROM resa2.water_chemistry_values2 "
"WHERE sample_id IN (SELECT water_sample_id FROM resa2.water_samples "
"WHERE station_id IN %s)"
% str(tuple(stn_df['station_id'].values)))
wc_df = pd.read_sql_query(sql, engine)
# Get all sample dates for sites and period of interest
if len(stn_df)==1:
sql = ("SELECT water_sample_id, station_id, sample_date "
"FROM resa2.water_samples "
"WHERE station_id = %s " % stn_df['station_id'].iloc[0])
else:
sql = ("SELECT water_sample_id, station_id, sample_date "
"FROM resa2.water_samples "
"WHERE station_id IN %s " % str(tuple(stn_df['station_id'].values)))
samp_df = pd.read_sql_query(sql, engine)
# Join in par IDs based on method IDs
sql = ('SELECT * FROM resa2.wc_parameters_methods')
meth_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, meth_par_df, how='left',
left_on='method_id', right_on='wc_method_id')
# Get just the parameters of interest
wc_df = wc_df.query('wc_parameter_id in %s' % str(tuple(par_df['parameter_id'].values)))
# Join in sample dates
wc_df = pd.merge(wc_df, samp_df, how='left',
left_on='sample_id', right_on='water_sample_id')
# Join in parameter units
sql = ('SELECT * FROM resa2.parameter_definitions')
all_par_df = pd.read_sql_query(sql, engine)
wc_df = pd.merge(wc_df, all_par_df, how='left',
left_on='wc_parameter_id', right_on='parameter_id')
# Join in station codes
wc_df = pd.merge(wc_df, stn_df, how='left',
left_on='station_id', right_on='station_id')
# Convert units
wc_df['value'] = wc_df['value'] * wc_df['conversion_factor']
# Extract columns of interest
wc_df = wc_df[['station_id', 'sample_date', 'name', 'value']]
# Unstack
wc_df.set_index(['station_id', 'sample_date', 'name'], inplace=True)
wc_df = wc_df.unstack(level='name')
wc_df.columns = wc_df.columns.droplevel()
wc_df.reset_index(inplace=True)
wc_df.columns.name = None
wc_df.head()
# Extract year from date column
wc_df['year'] = wc_df['sample_date'].map(lambda x: x.year)
del wc_df['sample_date']
# Groupby station_id and year
grpd = wc_df.groupby(['station_id', 'year'])
# Calculate median
wc_df = grpd.agg('median')
wc_df.head()
# 1. Convert to ueq/l
for par in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N']:
val = chem_df.ix[par, 'valency']
mm = chem_df.ix[par, 'molar_mass']
if par == 'NO3-N':
wc_df['ENO3'] = wc_df[par] * val / mm
else:
wc_df['E%s' % par] = wc_df[par] * val * 1000. / mm
# 2. Apply sea-salt correction
for par in ['ESO4', 'EMg', 'ECa']:
ref = chem_df.ix[par[1:], 'resa2_ref_ratio']
wc_df['%sX' % par] = wc_df[par] - (ref*wc_df['ECl'])
# 3. Calculate combinations
# 3.1. ESO4 + ECl
wc_df['ESO4_ECl'] = wc_df['ESO4'] + wc_df['ECl']
# 3.2. ECa + EMg
wc_df['ECa_EMg'] = wc_df['ECa'] + wc_df['EMg']
# 3.3. ECaX + EMgX
wc_df['ECaX_EMgX'] = wc_df['ECaX'] + wc_df['EMgX']
# 3.4. ESO4 + ECl + ENO3
wc_df['ESO4_ECl_ENO3'] = wc_df['ESO4'] + wc_df['ECl'] + wc_df['ENO3']
# 4. Delete unnecessary columns and tidy
for col in ['SO4', 'Cl', 'Mg', 'Ca', 'NO3-N']:
del wc_df[col]
wc_df.reset_index(inplace=True)
wc_df.head()
def process_water_chem_df(stn_df, wc_df, st_yr=None, end_yr=None):
    """
    Calculate statistics for the stations, parameters and time
    periods specified.

    Args:
        stn_df: Dataframe of station_ids
        wc_df:  Dataframe of water chemistry time series for stations
                and parameters of interest
        st_yr:  First year to include in analysis. Pass None to start
                at the beginning of the series
        end_yr: Last year to include in analysis. Pass None to include
                data up to the end of the series

    Returns:
        Dataframe of statistics
    """
# Container for output
df_list = []
# Loop over sites
for stn_id in stn_df['station_id']:
# Extract data for this site
df = wc_df.query('station_id == @stn_id')
# Modify col names
names = list(df.columns)
names[:2] = ['STATION_ID', 'YEAR']
df.columns = names
# Run analysis
        df_list.append(wc_stats(df, st_yr=st_yr, end_yr=end_yr))
res_df = pd.concat(df_list, axis=0)
return res_df
res_df = process_water_chem_df(stn_df, wc_df)
res_df.head()
# Get results for test sites from RESA2
sql = ('SELECT * FROM resa2.icpw_statistics3 '
'WHERE station_id IN %s'
% str(tuple(stn_df['station_id'].values)))
stat_df = pd.read_sql(sql, engine)
# Get just the cols to compare to my output
stat_df = stat_df[['station_id', 'parameter', 'period', 'nonmiss',
'average', 'median', 'stdev', 'test_stat',
'mk_stat', 'mkp', 'senslope']]
stat_df.head(14).sort_values(by='parameter')
# Re-run python analysis for the period 1990 - 2004
res_df = process_water_chem_df(stn_df, wc_df, st_yr=1990, end_yr=2004)
# Delete mk_std_dev as not relevant here
del res_df['mk_std_dev']
res_df.head(14).sort_values(by='par_id')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note that an interesting phenomenon happens here. The note that should have a frequency of 880.0 Hz (an octave above the initial reference) is actually calculated as roughly 892 Hz. This represents a computable error, of the form
Step2: This error, of about 1.36%, is well known and is called the Pythagorean comma. It represents a kind of accumulated perceptual error of the tuning system. But note
Step3: How out of tune is a tuning system?
|
<ASSISTANT_TASK:>
Python Code:
referencia_inicial = 440.0 # Hz
frequencias = [] # This list will hold all the frequencies of one scale
f = referencia_inicial
while len(frequencias) < 12:
    if f > (referencia_inicial * 2):
        f /= 2.
    frequencias.append(f)
    f *= (3/2.)
frequencias.sort()
print frequencias
print f
print 100*(f - (referencia_inicial * 2)) / (referencia_inicial*2)
frequencias_t = [] # This list will hold all the frequencies of one scale
ft = referencia_inicial
while len(frequencias_t) < 12:
    frequencias_t.append(ft)
    ft *= 2**(1/12.)
frequencias_t.sort()
print frequencias_t
print ft
intervalos_diatonica = [2, 3, 4, 5, 6, 7]
intervalos_cromatica = [2, 4, 5, 7, 9, 11]
razoes = [9/8., 5/4., 4/3., 3/2., 5/3., 15/8.]
for i in xrange(len(intervalos_diatonica)):
    frequencia_ideal = referencia_inicial * razoes[i]
    frequencia_pitagorica = frequencias[intervalos_cromatica[i]]
    frequencia_temperada = frequencias_t[intervalos_cromatica[i]]
    erro_pitagorica = 100*(frequencia_pitagorica - (frequencia_ideal)) / (frequencia_ideal)
    erro_temperada = 100*(frequencia_temperada - (frequencia_ideal)) / (frequencia_ideal)
    print "Interval:", intervalos_diatonica[i]
    print "Pythagorean error:", erro_pitagorica
    print "Equal-tempered error:", erro_temperada
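# (Added illustration) the ~1.36% discrepancy mentioned above is the Pythagorean comma:
# twelve perfect fifths (3/2) overshoot seven octaves (2**7) by exactly this ratio.
comma_ratio = (3/2.)**12 / 2**7
print "Pythagorean comma:", comma_ratio, "->", 100*(comma_ratio - 1), "%"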
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def isDivisible(n):
    # Repeatedly strip the last digit d and replace n by |n - 14*d|;
    # this preserves divisibility by 47 (since 141 = 3 * 47).
    while n // 100:
        d = n % 10
        n //= 10
        n = abs(n - (d * 14))
    return (n % 47 == 0)

if __name__ == "__main__":
    n = 59173
    if isDivisible(n):
        print("Yes")
    else:
        print("No")
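# (Added check, not part of the original snippet) brute-force verification of the
# rule against a direct modulo test for a handful of sample values.
for test_n in [47, 94, 4700, 59173, 59174, 123456]:
    assert isDivisible(test_n) == (test_n % 47 == 0), test_n
print("divisibility-by-47 rule verified on sample values")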
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specifying a language such as 'en' (English) is optional. Adding texts from a directory to the corpus is easy
Step2: By default, this function assumes that all texts under this directory have been encoded in UTF-8 and that they have a .txt extension. Additionally, the syntax of the filename should be <category>_<title>.txt, where category is a label that indicates e.g. a text's authorship, genre or date of composition. In our case, this directory looked like
Step3: Our corpus currently holds these 7 texts in their raw form
Step4: In stylometry, it is typical to preprocess your corpus and, let's say, remove punctuation and lowercase the texts. In pystyl, we achieve this via the preprocess() method, where the alpha_only parameter controls whether we only wish to keep alphabetic symbols
Step5: Now, the corpus is ready to be tokenized, which is helpful if we want to start counting words
Step6: The corpus now holds our texts in a tokenized form. Of course, the novels wildly vary in length. If we would like to split these into shorter segments of e.g. 10,000 words, we can use the segment() function.
Step7: In stylometry, it is common to manually remove certain words, such as personal pronouns, which are more strongly tied to narrative perspective than authorial writing style. To remove these from our English texts, we can do
Step8: As you can see, all personal pronouns have now been removed from our corpus segments. We are now ready to vectorize our corpus, which means that we will represent it as a large two-dimensional matrix in which each row represents one of our textual segments, and each individual feature (e.g. a function word frequency) is represented in a column.
Step9: As you can see, we have now included the 30 most common words in our corpus model (mfi stands for 'most frequent items'). These features are returned by the vectorize() method. Many other options are available; to extract the 50 most common character trigrams, for instance, you could run
Step10: A more fundamental issue is the vectorization model we select. By default, the vectorizer will create a simple term-frequency model, which means that we will record the relative frequencies of our most frequent items in each text. In stylometry, however, there exist many more models. PyStyl also supports the tf-idf model (term frequency-inverse document frequency), which is commonly used in information retrieval to assign more weight to lower-frequency items.
Step11: PyStyl also supports the std model which underpins Burrows's famous Delta method (and which is typically also a solid model for other applications)
Step12: Vectorization is a foundational issue in stylometry, since it very much controls how our analyses 'see' texts. Luckily, the vectorize() method comes with many options to control this process. With the following options, we can for instance control the proportion of segments in which a feature should minimally occur before it is retained (a procedure also known as 'culling')
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 1
%matplotlib inline
from pystyl.corpus import Corpus
corpus = Corpus(language='en')
corpus.add_directory(directory='data/dummy')
ls data/dummy
print(corpus)
corpus.preprocess(alpha_only=True, lowercase=True)
print(corpus)
corpus.tokenize()
print(corpus)
corpus.segment(segment_size=20000)
print(corpus)
corpus.remove_tokens(rm_pronouns=True)
print(corpus)
corpus.vectorize(mfi=100)
corpus.vectorize(mfi=20, ngram_type='char', ngram_size=3)
corpus.vectorize(mfi=30, vector_space='tf_idf')
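# (Added illustration, independent of pystyl) the intuition behind the tf-idf model
# mentioned above, shown on a toy 3-segment x 2-feature count matrix with plain numpy;
# this is one common tf-idf variant and not necessarily the exact formula pystyl uses.
import numpy as np
toy_counts = np.array([[3., 0.], [2., 1.], [4., 1.]])
toy_tf = toy_counts / toy_counts.sum(axis=1, keepdims=True)
toy_idf = np.log(float(toy_counts.shape[0]) / (toy_counts > 0).sum(axis=0))
print(toy_tf * toy_idf)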
corpus.vectorize(mfi=30, vector_space='tf_std')
corpus.vectorize(mfi=30, min_df=0.80)
from pystyl.analysis import distance_matrix, hierarchical_clustering
from pystyl.visualization import scatterplot, scatterplot_3d
from pystyl.analysis import pca
pca_coor, pca_loadings = pca(corpus, nb_dimensions=2)
scatterplot(corpus, coor=pca_coor, nb_clusters=0, loadings=pca_loadings, plot_type='static',\
save=False, show=False, return_svg=False, outputfile="/Users/mike/Desktop/pca.pdf")
pca_coor, pca_loadings = pca(corpus, nb_dimensions=3)
scatterplot_3d(corpus, coor=pca_coor, outputfile="/Users/mike/Desktop/3d.pdf",\
save=True, show=False, return_svg=False)
from pystyl.analysis import distance_matrix
dm = distance_matrix(corpus, 'minmax')
from pystyl.visualization import clustermap
clustermap(corpus, distance_matrix=dm, fontsize=8, color_leafs=True,\
outputfile='/Users/mike/Desktop/cm.pdf',
show=False, save=False, return_svg=False)
from pystyl.analysis import hierarchical_clustering
cluster_tree = hierarchical_clustering(dm, linkage='ward')
from pystyl.visualization import scipy_dendrogram, ete_dendrogram
scipy_dendrogram(corpus=corpus, tree=cluster_tree, outputfile='~/Desktop/scipy_dendrogram.pdf',\
                 fontsize=5, color_leafs=True, show=False, save=False, return_svg=False)
ete_dendrogram(corpus=corpus, tree=cluster_tree, outputfile='~/Desktop/ete_dendrogram.png',\
fontsize=5, color_leafs=True, show=False, save=True, return_svg=False,
save_newick=False)
from IPython.display import Image
Image(filename='/Users/mike/Desktop/ete_dendrogram.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we get hold of the lines list from the OrbitPlotter.plot method; this is a list of lines. The first is the orbit line; the second is the current position marker. With these matplotlib line objects we can start changing the style. First we make the orbit line a solid but thin line. Then we change the current position marker to a large hexagon.
Step2: You can also change the style of the plot using the matplotlib axis, which can be acquired from the OrbitPlotter()
|
<ASSISTANT_TASK:>
Python Code:
from astropy.time import Time
import matplotlib.pyplot as plt
from poliastro.plotting import StaticOrbitPlotter
from poliastro.frames import Planes
from poliastro.bodies import Earth, Mars, Jupiter, Sun
from poliastro.twobody import Orbit
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = StaticOrbitPlotter(plane=Planes.EARTH_ECLIPTIC)
plotter.plot_body_orbit(Earth, epoch, label="Earth")
plotter.plot_body_orbit(Mars, epoch, label="Mars")
plotter.plot_body_orbit(Jupiter, epoch, label="Jupiter");
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = StaticOrbitPlotter(plane=Planes.EARTH_ECLIPTIC)
earth_plots_traj, earth_plots_pos = plotter.plot_body_orbit(Earth, epoch, label=Earth)
earth_plots_traj[0].set_linestyle("-") # solid line
earth_plots_traj[0].set_linewidth(0.5)
earth_plots_pos.set_marker("H") # Hexagon
earth_plots_pos.set_markersize(15)
mars_plots = plotter.plot_body_orbit(Mars, epoch, label=Mars)
jupiter_plots = plotter.plot_body_orbit(Jupiter, epoch, label=Jupiter)
epoch = Time("2018-08-17 12:05:50", scale="tdb")
plotter = StaticOrbitPlotter()
earth_plots_t, earth_plots_p = plotter.plot_body_orbit(Earth, epoch, label=Earth)
earth_plots_t[0].set_linestyle("-") # solid line
earth_plots_t[0].set_linewidth(0.5)
earth_plots_p.set_marker("H") # Hexagon
earth_plots_p.set_markersize(15)
mars_plots_t, mars_plots_p = plotter.plot_body_orbit(Mars, epoch, label=Mars)
mars_plots_t[0].set_dashes([0, 1, 0, 1, 1, 0])
mars_plots_t[0].set_linewidth(2)
mars_plots_p.set_marker("D") # Diamond
mars_plots_p.set_markersize(15)
mars_plots_p.set_fillstyle("none")
# make sure this is set if you use fillstyle 'none'
mars_plots_p.set_markeredgewidth(1)
jupiter_plots_t, jupiter_plots_p = plotter.plot_body_orbit(Jupiter, epoch, label=Jupiter)
jupiter_plots_t[0].set_linestyle("") # No line
jupiter_plots_p.set_marker("*") # star
jupiter_plots_p.set_markersize(15)
epoch = Time("2018-08-17 12:05:50", scale="tdb")
fig, ax = plt.subplots()
ax.grid(True)
ax.set_title("Earth, Mars, and Jupiter")
ax.set_facecolor("None")
plotter = StaticOrbitPlotter(ax)
plotter.plot_body_orbit(Earth, epoch, label=Earth)
plotter.plot_body_orbit(Mars, epoch, label=Mars)
plotter.plot_body_orbit(Jupiter, epoch, label=Jupiter)
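# (Added illustration) the Figure handle created above can be reused for any further
# matplotlib work, e.g. exporting the styled plot; the filename is just an example.
fig.savefig("earth_mars_jupiter_styled.png", dpi=150, bbox_inches="tight")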
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Running a trajectory using just a single surface
Step2: Running a trajectory using MMST with no coupling
Step3: Running a trajectory with the real MMST
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
sys11 = mm.openmm.System()
sys12 = mm.openmm.System()
sys22 = mm.openmm.System()
sys00 = mm.openmm.System()
for sys in [sys11, sys12, sys22, sys00]:
mass = 1980.0 * unit.amu
sys.addParticle(mass)
V11 = mm.openmm.CustomExternalForce("V0*tanh(a*x)")
V11.addGlobalParameter("V0", 0.01)
V11.addGlobalParameter("a", 1.6)
V11.addParticle(0, ())
V22 = mm.openmm.CustomExternalForce("-V0*tanh(a*x)")
V22.addGlobalParameter("V0", 0.01)
V22.addGlobalParameter("a", 1.6)
V22.addParticle(0, ())
V12 = mm.openmm.CustomExternalForce("C*exp(-D*(x+f))")
V12.addGlobalParameter("C", 0.005)
V12.addGlobalParameter("D", 1.0)
V12.addGlobalParameter("f", 0.0)
V12.addParticle(0, ())
V00 = mm.openmm.CustomExternalForce("0.0*x")
V00.addParticle(0, ())
sys00.addForce(V00)
sys11.addForce(V11)
sys12.addForce(V12)
sys22.addForce(V22)
topology = app.Topology()
dt = 5*46.0 * unit.femtoseconds
init_pos = np.array([[-5.0, 0.0, 0.0]]) #* unit.nanometer
#init_vel = np.array([[19.9/1980.0, 0.0, 0.0]]) #* unit.nanometer / unit.picosecond
init_vel = np.array([[0.0022, 0.0, 0.0]])
integ = mm.VerletIntegrator(dt)
simulation = app.Simulation(topology, sys11, integ)
simulation.context.setPositions(init_pos)
simulation.context.setVelocities(init_vel)
traj = []
forces = []
energies = []
for i in range(40000):
state = simulation.context.getState(getPositions=True,
getForces=True, getEnergy=True
)
pos = state.getPositions(asNumpy=True)
force = state.getForces(asNumpy=True)
energy = state.getPotentialEnergy()
forces.append(force[0][0] / force.unit)
energies.append(energy / energy.unit)
traj.append(pos[0][0] / pos.unit)
simulation.step(1)
plt.plot(traj, forces)
plt.plot(traj, energies)
plt.plot(traj)
import openmm_mmst as mmst
Hmat = [[sys11, sys00], [sys00, sys22]]
#mmst_integ = NonadiabaticIntegrator(Hmat)
Hmat = [[sys11, sys12], [sys12, sys22]]
#mmst_integ = NonadiabaticIntegrator(Hmat)
f = simulation.system.getForce(0)
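# The integrator construction above is left commented out, so the MMST runs for
# Steps 2 and 3 are not shown here. A rough sketch, assuming openmm_mmst exposes
# the NonadiabaticIntegrator constructor used in the comments above and that it
# plugs into app.Simulation like a regular OpenMM integrator (these names and
# signatures are unverified assumptions about the package):
# mmst_integ = mmst.NonadiabaticIntegrator(Hmat)
# mmst_sim = app.Simulation(topology, sys11, mmst_integ)
# mmst_sim.context.setPositions(init_pos)
# mmst_sim.context.setVelocities(init_vel)
# mmst_sim.step(1000)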
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Define Dataset Metadata
Step2: 2. Define Data Input Function
Step3: b. data pipeline input function
Step4: 3. Create Estimator
Step5: 4. Run Experiment
Step6: b. Create Experiment Function
Step7: c. Set HParam and RunConfig
Step8: d. Run Experiment
Step9: 5. Perform Predictions (Assign Instance to Clusters)
Step10: Serving via the Saved Model
|
<ASSISTANT_TASK:>
Python Code:
# Imports presumed for this TF 1.x contrib-based notebook (the original import
# cell is not shown in this excerpt); in some TF 1.x versions learn_runner is
# instead imported from tensorflow.contrib.learn.python.learn.
import shutil
from datetime import datetime
import tensorflow as tf
from tensorflow import data
from tensorflow.contrib.learn import learn_runner, make_export_strategy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection used below
train_data_files = ['data/train-data.csv']
test_data_files = ['data/test-data.csv']
model_name = 'clust-model-02'
resume = False
train = True
preprocess_features = False
extend_feature_colums = False
HEADER = ['key', 'x1', 'x2', 'x3', 'cluster']
HEADER_DEFAULTS = [[0], [0.0], [0.0], [0.0], ['NA']]
FEATURE_NAMES = ['x1', 'x2', 'x3']
UNUSED_FEATURE_NAMES = list(set(HEADER) - set(FEATURE_NAMES))
print("Header: {}".format(HEADER))
print("Input Features: {}".format(FEATURE_NAMES))
print("Unused Features: {}".format(UNUSED_FEATURE_NAMES))
def parse_csv_row(csv_row):
columns = tf.decode_csv(csv_row, record_defaults=HEADER_DEFAULTS)
columns = [tf.expand_dims(tensor, -1) for tensor in columns]
features = dict(zip(HEADER, columns))
for column in UNUSED_FEATURE_NAMES:
features.pop(column)
return features
def process_features(features):
if preprocess_features:
features = features
return features
def csv_input_fn(file_names, mode=tf.estimator.ModeKeys.TRAIN,
skip_header_lines=0,
num_epochs=None,
batch_size=200):
shuffle = False
print("")
print("* data input_fn:")
print("================")
print("Input file(s): {}".format(file_names))
print("Batch size: {}".format(batch_size))
print("Epoch Count: {}".format(num_epochs))
print("Mode: {}".format(mode))
print("Shuffle: {}".format(shuffle))
print("================")
print("")
dataset = data.TextLineDataset(filenames=file_names)
dataset = dataset.skip(skip_header_lines)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda csv_row: parse_csv_row(csv_row))
dataset = dataset.map(lambda features: process_features(features))
dataset = dataset.repeat(num_epochs)
iterator = dataset.make_one_shot_iterator()
features = iterator.get_next()
return features, None
features, _ = csv_input_fn(file_names=train_data_files)
print("Feature read from CSV: {}".format(list(features.keys())))
def create_estimator(run_config, hparams):
estimator = tf.contrib.learn.KMeansClustering(
num_clusters = hparams.num_clusters,
initial_clusters= tf.contrib.factorization.RANDOM_INIT,
distance_metric= tf.contrib.factorization.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=1,
kmeans_plus_plus_num_retries=10,
relative_tolerance=None,
config= run_config
)
print("")
print("Estimator Type: {}".format(type(estimator)))
print("")
return estimator
def csv_serving_input_fn():
SERVING_HEADER = ['x1','x2','x3']
SERVING_HEADER_DEFAULTS = [[0.0], [0.0], [0.0]]
rows_string_tensor = tf.placeholder(dtype=tf.string,
shape=[None],
name='csv_rows')
receiver_tensor = {'csv_rows': rows_string_tensor}
row_columns = tf.expand_dims(rows_string_tensor, -1)
columns = tf.decode_csv(row_columns, record_defaults=SERVING_HEADER_DEFAULTS)
columns = [tf.expand_dims(tensor, -1) for tensor in columns]
features = dict(zip(SERVING_HEADER, columns))
return tf.contrib.learn.InputFnOps(
process_features(features),
None,
receiver_tensor
)
def generate_experiment_fn(**experiment_args):
def _experiment_fn(run_config, hparams):
train_input_fn = lambda: csv_input_fn(
train_data_files,
mode = tf.contrib.learn.ModeKeys.TRAIN,
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size*10
)
eval_input_fn = lambda: csv_input_fn(
train_data_files,
mode = tf.contrib.learn.ModeKeys.EVAL,
num_epochs=1,
batch_size=hparams.batch_size
)
estimator = create_estimator(run_config, hparams)
return tf.contrib.learn.Experiment(
estimator,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_steps=None,
**experiment_args
)
return _experiment_fn
hparams = tf.contrib.training.HParams(
num_epochs = 1000,
batch_size = 500,
num_clusters=3
)
model_dir = 'trained_models/{}'.format(model_name)
run_config = tf.contrib.learn.RunConfig(
save_checkpoints_steps=100,
tf_random_seed=19850610,
model_dir=model_dir
)
print(run_config.model_dir)
if not resume:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
if train:
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
learn_runner.run(
experiment_fn=generate_experiment_fn(
export_strategies=[make_export_strategy(
csv_serving_input_fn,
exports_to_keep=1
)]
), # not executing export_savedmodel()
run_config=run_config,
schedule="train", # evaluate function is broken, thus not using train_and_evaluate
hparams=hparams
)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
### evaluate function is broken! as well as score function
#estimator.evaluate(input_fn=test_input_fn)
#estimator.score(input_fn=test_input_fn)
train_input_fn = lambda: csv_input_fn(
train_data_files,
num_epochs=1,
batch_size=1500
)
test_input_fn = lambda: csv_input_fn(
test_data_files,
mode=tf.estimator.ModeKeys.EVAL,
num_epochs=1,
batch_size=500
)
estimator = create_estimator(run_config, hparams)
train_assignments = list(estimator.predict_cluster_idx(input_fn=train_input_fn))
test_assignments = list(estimator.predict_cluster_idx(input_fn=test_input_fn))
import pandas as pd
import numpy as np
train_df = pd.read_csv(train_data_files[0], header=None, index_col=0)
test_df = pd.read_csv(test_data_files[0], header=None, index_col=0)
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(121, projection='3d')
ax.scatter(train_df.iloc[:,0], train_df.iloc[:,1], train_df.iloc[:,2], c=train_assignments, marker='o')
ax = fig.add_subplot(122, projection='3d')
ax.scatter(test_df.iloc[:,0], test_df.iloc[:,1], test_df.iloc[:,2], c=test_assignments, marker='o')
plt.show()
clusters = estimator.clusters()
print("Cluster Centriods:")
print("==================")
print(clusters)
export_dir = model_dir + "/export"
estimator.export_savedmodel(
export_dir_base = export_dir,
serving_input_fn = csv_serving_input_fn,
as_text=False
)
%%bash
MODEL_NAME='clust-model-02'
LAST=$(ls trained_models/${MODEL_NAME}/export | tail -1)
SAVE_MODEL_DIR=trained_models/$MODEL_NAME/export/$LAST
ls $SAVE_MODEL_DIR
gcloud ml-engine local predict --model-dir=$SAVE_MODEL_DIR --text-instances='data/new-data.csv'
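# Note: each line of new-data.csv is expected to be a raw CSV row matching
# SERVING_HEADER (x1,x2,x3), e.g. "0.5,-1.2,3.4" (illustrative values only).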
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Create ML dataset by sampling using BigQuery
Step3: Lab Task #1
|
<ASSISTANT_TASK:>
Python Code:
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
# Create SQL query using natality data after the year 2000
from google.cloud import bigquery
query = """
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE year > 2000
"""
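# The sampling cells that create traindf/evaldf are missing from this excerpt.
# A minimal sketch (splitting train/eval on hashmonth and sampling a small
# fraction; the exact fractions and the use of
# bigquery.Client().query(...).to_dataframe() are assumptions, not taken from
# the original notebook):
trainQuery = "SELECT * FROM (" + query + ") WHERE ABS(MOD(hashmonth, 4)) < 3 AND RAND() < 0.0005"
evalQuery = "SELECT * FROM (" + query + ") WHERE ABS(MOD(hashmonth, 4)) = 3 AND RAND() < 0.0005"
traindf = bigquery.Client().query(trainQuery).to_dataframe()
evaldf = bigquery.Client().query(evalQuery).to_dataframe()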
traindf.to_csv('train.csv', index=False, header=False)
evaldf.to_csv('eval.csv', index=False, header=False)
%%bash
wc -l *.csv
head *.csv
tail *.csv
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a decaying oscillation
Step2: Now, using curve_fit to fit this model and determine the estimates and uncertainties for the parameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
f = np.load('decay_osc.npz')
tdata = np.array(f['tdata'])
ydata = np.array(f['ydata'])
dy = np.array(f['dy'])
plt.figure(figsize=(8,6))
plt.errorbar(tdata, ydata, dy, fmt='.k', ecolor='lightgray')
plt.tick_params(axis='x', direction='out', top='off')
plt.tick_params(axis='y', direction='out', right='off')
plt.xlabel('t'), plt.ylabel('y'), plt.title('Oscillation Raw Data');
assert True # leave this to grade the data import and raw data plot
def model(t, A, lam, omega, delta):
return A*np.exp(-lam*t)*np.cos(omega*t) + delta
theta_best, theta_cov = opt.curve_fit(model, tdata, ydata, sigma=dy, absolute_sigma=True)
print('A = {0:.3f} +/- {1:.3f}'.format(theta_best[0], np.sqrt(theta_cov[0,0])))
print('λ = {0:.3f} +/- {1:.3f}'.format(theta_best[1], np.sqrt(theta_cov[1,1])))
print('ω = {0:.3f} +/- {1:.3f}'.format(theta_best[2], np.sqrt(theta_cov[2,2])))
print('δ = {0:.3f} +/- {1:.3f}'.format(theta_best[3], np.sqrt(theta_cov[3,3])))
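# Optional goodness-of-fit check (not part of the original notebook): the
# reduced chi-square of the weighted residuals should be of order 1 for a good fit.
resid = ydata - model(tdata, *theta_best)
chi2_red = np.sum((resid / dy)**2) / (len(ydata) - len(theta_best))
print('reduced chi-square = {0:.2f}'.format(chi2_red))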
tfit = np.linspace(0,20)
yfit = model(tfit, theta_best[0], theta_best[1], theta_best[2], theta_best[3])
plt.figure(figsize=(8,6))
plt.plot(tfit, yfit)
plt.plot(tdata, ydata, 'k.')
plt.tick_params(axis='x', direction='out', top='off')
plt.tick_params(axis='y', direction='out', right='off')
plt.xlabel('t'), plt.ylabel('y'), plt.title('Oscillation Curve Fitting');
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us create an LNN with 2-dimensional input.
Step2: Schooling
Step3: Looks like our LNN was a good student, scoring a perfect F1 score of 1.0 on each task.
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
sys.path.insert(0,'..')
sys.path.insert(0,'../layeredneuralnetwork/')
from layered_neural_network import LayeredNeuralNetwork
input_dimension = 2
lnn = LayeredNeuralNetwork(input_dimension=input_dimension)
from school.binary import Binary
Binary.teach_and(lnn)
Binary.teach_or(lnn)
Binary.teach_xor(lnn)
%pylab inline
weights = lnn.get_weights()
print(weights.shape)
plt.imshow(weights, cmap='gray', interpolation='none')
plt.title('Weights of LNN')
plt.xlabel('Inputs')
plt.ylabel('Classifiers')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: Since there isn't much data, there will likely be a lot of uncertainty in the hyperparameter values.
Step3: The results show that the hyperparameters were recovered pretty well, but definitely with a high degree of uncertainty. Let's look at the predicted fits and uncertainty next using samples from the full posterior.
Step4: The sample_gp function draws realizations of the GP from the predictive distribution.
Step5: Example 2
Step6: In the plot of the observed data, the periodic component is barely distinguishable by eye. It is plausible that there isn't a periodic component, and the observed data is just the drift component and white noise.
Step7: Let's see if we can infer the correct values of the hyperparameters.
Step8: Some large samples make the histogram of s2_p hard to read. Below is a zoomed-in histogram.
Step9: Comparing the histograms of the results to the true values, we can see that PyMC3's MCMC methods did a good job estimating the true GP hyperparameters. Although the periodic component is faintly apparent in the observed data, the GP model is able to extract it with high accuracy.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
cm = cmap.inferno
import numpy as np
import scipy as sp
import theano
import theano.tensor as tt
import theano.tensor.nlinalg
import sys
sys.path.insert(0, "../../..")
import pymc3 as pm
np.random.seed(20090425)
n = 20
X = np.sort(3*np.random.rand(n))[:,None]
with pm.Model() as model:
# f(x)
l_true = 0.3
s2_f_true = 1.0
cov = s2_f_true * pm.gp.cov.ExpQuad(1, l_true)
# noise, epsilon
s2_n_true = 0.1
K_noise = s2_n_true**2 * tt.eye(n)
K = cov(X) + K_noise
# evaluate the covariance with the given hyperparameters
K = theano.function([], cov(X) + K_noise)()
# generate fake data from GP with white noise (with variance sigma2)
y = np.random.multivariate_normal(np.zeros(n), K)
fig = plt.figure(figsize=(14,5)); ax = fig.add_subplot(111)
ax.plot(X, y, 'ok', ms=10);
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
Z = np.linspace(0,3,100)[:,None]
with pm.Model() as model:
# priors on the covariance function hyperparameters
l = pm.Uniform('l', 0, 10)
# uninformative prior on the function variance
log_s2_f = pm.Uniform('log_s2_f', lower=-10, upper=5)
s2_f = pm.Deterministic('s2_f', tt.exp(log_s2_f))
# uninformative prior on the noise variance
log_s2_n = pm.Uniform('log_s2_n', lower=-10, upper=5)
s2_n = pm.Deterministic('s2_n', tt.exp(log_s2_n))
# covariance functions for the function f and the noise
f_cov = s2_f * pm.gp.cov.ExpQuad(1, l)
y_obs = pm.gp.GP('y_obs', cov_func=f_cov, sigma=s2_n, observed={'X':X, 'Y':y})
with model:
trace = pm.sample(2000)
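# Optional numeric check (an extra, not in the original notebook): pm.summary
# prints summary statistics for the sampled hyperparameters.
pm.summary(trace[1000:], varnames=['l', 's2_f', 's2_n'])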
pm.traceplot(trace[1000:], varnames=['l', 's2_f', 's2_n'],
lines={"l": l_true,
"s2_f": s2_f_true,
"s2_n": s2_n_true});
with model:
gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50, random_seed=42)
fig, ax = plt.subplots(figsize=(14,5))
[ax.plot(Z, x, color=cm(0.3), alpha=0.3) for x in gp_samples]
# overlay the observed data
ax.plot(X, y, 'ok', ms=10);
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
ax.set_title("Posterior predictive distribution");
np.random.seed(200)
n = 150
X = np.sort(40*np.random.rand(n))[:,None]
# define gp, true parameter values
with pm.Model() as model:
l_per_true = 2
cov_per = pm.gp.cov.Cosine(1, l_per_true)
l_drift_true = 4
cov_drift = pm.gp.cov.Matern52(1, l_drift_true)
s2_p_true = 0.3
s2_d_true = 1.5
s2_w_true = 0.3
periodic_cov = s2_p_true * cov_per
drift_cov = s2_d_true * cov_drift
signal_cov = periodic_cov + drift_cov
noise_cov = s2_w_true**2 * tt.eye(n)
K = theano.function([], signal_cov(X, X) + noise_cov)()
y = np.random.multivariate_normal(np.zeros(n), K)
fig = plt.figure(figsize=(12,5)); ax = fig.add_subplot(111)
ax.plot(X, y, '--', color=cm(0.4))
ax.plot(X, y, 'o', color="k", ms=10);
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
with pm.Model() as model:
# prior for periodic lengthscale, or frequency
l_per = pm.Uniform('l_per', lower=1e-5, upper=10)
# prior for the drift lengthscale hyperparameter
l_drift = pm.Uniform('l_drift', lower=1e-5, upper=10)
# uninformative prior on the periodic amplitude
log_s2_p = pm.Uniform('log_s2_p', lower=-10, upper=5)
s2_p = pm.Deterministic('s2_p', tt.exp(log_s2_p))
# uninformative prior on the drift amplitude
log_s2_d = pm.Uniform('log_s2_d', lower=-10, upper=5)
s2_d = pm.Deterministic('s2_d', tt.exp(log_s2_d))
# uninformative prior on the white noise variance
log_s2_w = pm.Uniform('log_s2_w', lower=-10, upper=5)
s2_w = pm.Deterministic('s2_w', tt.exp(log_s2_w))
# the periodic "signal" covariance
signal_cov = s2_p * pm.gp.cov.Cosine(1, l_per)
# the "noise" covariance
drift_cov = s2_d * pm.gp.cov.Matern52(1, l_drift)
y_obs = pm.gp.GP('y_obs', cov_func=signal_cov + drift_cov, sigma=s2_w, observed={'X':X, 'Y':y})
with model:
trace = pm.sample(2000, step=pm.NUTS(integrator="two-stage"), init=None)
pm.traceplot(trace[1000:], varnames=['l_per', 'l_drift', 's2_d', 's2_p', 's2_w'],
lines={"l_per": l_per_true,
"l_drift": l_drift_true,
"s2_d": s2_d_true,
"s2_p": s2_p_true,
"s2_w": s2_w_true});
ax.get_ybound()
fig = plt.figure(figsize=(12,6)); ax = fig.add_subplot(111)
ax.hist(trace['s2_p', 1000:], 100, range=(0,4), color=cm(0.3), ec='none');
ax.plot([0.3, 0.3], [0, ax.get_ybound()[1]], "k", lw=2);
ax.set_title("Histogram of s2_p");
ax.set_ylabel("Number of samples");
ax.set_xlabel("s2_p");
Z = np.linspace(0, 40, 100).reshape(-1, 1)
with model:
gp_samples = pm.gp.sample_gp(trace[1000:], y_obs, Z, samples=50, random_seed=42, progressbar=False)
fig, ax = plt.subplots(figsize=(14,5))
[ax.plot(Z, x, color=cm(0.3), alpha=0.3) for x in gp_samples]
# overlay the observed data
ax.plot(X, y, 'o', color="k", ms=10);
ax.set_xlabel("x");
ax.set_ylabel("f(x)");
ax.set_title("Posterior predictive distribution");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Procedure
Step2: Measurement instruments used
Step3: Evaluation
Step4: Resonance method
Step5: Gas mixtures
Step6: Error analysis
Step7: Gas mixtures
Step8: Appendix
Step9: Resonance method
|
<ASSISTANT_TASK:>
Python Code:
# Preparations
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
from scipy import stats
from scipy.optimize import curve_fit
import seaborn as sns
from IPython.display import Latex
import warnings
from PrettyTable import PrettyTable
from functools import partial
from PrettyFigure import PrettyFigure
warnings.filterwarnings("ignore", module="matplotlib")
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
plt.rcParams['savefig.dpi'] = 75
# plt.rcParams['figure.autolayout'] = False
# plt.rcParams['figure.figsize'] = 10, 6
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.unicode'] = True
plt.rcParams['font.family'] = "STIX"
plt.rcParams['text.latex.preamble'] = "\\usepackage{subdepth}, \\usepackage{type1cm}"
results = {}
sns.set(color_codes=True)
def average(data):
return 1 / len(data) * sum(data)
def error(data, average_of_data):
s = sum([(x - average_of_data)**2 for x in data])
return math.sqrt(s / (len(data) * (len(data) - 1)))
def std_deviation(error_of_average, length_of_dataset):
return error_of_average * math.sqrt(length_of_dataset)
def average_with_weights(data, weights):
d = data
w = weights
return (d * w**-2).sum() / (w**-2).sum()
def error_with_weights(weights):
w = weights
return 1 / math.sqrt((w**-2).sum())
def wavg(group, avg_name, weight_name):
d = group[avg_name]
w = group[weight_name]
return (d * w**-2).sum() / (w**-2).sum()
def werr(group, weight_name):
return 1 / math.sqrt((group[weight_name]**-2).sum())
# Constants
name = ['Luft', 'Helium', 'SF6']
mm = [28.95, 4.00, 146.06]
ri = [287, 2078, 56.92]
cp = [1.01, 5.23, 0.665]
cv = [0.72, 3.21, 0.657]
k = [1.40, 1.63, 1.012]
c0 = [344, 971, 129]
constants_tbl = PrettyTable(
list(zip(name, mm, ri, cp, cv, k, c0)),
label='tab:gase',
caption='Kennwerte und Konstanten der verwendeten Gase.',
extra_header=[
'Gas',
r'$M_m[\frac{g}{mol}]$',
r'$R_i[\frac{J}{kg K}]$',
r'$c_p[\frac{kJ}{kg K}]$',
r'$c_v[\frac{kJ}{kg K}]$',
r'$K$',
r'$c_0[\frac{m}{s}]$'
], entries_per_column=3)
constants_tbl.show()
# Utilities
name = ['Oszilloskop', 'Zeitmesser', 'Funktionsgenerator', 'Verstärker', 'Vakuumpumpe', 'Netzgerät', 'Temperaturmessgerät']
manufacturer = ['LeCroy', 'Keithley', 'HP', 'WicTronic', 'Pfeiffer', ' ', ' ']
device = ['9631 Dual 300MHz Oscilloscope 2.5 GS/s', '775 Programmable Counter/Timer', '33120A 15MHz Waveform Generator', 'Zweikanalverstärker', 'Vacuum', ' ', ' ']
utilities_tbl = PrettyTable(
list(zip(name, manufacturer, device)),
label='tab:utilities',
caption='Verwendete Gerätschaften',
extra_header=[
'Funktion',
'Hersteller',
'Gerätename',
], entries_per_column=7)
utilities_tbl.show()
# Time-of-flight method: air, helium, SF6
import collections
# Read Data
dfb = pd.read_csv('data/laufzeitmethode.csv')
ax = None
i = 0
for gas1 in ['luft', 'helium', 'sf6']:
df = dfb.loc[dfb['gas1'] == gas1].loc[dfb['gas2'] == gas1].loc[dfb['p'] == 1]
slope, intercept, sem, r, p = stats.linregress(df['t'], df['s'])
n = np.linspace(0.0, df['t'][9 + i * 10] * 1.2, 100)
results[gas1] = {
gas1: {
}
}
results[gas1][gas1]['1_l_df'] = df
results[gas1][gas1]['1_l_slope'] = slope
results[gas1][gas1]['1_l_intercept'] = intercept
results[gas1][gas1]['1_l_sem'] = sem
ax = df.plot(kind='scatter', x='t', y='s', label='gemessene Laufzeit')
plt.plot(n, [i * slope + intercept for i in n], label='linearer Fit der Laufzeit', axes=ax)
plt.xlabel('Laufzeit [s]')
plt.ylabel('Strecke [m]')
plt.xlim([0, df['t'][9 + i * 10] * 1.1])
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
i += 1
plt.close()
figure = PrettyFigure(
ax.figure,
label='fig:laufzeiten_{}'.format(gas1),
caption='Laufzeiten in {}. Dazu einen linearen Fit um die Mittlere Geschwindigkeit zu bestimmen.'.format(gas1.title()))
figure.show()
# Resonance method: air, helium, SF6
import collections
# Read Data
dfb2 = pd.read_csv('data/resonanzfrequenz.csv')
ax = None
i = 0
for gas1 in ['luft', 'helium', 'sf6']:
df = dfb2.loc[dfb2['gas1'] == gas1].loc[dfb2['gas2'] == gas1].loc[dfb2['p'] == 1]
df['lbd'] = 1 / (df['s'] * 2)
df['v'] = 2 * df['f'] * df['s']
slope, intercept, sem, r, p = stats.linregress(df['lbd'], df['f'])
n = np.linspace(0.0, df['lbd'][(5 + i * 6) if i < 2 else 15] * 1.2, 100)
results[gas1][gas1]['1_r_df'] = df
results[gas1][gas1]['1_r_slope'] = slope
results[gas1][gas1]['1_r_intercept'] = intercept
results[gas1][gas1]['1_r_sem'] = sem
ax = df.plot(kind='scatter', x='lbd', y='f', label='gemessenes $\lambda^{-1}$')
plt.plot(n, [i * slope + intercept for i in n], label='linearer Fit von $\lambda^{-1}$', axes=ax)
plt.xlabel(r'$1 / \lambda [m^{-1}]$')
plt.ylabel(r'$Frequenz [s^{-1}]$')
plt.xlim([0, df['lbd'][(5 + i * 6) if i < 2 else 15] * 1.1])
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
i += 1
plt.close()
figure = PrettyFigure(
ax.figure,
label='fig:laufzeiten_{}'.format(gas1),
caption='Abstände der Maxima bei resonanten Frequenzen in {}. Dazu einen linearen Fit um die Mittlere Geschwindigkeit zu bestimmen.'.format(gas1.title()))
figure.show()
# Time-of-flight method: helium-SF6 mixtures
import collections
# Read Data
dfb = pd.read_csv('data/laufzeitmethode.csv')
ax = None
colors = ['blue', 'green', 'red', 'purple']
results['helium']['sf6'] = {}
v_exp = []
for i in range(1, 5):
i /= 5
df = dfb.loc[dfb['gas1'] == 'helium'].loc[dfb['gas2'] == 'sf6'].loc[dfb['p'] == i]
slope, intercept, sem, r, p = stats.linregress(df['t'], df['s'])
v_exp.append(slope)
n = np.linspace(0.0, df['t'][29 + i * 15] * 2, 100)
results['helium']['sf6']['0{}_l_df'.format(int(i * 10))] = df
results['helium']['sf6']['0{}_l_slope'.format(int(i * 10))] = slope
results['helium']['sf6']['0{}_l_intercept'.format(int(i * 10))] = intercept
results['helium']['sf6']['0{}_l_sem'.format(int(i * 10))] = sem
if i == 0.2:
ax = df.plot(kind='scatter', x='t', y='s', label='gemessene Laufzeit', color=colors[int(i * 5) - 1])
else:
plt.scatter(df['t'], df['s'], axes=ax, label=None, color=colors[int(i * 5) - 1])
plt.plot(n, [i * slope + intercept for i in n], label='Laufzeit ({:.1f}\% Helium, {:.1f}\% SF6)'.format(i, 1 - i), axes=ax, color=colors[int(i * 5) - 1])
plt.xlabel('Laufzeit [s]')
plt.ylabel('Strecke [m]')
plt.legend(bbox_to_anchor=(0.02, 0.98), loc=2, borderaxespad=0.2)
i += 0.2
plt.xlim([0, 0.006])
plt.close()
figure = PrettyFigure(
ax.figure,
label='fig:laufzeiten_HESF6',
caption='Laufzeiten in verschiedenen Helium/SF6-Gemischen. Dazu lineare Regression um die Mittlere Geschwindigkeit zu bestimmen.')
figure.show()
# Literature & Calcs
T = 21.3 + 273.15
Ri = 287
K = 1.402
results['luft']['luft']['berechnet'] = math.sqrt(K * Ri * T)
results['luft']['luft']['literatur'] = 343
Ri = 2078
K = 1.63
results['helium']['helium']['berechnet'] = math.sqrt(K * Ri * T)
results['helium']['helium']['literatur'] = 971
Ri = 56.92
K = 1.012
results['sf6']['sf6']['berechnet'] = math.sqrt(K * Ri * T)
results['sf6']['sf6']['literatur'] = 129
cp1 = cp[1]
cp2 = cp[2]
cv1 = cv[1]
cv2 = cv[2]
RL1 = ri[1]
RL2 = ri[2]
m1 = 0.2
m2 = 0.8
s1 = (m1 * cp1) + (m2 * cp2)
s2 = (m1 * cv1) + (m2 * cv2)
s3 = (m1 * RL1) + (m2 * RL2)
results['helium']['sf6']['02_l_berechnet'] = math.sqrt(s1 / s2 * s3 * T)
m1 = 0.4
m2 = 0.6
s1 = (m1 * cp1) + (m2 * cp2)
s2 = (m1 * cv1) + (m2 * cv2)
s3 = (m1 * RL1) + (m2 * RL2)
results['helium']['sf6']['04_l_berechnet'] = math.sqrt(s1 / s2 * s3 * T)
m1 = 0.6
m2 = 0.4
s1 = (m1 * cp1) + (m2 * cp2)
s2 = (m1 * cv1) + (m2 * cv2)
s3 = (m1 * RL1) + (m2 * RL2)
results['helium']['sf6']['06_l_berechnet'] = math.sqrt(s1 / s2 * s3 * T)
m1 = 0.8
m2 = 0.2
s1 = (m1 * cp1) + (m2 * cp2)
s2 = (m1 * cv1) + (m2 * cv2)
s3 = (m1 * RL1) + (m2 * RL2)
results['helium']['sf6']['08_l_berechnet'] = math.sqrt(s1 / s2 * s3 * T)
p = [p for p in np.linspace(0, 1, 1000)]
v = [math.sqrt(((n * cp1) + ((1 - n) * cp2)) / ((n * cv1) + ((1 - n) * cv2)) * ((n * RL1) + ((1 - n) * RL2)) * T) for n in p]
fig = plt.figure()
plt.plot(p, v, label='errechnete Laufzeit')
plt.scatter([0.2, 0.4, 0.6, 0.8], v_exp, label='experimentelle Laufzeit')
plt.xlabel('Heliumanteil')
plt.ylabel('Schallgeschwindigkeit [v]')
plt.xlim([0, 1])
plt.close()
figure = PrettyFigure(
fig,
label='fig:laufzeiten_vgl',
caption='Laufzeiten in Helium/SF6-Gemischen. Experimentelle Werte verglichen mit den berechneten.')
figure.show()
# Show results
values = [
'Luft',
'Helium',
'SF6'
]
means_l = [
'{0:.2f}'.format(results['luft']['luft']['1_l_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['1_l_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['1_l_slope']) + r'$\frac{m}{s}$'
]
means_r = [
'{0:.2f}'.format(results['luft']['luft']['1_r_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['1_r_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['1_r_slope']) + r'$\frac{m}{s}$'
]
sem_l = [
'{0:.2f}'.format(results['luft']['luft']['1_l_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['1_l_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['1_l_sem']) + r'$\frac{m}{s}$'
]
sem_r = [
'{0:.2f}'.format(results['luft']['luft']['1_r_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['1_r_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['1_r_sem']) + r'$\frac{m}{s}$'
]
berechnet = [
'{0:.2f}'.format(results['luft']['luft']['berechnet']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['berechnet']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['berechnet']) + r'$\frac{m}{s}$'
]
literatur = [
'{0:.2f}'.format(results['luft']['luft']['literatur']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['helium']['literatur']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['sf6']['sf6']['literatur']) + r'$\frac{m}{s}$'
]
v2_results_tbl = PrettyTable(
list(zip(values, means_l, sem_l, means_r, sem_r, berechnet, literatur)),
label='tab:resultat_rein',
caption='Resultate aus den Versuchen mit reinen Gasen.',
extra_header=[
'Wert',
'Laufzeitmethode $v_{L}$',
'stat. Fehler',
'Resonanzmethode $v_{R}$',
'stat. Fehler',
'berechnet',
'Literatur'
], entries_per_column=3)
v2_results_tbl.show()
# Show results
values = [
'20% / 80%',
'40% / 60%',
'60% / 40%',
'80% / 20%'
]
means_x = [
'{0:.2f}'.format(results['helium']['sf6']['02_l_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['04_l_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['06_l_slope']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['08_l_slope']) + r'$\frac{m}{s}$'
]
sem_x = [
'{0:.2f}'.format(results['helium']['sf6']['02_l_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['04_l_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['06_l_sem']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['08_l_sem']) + r'$\frac{m}{s}$'
]
berechnet_x = [
'{0:.2f}'.format(results['helium']['sf6']['02_l_berechnet']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['04_l_berechnet']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['06_l_berechnet']) + r'$\frac{m}{s}$',
'{0:.2f}'.format(results['helium']['sf6']['08_l_berechnet']) + r'$\frac{m}{s}$'
]
v2_results_tbl = PrettyTable(
list(zip(values, means_x, sem_x, berechnet_x)),
label='tab:resultat_gasgemisch',
caption='Resultate aus dem Versuch mit den Gasgemischen.',
extra_header=[
'Helium / SF6',
'mit Laufzeitmethode $v_{L}$',
'statistischer Fehler',
'berechnet',
], entries_per_column=4)
v2_results_tbl.show()
data = PrettyTable(
list(zip(dfb['gas1'], dfb['gas2'], dfb['p'], dfb['s'], dfb['t'])),
caption='Messwerte der Laufzeitmethode.',
entries_per_column=len(dfb['gas1']),
extra_header=['Gas 1', 'Gas 2', 'Anteil Gas 1', 'Strecke [m]', 'Laufzeit [s]']
)
data.show()
data = PrettyTable(
list(zip(dfb2['gas1'], dfb2['gas2'], dfb2['p'], dfb2['f'], dfb2['s'])),
caption='Messwerte der Resonanzmethode.',
entries_per_column=len(dfb2['gas1']),
extra_header=['Gas 1', 'Gas 2', 'Anteil Gas 1', 'Frequenz [Hz]', 'Strecke [m]']
)
data.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model averaging
Step2: Build the model
Step3: Prepare the dataset
Step4: Here we compare three optimizers:
Step5: Both the MovingAverage and SWA (stochastic weight averaging) optimizers use the AverageModelCheckpoint callback.
Step6: Train the model
Step7: Moving average SGD
Step8: Stochastic weight averaging SGD
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -U tensorflow-addons
import tensorflow as tf
import tensorflow_addons as tfa
import numpy as np
import os
def create_model(opt):
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=opt,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
#Load Fashion MNIST dataset
train, test = tf.keras.datasets.fashion_mnist.load_data()
images, labels = train
images = images/255.0
labels = labels.astype(np.int32)
fmnist_train_ds = tf.data.Dataset.from_tensor_slices((images, labels))
fmnist_train_ds = fmnist_train_ds.shuffle(5000).batch(32)
test_images, test_labels = test
#Optimizers
sgd = tf.keras.optimizers.SGD(0.01)
moving_avg_sgd = tfa.optimizers.MovingAverage(sgd)
stocastic_avg_sgd = tfa.optimizers.SWA(sgd)
#Callback
checkpoint_path = "./training/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir,
save_weights_only=True,
verbose=1)
avg_callback = tfa.callbacks.AverageModelCheckpoint(filepath=checkpoint_dir,
update_weights=True)
#Build Model
model = create_model(sgd)
#Train the network
model.fit(fmnist_train_ds, epochs=5, callbacks=[cp_callback])
#Evaluate results
model.load_weights(checkpoint_dir)
loss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)
print("Loss :", loss)
print("Accuracy :", accuracy)
#Build Model
model = create_model(moving_avg_sgd)
#Train the network
model.fit(fmnist_train_ds, epochs=5, callbacks=[avg_callback])
#Evaluate results
model.load_weights(checkpoint_dir)
loss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)
print("Loss :", loss)
print("Accuracy :", accuracy)
#Build Model
model = create_model(stocastic_avg_sgd)
#Train the network
model.fit(fmnist_train_ds, epochs=5, callbacks=[avg_callback])
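# Note: AverageModelCheckpoint(update_weights=True) already copies the averaged
# weights into the model; with update_weights=False you would instead call the
# optimizer's assign_average_vars() before evaluating, e.g.:
# stocastic_avg_sgd.assign_average_vars(model.variables)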
#Evaluate results
model.load_weights(checkpoint_dir)
loss, accuracy = model.evaluate(test_images, test_labels, batch_size=32, verbose=2)
print("Loss :", loss)
print("Accuracy :", accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lmec', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Post-training float16 quantization
Step2: Train and export the model
Step3: In this example, you trained the model for only a single epoch, so it only trains to about 96% accuracy.
Step4: Write it out to a .tflite file:
Step5: To instead quantize the model to float16 on export, first set the optimizations flag to use the default optimizations. Then specify float16 as the supported type for the target platform:
Step6: Finally, convert the model as usual. Note that, for calling convenience, the converted model will still use float inputs and outputs by default.
Step7: Note that the resulting file is approximately half the size.
Step8: Run the TensorFlow Lite models
Step9: Test the models on a single image
Step10: Evaluate the models
Step11: Repeat the evaluation on the float16 quantized model to obtain results such as the following:
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pathlib
tf.float16
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels)
)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_fp16_model = converter.convert()
tflite_model_fp16_file = tflite_models_dir/"mnist_model_quant_f16.tflite"
tflite_model_fp16_file.write_bytes(tflite_fp16_model)
!ls -lh {tflite_models_dir}
interpreter = tf.lite.Interpreter(model_path=str(tflite_model_file))
interpreter.allocate_tensors()
interpreter_fp16 = tf.lite.Interpreter(model_path=str(tflite_model_fp16_file))
interpreter_fp16.allocate_tensors()
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
predictions = interpreter.get_tensor(output_index)
import matplotlib.pylab as plt
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
test_image = np.expand_dims(test_images[0], axis=0).astype(np.float32)
input_index = interpreter_fp16.get_input_details()[0]["index"]
output_index = interpreter_fp16.get_output_details()[0]["index"]
interpreter_fp16.set_tensor(input_index, test_image)
interpreter_fp16.invoke()
predictions = interpreter_fp16.get_tensor(output_index)
plt.imshow(test_images[0])
template = "True:{true}, predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[0]),
predict=str(np.argmax(predictions[0]))))
plt.grid(False)
# A helper function to evaluate the TF Lite model using "test" dataset.
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for test_image in test_images:
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
# Compare prediction results with ground truth labels to calculate accuracy.
accurate_count = 0
for index in range(len(prediction_digits)):
if prediction_digits[index] == test_labels[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction_digits)
return accuracy
print(evaluate_model(interpreter))
# NOTE: Colab runs on server CPUs. At the time of writing this, TensorFlow Lite
# doesn't have super optimized server CPU kernels. For this reason this may be
# slower than the above float interpreter. But for mobile CPUs, considerable
# speedup can be observed.
print(evaluate_model(interpreter_fp16))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to<br/> Software Analytics
Step2: "100" == maximum popularity!
Step3: We take a look at some basic information about the dataset.
Step4: <b>1</b> DataFrame (~ a programmable Excel worksheet), <b>6</b> Series (= columns), <b>1128819</b> entries (= rows)
Step5: We look only at the most recent changes.
Step6: We only want to use Java code.
Step7: III. Formal modeling
Step8: We bring in information about the lines of code...
Step9: ...and join it with the existing data.
Step10: IV. Interpretation
Step11: V. Communication
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
pd.read_csv("../dataset/google_trends_datascience.csv", index_col=0).plot();
log = pd.read_csv("../dataset/git_log_intellij.csv.gz")
log.head()
log.info()
log['timestamp'] = pd.to_datetime(log['timestamp'])
log.head()
# use log['timestamp'].max() instead of pd.Timestamp('today') to avoid outdated data in the future
recent = log[log['timestamp'] > log['timestamp'].max() - pd.Timedelta('90 days')]
recent.head()
java = recent[recent['filename'].str.endswith(".java")].copy()
java.head()
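# Count the number of commits (sha entries) per file -- this change frequency
# is the first ingredient of the hotspot analysis further below.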
changes = java.groupby('filename')[['sha']].count()
changes.head()
loc = pd.read_csv("../dataset/cloc_intellij.csv.gz", index_col=1)
loc.head()
hotspots = changes.join(loc[['code']]).dropna()
hotspots.head()
top10 = hotspots.sort_values(by="sha", ascending=False).head(10)
top10
ax = top10.plot.scatter('sha', 'code');
for k, v in top10.iterrows():
ax.annotate(k.split("/")[-1], v)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
<ASSISTANT_TASK:>
Python Code:
# DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
# DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary
    :param text: The text of tv scripts split into words
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
vocab_to_int = {word:integer for integer,word in enumerate(set(text))}
int_to_vocab = {integer:word for integer,word in enumerate(set(text))}
return vocab_to_int, int_to_vocab
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenize dictionary where the key is the punctuation and the value is the token
    """
dictionary = {
'.' : '||Period||',
',' : '||Comma||',
'"' : '||Quotation_Mark||',
';' : '||Semicolon||',
'!' : '||Exclamation_Mark||',
'?' : '||Question_Mark||',
'(' : '||Left_Parentheses||',
')' : '||Right_Parentheses||',
'--': '||Dash||',
'\n': '||Return||',
}
#print(dictionary)
return dictionary
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
# DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
# DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
# DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
    """
    Create TF Placeholders for input, targets, and learning rate.
    :return: Tuple (input, targets, learning rate)
    """
inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input')
targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name='targets')
learningRate = tf.placeholder(dtype=tf.float32, shape=None, name='learning_rate')
return inputs, targets, learningRate
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
def get_init_cell(batch_size, rnn_size):
    """
    Create an RNN Cell and initialize it.
    :param batch_size: Size of batches
    :param rnn_size: Size of RNNs
    :return: Tuple (cell, initialize state)
    """
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size)
cell = tf.contrib.rnn.MultiRNNCell([cell])
initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name='initial_state')
return cell, initial_state
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
    """
    Create embedding for <input_data>.
    :param input_data: TF placeholder for text input.
    :param vocab_size: Number of words in vocabulary.
    :param embed_dim: Number of embedding dimensions
    :return: Embedded input.
    """
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
    """
    Create a RNN using a RNN Cell
    :param cell: RNN Cell
    :param inputs: Input text data
    :return: Tuple (Outputs, Final State)
    """
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return outputs, final_state
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
    """
    Build part of the neural network
    :param cell: RNN cell
    :param rnn_size: Size of rnns
    :param input_data: Input data
    :param vocab_size: Vocabulary size
    :param embed_dim: Number of embedding dimensions
    :return: Tuple (Logits, FinalState)
    """
embed = get_embed(input_data, vocab_size, rnn_size)
outputs, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None)
return logits, final_state
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
    """
    Return batches of input and target
    :param int_text: Text with the words replaced by their ids
    :param batch_size: The size of batch
    :param seq_length: The length of sequence
    :return: Batches as a Numpy array
    """
n_batch = len(int_text) // (batch_size * seq_length)
int_text_x = np.array(int_text[:batch_size * seq_length * n_batch])
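    # The targets are simply the inputs shifted one word to the left (np.roll),
    # so each input word is trained to predict the word that follows it.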
int_text_y = np.roll(int_text_x, -1)
x_batches = np.split(int_text_x.reshape(batch_size, -1), n_batch, 1)
y_batches = np.split(int_text_y.reshape(batch_size, -1), n_batch, 1)
return np.array(list(zip(x_batches, y_batches)))
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
# Number of Epochs
num_epochs = 200
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Embedding Dimension Size
embed_dim = 300
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 13
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
# DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, embed_dim)
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_dir)
print('Model Trained and Saved')
# DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
# DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
    """
    Get input, initial state, final state, and probabilities tensor from <loaded_graph>
    :param loaded_graph: TensorFlow graph loaded from file
    :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
    """
input_tensor=loaded_graph.get_tensor_by_name("input:0")
initial_state_tensor=loaded_graph.get_tensor_by_name("initial_state:0")
final_state_tensor=loaded_graph.get_tensor_by_name("final_state:0")
probs_tensor=loaded_graph.get_tensor_by_name("probs:0")
return input_tensor, initial_state_tensor, final_state_tensor, probs_tensor
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
    """
    Pick the next word in the generated text
    :param probabilities: Probabilities of the next word
    :param int_to_vocab: Dictionary of word ids as the keys and words as the values
    :return: String of the predicted word
    """
return int_to_vocab[np.argmax(probabilities)]
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
# DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to TensorFlow Part 2 - Debugging and Control Flow
Step2: What this notebook covers
Step3: tf.print
Step4: If you're using eager execution mode, that's all you need to know. For deferred execution however there are some significant complications that we'll discuss in the next section.
Step5: Then we don't get any output. Nothing depends on print_trace (in fact nothing can depend on it
Step6: That's fine for our noddy sample above. But obviously has problems as your graph grows larger or the sess.run method gets further removed from the graph definition. The solution for that is tf.control_dependencies. This signals to tensorflow that the given set of prerequisite ops must be evaluated before a set of dependent ops.
Step7: Note that if all of the dependent ops are pruned from the dependency tree and thus not evaluated, then the prerequisites will not be evaluated either
Step8: tf.debugging.Assert
Step9: There are also a bunch of helper methods, such as
Step10: Control Flow
Step11: The exact order of execution is a little complicated
Step12: tf.while_loop
Step13: but our second-level approximation will do for now).
Step16: Exercise
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title Upgrade to TensorFlow 2.5+
!pip install --upgrade tensorflow
#@title Install and import Libraries for this colab. RUN ME FIRST!
!pip install matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.summary.writer.writer import FileWriter
%load_ext tensorboard
def plus_one(x):
print("input has type %s, value %s"%(type(x), x))
output = x + 1.0
print("output has type %s, value %s"%(type(output), output))
return output
# Let us create a graph where `plus_one` is invoked during the graph contruction
g = tf.Graph()
with g.as_default():
x = tf.constant([1.0,2.0,3.0])
# Notice that print statemets are not called during the graph construction
a = tf.py_function(plus_one, inp = [x], Tout=tf.float32)
with tf.compat.v1.Session(graph=g) as sess:
# During the runtime, input `x` is passed as an EagerTensor to `plus_one`
print(sess.run(a))
# Define a TensorFlow function
@tf.function
def print_fn(x):
# Note that `print_trace` is a TensorFlow Op. See the next section for details
print_trace = tf.print(
"`input` has value", x, ", type", type(x), "and shape", tf.shape(x))
# Create some inputs
a = tf.constant([1, 2])
# Call the function
print_fn(a)
g = tf.Graph()
with g.as_default():
a = tf.constant(1) + tf.constant(1)
print_trace = tf.print("a is set to ", a)
b = a * 2
with tf.compat.v1.Session(graph=g) as sess:
results = sess.run(b)
g = tf.Graph()
with g.as_default():
a = tf.constant(1) + tf.constant(1)
print_trace = tf.compat.v1.print("a is set to", a)
b = a * 2
with tf.compat.v1.Session(graph=g) as sess:
results = sess.run((b, print_trace))
g = tf.Graph()
with g.as_default():
a = tf.constant(1) + tf.constant(1)
print_trace = tf.print("a is set to", a)
hello_world = tf.print("hello world")
with tf.control_dependencies((print_trace, hello_world)):
# print_trace and hello_world will always be evaluated
# before b can be evaluated
b = a * 2
c = a * 3
with tf.compat.v1.Session(graph=g) as sess:
results = sess.run(b)
# Nothing gets printed
with tf.compat.v1.Session(graph=g) as sess:
results = sess.run(c)
g = tf.Graph()
with g.as_default():
x = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.control_dependencies([
tf.debugging.Assert(tf.not_equal(x, 0), ["Invalid value for x:",x])]):
y = 2.0 / x
with tf.compat.v1.Session(graph=g) as sess:
try:
results = sess.run(y, feed_dict={x: 0.0})
except tf.errors.InvalidArgumentError as e:
print('Value of x is zero\nError message:')
print(e.message)
g = tf.Graph()
with g.as_default():
x = tf.compat.v1.placeholder(tf.float32, shape=[])
with tf.control_dependencies([tf.debugging.assert_none_equal(x, 0.0)]):
y = 2.0 / x
with tf.compat.v1.Session(graph=g) as sess:
try:
results = sess.run(y, feed_dict={x: 0.0})
except tf.errors.InvalidArgumentError as e:
print('Value of x is zero\nError message:')
print(e.message)
# This won't work
try:
tf.cond(tf.constant(True), tf.constant(1), tf.constant(2))
except TypeError as e:
pass
# You need a callable:
tf.cond(tf.constant(True), lambda: tf.constant(1), lambda: tf.constant(2))
def dependency_fn():
print ("DEPENDENCY: I'm always evaluated at execution time because I'm a dependency\n")
return tf.constant(2)
dependency = tf.py_function(dependency_fn, inp=[], Tout=tf.int32)
def true_op_fn():
print ("TRUE_OP_FN: I'm evaluated at execution time because condition is True\n")
return 1
def true_fn():
print ("TRUE_FN: I'm evaluated at graph building time")
return tf.py_function(true_op_fn, inp=[], Tout=tf.int32)
def false_op_fn(input):
print ("FALSE_OP_FN: I'm never evaluated because condition isn't False\n")
return 1 + input
def false_fn():
print ("FALSE_FN: I'm evaluated at graph building time")
return tf.py_function(false_op_fn, inp=[dependency], Tout=tf.int32)
def predicate_fn():
print("\n****** Executing the graph")
print("PREDICATE: I'm evaluated at execution time\n")
return tf.constant(True)
@tf.function
def test_fn():
print("****** Building graph")
tf.cond(tf.py_function(predicate_fn, inp=[], Tout=tf.bool),
true_fn, false_fn)
test_fn()
g = tf.Graph()
with g.as_default():
index = tf.constant(1)
accumulator = tf.constant(0)
loop = tf.while_loop(
loop_vars=[index, accumulator],
cond = lambda idx, acc: idx < 4,
body = lambda idx, acc: [idx+1, acc + idx] )
with tf.compat.v1.Session(graph=g) as sess:
with FileWriter("logs", sess.graph):
results = sess.run(loop)
# Graph visualization
%tensorboard --logdir logs
# First let us explicitly disable Autograph
@tf.function(autograph=False)
def loop_fn(index, max_iterations):
for index in range(max_iterations):
index += 1
if index == 4:
tf.print('index is equal to 4')
return index
# Create some inputs
index = tf.constant(0)
max_iterations = tf.constant(5)
# Try calling the loop
try:
loop_fn(index, max_iterations)
except TypeError as e:
print(e)
# Autograph is enabled by default
@tf.function
def loop_fn(index, max_iterations):
for index in range(max_iterations):
index += 1
if index == 4:
tf.print('index is equal to 4')
return index
# Note that Autograph sucessfully converted Python code to TF graph
print(loop_fn(index, max_iterations))
MAX_ITERATIONS = 64
NUM_PIXELS = 512
def GenerateGrid(nX, nY, bottom_left=(-1.0, -1.0), top_right=(1.0, 1.0)):
    """Generates a complex matrix of shape [nX, nY].

    Generates an evenly spaced grid of complex numbers spanning the rectangle
    between the supplied diagonal points.

    Args:
      nX: A positive integer. The number of points in the horizontal direction.
      nY: A positive integer. The number of points in the vertical direction.
      bottom_left: The coordinates of the bottom left corner of the rectangle to
        cover.
      top_right: The coordinates of the top right corner of the rectangle to
        cover.

    Returns:
      A constant tensor of type complex128 and shape [nX, nY].
    """
x = tf.linspace(bottom_left[0], top_right[0], nX)
y = tf.linspace(bottom_left[1], top_right[1], nY)
real, imag = tf.meshgrid(x, y)
return tf.cast(tf.complex(real, imag), tf.complex128)
c_values = GenerateGrid(NUM_PIXELS, NUM_PIXELS)
initial_Z_values = tf.zeros_like(c_values, dtype=tf.complex128)
initial_diverged_after = tf.ones_like(c_values, dtype=tf.int32) * MAX_ITERATIONS
# You need to put the various values you want to change inside the loop here
loop_vars = ()
# this needs to take the same number of arguments as loop_vars contains and
# return a tuple of equal size with the next iteration's values
def body():
# hint: tf.abs will give the magnitude of a complex value
return ()
# this just needs to take the same number of arguments as loop_vars contains and
# return true (we'll use maximum_iterations to exit the loop)
def cond():
return True
results = tf.while_loop(
loop_vars=loop_vars,
body = body,
cond = cond,
maximum_iterations=MAX_ITERATIONS)
## extract the final value of diverged_after from the tuple
final_diverged_after = results[-1]
plt.matshow(final_diverged_after)
pass
#@title Solution: Mandlebrot set (Double-click to reveal)
MAX_ITERATIONS = 64
NUM_PIXELS = 512
def GenerateGrid(nX, nY, bottom_left=(-1.0, -1.0), top_right=(1.0, 1.0)):
    """Generates a complex matrix of shape [nX, nY].

    Generates an evenly spaced grid of complex numbers spanning the rectangle
    between the supplied diagonal points.

    Args:
      nX: A positive integer. The number of points in the horizontal direction.
      nY: A positive integer. The number of points in the vertical direction.
      bottom_left: The coordinates of the bottom left corner of the rectangle to
        cover.
      top_right: The coordinates of the top right corner of the rectangle to
        cover.

    Returns:
      A constant tensor of type complex128 and shape [nX, nY].
    """
x = tf.linspace(bottom_left[0], top_right[0], nX)
y = tf.linspace(bottom_left[1], top_right[1], nY)
real, imag = tf.meshgrid(x, y)
return tf.cast(tf.complex(real, imag), tf.complex128)
c_values = GenerateGrid(NUM_PIXELS, NUM_PIXELS)
initial_Z_values = tf.zeros_like(c_values, dtype=tf.complex128)
initial_diverged_after = tf.ones_like(c_values, dtype=tf.int32) * MAX_ITERATIONS
# You need to put the various values you want to change inside the loop here
loop_vars = (0, initial_Z_values, initial_diverged_after)
# this needs to take the same number of arguments as loop_vars contains and
# return a tuple of equal size with the next iteration's values
def body(iteration_count, Z_values, diverged_after):
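    # One Mandelbrot iteration: z <- z**2 + c. A point has diverged once |z| > 2,
    # and we record the first iteration at which that happens.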
new_Z_values = Z_values * Z_values + c_values
has_diverged = tf.abs(new_Z_values) > 2.0
new_diverged_after = tf.minimum(diverged_after, tf.where(
has_diverged, iteration_count, MAX_ITERATIONS))
return (iteration_count+1, new_Z_values, new_diverged_after)
# this just needs to take the same number of arguments as loop_vars contains and
# return true (we'll use maximum_iterations to exit the loop)
def cond(iteration_count, Z_values, diverged_after):
return True
results = tf.while_loop(
loop_vars=loop_vars,
body = body,
cond = cond,
maximum_iterations=MAX_ITERATIONS)
## extract the final value of diverged_after from the tuple
final_diverged_after = results[-1]
plt.matshow(final_diverged_after)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Tutorial
Step13: Quick peek at your data
Step14: Create the Dataset
Step15: Create and run training pipeline
Step16: Run the training pipeline
Step17: Review model evaluation scores
Step18: Send a batch prediction request
Step19: Make the batch prediction request
Step20: Wait for completion of batch prediction job
Step21: Get the predictions
Step22: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
dataset = aip.TabularDataset.create(
display_name="Bank Marketing" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
dag = aip.AutoMLTabularTrainingJob(
display_name="bank_" + TIMESTAMP,
optimization_prediction_type="classification",
optimization_objective="minimize-log-loss",
)
print(dag)
model = dag.run(
dataset=dataset,
model_display_name="bank_" + TIMESTAMP,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
disable_early_stopping=False,
target_column=label_column,
)
# Get model resource ID
models = aip.Model.list(filter="display_name=bank_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
! gsutil cat $IMPORT_FILE | head -n 1 > tmp.csv
! gsutil cat $IMPORT_FILE | tail -n 10 >> tmp.csv
! cut -d, -f1-16 tmp.csv > batch.csv
gcs_input_uri = BUCKET_NAME + "/test.csv"
! gsutil cp batch.csv $gcs_input_uri
batch_predict_job = model.batch_predict(
job_display_name="bank_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="csv",
sync=False,
)
print(batch_predict_job)
batch_predict_job.wait()
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check below that most of the variables come in two versions, one quantitative and one qualitative. The R version of this notebook compares two strategies
Step2: Q How many individuals and how many variables are ultimately involved?
Step3: Extracting the training and test samples
Step4: Logistic regression
Step5: Prediction error
Step6: Ridge optimization
Step7: Q Note the prediction error; compare it with the one estimated by cross-validation.
Step8: Q Which variables are important? How can they be interpreted?
Step9: Discriminant analysis
Step10: Linear discriminant analysis
Step11: Q What can be said about the quality? About the possibilities for interpretation?
Step12: K nearest neighbours
Step13: ROC curves
Step14: Binary decision trees
Step15: Q Which node homogeneity criterion is used by default?
Step16: ROC curves
Step17: Comment on the results.
Step18: Q Run the cell above several times. What do you conclude about the stability of the error estimate, and hence its reliability?
Step19: Gradient boosting
Step20: ROC curves
Step21: Q Which is the best interpretable method? Which is the best method overall?
Step22: Q Finally, which is the best method? Which is the best interpretable method?
|
<ASSISTANT_TASK:>
Python Code:
# Importation des librairies.
import numpy as np
import pandas as pd
import random as rd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
# Lecture d'un data frame
vispremv = pd.read_table('vispremv.dat', delimiter=' ')
vispremv.shape
vispremv.head()
# Variables quantitatives
vispremv.describe()
vispremv.dtypes
# Transformation en indicatrices
vispremDum=pd.get_dummies(vispremv[["SEXEQ","FAMIQ","PCSPQ"]])
# Une seule est conservée pour les variables binaires
vispremDum.drop(["SEXEQ_Sfem","FAMIQ_Fseu"], axis = 1, inplace = True)
# Sélection des variables numériques
vispremNum = vispremv.select_dtypes(exclude=['object'])
# Concaténation des variables retenues
vispremR=pd.concat([vispremDum,vispremNum],axis=1)
vispremR.columns
vispremR.shape
# La variable à expliquer est recodée
y=vispremv["CARVP"].map(lambda x: 0 if x=="Cnon" else 1)
rd_seed=111 # Modifier cette valeur d'initialisation
npop=len(vispremv)
xApp,xTest,yApp,yTest=train_test_split(vispremR,y,test_size=200,random_state=rd_seed)
xApp.shape
from sklearn.linear_model import LogisticRegression
# Grille de valeurs du paramètre de pénalisaiton
param=[{"C":[0.5,1,5,10,12,15,30]}]
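# Note: in scikit-learn's LogisticRegression, C is the *inverse* of the
# regularisation strength, so larger values of C mean a weaker L1 penalty.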
logitL = GridSearchCV(LogisticRegression(penalty="l1", solver="liblinear"), param,cv=5,n_jobs=-1)
logitLasso=logitL.fit(xApp, yApp)
# Sélection du paramètre optimal
logitLasso.best_params_["C"]
print("Meilleur score (apprentissage) = %f, Meilleur paramètre = %s" %
(1.-logitLasso.best_score_,logitLasso.best_params_))
# Prévision
yChap = logitLasso.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur sur l'échantillon test
print("Erreur de test régression Lasso = %f" % (1-logitLasso.score(xTest, yTest)))
# Grilles de valeurs du paramètre de pénalisation
param=[{"C":[0.5,1,5,10,12,15,30]}]
logitR = GridSearchCV(LogisticRegression(penalty="l2"), param,cv=5,n_jobs=-1)
logitRidge=logitR.fit(xApp, yApp)
# Sélection du paramètre optimal
logitRidge.best_params_["C"]
print("Meilleur score = %f, Meilleur paramètre = %s" % (1.-logitRidge.best_score_,logitRidge.best_params_))
# Prévision
yChap = logitRidge.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur sur l'échantillon test
print("Erreur de test régression Ridge = %f" % (1-logitRidge.score(xTest, yTest)))
LassoOpt=LogisticRegression(penalty="l1",C=12,solver="liblinear")
LassoOpt=LassoOpt.fit(xApp, yApp)
# Récupération des coefficients
vect_coef=np.matrix.transpose(LassoOpt.coef_)
vect_coef=vect_coef.ravel()
#Affichage des 25 plus importants
coef=pd.Series(abs(vect_coef),index=xApp.columns).sort_values(ascending=False)
print(coef)
plt.figure(figsize=(7,4))
coef.plot(kind='bar')
plt.title('Coeffients')
plt.tight_layout()
plt.show()
from sklearn.metrics import roc_curve
listMethod=[["Lasso",logitLasso],["Ridge",logitRidge]]
for method in enumerate(listMethod):
probas_ = method[1][1].predict_proba(xTest)
fpr, tpr, thresholds = roc_curve(yTest, probas_[:,1])
plt.plot(fpr, tpr, lw=1,label="%s"%method[1][0])
plt.xlabel('Taux de faux positifs')
plt.ylabel('Taux de vrais positifs')
plt.legend(loc="best")
plt.show()
from sklearn import discriminant_analysis
from sklearn.neighbors import KNeighborsClassifier
lda = discriminant_analysis.LinearDiscriminantAnalysis()
disLin=lda.fit(xApp, yApp)
# Prévision de l'échantillon test
yChap = disLin.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test lda = %f" % (1-disLin.score(xTest,yTest)))
qda = discriminant_analysis.QuadraticDiscriminantAnalysis()
disQua=qda.fit(xApp, yApp)
# Prévision de l'échantillon test
yChap = disQua.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test qda = %f" % (1-disQua.score(xTest,yTest)))
knn=KNeighborsClassifier(n_neighbors=10)
# Définition du modèle
disKnn=knn.fit(xApp, yApp)
# Prévision de l'échantillon test
yChap = disKnn.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test knn = %f" % (1-disKnn.score(xTest,yTest)))
yChap
#Optimisation du paramètre de complexité k
#Grille de valeurs
param_grid=[{"n_neighbors":list(range(1,15))}]
disKnn=GridSearchCV(KNeighborsClassifier(),param_grid,cv=5,n_jobs=-1)
disKnnOpt=disKnn.fit(xApp, yApp) # GridSearchCV est lui même un estimateur
# paramètre optimal
disKnnOpt.best_params_["n_neighbors"]
print("Meilleur score = %f, Meilleur paramètre = %s" % (1.-disKnnOpt.best_score_,disKnnOpt.best_params_))
# Prévision de l'échantillon test
yChap = disKnnOpt.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Estimation de l'erreur de prévision sur l'échantillon test
print("Erreur de test knn_opt = %f" % (1-disKnnOpt.score(xTest,yTest)))
from sklearn.metrics import roc_curve
# Liste des méthodes
listMethod=[["lda",disLin],["qda",disQua],["knn",disKnnOpt]]
# Tracé des courbes
for method in enumerate(listMethod):
probas_ = method[1][1].predict_proba(xTest)
fpr, tpr, thresholds = roc_curve(yTest, probas_[:,1])
plt.plot(fpr, tpr, lw=1,label="%s"%method[1][0])
plt.xlabel('Taux de faux positifs')
plt.ylabel('Taux de vrais positifs')
plt.legend(loc="best")
plt.show()
from sklearn.tree import DecisionTreeClassifier
# définition du modèle
tree= DecisionTreeClassifier()
treeC=tree.fit(xApp, yApp)
# Optimisation de la profondeur de l'arbre
param=[{"max_depth":list(range(2,10))}]
tree= GridSearchCV(DecisionTreeClassifier(),param,cv=10,n_jobs=-1)
treeOpt=tree.fit(xApp, yApp)
# paramètre optimal
print("Meilleur score = %f, Meilleur paramètre = %s" % (1. - treeOpt.best_score_,treeOpt.best_params_))
# Prévision de l'échantillon test
yChap = treeOpt.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Prediction error on the test set
print("Erreur de test tree qualitatif = %f" % (1-treeOpt.score(xTest,yTest)))
from sklearn.tree import export_graphviz
from io import StringIO
import pydotplus
treeG=DecisionTreeClassifier(max_depth=treeOpt.best_params_['max_depth'])
treeG.fit(xApp,yApp)
dot_data = StringIO()
export_graphviz(treeG, out_file=dot_data)
graph=pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_png("treeOpt.png")
from IPython.display import Image
Image(filename='treeOpt.png')
# Liste des méthodes
listMethod=[["Logit",logitLasso],["lda",disLin],["Arbre",treeOpt]]
# Tracé des courbes
for method in enumerate(listMethod):
probas_ = method[1][1].predict_proba(xTest)
fpr, tpr, thresholds = roc_curve(yTest, probas_[:,1])
plt.plot(fpr, tpr, lw=1,label="%s"%method[1][0])
plt.xlabel('Taux de faux positifs')
plt.ylabel('Taux de vrais positifs')
plt.legend(loc="best")
plt.show()
from sklearn.ensemble import BaggingClassifier
bag= BaggingClassifier(n_estimators=100,oob_score=False)
bagC=bag.fit(xApp, yApp)
# Prévision de l'échantillon test
yChap = bagC.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test avec le bagging = %f" % (1-bagC.score(xTest,yTest)))
from sklearn.ensemble import RandomForestClassifier
# Optimisation de max_features
param=[{"max_features":list(range(2,10,1))}]
rf= GridSearchCV(RandomForestClassifier(n_estimators=100),param,cv=5,n_jobs=-1)
rfOpt=rf.fit(xApp, yApp)
# paramètre optimal
print("Meilleur score = %f, Meilleur paramètre = %s" % (1. - rfOpt.best_score_,rfOpt.best_params_))
# Prévision de l'échantillon test
yChap = rfOpt.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test random forest opt -quantitatif = %f" % (1-rfOpt.score(xTest,yTest)))
from sklearn.ensemble import GradientBoostingClassifier
# Optimisation de deux paramètres
paramGrid = [
{'n_estimators': list(range(100,601,50)), 'learning_rate': [0.1,0.2,0.3,0.4]}
]
gbmC= GridSearchCV(GradientBoostingClassifier(),paramGrid,cv=5,n_jobs=-1)
gbmOpt=gbmC.fit(xApp, yApp)
# paramètre optimal
print("Meilleur score = %f, Meilleur paramètre = %s" % (1. - gbmOpt.best_score_,gbmOpt.best_params_))
# Prévision de l'échantillon test
yChap = gbmOpt.predict(xTest)
# matrice de confusion
table=pd.crosstab(yChap,yTest)
print(table)
# Erreur de prévision sur le test
print("Erreur de test gbm opt = %f" % (1-gbmOpt.score(xTest,yTest)))
# Liste des méthodes
listMethod=[["Logit",logitLasso],["lda",disLin],["Arbre",treeOpt],["RF",rfOpt],["GBM",gbmOpt]]
# Tracé des courbes
for method in enumerate(listMethod):
probas_ = method[1][1].predict_proba(xTest)
fpr, tpr, thresholds = roc_curve(yTest, probas_[:,1])
plt.plot(fpr, tpr, lw=1,label="%s"%method[1][0])
plt.xlabel('Taux de faux positifs')
plt.ylabel('Taux de vrais positifs')
plt.legend(loc="best")
plt.show()
from sklearn.utils import check_random_state
import time
check_random_state(13)
tps0=time.perf_counter()
# définition des estimateurs
logit = LogisticRegression(penalty="l1", solver="liblinear")
lda = discriminant_analysis.LinearDiscriminantAnalysis()
arbre = DecisionTreeClassifier()
rf = RandomForestClassifier(n_estimators=200)
gbm = GradientBoostingClassifier()
# Nombre d'itérations
B=3 # pour utiliser le programme, mettre plutôt B=30
# définition des grilles de paramètres
listMethGrid=[
[logit,{"C":[0.5,1,5,10,12,15,30]}],
[lda,{}],
[arbre,{"max_depth":[2,3,4,5,6,7,8,9,10]}],
[rf,{"max_features":[2,3,4,5,6]}],
[gbm,{"n_estimators": list(range(100,601,50)),"learning_rate": [0.1,0.2,0.3,0.4]}]
]
# Initialisation à 0 des erreurs pour chaque méthode (colonne) et chaque itération (ligne)
arrayErreur=np.empty((B,5))
for i in range(B): # itérations sur B échantillons test
# extraction apprentissage et test
xApp,xTest,yApp,yTest=train_test_split(vispremR,y,test_size=200)
# optimisation de chaque méthode et calcul de l'erreur sur le test
for j,(method, grid_list) in enumerate(listMethGrid):
methodGrid=GridSearchCV(method,grid_list,cv=5,n_jobs=-1).fit(xApp, yApp)
methodOpt = methodGrid.best_estimator_
methFit=methodOpt.fit(xApp, yApp)
arrayErreur[i,j]=1-methFit.score(xTest,yTest)
tps1=time.perf_counter()
print("Temps execution en mn :",(tps1 - tps0)/60)
dataframeErreur=pd.DataFrame(arrayErreur,columns=["Logit","LDA","Arbre","RF","GBM"])
# Distribution des erreurs
dataframeErreur[["Logit","LDA","Arbre","RF","GBM"]].boxplot(return_type='dict')
plt.show()
# Moyennes
dataframeErreur.mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise List #2 for Computer Graphics
Step2: Question #2
Step3: Question #3
|
<ASSISTANT_TASK:>
Python Code:
# Demonstração algébrica, sem código.
# Desenho da parábola.
#
import math
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axisartist.axislines import SubplotZero
def prep_axis():
    """Prepare the plot axes."""
fig = plt.figure(1)
ax = SubplotZero(fig, 111)
fig.add_subplot(ax)
for direction in ["xzero", "yzero"]:
ax.axis[direction].set_axisline_style("-|>")
ax.axis[direction].set_visible(True)
for direction in ["left", "right", "bottom", "top"]:
ax.axis[direction].set_visible(False)
return ax
ax = prep_axis()
# Domínio da função (em a e x).
a = np.linspace(-4, 4, 9)
x = np.linspace(-1, 1, 100)
# Desenha gráfico
for ai in [int(af) for af in a]:
# gráfico:
y = np.sqrt(4 * ai * x)
ax.plot(x, y)
# anotação do valor de a:
notex = -1 if ai < 0 else 1
notey = np.abs(ai)
if int(ai) != -10:
dist = 15 * ai
ax.annotate("$a={:d}$".format(int(ai)),
xy=(notex, notey), xycoords='data',
xytext=(-dist, 40-abs(dist) if ai != 0 else 10),
textcoords='offset points')
plt.show()
INF = float('Inf')
# Valores de u
u_table = [np.linspace(-10, 10, 21), np.linspace(-1, 1, 11)]
# Impressão das tabelas
#
for u in u_table:
    print('\nValues for u = {} ...'.format(u))
# Equações paramétricas correspondentes
x = -u
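    # Note: `u - [0]` below is an element-wise subtraction of zero (it does not
    # drop the zero entry); dividing by the zero element therefore yields inf,
    # which is filtered out further down via the INF check.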
y = [1/ui for ui in u - [0]]
# Equação de y em função de x
yx = [-1/xi for xi in x - [0]];
# Os vetores devem possuir o mesmo tamanho.
assert len(u) == len(x) == len(y) == len(yx)
print('Foram gerados {:d} pontos.'.format(len(u)))
# Gerar o código LaTex da tabela de valores.
print('$u$\t&$x$\t&$y$\t&$r(u)$\\\\ \midrule')
for k, ui in enumerate(u):
if y[k] == INF:
continue
print('{u:6.2}\t&{x:6.2}\t&{y:6.2f}\t&${x:6.2}\\mathbf{{i}} {signal} {yabs:6.2f}\\mathbf{{j}}$\\\\'.
format(
k=k+1, u=ui,
x=x[k],
y=y[k],
signal=('-' if y[k] < 0 else '+'),
yabs=np.abs(y[k])
)
)
## Desenho do gráfico da função.
#
# Valores de u
u_plot = [np.linspace(-10, 10, 200), np.linspace(-1, 1, 200)]
u = u_plot[0] # Mude de 0 para 1 aqui para plotar o gráfico entre -10 e 10 ou entre -1 e 1, respectivamente.
# Equações paramétricas da curva
x = -u
y = [1/ui for ui in u - [0]]
# Equação de y em função de x
yx = [-1/xi for xi in x - [0]]
# Desenha o gráfico.
ax = prep_axis()
ax.plot(x, yx)
plt.show()
from mpl_toolkits.mplot3d import Axes3D
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
circle = 2 * np.pi
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
x = theta / circle
y = 5 * np.cos(theta)
z = 3 * np.sin(theta)
plot_directors = True # Mude para True para plotar as retas diretoras.
if plot_directors:
ax.plot(x, y, z, label='curva paramétrica')
ax.plot(x, [5 for xi in x], [0 for xi in x], label='$\mathbf{D}_y$')
ax.plot(x, [-5 for xi in x], [0 for xi in x], label='$\mathbf{D\'}_y$')
ax.plot(x, [0 for xi in x], [3 for xi in x], label='$\mathbf{D}_z$')
ax.plot(x, [0 for xi in x], [-3 for xi in x], label='$\mathbf{D\'}_z$')
ax.legend()
else:
ax.plot(x, y, z)
fig = plt.figure()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature importance from a linear model
Step2: Get random permutation scores and compute feature importance
Step3: With embeddings
Step4: Get feature importance
Step5: Neural net with hidden layer and embeddings
Step6: Evaluate feature importance
Step7: Compare feature importances
Step8: Why is t2m_std not important?
Step9: What about the correlation between the error and PP spread?
Step10: Does the NN care at all about the fc spread?
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
from nn_src.imports import *
from nn_src.utils import get_datasets
#DATA_DIR = '/Users/stephanrasp/data/'
# DATA_DIR = '/scratch/srasp/ppnn_data/'
DATA_DIR = '/Volumes/SanDisk/data/ppnn_data/'
aux_train_set, aux_test_set = get_datasets(DATA_DIR, 'aux_15_16.pkl', ['2015-01-01', '2016-01-01'], aux=True)
n_features = aux_train_set.features.shape[1]; n_features
fc_aux = build_fc_model(n_features, 2, compile=True, lr=0.02)
fc_aux.summary()
fc_aux.fit(aux_train_set.features, aux_train_set.targets, 1024, 30, verbose=0)
# Get the reference score from the last model we trained
ref_score = fc_aux.evaluate(aux_test_set.features, aux_test_set.targets, 4096, 0); ref_score
def eval_shuf(m, idx, emb=False):
x_shuf = aux_test_set.features.copy()
x_shuf[:, idx] = np.random.permutation(x_shuf[:, idx])
x = [x_shuf, aux_test_set.cont_ids] if emb else x_shuf
return m.evaluate(x, aux_test_set.targets, 4096, 0)
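# Permutation importance: shuffle one feature column at a time and measure how
# much the test loss degrades relative to the unshuffled reference score. The
# bigger the degradation, the more the model relies on that feature.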
def perm_imp(m):
scores = [eval_shuf(m, i) for i in range(len(aux_test_set.feature_names))]
fimp = np.array(scores) - ref_score
df = pd.DataFrame(columns=['Feature', 'Importance'])
df['Feature'] = aux_test_set.feature_names; df['Importance'] = fimp
return df
fimp_fc_aux = perm_imp(fc_aux)
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(data=fimp_fc_aux, y='Importance', x='Feature', ax=ax)
plt.xticks(rotation=90);
emb_size = 2
max_id = int(np.max([aux_train_set.cont_ids.max(), aux_test_set.cont_ids.max()]))
max_id
fc_aux_emb = build_emb_model(n_features, 2, [], emb_size, max_id, compile=True, lr=0.02)
fc_aux_emb.fit([aux_train_set.features, aux_train_set.cont_ids], aux_train_set.targets,
epochs=30, batch_size=1024, verbose=0);
ref_score = fc_aux_emb.evaluate([aux_test_set.features, aux_test_set.cont_ids], aux_test_set.targets, 4096, 0)
ref_score
def perm_imp_emb(m, ref):
scores = [eval_shuf(m, i, True) for i in range(len(aux_test_set.feature_names))]
ids_shuf = np.random.permutation(aux_test_set.cont_ids)
scores += [m.evaluate([aux_test_set.features, ids_shuf], aux_test_set.targets, 4096, 0)]
fimp = np.array(scores) - ref
df = pd.DataFrame(columns=['Feature', 'Importance'])
df['Feature'] = aux_test_set.feature_names + ['Embedding']; df['Importance'] = fimp
return df
fimp_fc_aux_emb = perm_imp_emb(fc_aux_emb, ref_score)
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(data=fimp_fc_aux_emb, y='Importance', x='Feature', ax=ax)
plt.xticks(rotation=90);
nn_aux_emb = build_emb_model(n_features, 2, [50], emb_size, max_id, compile=True, lr=0.01)
nn_aux_emb.fit([aux_train_set.features, aux_train_set.cont_ids], aux_train_set.targets,
epochs=30, batch_size=1024, verbose=0);
ref_score = nn_aux_emb.evaluate([aux_test_set.features, aux_test_set.cont_ids], aux_test_set.targets, 4096, 0)
ref_score
fimp_nn_aux_emb = perm_imp_emb(nn_aux_emb, ref_score)
fig, ax = plt.subplots(figsize=(12, 5))
sns.barplot(data=fimp_nn_aux_emb, y='Importance', x='Feature', ax=ax)
plt.xticks(rotation=90);
fimp_fc_aux = fimp_fc_aux.append({'Feature': 'Embedding'}, ignore_index=True)
fimps = [fimp_fc_aux, fimp_fc_aux_emb, fimp_nn_aux_emb]
comb_df = pd.DataFrame(data=fimp_fc_aux['Feature']); comb_df.head()
comb_df['FCN-aux'] = fimp_fc_aux['Importance']
comb_df['FCN-aux-emb'] = fimp_fc_aux_emb['Importance']
comb_df['NN-aux-emb'] = fimp_nn_aux_emb['Importance']
comb_df['Mean importance'] = comb_df.iloc[:, 1:].mean(axis=1)
comb_df.sort_values('Mean importance', ascending=False, inplace=True)
comb_df.head()
len(fimp_fc_aux)
comb_df.sum()
comb_df.iloc[0, 1:] /= 10
comb_df.loc[comb_df.index[0], 'Feature'] = 't2m_mean / 10'  # single .loc assignment avoids chained indexing
melt_df = comb_df.iloc[:15, :4].melt(id_vars='Feature', var_name='Experiment', value_name='Importance')
melt_df['Feature'] = [f.replace('_fc_', '_') for f in melt_df['Feature']]
sns.set_style('white')
sns.set_palette(['#1E90FF', '#B22222', '#228B22'])
fig, ax = plt.subplots(figsize=(8, 4))
sns.barplot(data = melt_df, x='Feature', y='Importance', hue='Experiment', ax=ax)
plt.xticks(rotation=90);
sns.despine()
plt.title('Feature importance');
fig.savefig('./feature-importance.pdf', bbox_inches='tight')
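# Rescale the forecast mean and the ensemble spread back to their original
# (unscaled) units with the stored scale factors before comparing them with the
# observed forecast errors.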
fc_err = aux_test_set.features[:, 0] * aux_test_set.scale_factors[0] - aux_test_set.targets
ens_spread = aux_test_set.features[:, 1] * aux_test_set.scale_factors[1]
plt.scatter(np.abs(fc_err[::10]), ens_spread[::10], alpha=0.05, s=10);
np.corrcoef(np.abs(fc_err), ens_spread)[0, 1]
np.abs(fc_err).mean(), ens_spread.mean(), ens_spread.mean()/np.abs(fc_err).mean()
preds = nn_aux_emb.predict([aux_test_set.features, aux_test_set.cont_ids], 4096, 0)
pp_spread = np.abs(preds[:, 1])
plt.scatter(np.abs(fc_err[::10]), pp_spread[::10], alpha=0.05, s=10);
np.corrcoef(np.abs(fc_err), pp_spread)[0, 1]
pp_spread.mean()/np.abs(fc_err).mean()
plt.scatter(ens_spread[::10], pp_spread[::10], alpha=0.05, s=10);
np.corrcoef(ens_spread, pp_spread)[0, 1]
np.mean(pp_spread), np.mean(ens_spread)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The wedge function returns a tuple containing the model m, as well as top, base and ref, which denote some key boundaries in the model. Let's put these objects on a plot.
Step2: The model m is a 2-D NumPy array where each element is an index corresponding to a layer: the purple region has index 0, the green layer 1, and the yellow layer 2. top and base, shown in red, are the depths down to the "top" and the "base" of the wedge layer respectively. ref stands for "reference trace" and denotes the position where the wedge has a thickness factor of 1.
Step3: We can use these to make vp and rho earth models. We can use NumPy's fancy indexing by passing our array of indices to access the rock properties (in this case acoustic impedance) for every element at once.
Step4: And plot the acoustic impedance to check it looks reasonable
Step5: Acoustic reflectivity
Step6: The reflection coefficients are zero almost everywhere, except at the layer boundaries. The reflection coefficient between layer 1 and layer 2 is negative, and the one between layer 2 and layer 3 is positive. This will determine the scale and polarity of the seismic amplitudes.
Step7: As with the elastic impedance calculation, we can create a range of angles at once
Step8: The 10-degree reflectivity we computed before is in there as rc[10], but we can also see how reflectivity varies with angle
Step9: A 1D wavelet
Step10: A 2D wavelet bank
Step11: A 1D convolution
Step12: In the 1D case, it would have been easy enough to use np.convolve to do this. The equivalent code is
Step13: Remember those are offsets in the first dimension. Let's look at the zero-offset synthetic alongside the 30-degree panel
Step14: Or we could look at a single 'angle gather'
Step15: Bend your mind
Step16: Notice that the time axis has moved back to dimension 2 as a result of the wavelet having two dimensions
Step17: But we could also look at how the amplitude varies in the stratigraphic middle (timeslice 50) of the thickest part of the wedge (trace ref), when we vary frequency and offset
|
<ASSISTANT_TASK:>
Python Code:
import bruges as bg
m, top, base, ref = bg.models.wedge(width=120)
import matplotlib.pyplot as plt
plt.imshow(m)
plt.plot(top, 'r', lw=4)
plt.plot(base, 'r', lw=4)
plt.axvline(ref, c='k', ls='--')
plt.show()
import numpy as np
vps = np.array([2320, 2350, 2350])
vss = np.array([1150, 1250, 1200])
rhos = np.array([2650, 2600, 2620])
vp = vps[m]
vs = vss[m]
rho = rhos[m]
impedance = vp * rho
plt.imshow(impedance, interpolation='none')
plt.colorbar()
plt.show()
rc = bg.reflection.acoustic_reflectivity(vp, rho)
plt.imshow(rc)
plt.colorbar()
plt.show()
rc_10 = bg.reflection.reflectivity(vp, vs, rho, theta=10)
plt.imshow(rc_10.real)
plt.colorbar()
plt.show()
rc = bg.reflection.reflectivity(vp, vs, rho, theta=np.arange(60), method='shuey')
rc.shape
offset = 10 # degrees
trace = 60
fig, axs = plt.subplots(ncols=2, figsize=(12, 5), sharey=True)
axs[0].imshow(rc[offset].real, vmin=-0.01, vmax=0.01)
axs[0].axvline(trace, c='k', ls='--')
axs[1].imshow(rc[:, :, trace].real.T, vmin=-0.01, vmax=0.01)
axs[1].axvline(offset, c='k', ls='--')
plt.show()
w, t = bg.filters.ricker(0.064, 0.001, 40)
plt.plot(t, w)
w_bank, t = bg.filters.ricker(0.096, 0.001, np.arange(12, 80))
plt.imshow(w_bank)
plt.xlabel('time [samples]')
plt.ylabel('frequency [Hz]')
rc_ref = rc[0, :, ref]
syn = bg.filters.convolve(rc_ref, w)
plt.plot(rc_ref)
plt.plot(syn)
plt.show()
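# As noted in the text, the same 1-D synthetic could be obtained with NumPy
# directly; a sketch of the equivalent call ('same' mode keeps the output the
# same length as the reflectivity series):
syn_np = np.convolve(rc_ref, w, mode='same')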
syn = bg.filters.convolve(rc, w, axis=1)
syn.shape
# A quick way to set a sensible max, so both panels have the same colours.
ma = np.percentile(syn, 99.9)
near, far = 0, 30
fig, axs = plt.subplots(ncols=2, figsize=(12, 5))
axs[0].imshow(syn[near], cmap='seismic_r', vmin=-ma, vmax=ma)
axs[0].set_title(f'{near} degrees')
axs[1].imshow(syn[far], cmap='seismic_r', vmin=-ma, vmax=ma)
axs[1].set_title(f'{far} degrees')
plt.show()
trace = 70
plt.imshow(syn[:, :, trace].T)
plt.xlabel('angle')
syn = bg.filters.convolve(rc, w_bank, axis=1)
syn.shape
plt.imshow(syn[10, 30])
plt.imshow(syn[:, :, 50, ref])
plt.xlabel('offset')
plt.ylabel('frequency')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stas
%matplotlib inline
x = np.arange(0.01, 1, 0.01)
values = [(0.5, 0.5),(5, 1),(1, 3),(2, 2),(2, 5)]
for i, j in values:
y = stas.beta.pdf(x,i,j)
plt.plot(x,y)
plt.show()
md = []
mn = []
mo = []
kur = []
ske = []
for i, j in values:
r = stas.beta.rvs(i, j, size=1000000)
md.append(np.median(r))
mn.append(np.mean(r))
mo.append(stas.mode(r)[0][0])
kur.append(stas.kurtosis(r))
ske.append(stas.skew(r))
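# For reference, the analytic values for a Beta(a, b) distribution are
# mean = a / (a + b) and mode = (a - 1) / (a + b - 2) for a, b > 1; the
# sample-based estimates above should approach them as the sample size grows.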
fig = plt.figure()
ax1 = fig.add_subplot(151)
ax1.set_title('Median')
ax1.plot(md)
ax2 = fig.add_subplot(152)
ax2.set_title('Mean')
ax2.plot(mn)
ax3 = fig.add_subplot(153)
ax3.set_title('Mode')
ax3.plot(mo)
ax4 = fig.add_subplot(154)
ax4.set_title('Kurtosis')
ax4.plot(kur)
ax5 = fig.add_subplot(155)
ax5.set_title('Skewness')
ax5.plot(ske)
axes = [ax1, ax2, ax3, ax4, ax5]
for i in axes:
plt.setp(i.get_xticklabels(), visible=False)
plt.setp(i.get_yticklabels(), visible=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Calculate the AIC and BIC for each of these 10 models, and find the best model.
Step2: Plot the AIC and BIC
Step3: Define your PDF by evenly distributing 1000 points in some range. Look up what the eval method of the model instance does (in newer scikit-learn versions it is called score_samples), and evaluate it on your 1000 data points x. You should be able to extract a pdf, and the individual responsibilities for each of the components.
Step4: Plot x as a histogram, and the PDF values over your x_i values.
|
<ASSISTANT_TASK:>
Python Code:
# Assumed context: X (the data array) is defined earlier in the notebook; GMM is
# the older sklearn.mixture.GMM interface, whose score_samples returns a
# (log-probabilities, responsibilities) tuple.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GMM
# Fit mixtures with 1 to 10 components (the exercise asks for 10 models)
gmms = [GMM(i).fit(X) for i in range(1, 11)]
aics = [g.aic(X) for g in gmms]
bics = [g.bic(X) for g in gmms]
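# The exercise also asks for the best model: take the component count whose
# information criterion is lowest (offset by 1 because the counts start at 1).
best_by_aic = int(np.argmin(aics)) + 1
best_by_bic = int(np.argmin(bics)) + 1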
plt.plot(aics)
plt.plot(bics)
# Data x_i
x = np.linspace(-6,6,1000)
pdf = gmms[2].score_samples(x.reshape(-1,1))
plt.plot(np.linspace(-6,6,1000),np.exp(pdf[0]))
plt.hist(X,bins='auto',normed=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import the list of SOC 2000 occupations identified in Hecker (2005) as technology intensive. These occupations are then concorded to 2010 SOC codes so that they can be used to identify STEM employment in more recent OES Industry-Occupation data.
Step2: Import OES BLS Industry-Occupation data for 2012 and 2014. These tables are at the detailed occupation and 4-digit 2012 NAICS industry level. The 2012 data comes split into two files.
Step3: Flag STEM or 'technology oriented' occupations on the OES data. Then calculate the total employment and STEM employment for each industry. Take the ratio of STEM employment to total employment in each industry and find the mean STEM employment ratio across all industries. Finally, implement the cutoff rules for High Tech industries as defined in Hecker (2005).
Step4: Combine with list of 4-digit 2002 NAICS industries from Hecker (2005) and export to csv. Note that NAICS industries from Hecker (2005) are 2002 NAICS while the 2012 and 2014 OES data use 2012 NAICS. For comparability, these industries would need to be concorded to the same vintage of industry classification.
|
<ASSISTANT_TASK:>
Python Code:
# import libraries
import pandas as pd
import numpy as np
# data paths
xwalkPath = ''
blsPath = ''
# import list of 'technology intensive' occupations from Hecker (2005), Table 3
stemOcc = pd.read_csv(xwalkPath+'hecker2005_table3.txt')
stemOcc = stemOcc[['occupationcode']]
stemOcc.columns = ['occ00']
# import BLS soc crosswalk, 2000 to 2010
soc0010 = pd.read_csv(xwalkPath+'soc_2000_to_2010_crosswalk.csv')
soc0010 = soc0010[['2000 SOC code','2010 SOC code']]
soc0010.columns = ['occ00','occ10']
# concord Hecker (2005) high tech occupations
stemOcc = pd.merge(stemOcc, soc0010, on='occ00', how='left')
stemOcc = stemOcc[['occ10']]
stemOcc.columns = ['occ']
stemOcc = stemOcc.drop_duplicates()
print 'Count of STEM occupations (2010 SOC): ', len(stemOcc)
# import 2012 OES data
oes2012 = pd.read_csv(blsPath+'nat4d_M2012_dl_1_113300_517100.csv')
oes2012 = oes2012.append(pd.read_csv(blsPath+'nat4d_M2012_dl_2_517200_999300.csv'))
# keep only detail level records, dropping summary and aggregate records
oes2012 = oes2012[(oes2012.OCC_GROUP=='detailed') & (oes2012.OCC_CODE!='00-0000')]
oes2012 = oes2012.reset_index(drop=True)
oes2012 = oes2012[['NAICS','OCC_CODE','TOT_EMP']]
# subset to first 4 digits of naics, dropping zero padding
oes2012['NAICS'] = oes2012['NAICS'].astype(str)
oes2012['NAICS'] = oes2012['NAICS'].str[0:4]
# clean and destring total employment
oes2012['TOT_EMP'] = oes2012['TOT_EMP'].str.replace(' ','')
oes2012['TOT_EMP'] = oes2012['TOT_EMP'].str.replace(' ','')
oes2012['TOT_EMP'] = oes2012['TOT_EMP'].str.replace(',','')
oes2012['TOT_EMP'] = oes2012['TOT_EMP'].str.replace('\*\*','')
oes2012['TOT_EMP'] = pd.to_numeric(oes2012['TOT_EMP'])
oes2012.columns = ['naics', 'occ', 'tot_emp']
# import 2014 OES data
oes2014 = pd.read_csv(blsPath+'nat4d_M2014_dl.csv')
# keep only detail level records, dropping summary and aggregate records
oes2014 = oes2014[(oes2014.OCC_GROUP=='detailed') & (oes2014.OCC_CODE!='00-0000')]
oes2014 = oes2014.reset_index(drop=True)
oes2014 = oes2014[['NAICS','OCC_CODE','TOT_EMP']]
# subset to first 4 digits of naics, dropping zero padding
oes2014['NAICS'] = oes2014['NAICS'].astype(str)
oes2014['NAICS'] = oes2014['NAICS'].str[0:4]
# clean and destring total employment
oes2014['TOT_EMP'] = oes2014['TOT_EMP'].str.replace(' ','')
oes2014['TOT_EMP'] = oes2014['TOT_EMP'].str.replace(' ','')
oes2014['TOT_EMP'] = oes2014['TOT_EMP'].str.replace(',','')
oes2014['TOT_EMP'] = oes2014['TOT_EMP'].str.replace('\*\*','')
oes2014['TOT_EMP'] = pd.to_numeric(oes2014['TOT_EMP'])
oes2014.columns = ['naics', 'occ', 'tot_emp']
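# The 2012 and 2014 blocks above repeat the same cleaning steps; a possible
# helper to avoid the duplication (a sketch only -- the same TOT_EMP string
# cleaning as above would go inside it before calling pd.to_numeric):
def clean_oes(df):
    df = df[(df.OCC_GROUP == 'detailed') & (df.OCC_CODE != '00-0000')].reset_index(drop=True)
    df = df[['NAICS', 'OCC_CODE', 'TOT_EMP']]
    df['NAICS'] = df['NAICS'].astype(str).str[0:4]
    df.columns = ['naics', 'occ', 'tot_emp']
    return df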
# flag STEM occupations 2012 OES
oes2012ht = pd.merge(oes2012, stemOcc, on='occ', how='left', indicator=True)
oes2012ht['htocc'] = 0
oes2012ht.loc[oes2012ht._merge=='both','htocc'] = 1
# calculate STEM employment
oes2012ht['htemp'] = oes2012ht.tot_emp * oes2012ht.htocc
# sum emp and STEM emp by industry, calc ratio and average
oes2012ht_gb = oes2012ht[['tot_emp','htemp','naics']].groupby('naics').agg(sum)
oes2012ht_gb['naics']=oes2012ht_gb.index
oes2012ht_gb = oes2012ht_gb.reset_index(drop=True)
oes2012ht_gb['htratio'] = oes2012ht_gb.htemp/oes2012ht_gb.tot_emp
oes2012ht_gb['htratio_mean'] = oes2012ht_gb.htratio.mean()
# flag industry by high tech level
oes2012ht_gb['oes12htlvl'] = ''
oes2012ht_gb.loc[oes2012ht_gb.htratio>=2*oes2012ht_gb.htratio_mean,'oes12htlvl'] = 'Level III'
oes2012ht_gb.loc[oes2012ht_gb.htratio>=3*oes2012ht_gb.htratio_mean,'oes12htlvl'] = 'Level II'
oes2012ht_gb.loc[oes2012ht_gb.htratio>=5*oes2012ht_gb.htratio_mean,'oes12htlvl'] = 'Level I'
# show count of 4-digit industries by level
print '#'*20 +' High Tech Industries - 2012 OES ' + '#'*20
print '\nCount of 4-digit 2012 NAICS by HT Level:'
print oes2012ht_gb.groupby('oes12htlvl').agg([len])['naics']
# list level I industries
print '\nList of Level I HT Industries:'
print list(oes2012ht_gb[oes2012ht_gb.oes12htlvl=='Level I']['naics'])
# flag STEM occupations 2014 OES
oes2014ht = pd.merge(oes2014, stemOcc, on='occ', how='left', indicator=True)
oes2014ht['htocc'] = 0
oes2014ht.loc[oes2014ht._merge=='both','htocc'] = 1
# calculate STEM employment
oes2014ht['htemp'] = oes2014ht.tot_emp * oes2014ht.htocc
# sum emp and STEM emp by industry, calc ratio and average
oes2014ht_gb = oes2014ht[['tot_emp','htemp','naics']].groupby('naics').agg(sum)
oes2014ht_gb['naics']=oes2014ht_gb.index
oes2014ht_gb = oes2014ht_gb.reset_index(drop=True)
oes2014ht_gb['htratio'] = oes2014ht_gb.htemp/oes2014ht_gb.tot_emp
oes2014ht_gb['htratio_mean'] = oes2014ht_gb.htratio.mean()
# flag industry by high tech level
oes2014ht_gb['oes14htlvl'] = ''
oes2014ht_gb.loc[oes2014ht_gb.htratio>=2*oes2014ht_gb.htratio_mean,'oes14htlvl'] = 'Level III'
oes2014ht_gb.loc[oes2014ht_gb.htratio>=3*oes2014ht_gb.htratio_mean,'oes14htlvl'] = 'Level II'
oes2014ht_gb.loc[oes2014ht_gb.htratio>=5*oes2014ht_gb.htratio_mean,'oes14htlvl'] = 'Level I'
# show count of 4-digit industries by level
print '#'*20 +' High Tech Industries - 2014 OES ' + '#'*20
print '\nCount of 4-digit 2012 NAICS by HT Level:'
print oes2014ht_gb.groupby('oes14htlvl').agg([len])['naics']
# list level I industries
print '\nList of Level I HT Industries:'
print list(oes2014ht_gb[oes2014ht_gb.oes14htlvl=='Level I']['naics'])
hecker05 = pd.read_csv(xwalkPath+'hecker2005_table4.txt')[['naics','level']]
hecker05['naics'] = hecker05['naics'].astype(str)
hecker05.columns = ['naics','hkr05htlvl']
outDF = pd.merge(hecker05,oes2012ht_gb[['naics','oes12htlvl']], on='naics', how='outer')
outDF = pd.merge(outDF, oes2014ht_gb[['naics','oes14htlvl']], on='naics', how='outer')
outDF['oes12htlvl'] = outDF.oes12htlvl.fillna(np.nan)
outDF.loc[outDF.oes12htlvl=='','oes12htlvl'] = np.nan
outDF['oes14htlvl'] = outDF.oes14htlvl.fillna(np.nan)
outDF.loc[outDF.oes14htlvl=='','oes14htlvl'] = np.nan
outDF = outDF.dropna(how='all',subset=['hkr05htlvl','oes12htlvl','oes14htlvl'])
outDF = outDF.sort_values(by='naics')
outDF.to_csv(xwalkPath+'ht_stem_industries.csv', index=False)
print outDF
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initializing the model
Step2: GemPy core code is written in Python. However for efficiency (and other reasons) most of heavy computations happend in optimize compile code, either C or CUDA for GPU. To do so, GemPy rely on the library theano. To guarantee maximum optimization theano requires to compile the code for every Python kernel. The compilation is done by calling the following line at any point (before computing the model)
Step3: Creating figure
Step4: Add model section
Step5: Loading cross-section image
Step6: We can do the same in 3D through pyvista and vtk rendering. Click the qt5 button Back (+Y) to have the same view as in the 2D viwer
Step7: Building the model
Step8: If we do not care about the names and we just want to interpolate a surface we can use
Step9: Now we can start adding data. GemPy input data consist on surface points and orientations (perpendicular to the layers). The 2D plot gives you the X and Z coordinates when hovering the mouse over. We can add a surface point as follows
Step10: Now we can add the other two points of the layer
Step11: The minimum amount of data to interpolate anything in gempy is
Step12: Now we have enough data for finally interpolate!
Step13: That is, we have interpolated the 3D surface. We can visualize with
Step14: Adding more layers
Step15: Layer 2
Step16: Layer 3
Step17: Faults
Step18: Then define that is a fault
Step19: But we also need to add a new surface
Step20: And finally assign the new surface to the new series/fault
Step21: Now we can just add input data as before (remember the minimum amount of input data to compute a model)
Step22: And now is computing as before
Step23: As you can see now instead of having folding layers we have a sharp jump. Building on this you can pretty much any model you can imagine.
Step24: The topography can we visualize in both renderers
Step25: But also allows us to compute the geological map of an area
Step26: Gravity inversion
Step27: Also we can create a centered grid around a device for precision
Step28: We need to modify the compile code
Step29: But now additionally to the interpolation we also compute the forward gravity of the model (at the point XYZ = 400, 0, 0)
Step30: We can visualize it it better in the following figure. The aim of an inversion is to find the set of parameters that fit a measured point the better. In this example the red x symbolize the measured gravity while the blue dots are the current gravity (and previous) fw gravity values. The widget moves up and down the surface 3
|
<ASSISTANT_TASK:>
Python Code:
# Importing GemPy
import gempy as gp
# Importing aux libraries
from ipywidgets import interact
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Embedding matplotlib figures in the notebooks
%matplotlib qt5
geo_model = gp.create_model('Model1')
geo_model = gp.init_data(geo_model, extent= [0, 791, 0, 200, -582, 0], resolution=[100, 10, 100])
gp.set_interpolator(geo_model, theano_optimizer='fast_compile', verbose=[])
%matplotlib qt5
import pyvista as pv
pv.set_plot_theme("document")
p2d = gp.plot_2d(geo_model, section_names=[], direction=None, show=False)
# In this case perpendicular to the y axes
ax = p2d.add_section(cell_number=1, direction='y')
# Reading image
img = mpimg.imread('../examples/getting_started/wells.png')
# Plotting it inplace
ax.imshow(img, origin='upper', alpha=.8, extent = (0, 791, -582,0))
p3d = gp.plot_3d(geo_model, plotter_type='background', notebook=False)
geo_model.surfaces
# Default surfaces:
geo_model.set_default_surfaces()
# Add a point
geo_model.add_surface_points(X=223, Y=0.01, Z=-94, surface='surface1')
# Plot in 2D
p2d.plot_data(ax, cell_number=11)
# Plot in 3D
p3d.plot_surface_points()
# Add points
geo_model.add_surface_points(X=458, Y=0, Z=-107, surface='surface1')
geo_model.add_surface_points(X=612, Y=0, Z=-14, surface='surface1')
# Plotting
p2d.plot_data(ax, cell_number=11)
p3d.plot_surface_points()
# Adding orientation
geo_model.add_orientations(X=350, Y=0, Z=-300, surface='surface1', pole_vector= (0,0,1))
p2d.plot_data(ax, cell_number=5)
p3d.plot_data()
gp.compute_model(geo_model)
geo_model.additional_data.kriging_data
# In 2D
p2d.plot_contacts(ax, cell_number=5)
# In 3D
p3d.plot_surfaces()
geo_model.surfaces
geo_model.series
geo_model.add_surfaces(['surface3', 'basement'])
# Your code here:
geo_model.add_surface_points(X=225, Y=0, Z=-269, surface='surface2')
geo_model.add_surface_points(X=459, Y=0, Z=-279, surface='surface2')
#--------------------
# Plot data
p2d.remove(ax)
p2d.plot_data(ax, cell_number=5, legend='force')
p3d.plot_data()
# Compute model
gp.compute_model(geo_model)
geo_model.additional_data.kriging_data
# Plot 2D
p2d.plot_lith(ax, cell_number=5)
p2d.plot_contacts(ax, cell_number=5)
# Plot 3D
p3d.plot_surfaces()
p3d.plot_structured_grid(opacity=.2, annotations = {1: 'surface1', 2:'surface2', 3:'surface3'})
# Your code here:
geo_model.add_surface_points(X=225, Y=0, Z=-439, surface='surface3')
geo_model.add_surface_points(X=464, Y=0, Z=-456, surface='surface3')
geo_model.add_surface_points(X=619, Y=0, Z=-433, surface='surface3')
# ------------------
# Plotting data
p2d.remove(ax)
p2d.plot_data(ax, cell_number=5)
p3d.plot_data()
# Computing and plotting 3D
gp.compute_model(geo_model)
p2d.plot_lith(ax, cell_number=5)
p2d.plot_contacts(ax, cell_number=5)
p3d.plot_surfaces()
p3d.plot_structured_grid(opacity=.2, annotations = {1: 'surface1', 2:'surface2', 3:'surface3', 4:'basement'})
geo_model.additional_data
geo_model.add_series('Fault1')
geo_model.reorder_series(['Fault1', 'Default series'])
geo_model.set_is_fault('Fault1')
geo_model.add_surfaces('fault1')
gp.map_series_to_surfaces(geo_model, {'Fault1':'fault1'})
# Add input data of the fault
geo_model.add_surface_points(X=550, Y=0, Z=-30, surface='fault1')
geo_model.add_surface_points(X=650, Y=0, Z=-200, surface='fault1')
geo_model.add_orientations(X=600, Y=0, Z= -100, surface='fault1', pole_vector=(.3,0,.3))
# Plotting Inpute data
p2d.remove(ax)
p2d.plot_data(ax, cell_number=5)
p3d.plot_data()
# Compute
gp.compute_model(geo_model)
# Plot
p2d.plot_lith(ax, cell_number=5)
p2d.plot_contacts(ax, cell_number=5)
p3d.plot_surfaces()
p3d.plot_structured_grid(opacity=.2, annotations = {2: 'surface1', 3:'surface2', 4:'surface3', 5:'basement'})
## Adding random topography
geo_model.set_topography(source='random',fd=1.9, d_z=np.array([-150, 0]), resolution=np.array([200,200]))
p2d.plot_topography(ax, cell_number=5)
p3d.plot_topography(scalars='topography')
gp.compute_model(geo_model)
p3d.plot_surfaces()
p3d.plot_topography()
p3d.plot_structured_grid()
geo_model.add_surface_values([0, 2.6, 2.4, 3.2, 3.6], ['density'])
geo_model.set_centered_grid(centers= [[400, 0, 0]], resolution = [10, 10, 100], radius=800)
gp.set_interpolator(geo_model, output=['gravity'], theano_optimizer='fast_run', )
gp.compute_model(geo_model)
geo_model.solutions.fw_gravity
# Creating new figure
p_grav = gp.plot_2d(geo_model, direction=None, show=False, figsize=(6, 9))
# Adding section for model
ax2 = p_grav.add_section(cell_number=1, direction='y', ax_pos=211)
# Adding section for gravity
ax3 = p_grav.add_section(ax_pos=414)
# Plotting model in section
p2d.plot_data(ax2, cell_number=5, legend='force')
p2d.plot_lith(ax2, cell_number=5)
p2d.plot_contacts(ax2, cell_number=5)
ax2.plot(400, 0, '^', markersize=40, c='red')
# Plotting initial values of the gravity axes
target_grav = -810
ax3.tick_params( bottom=False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.plot(0, target_grav, 'X', label='Target Gravity', markersize=4, c='red')
ax3.plot(1, geo_model.solutions.fw_gravity, 'o', label='Current Gravity', markersize=4, c='blue')
ax3.set_ylabel('grav')
# We store the original values of z for the surface 3
Z_ori = geo_model.surface_points.df.loc[[5,6,7], 'Z'].copy()
# init a list to store grav
grav_ = []
# Function that modify the model, compute it and plot
def gravity_invert(dz):
new_z = Z_ori + dz
geo_model.modify_surface_points(indices=[5, 6, 7], Z= new_z)
gp.compute_model(geo_model)
grav_.append(geo_model.solutions.fw_gravity[0])
p2d.remove(ax2)
p2d.plot_data(ax2, cell_number=5)
p2d.plot_lith(ax2, cell_number=5)
p2d.plot_contacts(ax2, cell_number=5)
ax3.plot(np.arange(len(grav_))+1, grav_, 'o', label='Current Gravity', markersize=4, c='blue')
ax3.set_xlim(-1, len(grav_)+1)
interact(gravity_invert, dz=(-200, 200, 10))
# Creating new figure
p_grav = gp.plot_2d(geo_model, direction=None, show=False, figsize=(6, 9))
# Adding section for model
ax2 = p_grav.add_section(cell_number=1, direction='y', ax_pos=211)
# Adding section for gravity
ax3 = p_grav.add_section(ax_pos=414)
# Plotting model in section
p2d.plot_data(ax2, cell_number=5, legend='force')
p2d.plot_lith(ax2, cell_number=5)
p2d.plot_contacts(ax2, cell_number=5)
ax2.plot(400, 0, '^', markersize=40, c='red')
# Plotting initial values of the gravity axes
target_grav = -81
ax3.tick_params( bottom=False)
ax3.spines['top'].set_visible(False)
ax3.spines['bottom'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.plot(0, target_grav, 'X', label='Target Gravity', markersize=4, c='red')
ax3.plot(1, geo_model.solutions.fw_gravity, 'o', label='Current Gravity', markersize=4, c='blue')
ax3.set_ylabel('grav')
# We store the original values of z for the surface 3
Z_ori = geo_model.surface_points.df.loc[[5,6,7], 'Z'].copy()
# init a list to store grav
grav_ = []
# Function that modify the model, compute it and plot
def gravity_invert(dz):
new_z = Z_ori + dz
geo_model.modify_surface_points(indices=[5, 6, 7], Z= new_z)
gp.compute_model(geo_model)
grav_.append(geo_model.solutions.fw_gravity[0])
p2d.remove(ax2)
p2d.plot_data(ax2, cell_number=5)
p2d.plot_lith(ax2, cell_number=5)
p2d.plot_contacts(ax2, cell_number=5)
ax3.plot((np.arange(len(grav_))+1), grav_, 'o', label='Current Gravity', markersize=4, c='blue')
interact(gravity_invert, dz=(-200, 200, 10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load, save and filter all the data
Step2: Plot data and the thresholds
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import medfilt
import math
import gitInformation
from neo.io import NeuralynxIO
import sklearn
from scipy.interpolate import Rbf
import fastdtw
import time
%matplotlib inline
gitInformation.printInformation()
# Session folder with all needed neuralynx files
sessionfolder = 'C:\\Users\\Dominik\\Documents\\GitRep\\kt-2015-DSPHandsOn\\MedianFilter\\Python\\08. Tests'
NIO = NeuralynxIO(sessiondir = sessionfolder, cachedir = sessionfolder)
block = NIO.read_block()
seg = block.segments[0]
analogsignals = {}
# Save all recorded datas in a analogsignals dictionary.
for i in range(len(seg.analogsignalarrays)):
analogsignals["analogsignal{0}".format(i)] = seg.analogsignalarrays[i]
csc = {}
count = -1
# Extract the magnitude of each data.
for i in analogsignals:
csc["csc{0}".format(i[-1])] = analogsignals[i].magnitude
# Check if the Signal is already inverted, if not, invert it
for i in analogsignals:
temp = analogsignals[i]
if temp.annotations['InputInverted'] == 'False':
csc["csc{0}".format(i[-1])] = csc["csc{0}".format(i[-1])] * (-1)
filtered = {}
# Filter each data with a median filter with window length 25(seems to be the best)
for i in csc:
temp = csc[i]
filtered["filtered{0}".format(i[-1])] = medfilt(temp, 25)
new_data = {}
for i in filtered:
temp = filtered[i]
new_data["new_data{0}".format(i[-1])] = csc["csc{0}".format(i[-1])] - temp
threshold = {}
# Calculate the Threshold
for i in new_data:
temp = new_data[i]
threshold["threshold{0}".format(i[-1])] = 5*np.median(abs(temp)/0.6745)
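# median(|x|) / 0.6745 is a robust estimate of the noise standard deviation for
# (roughly) Gaussian background noise, so the threshold above corresponds to
# about 5 sigma of the noise.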
plt.figure(figsize=(30,7))
plt.plot(new_data["new_data0"])
plt.hlines(threshold["threshold0"], 0, len(new_data["new_data0"]), color = 'red')
plt.figure(figsize=(30,7))
plt.plot(new_data["new_data1"])
plt.hlines(threshold["threshold1"], 0, len(new_data["new_data0"]), color = 'red')
plt.figure(figsize=(30,7))
plt.plot(new_data["new_data2"])
plt.hlines(threshold["threshold2"], 0, len(new_data["new_data0"]), color = 'red')
plt.figure(figsize=(30,7))
plt.plot(new_data["new_data3"])
plt.hlines(threshold["threshold3"], 0, len(new_data["new_data0"]), color = 'red')
def thresHold(new_data, threshold):
count = -1
count2 = 0
timer = 0
positionlist = []
# Dictionary with all thresholded shapes
thresholds = {}
for v in new_data:
temp = new_data[v]
for element in temp:
# Increment the counter (counter = position in the array)
count += 1
# Check if the threshold is reached, and check if the window is inside the array
if element >= threshold["threshold{0}".format(v[-1])] and (count - 10) >= 0 and (count + 14) <= len(temp):
# Save a window with 24 samples
tempdata = [w for w in temp[count -10 : count + 14]]
# Save the position of the window in the original array
tempsavedcounter = np.linspace(count-10, count + 14, 25)
# Calculate the maximum value of the window
maximum = max(tempdata)
# Get the position of the maximum value inside the window
maxposition = [p for p, q in enumerate(tempdata) if q == maximum]
# Store the position (Note that the position here is the position in the original data)
                position = int(tempsavedcounter[maxposition[0]])  # cast to int so it can be used for slicing
                tempdata = [w for w in temp[position - 10 : position + 14]]
# Search in the other recorded data at the same position for spikes, check if the maximum
# in any other window is bigger, if True, replace the old shape with the new one
# We will get always the spike with the maximum amplitude
for i in new_data:
if i[-1] != v[-1]:
tempdata2 = new_data[i]
tempdata2 = tempdata2[position -10 : position + 14]
if max(tempdata2) > max(tempdata):
tempdata = tempdata2
position = int(position)
                # Check if we didn't find the spike already
if timer == 0 and all([i not in positionlist for i in range(position - 6, position + 10)]):
# set the timer to 16, so 16 samples will be passed
timer = 16
# increment count2, for the array name
count2 += 1
# Store the shape in a dictionary
thresholds["spike{0}".format(count2)] = tempdata
positionlist.append(position)
elif timer > 0:
# Decrement the timer.
timer -= 1
count = -1
return thresholds
shapes = thresHold(new_data, threshold)
count = 0
for i in shapes:
count += 1
plt.figure(count)
plt.plot(shapes[i], color = "black")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1D convnets are structured in the same way as their 2D counterparts, which you have used in Chapter 5
Step2: Here are our training and validation results
Step3: Combining CNNs and RNNs to process long sequences
Step4: Here are our training and validation Mean Absolute Errors
Step5: The validation MAE stays in the low 0.40s
Step6: This is our model, starting with two Conv1D layers and following up with a GRU layer
|
<ASSISTANT_TASK:>
Python Code:
from keras.datasets import imdb
from keras.preprocessing import sequence
max_features = 10000 # number of words to consider as features
max_len = 500 # cut texts after this number of words (among top max_features most common words)
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer=RMSprop(lr=1e-4),
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# We reuse the following variables defined in the last section:
# float_data, train_gen, val_gen, val_steps
import os
import numpy as np
data_dir = '/home/ubuntu/data/'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i, :] = values
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
def generator(data, lookback, delay, min_index, max_index,
shuffle=False, batch_size=128, step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows),
lookback // step,
data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size)
# This is how many steps to draw from `val_gen`
# in order to see the whole validation set:
val_steps = (300000 - 200001 - lookback) // batch_size
# This is how many steps to draw from `test_gen`
# in order to see the whole test set:
test_steps = (len(float_data) - 300001 - lookback) // batch_size
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Conv1D(32, 5, activation='relu',
input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
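# Note: the MAE reported above is in normalized units; multiplying it by the
# temperature standard deviation (std[1], computed during normalization) would
# convert it back to degrees Celsius.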
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# This was previously set to 6 (one point per hour).
# Now 3 (one point per 30 min).
step = 3
lookback = 720 # Unchanged
delay = 144 # Unchanged
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step)
val_steps = (300000 - 200001 - lookback) // 128
test_steps = (len(float_data) - 300001 - lookback) // 128
model = Sequential()
model.add(layers.Conv1D(32, 5, activation='relu',
input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
Step11: Unit tests
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1 / (1 + np.exp(-x))
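        # Note: the derivative of this sigmoid, sigma(x) * (1 - sigma(x)), is what
        # appears later as `hidden_grad` in the backward pass.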
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
# Output layer error is the difference between desired target and actual output.
output_errors = targets - final_outputs
# TODO: Backpropagated error
# errors propagated to the hidden layer
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors)
# hidden layer gradients
hidden_grad = hidden_outputs * (1 - hidden_outputs)
# TODO: Update the weights
# update hidden-to-output weights with gradient descent step
self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T)
# update input-to-hidden weights with gradient descent step
self.weights_input_to_hidden += self.lr * np.dot(hidden_errors * hidden_grad, inputs.T)
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
#hidden_inputs = # signals into hidden layer
#hidden_outputs = # signals from hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
#final_inputs = # signals into final output layer
#final_outputs = # signals from final output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 2400
learning_rate = 0.0065
hidden_nodes = 12
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
    for record, target in zip(train_features.loc[batch].values,
                              train_targets.loc[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
#create a new data set to explore Neural Network performance in other parts of the data.
#Set for Dec 2011
cx_test_data = data[339*24:360*24]
cx_test_features, cx_test_targets = cx_test_data.drop(target_fields, axis=1), cx_test_data[target_fields]
#Plot predictions and data from Dec 2011 and Dec 2012
fig1, ax1 = plt.subplots(figsize=(14,6))
cx_predictions = network.run(cx_test_features)*std + mean
ax1.plot(cx_predictions[0], label='Prediction 2011')
ax1.plot((cx_test_targets['cnt']*std + mean).values, label='Data 2011')
ax1.set_xlim(right=len(cx_predictions))
ax1.legend()
dates1 = pd.to_datetime(rides.loc[cx_test_data.index]['dteday'])
dates1 = dates1.apply(lambda d: d.strftime('%b %d'))
ax1.set_xticks(np.arange(len(dates1))[12::24])
_ = ax1.set_xticklabels(dates1[12::24], rotation=45)
#Show the mean squeared error for test set and the new set
test_MSE = MSE(predictions[0], (test_targets['cnt']*std + mean).values)
new_MSE = MSE(cx_predictions[0], (cx_test_targets['cnt']*std + mean).values)
print ('test MSE : ', test_MSE)
print ('new MSE : ',new_MSE)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
Step2: Step 3
Step3: Our data is now accessible by a key value. The keys are the column headers in the dataframe. In this example, those are 'Time (s) - Dev1/ai0' and 'Temperature - Dev1/ai0'. For example, let's access the data in the first column.
Step4: What would happen if we tried to access the data with an invalid key, say <code>1</code> for example? Let's try it to find out.
Step5: So let's say you have a large dataframe with unknown columns. There is a simple way to index them without having prior knowledge of what the dataframe columns are: namely, the <code>columns</code> attribute in pandas.
Step6: Data Manipulation (Plots)
Step7: In order to demonstrate the plotting capabilities of pandas arrays, let's use the example data that we imported earlier. The data frame contains only the two columns that were in the file: temperature and time. Because of this simplicity, we can trust pandas to properly interpret the first column as time and the second column as the measurement (temperature). Thus we can plot with the simple command.
Step8: While this simplification is nice, it is generally better to specify what data you want to plot. Particularly if you are automating the plotting of a large set of dataframes. To do this, specify the <code>x</code> and <code>y</code> arrays in your dataframe as you would in a standard <code>matplotlib</code> plot call, however since this plotting function is a method of the dataframe, you need only specify the column.
Step9: Now that we have the basics down, let's spice up the plot a little bit.
Step10: Data Manipulation (Timestamps)
Step11: Good news! Since python interpreted the date as a datetime object, we can use datetime object methods to determine the time in seconds. The one caveat is that we can only determine a time difference, not an absolute time. For more on this, read this stackoverflow question.
Step12: Note
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import os
dirPath = os.path.realpath('.')
fileName = 'assets/coolingExample.xlsx'
filePath = os.path.join(dirPath, fileName)
df = pd.read_excel(filePath,header=0)
df.head()
df[df.columns[0]]
try:
df[1]
except KeyError:
print("KeyError: 1 - not a valid key")
cols = df.columns
for col in cols:
print(df[col])
import matplotlib.pyplot as plt
plt.figure(1)
ax = df.plot()
plt.show()
plt.figure(2)
ax = df.plot(cols[0],cols[1])
plt.show()
plt.figure(3)
ax = df.plot(cols[0],cols[1])
ax.set_title('This is a Title')
ax.set_ylabel('Temperature (deg F)')
ax.grid()
plt.show()
df[cols[0]][0]
from datetime import datetime, date
startTime = df[cols[0]][0]
timeArray = []
for i in range(0,len(df[cols[0]])):
timeArray.append((datetime.combine(date.today(), df[cols[0]][i]) - datetime.combine(date.today(), startTime)).total_seconds())
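# A possible vectorized alternative (assuming the column holds datetime.time values):
elapsedSeconds = pd.to_timedelta(df[cols[0]].astype(str)).dt.total_seconds()
elapsedSeconds = elapsedSeconds - elapsedSeconds.iloc[0]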
plt.figure(4)
plt.plot(timeArray, df[cols[1]], 'b')
plt.title('This is a graph with a better time axis')
plt.ylabel('Temperature (deg F)')
plt.xlabel('Time (s)')
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we have to load the iges file on which we will perform the FFD. Since it is an iges file we use the IgesHandler() class. The parse method extracts the control point (or pole) coordinates of the NURBS surfaces that represent each face of the iges geometry, without touching the topology.
Step2: We can now use the show method to visualize the iges geometry. It basically loads the file and shows its geometry. It is different from the plot method since
Step3: Sometimes the write method can fail. In fact, in the write function we perform some operations like
|
<ASSISTANT_TASK:>
Python Code:
import pygem as pg
params = pg.params.FFDParameters()
params.read_parameters(filename='../tests/test_datasets/parameters_test_ffd_iges.prm')
iges_handler = pg.igeshandler.IgesHandler()
mesh_points = iges_handler.parse('../tests/test_datasets/test_pipe.iges')
free_form = pg.freeform.FFD(params, mesh_points)
free_form.perform()
new_mesh_points = free_form.modified_mesh_points
iges_handler.write(new_mesh_points, 'test_pipe_mod.iges')
iges_handler.write(new_mesh_points, 'test_pipe_mod.iges', 1e-3)
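# The extra third argument above is the handler's tolerance parameter for
# rebuilding the deformed surfaces; passing a looser value like this is the
# workaround used here when the plain write call fails (see the note in the text).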
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at the generated data
Step2: Parse the data in a reduced way
Step3: Use xml.etree instead
Step4: Via a callback
Step5: Convert the callback to a generator at the OSM data level
Step6: Conclusion
|
<ASSISTANT_TASK:>
Python Code:
import os, lzma
#basedir = os.path.join("/media/disk", "OSM_Data")
basedir = os.path.join("e:\\", "OSM_Data")
filename = "isle-of-wight-latest.osm.xz"
with lzma.open(os.path.join(basedir, filename), mode="rt", encoding="utf-8") as f:
print(next(f), end="")
print(next(f), end="")
print(next(f), end="")
print(next(f), end="")
import osmdigest.detail as detail
import datetime
possible_node_tags = set()
possible_way_tags = set()
possible_relation_tags = set()
#with detail.Parser(os.path.join(basedir, filename)) as gen:
start = datetime.datetime.now()
with detail.Parser("isle-of-wight-latest.osm") as gen:
for x in gen:
if isinstance(x, detail.OSM) or isinstance(x, detail.Bounds):
print(x)
elif isinstance(x, detail.Node):
for y in x.subobjs:
assert isinstance(y, detail.Tag)
possible_node_tags.add(y.key)
elif isinstance(x, detail.Way):
for y in x.subobjs:
if isinstance(y, detail.Tag):
possible_way_tags.add(y.key)
else:
assert isinstance(y, detail.NodeRef)
elif isinstance(x, detail.Relation):
for y in x.subobjs:
if isinstance(y, detail.Tag):
possible_relation_tags.add(y.key)
else:
assert isinstance(y, detail.Member)
assert y.type in {"way", "node", "relation"}
else:
            raise Exception("Should not see this")
print("Took {}".format(datetime.datetime.now()-start))
len(possible_node_tags), list(possible_node_tags)[:5]
len(possible_way_tags), list(possible_way_tags)[:5]
len(possible_relation_tags), list(possible_relation_tags)[:5]
import osmdigest.digest as digest
possible_node_tags = set()
possible_way_tags = set()
possible_relation_tags = set()
start = datetime.datetime.now()
for x in digest.parse_sax("isle-of-wight-latest.osm"):
if isinstance(x, digest.OSM) or isinstance(x, digest.Bounds):
print(x)
elif isinstance(x, digest.Node):
for key in x.tags.keys():
possible_node_tags.add(key)
elif isinstance(x, digest.Way):
for key in x.tags.keys():
possible_way_tags.add(key)
elif isinstance(x, digest.Relation):
for key in x.tags.keys():
possible_relation_tags.add(key)
print("Took {}".format(datetime.datetime.now()-start))
len(possible_node_tags), list(possible_node_tags)[:5]
len(possible_way_tags), list(possible_way_tags)[:5]
len(possible_relation_tags), list(possible_relation_tags)[:5]
possible_node_tags = set()
possible_way_tags = set()
possible_relation_tags = set()
start = datetime.datetime.now()
for x in digest.parse("isle-of-wight-latest.osm"):
if isinstance(x, digest.OSM) or isinstance(x, digest.Bounds):
print(x)
elif isinstance(x, digest.Node):
for key in x.tags.keys():
possible_node_tags.add(key)
elif isinstance(x, digest.Way):
for key in x.tags.keys():
possible_way_tags.add(key)
elif isinstance(x, digest.Relation):
for key in x.tags.keys():
possible_relation_tags.add(key)
print("Took {}".format(datetime.datetime.now()-start))
len(possible_node_tags), list(possible_node_tags)[:5]
len(possible_way_tags), list(possible_way_tags)[:5]
len(possible_relation_tags), list(possible_relation_tags)[:5]
class Handler(digest.OSMDataHandler):
def __init__(self):
self.possible_node_tags = set()
self.possible_way_tags = set()
self.possible_relation_tags = set()
def start(self, osm):
print(osm)
def bounds(self, bounds):
print(bounds)
def node(self, x):
for key in x.tags.keys():
self.possible_node_tags.add(key)
def way(self, x):
for key in x.tags.keys():
self.possible_way_tags.add(key)
def relation(self, x):
for key in x.tags.keys():
self.possible_relation_tags.add(key)
start = datetime.datetime.now()
handler = Handler()
digest.parse_callback("isle-of-wight-latest.osm", handler)
print("Took {}".format(datetime.datetime.now()-start))
len(handler.possible_node_tags), list(handler.possible_node_tags)[:5]
len(handler.possible_way_tags), list(handler.possible_way_tags)[:5]
len(handler.possible_relation_tags), list(handler.possible_relation_tags)[:5]
import osmdigest.utils.cbtogen as cbtogen
class Handler(digest.OSMDataHandler):
def __init__(self, delegate):
self.delegate = delegate
def start(self, osm):
self.delegate.notify(osm)
def bounds(self, bounds):
self.delegate.notify(bounds)
def node(self, x):
self.delegate.notify(x)
def way(self, x):
self.delegate.notify(x)
def relation(self, x):
self.delegate.notify(x)
generator = cbtogen.CallbackToGenerator()
handler = Handler(generator)
def func():
digest.parse_callback("isle-of-wight-latest.osm", handler)
generator.set_callback_function(func)
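# Inversion of control: Handler pushes each parsed element into the generator via
# notify(), and iterating over the generator inside the `with` block below pulls
# the same elements back out lazily.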
possible_node_tags = set()
possible_way_tags = set()
possible_relation_tags = set()
with generator:
start = datetime.datetime.now()
for x in generator:
if isinstance(x, digest.OSM) or isinstance(x, digest.Bounds):
print(x)
elif isinstance(x, digest.Node):
for key in x.tags.keys():
possible_node_tags.add(key)
elif isinstance(x, digest.Way):
for key in x.tags.keys():
possible_way_tags.add(key)
elif isinstance(x, digest.Relation):
for key in x.tags.keys():
possible_relation_tags.add(key)
print("Took {}".format(datetime.datetime.now()-start))
len(possible_node_tags), len(possible_way_tags), len(possible_relation_tags)
gen = digest.parse("isle-of-wight-latest.osm")
osm, bounds = next(gen), next(gen)
osm, bounds
minlon, maxlon, minlat, maxlat = None, None, None, None
nodes, ways, relations = [], [], []
for el in digest.parse("isle-of-wight-latest.osm"):
if el.name == "node":
nodes.append(el)
if minlon is None or el.longitude < minlon:
minlon = el.longitude
if maxlon is None or el.longitude > maxlon:
maxlon = el.longitude
if minlat is None or el.latitude < minlat:
minlat = el.latitude
if maxlat is None or el.latitude > maxlat:
maxlat = el.latitude
elif el.name == "way":
ways.append(el)
elif el.name == "relation":
relations.append(el)
minlon, maxlon, minlat, maxlat
node_ids_in_bb = { node.osm_id for node in nodes if
node.longitude >= bounds.min_longitude and node.longitude <= bounds.max_longitude
and node.latitude >= bounds.min_latitude and node.latitude <= bounds.max_latitude }
node_ids = { node.osm_id for node in nodes }
way_ids = { way.osm_id for way in ways }
# Find ways which feature no node in the bounding box
for way in ways:
if not any(noderef in node_ids_in_bb for noderef in way.nodes):
print(way)
# Check that all nodes in each way are in the dataset
for way in ways:
if not all(noderef in node_ids for noderef in way.nodes):
print(way)
relations = {rel.osm_id : rel for rel in relations}
# Find relations which feature a member not in the dataset
for rel in relations.values():
failed = False
for member in rel.members:
if member.type == "node" and member.ref not in node_ids:
failed = True
elif member.type == "way" and member.ref not in way_ids:
failed = True
elif member.type == "relation" and member.ref not in relations:
failed = True
if failed:
print(rel.osm_id)
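# Recursively check whether a relation (or any relation it references) reaches at least one member inside the bounding box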
def relation_has_member_in_bb(rel):
for member in rel.members:
if member.type == "node" and member.ref in node_ids_in_bb:
return True
elif member.type == "way" and member.ref in way_ids:
return True
elif member.type == "relation" and member.ref in relations:
return relation_has_member_in_bb(relations[member.ref])
return False
all(relation_has_member_in_bb(rel) for rel in relations.values())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preparation
Step2: The file Seasons_Stats.csv contains the statistics of all players since 1950. First, we drop a couple of blank columns, and the "Tm" column, which contains the team.
Step3: A second file, players.csv, contains static information for each player, such as height, weight, etc.
Step4: We merge both tables, and do some data cleaning
Step5: We will train a neural network with this data, to try to predict the position of each player.
Step6: Neural network training
Step7: The model performs well both for the validation and the test sets (65% might not seem a lot, but it is satisfying enough for our problem, where all the labels are very subjective: was Larry Bird a "SF-PF" or a "PF-SF"? Nobody can tell).
Step8: Predicting the positions of the First NBA Team of 2017
Step9: The model gets four of the five right. It's even more interesting that the one it gets wrong, Anthony Davis, can play in both the PF and C positions, and that in the last season he played more as a Power Forward than as a Center, as the model predicts
Step10: The model gets most of the players right, and the errors are always for a contiguous position (it is interesting that the model gets this right without having been provided with any information about the distances between the labels).
Step11: Curry is labeled as a point guard (his real position) from 1973 until today, and as a shooting guard before that. Perhaps because of his height (191cm), or perhaps because he is too much of a scorer. Bob Pettit is labeled as a center until 1967, and as a power forward after that (he played both roles, but nowadays he would have difficulty playing as a center, and would surely be a forward, perhaps even a small forward).
Step12: The model is able to detect the conversion of Jordan into a forward at the end of his career, but not the return of Magic as a power forward. Also, in his rookie season, he is classified as a small forward instead of as a shooting guard (Magic was clearly an outlier in the data, a 205cm point guard who could easily play in all five positions. It is even surprising that he is properly labelled as a point guard during most of his career)
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
stats = pd.read_csv(r'data/Seasons_Stats.csv', index_col=0)
stats_clean = stats.drop(['blanl', 'blank2', 'Tm'], axis=1)
stats_clean.head()
players = pd.read_csv(r'data/players.csv', index_col=0)
players.head(10)
data = pd.merge(stats_clean, players[['Player', 'height', 'weight']], left_on='Player', right_on='Player', right_index=False,
how='left', sort=False).fillna(value=0)
data = data[~(data['Pos']==0) & (data['MP'] > 200)]
data.reset_index(inplace=True, drop=True)
data['Player'] = data['Player'].str.replace('*','')
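# Convert the counting stats listed below to a per-36-minute basis, so that playing time does not dominate the features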
totals = ['PER', 'OWS', 'DWS', 'WS', 'OBPM', 'DBPM', 'BPM', 'VORP', 'FG', 'FGA', '3P', '3PA', '2P', '2PA', 'FT', 'FTA',
'ORB', 'DRB', 'TRB', 'AST', 'STL', 'BLK', 'TOV', 'PF', 'PTS']
for col in totals:
data[col] = 36 * data[col] / data['MP']
data.tail()
X = data.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = data['Pos'].as_matrix()
encoder = LabelBinarizer()
y_cat = encoder.fit_transform(y)
nlabels = len(encoder.classes_)
scaler =StandardScaler()
Xnorm = scaler.fit_transform(X)
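# Hold out the 2017 season as the test set; all earlier seasons are used for training and validation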
stats2017 = (data['Year'] == 2017)
X_train = Xnorm[~stats2017]
y_train = y_cat[~stats2017]
X_test = Xnorm[stats2017]
y_test = y_cat[stats2017]
model = Sequential()
model.add(Dense(40, activation='relu', input_dim=46))
model.add(Dropout(0.5))
model.add(Dense(30, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(nlabels, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
# x_train and y_train are Numpy arrays --just like in the Scikit-Learn API.
model.fit(X_train, y_train, epochs=200, batch_size=128, validation_split=0.2, verbose=1)
model.test_on_batch(X_test, y_test, sample_weight=None)
# Production model, using all data
model.fit(X_train, y_train, epochs=200, batch_size=128, validation_split=0, verbose=1)
first_team_members = ['Russell Westbrook', 'James Harden', 'Anthony Davis', 'LeBron James', 'Kawhi Leonard']
first_team_stats = data[[((x[1]['Player'] in first_team_members) & (x[1]['Year']==2017)) for x in data.iterrows()]]
first_team_stats
pd.DataFrame(index=first_team_stats.loc[:, 'Player'].values, data={'Real': first_team_stats.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm[first_team_stats.index, :]))})
mvp = [(1956, 'Bob Pettit'), (1957, 'Bob Cousy'), (1958, 'Bill Russell'), (1959, 'Bob Pettit'),
(1960, 'Wilt Chamberlain'), (1961, 'Bill Russell'), (1962, 'Bill Russell'), (1963, 'Bill Russell'),
(1964, 'Oscar Robertson'), (1965, 'Bill Russell'), (1966, 'Wilt Chamberlain'), (1967, 'Wilt Chamberlain'),
(1968, 'Wilt Chamberlain'), (1969, 'Wes Unseld'), (1970, 'Willis Reed'), (1971, 'Lew Alcindor'),
(1972, 'Kareem Abdul-Jabbar'), (1973, 'Dave Cowens'), (1974, 'Kareem Abdul-Jabbar'), (1975, 'Bob McAdoo'),
(1976, 'Kareem Abdul-Jabbar'), (1977, 'Kareem Abdul-Jabbar'), (1978, 'Bill Walton'), (1979, 'Moses Malone'),
(1980, 'Kareem Abdul-Jabbar'), (1981, 'Julius Erving'), (1982, 'Moses Malone'), (1983, 'Moses Malone'),
(1984, 'Larry Bird'), (1985, 'Larry Bird'), (1986, 'Larry Bird'), (1987, 'Magic Johnson'),
(1988, 'Michael Jordan'), (1989, 'Magic Johnson'), (1990, 'Magic Johnson'), (1991, 'Michael Jordan'),
(1992, 'Michael Jordan'), (1993, 'Charles Barkley'), (1994, 'Hakeem Olajuwon'), (1995, 'David Robinson'),
(1996, 'Michael Jordan'), (1997, 'Karl Malone'), (1998, 'Michael Jordan'), (1999, 'Karl Malone'),
(2000, 'Shaquille O\'Neal'), (2001, 'Allen Iverson'), (2002, 'Tim Duncan'), (2003, 'Tim Duncan'),
(2004, 'Kevin Garnett'), (2005, 'Steve Nash'), (2006, 'Steve Nash'), (2007, 'Dirk Nowitzki'),
(2008, 'Kobe Bryant'), (2009, 'LeBron James'), (2010, 'LeBron James'), (2011, 'Derrick Rose'),
(2012, 'LeBron James'), (2013, 'LeBron James'), (2014, 'Kevin Durant'), (2015, 'Stephen Curry'),
(2016, 'Stephen Curry')]
mvp_stats = pd.concat([data[(data['Player'] == x[1]) & (data['Year']==x[0])] for x in mvp], axis=0)
mvp_stats
mvp_pred = pd.DataFrame(index=mvp_stats.loc[:, 'Player'].values, data={'Real': mvp_stats.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm[mvp_stats.index, :]))})
mvp_pred
curry2017 = data[(data['Player'] == 'Stephen Curry') & (data['Year']==2017)]
pettit1956 = data[(data['Player'] == 'Bob Pettit') & (data['Year']==1956)]
time_travel_curry = pd.concat([curry2017 for year in range(1956, 2018)], axis=0)
time_travel_curry['Year'] = range(1956, 2018)
X = time_travel_curry.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = time_travel_curry['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
time_travel_curry_pred = pd.DataFrame(index=time_travel_curry.loc[:, 'Year'].values,
data={'Real': time_travel_curry.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
time_travel_pettit = pd.concat([pettit1956 for year in range(1956, 2018)], axis=0)
time_travel_pettit['Year'] = range(1956, 2018)
X = time_travel_pettit.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = time_travel_pettit['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
time_travel_pettit_pred = pd.DataFrame(index=time_travel_pettit.loc[:, 'Year'].values,
data={'Real': time_travel_pettit.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
pd.concat([time_travel_curry_pred,time_travel_pettit_pred],axis=1,keys=['Stephen Curry','Bob Pettit'])
magic = data[(data['Player'] == 'Magic Johnson')]
jordan = data[(data['Player'] == 'Michael Jordan')]
# Magic
X = magic.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = magic['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
magic_pred = pd.DataFrame(index=magic.loc[:, 'Age'].values,
data={'Real': magic.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
# Jordan
X = jordan.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = jordan['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
jordan_pred = pd.DataFrame(index=jordan.loc[:, 'Age'].values,
data={'Real': jordan.loc[:, 'Pos'].values,
'Predicted':encoder.inverse_transform(model.predict(Xnorm))})
pd.concat([magic_pred,jordan_pred],axis=1,keys=['Magic Johnson','Michael Jordan'])
first_team_stats
multiplier = np.arange(0.8,1.2,0.02)
growing_predicted = []
for p in first_team_stats.iterrows():
growing = pd.concat([p[1].to_frame().T for x in multiplier], axis=0)
growing['height'] = growing['height'] * multiplier
growing['weight'] = growing['weight'] * (multiplier ** 3)
X = growing.drop(['Player', 'Pos', 'G', 'GS', 'MP'], axis=1).as_matrix()
y = growing['Pos'].as_matrix()
y_cat = encoder.transform(y)
Xnorm = scaler.transform(X)
growing_predicted.append(pd.DataFrame(index=multiplier, data={'height': growing.loc[:, 'height'].values,
'Real': growing.loc[:, 'Pos'].values, 'Predicted':encoder.inverse_transform(model.predict(Xnorm))}))
pd.concat(growing_predicted,axis=1,keys=first_team_stats['Player'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The vocabulary is used at transform time to build the occurrence matrix
Step2: Let's refit with a slightly larger corpus
Step3: The vocabulary_ grows (logarithmically) with the size of the training corpus. Note that we could not have built the vocabularies in parallel on the 2 text documents as they share some words, hence this would require some kind of shared datastructure or synchronization barrier, which is complicated to set up, especially if we want to distribute the processing on a cluster.
Step4: The Sentiment 140 Dataset
Step5: Those files were downloaded from the research archive of the http
Step6: Let's parse the CSV files and load everything in memory. As loading everything can take up to 2GB, let's limit the collection to 100K tweets of each (positive and negative) out of the total of 1.6M tweets.
Step7: Let's display the first samples
Step8: A polarity of "0" means negative while a polarity of "4" means positive. All the positive tweets are at the end of the file
Step9: Let's split the training CSV file into a smaller training set and a validation set with 100k random tweets each
Step10: Let's open the manually annotated tweet files. The evaluation set also has neutral tweets with a polarity of "2" which we ignore. We can build the final evaluation set with only the positive and negative tweets of the evaluation CSV file
Step11: The Hashing Trick
Step12: This mapping is completely stateless and the dimensionality of the output space is explicitly fixed in advance (here we use a modulo 2 ** 20 which means roughly 1M dimensions). This makes it possible to work around the limitations of the vocabulary-based vectorizer, both for parallelizability and for online / out-of-core learning.
Step13: It shares the same "preprocessor", "tokenizer" and "analyzer" infrastructure
Step14: We can vectorize our datasets into a scipy sparse matrix exactly as we would have done with the CountVectorizer or TfidfVectorizer, except that we can directly call the transform method
Step15: The dimension of the output is fixed ahead of time to n_features=2 ** 20 by default (nearly 1M features) to minimize the rate of collision on most classification problems while having reasonably sized linear models (1M weights in the coef_ attribute)
Step16: As only the non-zero elements are stored, n_features has little impact on the actual size of the data in memory. We can combine the hashing vectorizer with a Passive-Aggressive linear model in a pipeline
Step17: Let's check that the score on the validation set is reasonably in line with the set of manually annotated tweets
Step18: As the text_train_small dataset is not that big we can still use a vocabulary based vectorizer to check that the hashing collisions are not causing any significant performance drop on the validation set (WARNING this is twice as slow as the hashing vectorizer version, skip this cell if your computer is too slow)
Step19: We get almost the same score, but it takes almost twice as long, and we also end up with a big, slow-to-(un)pickle datastructure in memory
Step21: More info and reference for the original papers on the Hashing Trick in the answers to this http
Step22: We can now use our infinite tweet source to train an online machine learning algorithm using the hashing vectorizer. Note the use of the partial_fit method of the PassiveAggressiveClassifier instance in place of the traditional call to the fit method that needs access to the full training set.
Step23: We can now plot the collected validation score values, versus the number of samples generated by the infinite source and fed to the model
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(min_df=1)
vectorizer.fit([
"The cat sat on the mat.",
])
vectorizer.vocabulary_
X = vectorizer.transform([
"The cat sat on the mat.",
"This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
vectorizer = CountVectorizer(min_df=1)
vectorizer.fit([
"The cat sat on the mat.",
"The quick brown fox jumps over the lazy dog.",
])
vectorizer.vocabulary_
X = vectorizer.transform([
"The cat sat on the mat.",
"This cat is a nice cat.",
]).toarray()
print(len(vectorizer.vocabulary_))
print(vectorizer.get_feature_names())
print(X)
import os
sentiment140_folder = os.path.join('datasets', 'sentiment140')
training_csv_file = os.path.join(sentiment140_folder, 'training.1600000.processed.noemoticon.csv')
testing_csv_file = os.path.join(sentiment140_folder, 'testdata.manual.2009.06.14.csv')
!ls -lh datasets/sentiment140/training.1600000.processed.noemoticon.csv
FIELDNAMES = ('polarity', 'id', 'date', 'query', 'author', 'text')
def read_csv(csv_file, fieldnames=FIELDNAMES, max_count=None,
n_partitions=1, partition_id=0):
import csv # put the import inside for use in IPython.parallel
def file_opener(csv_file):
try:
open(csv_file, 'r', encoding="latin1").close()
return open(csv_file, 'r', encoding="latin1")
except TypeError:
# Python 2 does not have encoding arg
return open(csv_file, 'rb')
texts = []
targets = []
with file_opener(csv_file) as f:
reader = csv.DictReader(f, fieldnames=fieldnames,
delimiter=',', quotechar='"')
pos_count, neg_count = 0, 0
for i, d in enumerate(reader):
if i % n_partitions != partition_id:
# Skip entry if not in the requested partition
continue
if d['polarity'] == '4':
if max_count and pos_count >= max_count / 2:
continue
pos_count += 1
texts.append(d['text'])
targets.append(1)
elif d['polarity'] == '0':
if max_count and neg_count >= max_count / 2:
continue
neg_count += 1
texts.append(d['text'])
targets.append(-1)
return texts, targets
%time text_train_all, target_train_all = read_csv(training_csv_file, max_count=200000)
len(text_train_all), len(target_train_all)
for text in text_train_all[:3]:
print(text + "\n")
print(target_train_all[:3])
for text in text_train_all[-3:]:
print(text + "\n")
print(target_train_all[-3:])
from sklearn.cross_validation import train_test_split
text_train_small, text_validation, target_train_small, target_validation = train_test_split(
text_train_all, np.array(target_train_all), test_size=.5, random_state=42)
len(text_train_small)
(target_train_small == -1).sum(), (target_train_small == 1).sum()
len(text_validation)
(target_validation == -1).sum(), (target_validation == 1).sum()
text_test_all, target_test_all = read_csv(testing_csv_file)
len(text_test_all), len(target_test_all)
from sklearn.utils.murmurhash import murmurhash3_bytes_u32
# encode for python 3 compatibility
for word in "the cat sat on the mat".encode("utf-8").split():
print("{0} => {1}".format(
word, murmurhash3_bytes_u32(word, 0) % 2 ** 20))
from sklearn.feature_extraction.text import HashingVectorizer
h_vectorizer = HashingVectorizer(encoding='latin-1')
h_vectorizer
analyzer = h_vectorizer.build_analyzer()
analyzer('This is a test sentence.')
%time X_train_small = h_vectorizer.transform(text_train_small)
X_train_small
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.pipeline import Pipeline
h_pipeline = Pipeline((
('vec', HashingVectorizer(encoding='latin-1')),
('clf', PassiveAggressiveClassifier(C=1, n_iter=1)),
))
%time h_pipeline.fit(text_train_small, target_train_small).score(text_validation, target_validation)
h_pipeline.score(text_test_all, target_test_all)
from sklearn.feature_extraction.text import TfidfVectorizer
vocabulary_vec = TfidfVectorizer(encoding='latin-1', use_idf=False)
vocabulary_pipeline = Pipeline((
('vec', vocabulary_vec),
('clf', PassiveAggressiveClassifier(C=1, n_iter=1)),
))
%time vocabulary_pipeline.fit(text_train_small, target_train_small).score(text_validation, target_validation)
len(vocabulary_vec.vocabulary_)
from random import Random
class InfiniteStreamGenerator(object):
"""Simulate random polarity queries on the twitter streaming API"""
def __init__(self, texts, targets, seed=0, batchsize=100):
self.texts_pos = [text for text, target in zip(texts, targets)
if target > 0]
self.texts_neg = [text for text, target in zip(texts, targets)
if target <= 0]
self.rng = Random(seed)
self.batchsize = batchsize
def next_batch(self, batchsize=None):
batchsize = self.batchsize if batchsize is None else batchsize
texts, targets = [], []
for i in range(batchsize):
# Select the polarity randomly
target = self.rng.choice((-1, 1))
targets.append(target)
# Combine 2 random texts of the right polarity
pool = self.texts_pos if target > 0 else self.texts_neg
text = self.rng.choice(pool) + " " + self.rng.choice(pool)
texts.append(text)
return texts, targets
infinite_stream = InfiniteStreamGenerator(text_train_small, target_train_small)
texts_in_batch, targets_in_batch = infinite_stream.next_batch(batchsize=3)
for t in texts_in_batch:
print(t + "\n")
targets_in_batch
n_batches = 1000
validation_scores = []
training_set_size = []
# Build the vectorizer and the classifier
h_vectorizer = HashingVectorizer(encoding='latin-1')
clf = PassiveAggressiveClassifier(C=1)
# Extract the features for the validation once and for all
X_validation = h_vectorizer.transform(text_validation)
classes = np.array([-1, 1])
n_samples = 0
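# Out-of-core training loop: each incoming batch is vectorized and passed to partial_fit, so the full corpus never has to be held in memory at once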
for i in range(n_batches):
texts_in_batch, targets_in_batch = infinite_stream.next_batch()
n_samples += len(texts_in_batch)
# Vectorize the text documents in the batch
X_batch = h_vectorizer.transform(texts_in_batch)
# Incrementally train the model on the new batch
clf.partial_fit(X_batch, targets_in_batch, classes=classes)
if n_samples % 100 == 0:
# Compute the validation score of the current state of the model
score = clf.score(X_validation, target_validation)
validation_scores.append(score)
training_set_size.append(n_samples)
if i % 100 == 0:
print("n_samples: {0}, score: {1:.4f}".format(n_samples, score))
plt.plot(training_set_size, validation_scores)
plt.ylim(0.5, 1)
plt.xlabel("Number of samples")
plt.ylabel("Validation score")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each SymmetryOperation has an optional label attribute, and records the matrix representation of the symmetry operation along with that optional label. We can provide the label when creating a SymmetryOperation
Step2: or set it afterwards
Step3: Or for $C_3$
Step4: Vector representations of symmetry operations
Step5: The vector representation of a SymmetryOperation can be accessed using the as_vector() method.
Step6: Inverting symmetry operations
Step7: The product of $C_3$ and $C_3^\prime$ is the identity, $E$.
Step8: <img src="figures/triangular_c3_inversion.pdf" />
Step9: The resulting SymmetryOperation does not have a label defined. This can be set directly, or by chaining the .set_label() method, e.g.
Step10: The SymmetryGroup class
Step11: <img src="figures/triangular_c3v_symmetry_operations.pdf" />
Step12: The ConfigurationSpace class
Step13: The Configuration class
Step14: The effect of a particular symmetry operation acting on a configuration can now be calculated using the SymmetryOperation.operate_on() method, or by direct multiplication, e.g.
Step15: <img src="figures/triangular_rotation_operation.pdf" />
Step16: This ConfigurationSpace has been created without a symmetry_group argument. The default behaviour in this case is to create a SymmetryGroup containing only the identity, $E$.
Step17: We can now calculate all symmetry inequivalent arrangements where two sites are occupied and two are unoccupied, using the unique_configurations() method. This takes as a argument a dict with the numbers of labels to be arranged in the configuration space. Here, we use the labels 1 and 0 to represent occupied and unoccupied sites, respectively, and the distribution of sites is given by { 1
Step18: Because we have not yet taken into account the symmetry of the configuration space, we get
Step19: We can also calculate the result when all symmetry operations of this configuration space are included.
Step20: Taking symmetry in to account, we now only have two unique configurations
Step21: <img src="figures/square_unique_configurations_2.pdf">
Step22: We can use the bsym.interface.pymatgen.unique_structure_substitutions() function to identify symmetry-inequivalent structures generated by substituting at different sites.
Step23: As a trivial example, when substituting one Li atom for Na, we get a single unique structure
Step24: <img src="figures/pymatgen_example_one_site.pdf">
Step25: This Li$\to$Na substitution breaks the symmetry of the $4\times4$ supercell.
Step26: number_of_equivalent_configurations only lists the number of equivalent configurations found when performing the second substitution, when the list of structures unique_structures_with_Mg was created. The full configuration degeneracy relative to the initial empty 4×4 lattice can be queried using full_configuration_degeneracy.
Step27: <img src="figures/pymatgen_example_two_sites.pdf">
Step28: This double substitution can also be done in a single step
Step29: Because both substitutions were performed in a single step, number_of_equivalent_configurations and full_configuration_degeneracy now contain the same data
Step30: Constructing SpaceGroup and ConfigurationSpace objects using pymatgen
Step31: Documentation
Step32: Progress bars
|
<ASSISTANT_TASK:>
Python Code:
from bsym import SymmetryOperation
SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]])
SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]], label='E' )
e = SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]])
e.label = 'E'
e
c_3 = SymmetryOperation( [ [ 0, 0, 1 ],
[ 1, 0, 0 ],
[ 0, 1, 0 ] ], label='C3' )
c_3
c_3_from_vector = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_from_vector
c_3.as_vector()
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C3_inv' )
print( c_3, '\n' )
print( c_3_inv, '\n' )
c_3 * c_3_inv
c_3.invert()
c_3.invert( label= 'C3_inv')
c_3.invert().set_label( 'C3_inv' )
from bsym import PointGroup
# construct SymmetryOperations for C_3v group
e = SymmetryOperation.from_vector( [ 1, 2, 3 ], label='e' )
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C_3' )
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C_3_inv' )
sigma_a = SymmetryOperation.from_vector( [ 1, 3, 2 ], label='S_a' )
sigma_b = SymmetryOperation.from_vector( [ 3, 2, 1 ], label='S_b' )
sigma_c = SymmetryOperation.from_vector( [ 2, 1, 3 ], label='S_c' )
c3v = PointGroup( [ e, c_3, c_3_inv, sigma_a, sigma_b, sigma_c ] )
c3v
from bsym import ConfigurationSpace
c = ConfigurationSpace( objects=['a', 'b', 'c' ], symmetry_group=c3v )
c
from bsym import Configuration
conf_1 = Configuration( [ 1, 1, 0 ] )
conf_1
c1 = Configuration( [ 1, 1, 0 ] )
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ] )
c_3.operate_on( c1 )
c_3 * conf_1
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ] ) # four vector configuration space
c
c.unique_configurations( {1:2, 0:2} )
[ uc.count for uc in c.unique_configurations( {1:2, 0:2} ) ]
# construct point group
e = SymmetryOperation.from_vector( [ 1, 2, 3, 4 ], label='E' )
c4 = SymmetryOperation.from_vector( [ 2, 3, 4, 1 ], label='C4' )
c4_inv = SymmetryOperation.from_vector( [ 4, 1, 2, 3 ], label='C4i' )
c2 = SymmetryOperation.from_vector( [ 3, 4, 1, 2 ], label='C2' )
sigma_x = SymmetryOperation.from_vector( [ 4, 3, 2, 1 ], label='s_x' )
sigma_y = SymmetryOperation.from_vector( [ 2, 1, 4, 3 ], label='s_y' )
sigma_ac = SymmetryOperation.from_vector( [ 1, 4, 3, 2 ], label='s_ac' )
sigma_bd = SymmetryOperation.from_vector( [ 3, 2, 1, 4 ], label='s_bd' )
c4v = PointGroup( [ e, c4, c4_inv, c2, sigma_x, sigma_y, sigma_ac, sigma_bd ] )
# create ConfigurationSpace with the c4v PointGroup.
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ], symmetry_group=c4v )
c
c.unique_configurations( {1:2, 0:2} )
[ uc.count for uc in c.unique_configurations( {1:2, 0:2 } ) ]
c.unique_configurations( {2:1, 1:1, 0:2} )
[ uc.count for uc in c.unique_configurations( {2:1, 1:1, 0:2 } ) ]
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
import numpy as np
# construct a pymatgen Structure instance using the site fractional coordinates
coords = np.array( [ [ 0.0, 0.0, 0.0 ] ] )
atom_list = [ 'Li' ]
lattice = Lattice.from_parameters( a=1.0, b=1.0, c=1.0, alpha=90, beta=90, gamma=90 )
parent_structure = Structure( lattice, atom_list, coords ) * [ 4, 4, 1 ]
parent_structure.cart_coords.round(2)
from bsym.interface.pymatgen import unique_structure_substitutions
print( unique_structure_substitutions.__doc__ )
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Na':1, 'Li':15 } )
len( unique_structures )
na_substituted = unique_structures[0]
unique_structures_with_Mg = unique_structure_substitutions( na_substituted, 'Li', { 'Mg':1, 'Li':14 } )
len( unique_structures_with_Mg )
[ s.number_of_equivalent_configurations for s in unique_structures_with_Mg ]
[ s.full_configuration_degeneracy for s in unique_structures_with_Mg ]
# Check the squared distances between the Na and Mg sites in these unique structures are [1, 2, 4, 5, 8]
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] )**2 for s in unique_structures_with_Mg ] ) )
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Mg':1, 'Na':1, 'Li':14 } )
len(unique_structures)
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] ) for s in unique_structures ] ) )**2
[ s.number_of_equivalent_configurations for s in unique_structures ]
[ s.full_configuration_degeneracy for s in unique_structures ]
from bsym.interface.pymatgen import ( space_group_symbol_from_structure,
space_group_from_structure,
configuration_space_from_structure )
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.5, 0.5, 0.0 ],
[ 0.0, 0.5, 0.5 ],
[ 0.5, 0.0, 0.5 ] ] )
atom_list = [ 'Li' ] * len( coords )
lattice = Lattice.from_parameters( a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90 )
structure = Structure( lattice, atom_list, coords )
space_group_symbol_from_structure( structure )
space_group_from_structure( structure )
configuration_space_from_structure( structure )
a = 3.798 # lattice parameter
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
[ 0.5, 0.0, 0.0 ],
[ 0.0, 0.5, 0.0 ],
[ 0.0, 0.0, 0.5 ] ] )
atom_list = [ 'Ti', 'X', 'X', 'X' ]
lattice = Lattice.from_parameters( a=a, b=a, c=a, alpha=90, beta=90, gamma=90 )
unit_cell = Structure( lattice, atom_list, coords )
parent_structure = unit_cell * [ 2, 2, 2 ]
unique_structures = unique_structure_substitutions( parent_structure, 'X', { 'O':8, 'F':16 },
show_progress='notebook' )
%load_ext version_information
%version_information bsym, numpy, jupyter, pymatgen, tqdm
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Repeat the simulation with notices turned off
Step3: File helpers for reading and writing
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from __future__ import print_function
import tellurium as te
# to get the tellurium version use
print('te.__version__')
print(te.__version__)
# or
print('te.getTelluriumVersion()')
print(te.getTelluriumVersion())
# to print the full version info use
print('-' * 80)
te.printVersionInfo()
print('-' * 80)
from builtins import range
# Load SBML file
r = te.loada(
model test
J0: X0 -> X1; k1*X0;
X0 = 10; X1=0;
k1 = 0.2
end
)
import matplotlib.pyplot as plt
# Turn off notices so they don't clutter the output
te.noticesOff()
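# Re-run the simulation 20 times, increasing k1 by 0.2 on each pass, and overlay all trajectories in one figure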
for i in range(0, 20):
result = r.simulate (0, 10)
r.reset()
r.plot(result, loc=None, show=False,
linewidth=2.0, linestyle='-', color='black', alpha=0.8)
r.k1 = r.k1 + 0.2
# Turn the notices back on
te.noticesOn()
import tellurium as te
# create tmp file
import tempfile
ftmp = tempfile.NamedTemporaryFile(suffix=".xml")
# load model
r = te.loada('S1 -> S2; k1*S1; k1 = 0.1; S1 = 10')
# save to file
te.saveToFile(ftmp.name, r.getMatlab())
# or easier via
r.exportToMatlab(ftmp.name)
# load file
matlabstr = te.readFromFile(ftmp.name)
print('%' + '*'*80)
print('Converted MATLAB code')
print('%' + '*'*80)
print(matlabstr[1531:2000])
print('...')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import test image. The colormap is Matlab's Jet
Step2: Reduce number of colours
Step3: Convert from RGB to Lab and then to LCh, get the unique values of H (with their corresponding L and C), then sort both lightness L and hue H by increasing values of H
Step4: Import a function to plot colored lines in the final plot using the colormap created above
Step5: Make final plot of the sorted hue, H versus lightness, L, colored by L
Step6: Run perceptual test checks for monotonicity
Step7: Now we try it on an abstract rainbow image
Step8: Try it on mycarta perceptual rainbow
Step9: The test should have worked but it did not.
Step10: From Matt Hall
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, io, segmentation, color
from skimage.future import graph
%matplotlib inline
import requests
from PIL import Image
from io import BytesIO
url = 'https://mycarta.files.wordpress.com/2015/04/jet_tight.png'
r = requests.get(url)
img = np.asarray(Image.open(BytesIO(r.content)).convert('RGB'))
img = np.asarray(Image.open('data/cbar/test.png'))[...,:3]
# plot image
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(img)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
# parameters chosen by trial and error. Will have to find a way to automatically optimize them
labels1 = segmentation.slic(img, compactness=30, n_segments=32)
out1 = color.label2rgb(labels1, img, kind='avg')
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(out1)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
width, height, dump = np.shape(out1)
print(width, height, dump)
# method 1
# extract lightness and hue, combine them into a 2D array
# extract
from skimage.color import rgb2lab, lab2lch, lch2lab, lab2rgb
lab = rgb2lab(out1)
lch = lab2lch(lab)
lab = np.asarray(lab)
lch = np.asarray(lch)
# reshape
pixels_lab = np.reshape(lab, (width*height, -1))
l1, a, b = np.split(pixels_lab, 3, axis=-1)
pixels_lch = np.reshape(lch, (width*height, -1))
l2, c, h = np.split(pixels_lch, 3, axis=-1)
# flatten
import itertools
lM = list(itertools.chain.from_iterable(l2))
hM = list(itertools.chain.from_iterable(h))
# zip together to make 2D numpy array
lhM = np.asarray(list(zip(hM,lM)))
lhM
# Sorting unique rows
# Joe Kington's answer on Stackoverflow: http://stackoverflow.com/a/16971224
def unique_rows(data):
print(data.shape)
uniq = np.unique(data.view(data.dtype.descr * data.shape[1]))
return uniq.view(data.dtype).reshape(-1, data.shape[1])
uniqLM = unique_rows(lhM)
uniqLM.shape
# method 2
# sorting both lightness and hue by hue separately
from skimage.color import rgb2lab, lab2lch, lch2lab, lab2rgb
lab = rgb2lab(out1)
lch = lab2lch(lab)
lab = np.asarray(lab)
lch = np.asarray(lch)
pixels_lab = np.reshape(lab, (width*height, -1))
l1, a, b = np.split(pixels_lab, 3, axis=-1)
pixels_lch = np.reshape(lch, (width*height, -1))
l2, c, h = np.split(pixels_lch, 3, axis=-1)
huniq, unidx = np.unique(h, return_index=True)
luniq = l2[unidx]
cuniq = c[unidx]
# flatten luniq, cuniq
import itertools
luniqM = list(itertools.chain.from_iterable(luniq))
cuniqM = list(itertools.chain.from_iterable(cuniq))
# compare output of two methods
lhM2 = np.asarray(list(zip(huniq,luniqM)))
print('method 2')
print(' H L')
print(lhM2[:4])
print(lhM2[-4:])
print('method 1')
print(' H L')
print(uniqLM[:4])
print(uniqLM[-4:])
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
# Data manipulation:
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates, in the correct format for LineCollection:
an array of the form numlines x (points per line) x 2 (x and y) array
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
# Interface to LineCollection:
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0), linewidth=3, alpha=1.0):
'''
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
'''
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = LineCollection(segments, array=z, cmap=cmap, norm=norm, linewidth=linewidth, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
def clear_frame(ax=None):
# Taken from a post by Tony S Yu
if ax is None:
ax = plt.gca()
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
for spine in ax.spines.values():
spine.set_visible(False)
# To color by L, it has to be normalized to [0 1]
luniqM_n=(luniqM-min(luniqM))/(max(luniqM)-min(luniqM))
fig = plt.figure(figsize=(16,4))
plt.xticks(np.arange(0, 2.25*np.pi,0.25*np.pi),[0., 45., 90., 135., 180., 225., 270., 315., 360.])
# Hue as 0-360 angle
ax1 = fig.add_subplot(111)
# ax1.scatter(huniq, luniq)
colorline(huniq,luniqM, luniqM_n, linewidth=4,cmap='gray')
ax1.set_xlim(0, 2.25*np.pi)
ax1.set_ylim(-5, 105)
ax1.text(5, 95, 'H vs. L - colored by L', va='top')
plt.show()
# as of Nov 05, 2015 after reinstalling Anaconda, using Colorline gives a future warning:
# https://github.com/dpsanders/matplotlib-examples/issues/1
# rerun above to suppress warning
# Stackoverflow answer http://stackoverflow.com/a/4985520
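# Monotonicity helpers: a perceptual colormap should have lightness that changes monotonically as we move along the sorted hues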
def pairwise(seq):
items = iter(seq)
last = next(items)
for item in items:
yield last, item
last = item
def strictly_increasing(L):
return all(x<y for x, y in pairwise(L))
def strictly_decreasing(L):
return all(x>y for x, y in pairwise(L))
def non_increasing(L):
return all(x>=y for x, y in pairwise(L))
def non_decreasing(L):
return all(x<=y for x, y in pairwise(L))
print(strictly_increasing(luniq))
print(non_decreasing(luniq))
print(strictly_decreasing(luniq))
print(non_increasing(luniq))
# Originally from: http://bgfons.com/upload/rainbow_texture1761.jpg. Resized and saved as png
url = 'https://mycarta.files.wordpress.com/2015/11/rainbow_texture17611.png'
r = requests.get(url)
img = np.asarray(Image.open(BytesIO(r.content)).convert('RGB'))
# plot image
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(img)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
labels1 = segmentation.slic(img, compactness=30, n_segments=32)
out1 = color.label2rgb(labels1, img, kind='avg')
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(out1)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
width, height, dump = np.shape(out1)
# method 2
# sorting both lightness and hue by hue separately
from skimage.color import rgb2lab, lab2lch, lch2lab, lab2rgb
lab = rgb2lab(out1)
lch = lab2lch(lab)
lab = np.asarray(lab)
lch = np.asarray(lch)
pixels_lab = np.reshape(lab, (width*height, -1))
l1, a, b = np.split(pixels_lab, 3, axis=-1)
pixels_lch = np.reshape(lch, (width*height, -1))
l2, c, h = np.split(pixels_lch, 3, axis=-1)
huniq, unidx = np.unique(h, return_index=True)
luniq = l2[unidx]
cuniq = c[unidx]
# flatten luniq, cuniq
import itertools
luniqM = list(itertools.chain.from_iterable(luniq))
cuniqM = list(itertools.chain.from_iterable(cuniq))
# To color by L, it has to be normalized to [0 1]
luniqM_n=(luniqM-min(luniqM))/(max(luniqM)-min(luniqM))
fig = plt.figure(figsize=(8,4))
plt.xticks(np.arange(0, 2.25*np.pi,0.25*np.pi),[0., 45., 90., 135., 180., 225., 270., 315., 360.])
# Hue as 0-360 angle
ax1 = fig.add_subplot(111)
# ax1.scatter(huniq, luniq)
colorline(huniq,luniqM, luniqM_n, linewidth=4,cmap='gray')
ax1.set_xlim(0, 2.25*np.pi)
ax1.set_ylim(-5, 105)
ax1.text(5, 95, 'H vs. L - colored by L', va='top')
plt.show()
print(strictly_increasing(luniq))
print(non_decreasing(luniq))
print(strictly_decreasing(luniq))
print(non_increasing(luniq))
url = 'https://mycarta.files.wordpress.com/2015/04/cubic_no_red_tight.png'
r = requests.get(url)
img = np.asarray(Image.open(BytesIO(r.content)).convert('RGB'))
# plot image
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(img)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
labels1 = segmentation.slic(img, compactness=30, n_segments=32)
out1 = color.label2rgb(labels1, img, kind='avg')
fig = plt.figure(figsize=(6,6))
ax1 = fig.add_subplot(111)
plt.imshow(out1)
ax1.xaxis.set_ticks([])
ax1.yaxis.set_ticks([])
plt.show()
width, height, dump = np.shape(out1)
# method 2
# sorting both lightness and hue by hue separately
from skimage.color import rgb2lab, lab2lch, lch2lab, lab2rgb
lab = rgb2lab(out1)
lch = lab2lch(lab)
lab = np.asarray(lab)
lch = np.asarray(lch)
pixels_lab = np.reshape(lab, (width*height, -1))
l1, a, b = np.split(pixels_lab, 3, axis=-1)
pixels_lch = np.reshape(lch, (width*height, -1))
l2, c, h = np.split(pixels_lch, 3, axis=-1)
huniq, unidx = np.unique(h, return_index=True)
luniq = l2[unidx]
cuniq = c[unidx]
# flatten luniq, cuniq
import itertools
luniqM = list(itertools.chain.from_iterable(luniq))
cuniqM = list(itertools.chain.from_iterable(cuniq))
# To color by L, it has to be normalized to [0 1]
luniqM_n=(luniqM-min(luniqM))/(max(luniqM)-min(luniqM))
fig = plt.figure(figsize=(8,4))
plt.xticks(np.arange(0, 2.25*np.pi,0.25*np.pi),[0., 45., 90., 135., 180., 225., 270., 315., 360.])
# Hue as 0-360 angle
ax1 = fig.add_subplot(111)
# ax1.scatter(huniq, luniq)
colorline(huniq,luniqM, luniqM_n, linewidth=4,cmap='gray')
ax1.set_xlim(0, 2.25*np.pi)
ax1.set_ylim(-5, 105)
ax1.text(5, 95, 'H vs. L - colored by L', va='top')
plt.show()
print(strictly_increasing(luniq))
print(strictly_decreasing(luniq))
print(non_increasing(luniq))
print(non_decreasing(luniq))
print(luniqM[:15])
plt.plot(luniqM[:15])
def moving_average(a, length, mode='valid'):
#pad = np.floor(length/2)
pad = int(np.floor(length/2)) # replace to prevent a deprecation warning
# due to passing a float as an index
if mode == 'full':
pad *= 2
# Make a padded version, paddding with first and last values
r = np.empty(a.shape[0] + 2*pad)
r[:pad] = a[0]
r[pad:-pad] = a
r[-pad:] = a[-1]
# Cumsum with shifting trick
s = np.cumsum(r, dtype=float)
s[length:] = s[length:] - s[:-length]
out = s[length-1:]/length
# Decide what to return
if mode == 'same':
if out.shape[0] != a.shape[0]:
# If size doesn't match, then interpolate.
out = (out[:-1,...] + out[1:,...]) / 2
return out
elif mode == 'valid':
return out[pad:-pad]
else: # mode=='full' and we used a double pad
return out
avg = moving_average(np.asarray(luniqM), 7, mode='same')
plt.plot(avg[:15])
print(strictly_increasing(avg))
print(non_decreasing(avg))
print(strictly_decreasing(avg))
print(non_increasing(avg))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we choose the Wine dataset which has 3 classes.
Step2: Now we set up an Aqua configuration dictionary to use the classical SVM algorithm and add a multiclass extension to classify the Wine data set, since it has 3 classes. We loop over the three extensions (modifying the aqua_dict dictionary) to show the result with each.
|
<ASSISTANT_TASK:>
Python Code:
from datasets import *
from qiskit_aqua.utils import split_dataset_to_data_and_labels
from qiskit_aqua.input import get_input_instance
from qiskit_aqua import run_algorithm
import numpy as np
n = 2 # dimension of each data point
sample_Total, training_input, test_input, class_labels = Wine(training_size=20,
test_size=10, n=n, PLOT_DATA=True)
temp = [test_input[k] for k in test_input]
total_array = np.concatenate(temp)
aqua_dict = {
'problem': {'name': 'svm_classification'},
'algorithm': {
'name': 'SVM'
},
'multiclass_extension': {'name': 'OneAgainstRest'}
}
algo_input = get_input_instance('SVMInput')
algo_input.training_dataset = training_input
algo_input.test_dataset = test_input
algo_input.datapoints = total_array
extensions = [
{'name': 'OneAgainstRest'},
{'name': 'AllPairs'},
{'name': 'ErrorCorrectingCode', 'code_size': 5}
]
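# Run the classical SVM once per multiclass extension and print the result of each run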
for extension in extensions:
aqua_dict['multiclass_extension'] = extension
result = run_algorithm(aqua_dict, algo_input)
print("\n----- Using multiclass extension: '{}' -----\n".format(extension['name']))
for k,v in result.items():
print("'{}' : {}".format(k, v))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will be using the WHFast integrator with a fixed timestep. It's important to point out that the default IAS15 integrator is not well suited for stochastic forces because it automatically reduces the timestep if it doesn't achieve an accuracy near machine precision. Because the stochastic forces are noisy by design, it might never converge or reduce the timestep to a very small value.
Step2: Next, we add the stochastic_forces module in REBOUNDx
Step3: We can now turn on stochastic forces for a particle by setting the particle's $\kappa$ (kappa) field to a finite value. This parameter determines the strength of the stochastic forces and is expressed relative to the gravitational force that the particle experiences from the central object.
Step4: You can also specify the auto-correlation time of the stochastic forces in units of the current orbital period. The default of $\tau_\kappa$ is 1, so we don't really need this line
Step5: We can now run the simulation for 10000 orbits.
Step6: Let's plot the random walk of the semi-major axis.
Step7: Let us run a few more simulations with different diffusion parameters and verify that the planet is indeed undergoing a random walk where the semi-major axes grow as $\Delta a \sim \sqrt{t}$. Rein and Papaloizou (2009) provide equations for all orbital elements in single planet systems as well as for libration amplitudes in two-planet systems. In our case, we are interested in the change of semi-major axis which is given by (see their Eq 46)
Step8: The following plot compares the analytic predictions (dashed lines) to the numerical simulations (solid lines). Because this is a stochastic process, we can only expect that the analytic predictions hold in the average sense. This is indeed the case
Step9: The stochastic_forces module uses the internal pseudo random number generator of REBOUND. This means that by default, each simulation starts with a random seed. If you want to generate reproducible results, you can manually set the seed to the same value every time you run the simulation.
|
<ASSISTANT_TASK:>
Python Code:
import rebound
sim = rebound.Simulation()
sim.add(m=1.) # Star
sim.add(m=1e-3, a=1) # Jupiter mass planet
sim.move_to_com()
sim.integrator = "whfast"
sim.dt = sim.particles[1].P/34.5678 # About 30 steps per orbit
import reboundx
rebx = reboundx.Extras(sim)
sto = rebx.load_force("stochastic_forces")
rebx.add_force(sto)
sim.particles[1].params["kappa"] = 1.0e-5
sim.particles[1].params["tau_kappa"] = 1.0
import numpy as np
times = np.linspace(0.,1e4*np.pi*2., 1000)
semia = np.zeros(len(times))
for i, t in enumerate(times):
sim.integrate(t)
semia[i] = sim.particles[1].a
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1,1)
ax.set_xlabel("time [orbits]")
ax.set_ylabel("semi-major axis")
ax.plot(times/np.pi/2., semia);
def runsim(kappa):
sim = rebound.Simulation()
sim.add(m=1.) # Star
sim.add(m=1e-3, a=1) # Jupiter mass planet
sim.move_to_com()
sim.integrator = "whfast"
sim.dt = sim.particles[1].P/30.3
rebx = reboundx.Extras(sim)
sto = rebx.load_force("stochastic_forces")
rebx.add_force(sto)
sim.particles[1].params["kappa"] = kappa
times = np.pi*2.*np.logspace(0.,5, 1000)
semia = np.zeros(len(times))
for i, t in enumerate(times):
sim.integrate(t)
semia[i] = sim.particles[1].a
return times, semia
kappas = [1e-5, 1e-6, 1e-7]
results = np.zeros((len(kappas),1000))
for i, kappa in enumerate(kappas):
times, results[i] = runsim(kappa)
fig, ax = plt.subplots(1,1, figsize=(10,5))
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("time [orbits]")
ax.set_ylim([1e-6,1e-1])
ax.set_ylabel("relative change in semi-major axis")
for i, kappa in enumerate(kappas):
p = ax.plot(times/np.pi/2., np.abs(1.-results[i]), label="$\kappa = 10^{%.0f}$"%np.log10(kappa))
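# Overlay the analytic diffusion estimate as implemented below (cf. Rein & Papaloizou 2009, Eq. 46): with D = 2*kappa^2*tau, the expected relative drift is |delta a|/a ~ sqrt(4*D*t)/n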
tau = sim.particles[1].P
D = 2.*kappa**2*tau
ax.plot(times/np.pi/2., np.sqrt(4.*D/sim.particles[1].n**2*times), color=p[0].get_color(), ls="--")
ax.legend();
sim.rand_seed = 123
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table class="tfo-notebook-buttons" align="left">
Step2: Some normal imports we've seen before.
Step3: Part 1
Step4: The images in the Dogs vs. Cats dataset are not all the same size. So, we need to reformat all images to the resolution expected by MobileNet (224, 224)
Step5: Part 2
Step6: Freeze the variables in the feature extractor layer, so that the training only modifies the final classifier layer.
Step7: Attach a classification head
Step8: Train the model
Step9: Check the predictions
Step10: Run an image batch through the model and convert the indices to class names.
Step11: Let's look at the true labels and predicted ones.
Step12: Part 3
Step13: You can later recreate the same model from this file, even if you no longer have access to the code that created the model.
Step14: We can check that the reloaded model and the previous model give the same result
Step15: The difference in output should be zero
Step16: As we can see, the result is 0.0, which indicates that both models made the same predictions on the same batch of images.
Step17: Part 5
Step18: Part 6
Step19: Now, let's use the reloaded_sm (reloaded SavedModel) to make predictions on a batch of images.
Step20: We can check that the reloaded SavedModel and the previous model give the same result.
Step21: As we can see, the result is 0.0, which indicates that both models made the same predictions on the same batch of images.
Step22: Now, let's use the reload_sm_keras (reloaded Keras model from our SavedModel) to make predictions on a batch of images.
Step23: We can check that the reloaded Keras model and the previous model give the same result.
Step24: Part 8
Step25: The zip file is saved in the current working directory. You can see what the current working directory is by running
Step26: Once the file is zipped, you can download it to your local disk.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -U tensorflow_hub
!pip install -U tensorflow_datasets
import time
import numpy as np
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
from tensorflow.keras import layers
(train_examples, validation_examples), info = tfds.load(
'cats_vs_dogs',
split=['train[:80%]', 'train[80%:]'],
with_info=True,
as_supervised=True,
)
def format_image(image, label):
# `hub` image modules exepct their data normalized to the [0,1] range.
image = tf.image.resize(image, (IMAGE_RES, IMAGE_RES))/255.0
return image, label
num_examples = info.splits['train'].num_examples
BATCH_SIZE = 32
IMAGE_RES = 224
train_batches = train_examples.cache().shuffle(num_examples//4).map(format_image).batch(BATCH_SIZE).prefetch(1)
validation_batches = validation_examples.cache().map(format_image).batch(BATCH_SIZE).prefetch(1)
URL = "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4"
feature_extractor = hub.KerasLayer(URL,
input_shape=(IMAGE_RES, IMAGE_RES,3))
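# Freeze the pre-trained MobileNet weights so that training only updates the new classification head added below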
feature_extractor.trainable = False
model = tf.keras.Sequential([
feature_extractor,
layers.Dense(2)
])
model.summary()
model.compile(
optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
EPOCHS = 3
history = model.fit(train_batches,
epochs=EPOCHS,
validation_data=validation_batches)
class_names = np.array(info.features['label'].names)
class_names
image_batch, label_batch = next(iter(train_batches.take(1)))
image_batch = image_batch.numpy()
label_batch = label_batch.numpy()
predicted_batch = model.predict(image_batch)
predicted_batch = tf.squeeze(predicted_batch).numpy()
predicted_ids = np.argmax(predicted_batch, axis=-1)
predicted_class_names = class_names[predicted_ids]
predicted_class_names
print("Labels: ", label_batch)
print("Predicted labels: ", predicted_ids)
plt.figure(figsize=(10,9))
for n in range(30):
plt.subplot(6,5,n+1)
plt.imshow(image_batch[n])
color = "blue" if predicted_ids[n] == label_batch[n] else "red"
plt.title(predicted_class_names[n].title(), color=color)
plt.axis('off')
_ = plt.suptitle("Model predictions (blue: correct, red: incorrect)")
t = time.time()
export_path_keras = "./{}.h5".format(int(t))
print(export_path_keras)
model.save(export_path_keras)
!ls
reloaded = tf.keras.models.load_model(
export_path_keras,
# `custom_objects` tells keras how to load a `hub.KerasLayer`
custom_objects={'KerasLayer': hub.KerasLayer})
reloaded.summary()
result_batch = model.predict(image_batch)
reloaded_result_batch = reloaded.predict(image_batch)
(abs(result_batch - reloaded_result_batch)).max()
EPOCHS = 3
history = reloaded.fit(train_batches,
epochs=EPOCHS,
validation_data=validation_batches)
t = time.time()
export_path_sm = "./{}".format(int(t))
print(export_path_sm)
tf.saved_model.save(model, export_path_sm)
!ls {export_path_sm}
reloaded_sm = tf.saved_model.load(export_path_sm)
reload_sm_result_batch = reloaded_sm(image_batch, training=False).numpy()
(abs(result_batch - reload_sm_result_batch)).max()
t = time.time()
export_path_sm = "./{}".format(int(t))
print(export_path_sm)
tf.saved_model.save(model, export_path_sm)
reload_sm_keras = tf.keras.models.load_model(
export_path_sm,
custom_objects={'KerasLayer': hub.KerasLayer})
reload_sm_keras.summary()
result_batch = model.predict(image_batch)
reload_sm_keras_result_batch = reload_sm_keras.predict(image_batch)
(abs(result_batch - reload_sm_keras_result_batch)).max()
!zip -r model.zip {export_path_sm}
!ls
try:
from google.colab import files
files.download('./model.zip')
except ImportError:
pass
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How cool is this?! Almost any question I can think of regarding NFL play outcomes is suddenly queryable. Although first, we'll clarify exactly what info some of the variables include.
Step2: Wow, that's a lot of play types. For most statistics we'd be interested in, we'd want to restrict our plays to runs and passes, although the PlayTypes above show us that we'll need to include Sacks as Passes, since sacks occur during pass attempts.
Step3: Exploring Quick High-Level Stats
Step4: What percentage of plays are run vs pass?
Step5: So pass attempts comprise 54.6% + 3.7%(sacks) = 58.3% of plays. The other 41.7% are runs. At least from 2009-2015.
Step6: What percentage of coaches' challenges are successful?
Step7: ...(a challenge is successful if it reverses the call on the field, so 41% of challenges are successful)
Step8: More on Run vs Pass Yards Per Play
Step9: So including sacks drops pass plays by almost 0.9 yards-per-play, or about 12%. Even with sacks included though, passes still look way more effective on average; about 2 yards-per-play more effective, which is almost 50% more yards-per-play than the run plays. I wonder how consistent that is by year.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
%matplotlib inline
nfl_data = pd.read_csv('/home/max/nfl_stats/data/pbp_2009_2015.csv', low_memory=False)
#Print (Rows, Columns) of Data
print(nfl_data.shape)
#Print Variable-Names and First Two Values for Each Variable.
with pd.option_context('display.max_rows', 999, 'display.max_colwidth', 25):
print(nfl_data.head(2).transpose())
selected_columns = ['PassOutcome', 'PassLength', 'PassLocation', 'RunLocation', 'RunGap', 'PlayType']
for c in selected_columns:
print(nfl_data[c].value_counts(normalize=True).to_frame(), '\n')
#Create new DataFrame where Play-Type is Run/Pass/Sack
run_pass_row_indices = nfl_data[nfl_data['PlayType'].isin(['Run', 'Pass', 'Sack'])].index
runs_passes_sacks = nfl_data.loc[run_pass_row_indices, :]
runs_passes_sacks['Yards.Gained'].mean()
runs_passes_sacks['PlayType'].value_counts(normalize=True).to_frame()
first_downs = runs_passes_sacks[runs_passes_sacks['down']==1]
first_downs['PlayType'].value_counts(normalize=True).to_frame()
nfl_data['ChalReplayResult'].value_counts(normalize=True).to_frame()
runs_passes_sacks.groupby('PlayType')['Yards.Gained'].mean().to_frame()
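# Fold sacks into the pass category, since sacks occur on pass attempts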
runs_passes_sacks['PlayType2'] = runs_passes_sacks['PlayType'].replace({'Sack':'Pass'})
runs_passes_sacks.groupby('PlayType2')['Yards.Gained'].mean()
annual = runs_passes_sacks.groupby(['PlayType2', 'Season'], as_index=False)['Yards.Gained'].mean()
sn.pointplot(data=annual, x='Season', y='Yards.Gained', hue='PlayType2')#, scale=0.75)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: II- Predictive Analysis
Step2: Now, let's find the most popular male and female names of all time
Step3: And the winners for the most popular male and female baby names since 1910 are
Step4: The winner in the male category is James. 493865 baby boys were named 'James' from 1910 to 2014.
Step5: It is interesting to notice that the number of gender-ambiguous names has more than doubled since 1945. I believe this is a general trend which can be more predominantly observed in liberal and urban cities in the US.
Step6: II-5 Other Names with largest decrease and increase in number since 1980
Step7: III- Insights
Step8: A quick Google search reveals that 2003 and 2004 were landmark years in the process of legalization of same-sex marriage
Step9: Now, the other peak happened in 1989. It turns out the Berlin Wall came down in 1989, but Denmark also became the first country to legalize same-sex marriage.
Step10: Next, we'll perform dimensionality reduction using principal component analysis, retaining only two of the components. Scikit-learn's RandomizedPCA implementation is chosen for its efficiency.
Step11: It is interesting to observe that CA and TX are obvious outliers. We have squeezed many dimensions into only two, so it is not easy to comment on the meaning of the principal components. However, it is tempting to conclude that the first principal component is directly proportional to the Hispanic population, since both CA and TX have huge values in that direction. And, at the risk of getting ahead of ourselves, we can say that the other direction could well be related to the Asian population percentage; it is not surprising to see CA having the largest coefficient in that direction.
Step12: Finally, we apply a K-means clustering algorithm to the data reduced to 2 dimensions.
Step13: We'll conclude by listing the states under each cluster. To that end, we downloaded a CSV file from http
Step14: Finally, let's list the states under each cluster
Step15: We'll avoid trying to give too much insight looking at these clusters; as we mentioned before, a lot of dimensions are pressed into two, and it is questionable whether these clusters are meaningful in an obvious sense.
|
<ASSISTANT_TASK:>
Python Code:
import os
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
data_folder = os.path.join('data')
file_names = []
for f in os.listdir(data_folder):
file_names.append(os.path.join(data_folder,f))
del file_names[file_names.index(os.path.join(data_folder,'StateReadMe.pdf'))]
# we yearly count data for each name in the data set using the following dictionary format :
# dict = {'name': {count: []}} where count[0] is count for name 1910 and count[-1] is for 2014
N_years = 2014-1910+1
names_dict_M = {}
names_dict_F = {}
for fname in file_names:
with open(fname,'r') as f:
for line in f:
state, gender, year, name, count = line.split(',')
year = int(year)
count = int(count)
if gender == 'M':
if name in names_dict_M:
# name already in the dict, update the count for appropriate year
names_dict_M[name]['count'][year-1910] += count
else:
# create an entry for the name
names_dict_M[name] = {'count': [0]*N_years}
names_dict_M[name]['count'][year-1910] += count
elif gender == 'F':
if name in names_dict_F:
# name already in the dict, update the count for appropriate year
names_dict_F[name]['count'][year-1910] += count
else:
# create an entry for the name
names_dict_F[name] = {'count': [0]*N_years}
names_dict_F[name]['count'][year-1910] += count
#lets extract tuples as (name, total_count) and sort them
male_overall = [(n, sum(names_dict_M[n]['count'])) for n in names_dict_M.keys()]
male_overall.sort(key = lambda x: x[1], reverse = True)
female_overall = [(n, sum(names_dict_F[n]['count'])) for n in names_dict_F.keys()]
female_overall.sort(key = lambda x: x[1], reverse = True)
print('Male:')
print('{}: {}'.format(male_overall[0][0], male_overall[0][1]))
print('\nFemale:')
print('{}: {}'.format(female_overall[0][0], female_overall[0][1]))
width = 0.6
fig = plt.figure(figsize = (12,3))
ax = plt.subplot(121)
ax.bar(np.arange(10), [c for n,c in male_overall[:10]], width = width)
ax.set_xticks(np.arange(10) + width/2)
ax.set_xticklabels([n for n,c in male_overall[:10]], rotation = 90)
ax.set_title('10 Most Popular Male Names since 1910')
ax.set_ylabel('name count')
ax = plt.subplot(122)
ax.bar(np.arange(10), [c for n,c in female_overall[:10]], width = width)
ax.set_xticks(np.arange(10) + width/2)
ax.set_xticklabels([n for n,c in female_overall[:10]], rotation = 90)
ax.set_title('10 Most Popular Female Names since 1910')
ax.set_ylabel('name count')
plt.tight_layout()
plt.show()
#lets extract tuples as (name, count[2013]) and sort them with count
male_2013 = [(n, names_dict_M[n]['count'][2013-1910])
for n in names_dict_M.keys()
if names_dict_M[n]['count'][2013-1910] > 0]
female_2013 = [(n, names_dict_F[n]['count'][2013-1910])
for n in names_dict_F.keys()
if names_dict_F[n]['count'][2013-1910] > 0]
male_1945 = [(n, names_dict_M[n]['count'][1945-1910])
for n in names_dict_M.keys()
if names_dict_M[n]['count'][1945-1910] > 0]
female_1945 = [(n, names_dict_F[n]['count'][1945-1910])
for n in names_dict_F.keys()
if names_dict_F[n]['count'][1945-1910] > 0]
#first find gender ambigious names in 2013:
gender_ambigious_names = set([n for n, _ in male_2013]) & set([n for n, _ in female_2013])
gender_ambigious_names = [(
n,min(names_dict_M[n]['count'][2013-1910],
names_dict_F[n]['count'][2013-1910])
)
for n in gender_ambigious_names]
#sort the tuples such that most popular names are at top
gender_ambigious_names.sort(key = lambda x: x[1], reverse = True)
print('In 2013 there were {} gender ambiguous names and the most popular one was {}'
.format(len(gender_ambigious_names), gender_ambigious_names[0][0]))
width = 0.6
fig = plt.figure(figsize = (12,3))
ax = plt.subplot(121)
ax.bar(np.arange(10), [c for n,c in gender_ambigious_names[:10]], width = width)
ax.set_xticks(np.arange(10) + width/2)
ax.set_xticklabels([n for n,c in gender_ambigious_names[:10]], rotation = 90)
ax.set_title('10 Most Popular Gender Ambiguous Names in 2013')
ax.set_ylabel('name count')
gender_ambigious_names = set([n for n, _ in male_1945]) & set([n for n, _ in female_1945])
gender_ambigious_names = [(
n,min(names_dict_M[n]['count'][1945-1910],
names_dict_F[n]['count'][1945-1910])
)
for n in gender_ambigious_names]
#sort the tuples such that most popular names are at top
gender_ambigious_names.sort(key = lambda x: x[1], reverse = True)
print('In 1945 there were {} gender ambiguous names and the most popular one was {}'
.format(len(gender_ambigious_names), gender_ambigious_names[0][0]))
ax2 = plt.subplot(122)
ax2.bar(np.arange(10), [c for n,c in gender_ambigious_names[:10]], width = width)
ax2.set_xticks(np.arange(10) + width/2)
ax2.set_xticklabels([n for n,c in gender_ambigious_names[:10]], rotation = 90)
ax2.set_title('10 Most Popular Gender Ambiguous Names in 1945')
ax2.set_ylabel('name count')
plt.tight_layout()
plt.show()
male_diff = [ (n, names_dict_M[n]['count'][-1] - names_dict_M[n]['count'][1980-1910]) for n in names_dict_M.keys() ]
female_diff = [ (n, names_dict_F[n]['count'][-1] - names_dict_F[n]['count'][1980-1910]) for n in names_dict_F.keys() ]
male_diff.sort(key = lambda x: x[1], reverse = True)
female_diff.sort(key = lambda x: x[1], reverse = True)
print('Male name with most increase in popularity is {}'.format(male_diff[0][0]))
print('Count for {} increased from {} to {} from 1980 to 2014'.format(male_diff[0][0],
names_dict_M[male_diff[0][0]]['count'][1980-1910],
names_dict_M[male_diff[0][0]]['count'][-1]))
print('\nFemale name with most increase in popularity is {}'.format(female_diff[0][0]))
print('Count for {} increased from {} to {} from 1980 to 2014'.format(female_diff[0][0],
names_dict_F[female_diff[0][0]]['count'][1980-1910],
names_dict_F[female_diff[0][0]]['count'][-1]))
print('\nMale name with most deccrease in popularity is {}'.format(male_diff[-1][0]))
print('Count for {} decreased from {} to {} from 1980 to 2014'.format(male_diff[-1][0],
names_dict_M[male_diff[-1][0]]['count'][1980-1910],
names_dict_M[male_diff[-1][0]]['count'][-1]))
print('\nFemale name with most deccrease in popularity is {}'.format(female_diff[-1][0]))
print('Count for {} decreased from {} to {} from 1980 to 2014'.format(female_diff[-1][0],
names_dict_F[female_diff[-1][0]]['count'][1980-1910],
names_dict_F[female_diff[-1][0]]['count'][-1]))
print('Male names with largest increase in popularity along with increase rate:')
for n, c in male_diff[:5]:
print('{}: {}'.format(n,c))
print('\nFemale names with largest increase in popularity along with increase rate:')
for n, c in female_diff[:5]:
print('{}: {}'.format(n,c))
print('\nMale names with largest decrease in popularity along with decrease rate:')
for n, c in male_diff[-1:-5:-1]:
print('{}: {}'.format(n,c))
print('\nFemale names with largest decrease in popularity along with decrease rate:')
for n, c in female_diff[-1:-5:-1]:
print('{}: {}'.format(n,c))
count = [0]*(2014-1910+1)
for year in range(0,2014-1910+1):
male_names = [n for n in names_dict_M.keys() if names_dict_M[n]['count'][year] > 0]
female_names = [n for n in names_dict_F.keys() if names_dict_F[n]['count'][year] > 0]
count[year] = len(set(male_names) & set(female_names))
fit = np.polyfit(range(0,2014-1910+1),count,1)
fit_fn = np.poly1d(fit)
fig = plt.figure(figsize = (15,3))
plt.plot(range(0,2014-1910+1), count, label = 'data')
plt.plot(range(0,2014-1910+1), fit_fn(range(0,2014-1910+1)), '--k', label = 'linear fit')
plt.legend(loc = 'lower right')
plt.title('Trend in the number of gender ambigious names from 1910 to 2014')
plt.xticks([0,1960-1910,2014-1910], ['1910', '1960', '2014'])
plt.xlabel('years')
plt.xlim([0,2014-1910])
plt.grid()
plt.show()
print('There is peak in yer {}.'.format(1910 + count.index(max(count))))
#what are the most popular gender ambigious names in 2004:
male_2004 = [(n, names_dict_M[n]['count'][2004-1910])
for n in names_dict_M.keys()
if names_dict_M[n]['count'][2004-1910] > 0]
female_2004 = [(n, names_dict_F[n]['count'][2004-1910])
for n in names_dict_F.keys()
if names_dict_F[n]['count'][2004-1910] > 0]
gender_ambigious_names = set([n for n, _ in male_2004]) & set([n for n, _ in female_2004])
gender_ambigious_names = [(
        n,min(names_dict_M[n]['count'][2004-1910],
              names_dict_F[n]['count'][2004-1910])
)
for n in gender_ambigious_names]
#sort the tuples such that most popular names are at top
gender_ambigious_names.sort(key = lambda x: x[1], reverse = True)
print('In 2004 there were {} gender ambiguous names and here are the most popular ones:'
.format(len(gender_ambigious_names)))
for n,c in gender_ambigious_names[:3]:
print('{}: {}'.format(n,c))
count[2004-1910] = 0
1910 + count.index(max(count))
#find all the male nd female names for 2014
male_names = [n for n in names_dict_M.keys() if names_dict_M[n]['count'][-1] > 0]
female_names = [n for n in names_dict_F.keys() if names_dict_F[n]['count'][-1] > 0]
#create a map names to indexes
#we'll make sure to have two feature's associated with gender-neutral names
name2index_male = {}
for i,n in enumerate(male_names):
name2index_male[n] = i
male_name_count = len(male_names)
name2index_female = {}
for i,n in enumerate(female_names):
name2index_female[n] = i + male_name_count
states = []
#data with counts for all the names in 2014 for each state in its rows:
X = []
for fname in file_names:
states.append(fname[-6:-4])
#temporary sample vector for current state
temp = [0]*(len(name2index_male)+len(name2index_female))
#read the file for the current state
with open(fname,'r') as f:
for line in f:
state, gender, year, name, count = line.split(',')
year = int(year)
if year == 2014:
count = float(count)
if gender == 'M':
feature_index = name2index_male[name]
else:
feature_index = name2index_female[name]
temp[feature_index] = count
X.append(temp)
X = np.array(X)
print('Data matrix X has shape: {}'.format(X.shape))
#check if sparse to see if it makes sense to transform X to a sparse matrix
from scipy.sparse import csr_matrix, issparse
issparse(X)
#normlize each the counts for each state by the total number babies born there in 2014
for i in range(X.shape[0]):
X[i,:] = X[i,:] / np.sum(X[i,:])
from sklearn.decomposition import RandomizedPCA
from sklearn.preprocessing import StandardScaler
X = StandardScaler().fit_transform(X)
pca = RandomizedPCA(n_components = 2)
pca.fit(X)
X_pca = pca.transform(X)
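# Note: RandomizedPCA has been removed from recent scikit-learn releases; if
# the import above fails, an equivalent call is
#     from sklearn.decomposition import PCA
#     pca = PCA(n_components=2, svd_solver='randomized')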
fig = plt.figure(figsize = (6,6))
plt.scatter(X_pca[:,0],X_pca[:,1])
# plt.xlim([-1,2])
# plt.ylim([-2,3])
for i in range(len(states)):
plt.annotate(states[i], (X_pca[i,0], X_pca[i,1]))
plt.xlabel("first principal component")
plt.ylabel("second principal component")
plt.title("States projected to first two principle components")
plt.show()
ind2keep = [i for i in range(len(states)) if states[i] not in ['NY', 'FL', 'CA', 'TX']]
X_pca = X_pca[ind2keep,:]
states = [states[i] for i in ind2keep]
X_pca = StandardScaler().fit_transform(X_pca)
fig = plt.figure(figsize = (13,6))
ax1 = plt.subplot(121)
ax1.scatter(X_pca[:,0],X_pca[:,1])
# plt.xlim([-1,2])
# plt.ylim([-2,3])
for i in range(len(states)):
ax1.annotate(states[i], (X_pca[i,0], X_pca[i,1]))
ax1.set_xlabel("first principal component")
ax1.set_ylabel("second principal component")
ax1.set_title('States')
ax2 = plt.subplot(122)
ax2.scatter(X_pca[:,0],X_pca[:,1])
ax2.set_xlim([-1.5,1.1])
ax2.set_ylim([-1.5,0.5])
for i in range(len(states)):
ax2.annotate(states[i], (X_pca[i,0], X_pca[i,1]))
ax2.set_xlabel("first principal component")
ax2.set_ylabel("second principal component")
ax2.set_title('States - Zoomed in to the lower left corner')
plt.show()
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters = 3, init='k-means++')
kmeans.fit(X_pca)
y_pred = kmeans.predict(X_pca)
fig = plt.figure(figsize = (15,15))
ax1 = plt.subplot(111)
ax1.scatter(X_pca[:,0],X_pca[:,1], c = y_pred, s= 100)
for i in range(len(states)):
ax1.annotate(states[i], (X_pca[i,0], X_pca[i,1]))
ax1.set_xlabel("first principal component")
ax1.set_ylabel("second principal component")
ax1.set_title('States Clustered by K-means')
plt.show()
state_dict = {}
import re
with open('states.csv', 'r') as f:
for line in f:
name, abbrv = re.sub('["\n]', '', line).split(',')
state_dict[abbrv] = name
print('Blue cluster:')
print('--------------')
print(', '.join([state_dict[states[i]] for i in range(len(states)) if y_pred[i] == 0 ]))
print('\nGreen cluster:')
print('--------------')
print(', '.join([state_dict[states[i]] for i in range(len(states)) if y_pred[i] == 1 ]))
print('\nRed cluster:')
print('--------------')
print(', '.join([state_dict[states[i]] for i in range(len(states)) if y_pred[i] == 2 ]))
!ipython nbconvert baby_names.ipynb
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Clone Graph
Step2: Search Graph Nodes
Step4: Topological Sorting
Step6: BFS in Matrix
Step7: Zombie in Matrix
Step8: Knight Shortest Path
|
<ASSISTANT_TASK:>
Python Code:
class Solution:
# @param {int} n an integer
# @param {int[][]} edges a list of undirected edges
# @return {boolean} true if it's a valid tree, or false
def validTree(self, n, edges):
# Write your code here
dic = {i: [] for i in range(n)}
for i, j in edges:
dic[i].append(j)
dic[j].append(i)
visited = [0 for i in range(n)]
layer, visited[0] = [0], 1
while layer:
next_layer = []
for node in layer:
for next_node in dic[node]:
if visited[next_node]:
return False
dic[next_node].remove(node)
next_layer.append(next_node)
visited[next_node] = 1
layer = next_layer
return True if sum(visited) == n else False
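# Quick sanity check with hypothetical inputs (not part of the original problem):
# 5 nodes and 4 acyclic edges form a valid tree; adding an extra edge creates a cycle.
print(Solution().validTree(5, [[0, 1], [0, 2], [0, 3], [1, 4]]))          # True
print(Solution().validTree(5, [[0, 1], [1, 2], [2, 3], [1, 3], [1, 4]]))  # False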
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
# @param node, a undirected graph node
# @return a undirected graph node
def __init__(self):
self.dict = {}
def cloneGraph(self, node):
# write your code here
if node is None:
return
nodeCopy = UndirectedGraphNode(node.label)
self.dict[node] = nodeCopy
queue = [node]
while queue:
node = queue.pop(0)
for n in node.neighbors:
if n not in self.dict:
nc = UndirectedGraphNode(n.label)
self.dict[n] = nc
self.dict[node].neighbors.append(nc)
queue.append(n)
else:
self.dict[node].neighbors.append(self.dict[n])
return nodeCopy
# Definition for a undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
# @param {UndirectedGraphNode[]} graph a list of undirected graph node
# @param {dict} values a dict, <UndirectedGraphNode, (int)value>
# @param {UndirectedGraphNode} node an Undirected graph node
# @param {int} target an integer
# @return {UndirectedGraphNode} a node
def searchNode(self, graph, values, node, target):
# Write your code here
if not node:
return None
if values[node] == target:
return node
queue = [node]
visited = set([node])
while queue:
node = queue.pop(0)
for n in node.neighbors:
if values[n] == target:
return n
if n not in visited:
visited.add(n)
queue.append(n)
return None
# Definition for a Directed graph node
# class DirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    """
    @param graph: A list of Directed graph node
    @return: A list of graph nodes in topological order.
    """
def topSort(self, graph):
# write your code here
dic = {}
for node in graph:
dic[node] = 0
for node in graph:
for n in node.neighbors:
dic[n] += 1
order = []
for node in graph:
if dic[node] == 0:
order.append(node)
queue = order[:] ## right way to copy a list!!!
while queue:
node = queue.pop(0)
for n in node.neighbors:
dic[n] -= 1
if dic[n] == 0:
order.append(n)
queue.append(n)
return order if len(order) == len(dic) else None
class Solution(object):
def numIslands(self, grid):
        """
        :type grid: List[List[str]]
        :rtype: int
        """
if grid is None or len(grid) == 0 or len(grid[0]) == 0:
return 0
m, n = len(grid), len(grid[0])
visited = [[False for i in range(n)] for j in range(m)]
count = 0
def bfs(grid, m, n, x, y):
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
queue = [[x, y]]
visited[x][y] = True
while queue:
x, y = queue.pop(0)
for i in range(4):
xx, yy = x + dx[i], y + dy[i]
if 0 <= xx < m and 0 <= yy < n:
if not visited[xx][yy] and grid[xx][yy] == '1':
queue.append([xx, yy])
visited[xx][yy] = True
for x in range(m):
for y in range(n):
if not visited[x][y] and grid[x][y] == '1':
count += 1
bfs(grid, m, n, x, y)
return count
class Solution:
# @param {boolean[][]} grid a boolean 2D matrix
# @return {int} an integer
def numIslands(self, grid):
# Write your code here
if grid is None or len(grid) == 0 or len(grid[0]) == 0:
return 0
m, n = len(grid), len(grid[0])
visited = [[False for y in range(n)] for x in range(m)]
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]
count = 0
def bfs(x, y):
queue = [[x, y]]
while queue:
x, y = queue.pop(0)
visited[x][y] = True
for i in range(4):
xx, yy = x + dx[i], y + dy[i]
if 0 <= xx < m and 0 <= yy < n:
if not visited[xx][yy] and grid[xx][yy] == 1:
queue.append([xx, yy])
visited[xx][yy] = True
for x in range(m):
for y in range(n):
if not visited[x][y] and grid[x][y] == 1:
count += 1
bfs(x, y)
return count
class Solution:
# @param {int[][]} grid a 2D integer grid
# @return {int} an integer
def zombie(self, grid):
# Write your code here
if grid is None or len(grid) == 0 or len(grid[0]) == 0:
return -1
m, n = len(grid), len(grid[0])
day, people, zombies = -1, 0, []
di = [0, 0, 1, -1]
dj = [1, -1, 0, 0]
for i in range(m):
for j in range(n):
if grid[i][j] == 0:
people += 1
elif grid[i][j] == 1:
zombies.append([i, j])
while zombies:
new_zombies = []
for i, j in zombies:
for k in range(4):
zi, zj = i + di[k], j + dj[k]
if 0 <= zi < m and 0 <= zj < n and grid[zi][zj] == 0:
new_zombies.append([zi, zj])
grid[zi][zj] = 1
people -= 1
day += 1
zombies = new_zombies
# print day, zombies
return -1 if people > 0 else day
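# Quick sanity check on a small hypothetical grid (1 = zombie, 2 = wall,
# 0 = person): every person is infected after two days, so this prints 2.
print(Solution().zombie([[0, 1, 2, 0, 0],
                         [1, 0, 0, 2, 1],
                         [0, 1, 0, 0, 0]]))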
# Definition for a point.
# class Point:
# def __init__(self, a=0, b=0):
# self.x = a
# self.y = b
class Solution:
# @param {boolean[][]} grid a chessboard included 0 (False) and 1 (True)
# @param {Point} source a point
# @param {Point} destination a point
# @return {int} the shortest path
def shortestPath(self, grid, source, destination):
# Write your code here
if grid is None or len(grid) == 0 or len(grid[0]) == 0:
return -1
m, n = len(grid), len(grid[0])
d = [(1,2), (1,-2), (-1,2), (-1,-2), (2,1), (2,-1), (-2,1), (-2,-1)]
path, step = [[source.x, source.y]], -1
while path:
step += 1
new_path = []
for x, y in path:
if [x, y] == [destination.x, destination.y]:
return step
for dx, dy in d:
xx, yy = x + dx, y + dy
if 0 <= xx < m and 0 <= yy < n and grid[xx][yy] != 1:
new_path.append([xx, yy])
grid[xx][yy] = 1
path = new_path
return -1
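# Minimal usage sketch (hypothetical board; Point mirrors the commented-out
# definition above). A single knight move takes the piece from (0, 0) to (1, 2).
class Point:
    def __init__(self, a=0, b=0):
        self.x = a
        self.y = b

empty_board = [[0] * 5 for _ in range(5)]
print(Solution().shortestPath(empty_board, Point(0, 0), Point(1, 2)))  # 1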
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Matrix
Step2: Create Vectorized Function
Step3: Apply Function To Elements
|
<ASSISTANT_TASK:>
Python Code:
# Load library
import numpy as np
# Create matrix
matrix = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
# Create a function that adds 100 to something
add_100 = lambda i: i + 100
# Create a vectorized function
vectorized_add_100 = np.vectorize(add_100)
# Apply function to all elements in matrix
vectorized_add_100(matrix)
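# Note: for a simple arithmetic operation like this, NumPy broadcasting gives
# the same result without np.vectorize (which loops at the Python level):
matrix + 100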
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Set random seed for reproducibility and ignore warning messages.
Step3: Download data
Step4: Load and preprocess data
Step5: Let us now explore the data
Step6: Data dimension
Step7: Preprocess data
Step8: Define and train the model
Step9: We are now ready to compile and fit the model.
Step10: Evaluate the model
Step11: Confusion matrix
Step12: Submission
|
<ASSISTANT_TASK:>
Python Code:
!pip install -r requirements.txt --quiet
import os
import datetime
import numpy as np
import pandas as pd
import pickle
import zipfile
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from tensorflow import keras, optimizers
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras import layers
print("tensorflow version: ", tf.__version__)
# hyper parameters
LR = 1e-3
EPOCHS = 2
BATCH_SIZE = 64
CONV_DIM1 = 56
CONV_DIM2 = 100
tf.random.set_seed(42)
np.random.seed(42)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
# Setting the graph style
plt.rc('figure', autolayout=True)
plt.rc('axes', titleweight='bold',
titlesize=15)
plt.rc('font', size=12)
import zipfile
import wget
import os
# download files
data_path = os.getcwd()
# data link
train_link = 'https://github.com/josepholaide/examples/blob/master/digit-recognition-kaggle-competition/data/train.csv.zip?raw=true'
test_link = 'https://github.com/josepholaide/examples/blob/master/digit-recognition-kaggle-competition/data/test.csv.zip?raw=true'
sample_submission = 'https://raw.githubusercontent.com/josepholaide/examples/master/digit-recognition-kaggle-competition/data/sample_submission.csv'
# download data
wget.download(train_link, f'{data_path}/train_csv.zip')
wget.download(test_link, f'{data_path}/test_csv.zip')
wget.download(sample_submission, f'{data_path}/sample_submission.csv')
with zipfile.ZipFile(f"{data_path}/train_csv.zip","r") as zip_ref:
zip_ref.extractall(data_path)
with zipfile.ZipFile(f"{data_path}/test_csv.zip","r") as zip_ref:
zip_ref.extractall(data_path)
data_path = os.getcwd()
# Data Path
train_data_path = data_path + '/train.csv'
test_data_path = data_path + '/test.csv'
sample_submission_path = data_path + '/sample_submission.csv'
# Loading dataset into pandas
train_df = pd.read_csv(train_data_path)
test_df = pd.read_csv(test_data_path)
ss = pd.read_csv(sample_submission_path)
train_df.head()
train_df.shape, test_df.shape
# join train and test together
ntrain = train_df.shape[0]
ntest = test_df.shape[0]
all_data = pd.concat((train_df, test_df)).reset_index(drop=True)
print("all_data size is : {}".format(all_data.shape))
all_data_X = all_data.drop('label', axis=1)
all_data_y = all_data.label
# Reshape image in 3 dimensions (height = 28px, width = 28px , channel = 1)
all_data_X = all_data_X.values.reshape(-1,28,28,1)
# Normalize the data
all_data_X = all_data_X / 255.0
#Get the new dataset
X = all_data_X[:ntrain].copy()
y = all_data_y[:ntrain].copy()
# split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
def build_model(hidden_dim1=int(CONV_DIM1), hidden_dim2=int(CONV_DIM2), DROPOUT=0.5):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(filters = hidden_dim1, kernel_size = (5,5),padding = 'Same',
activation ='relu'),
tf.keras.layers.Dropout(DROPOUT),
tf.keras.layers.Conv2D(filters = hidden_dim2, kernel_size = (3,3),padding = 'Same',
activation ='relu'),
tf.keras.layers.Dropout(DROPOUT),
tf.keras.layers.Conv2D(filters = hidden_dim2, kernel_size = (3,3),padding = 'Same',
activation ='relu'),
tf.keras.layers.Dropout(DROPOUT),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation = "softmax")
])
model.build(input_shape=(None,28,28,1))
return model
model = build_model()
# display the model summary
model.summary()
model.compile(optimizers.Adam(learning_rate=float(LR)),
loss=SparseCategoricalCrossentropy(),
metrics=SparseCategoricalAccuracy(name='accuracy'))
history = model.fit(np.array(X_train), np.array(y_train),
validation_split=.1, batch_size=int(BATCH_SIZE), epochs=int(EPOCHS))
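# Optional: plot the training curves stored in the Keras History object
# (column names follow the loss/metric names passed to compile()).
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.xlabel('epoch')
plt.title('Training history')
plt.show()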
# Evaluate the model and print the results
test_loss, test_acc = model.evaluate(np.array(X_test), np.array(y_test), verbose=0)
print("Test_loss: {}, Test_accuracy: {} ".format(test_loss,test_acc))
y_pred = np.argmax(model.predict(X_test), axis=-1)
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(7,7))
sns.heatmap(cm, fmt='g', cbar=False, annot=True, cmap='Blues')
plt.title('confusion_matrix')
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
test = all_data_X[ntrain:].copy()
submission_file = np.argmax(model.predict(test), axis=-1)
ss['Label'] = submission_file
ss.to_csv('sub.csv', index=False)
ss.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Yo-yo
Step2: The results are
Step3: Rmin is the radius of the axle. Rmax is the radius of the axle plus rolled string.
Step4: Based on these parameters, we can compute the moment of inertia for the yo-yo, modeling it as a solid cylinder with uniform density (see here).
Step5: And we can compute k, which is the constant that determines how the radius of the spooled string decreases as it unwinds.
Step6: The state variables we'll use are angle, theta, angular velocity, omega, the length of the spooled string, y, and the linear velocity of the yo-yo, v.
Step7: And here's a System object with init and t_end (chosen to be longer than I expect for the yo-yo to drop 1 m).
Step8: Write a slope function for this system, using these results from the book
Step9: Test your slope function with the initial conditions.
Step10: Notice that the initial acceleration is substantially smaller than g because the yo-yo has to start spinning before it can fall.
Step11: Test your event function
Step12: Then run the simulation.
Step13: Check the final state. If things have gone according to plan, the final value of y should be close to 0.
Step14: How long does it take for the yo-yo to fall 1 m? Does the answer seem reasonable?
Step15: y should decrease and accelerate down.
Step16: Plot velocity as a function of time; is the acceleration constant?
Step17: We can use gradient to estimate the derivative of v. How does the acceleration of the yo-yo compare to g?
Step18: And we can use the formula for r to plot the radius of the spooled thread over time.
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
from sympy import symbols, Eq, solve
T, a, alpha, I, m, g, r = symbols('T a alpha I m g r')
eq1 = Eq(a, -r * alpha)
eq1
eq2 = Eq(T - m * g, m * a)
eq2
eq3 = Eq(T * r, I * alpha)
eq3
soln = solve([eq1, eq2, eq3], [T, a, alpha])
soln[T]
soln[a]
soln[alpha]
Rmin = 8e-3 # m
Rmax = 16e-3 # m
Rout = 35e-3 # m
mass = 50e-3 # kg
L = 1 # m
g = 9.8 # m / s**2
1 / (Rmax)
I = mass * Rout**2 / 2
I
k = (Rmax**2 - Rmin**2) / 2 / L
k
init = State(theta=0, omega=0, y=L, v=0)
system = System(init=init, t_end=2)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
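# --- A possible solution sketch (not necessarily the book's official answer) ---
# It assumes modsim's run_solve_ivp interface and uses the acceleration
# a = -g / (1 + I / (m r**2)) obtained from the sympy solution above,
# with r = sqrt(2 k y + Rmin**2) for the radius of the spooled string.
def slope_func(t, state, system):
    theta, omega, y, v = state
    r = np.sqrt(2*k*y + Rmin**2)        # current effective radius
    a = -g / (1 + I / (mass * r**2))    # linear acceleration (downward)
    alpha = -a / r                      # angular acceleration, from a = -r*alpha
    return omega, alpha, v, a

# Event function: stop when the string is fully unwound (y reaches 0)
def event_func(t, state, system):
    theta, omega, y, v = state
    return y

# Test the slope and event functions with the initial conditions, then run
slope_func(0, system.init, system)
event_func(0, system.init, system)
results, details = run_solve_ivp(system, slope_func, events=event_func)
details.message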
results.theta.plot(color='C0', label='theta')
decorate(xlabel='Time (s)',
ylabel='Angle (rad)')
results.y.plot(color='C1', label='y')
decorate(xlabel='Time (s)',
ylabel='Length (m)')
results.v.plot(label='velocity', color='C3')
decorate(xlabel='Time (s)',
ylabel='Velocity (m/s)')
a = gradient(results.v)
a.plot(label='acceleration', color='C4')
decorate(xlabel='Time (s)',
ylabel='Acceleration (m/$s^2$)')
r = np.sqrt(2*k*results.y + Rmin**2)
r.plot(label='radius')
decorate(xlabel='Time (s)',
ylabel='Radius of spooled thread (m)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting Tools
Step2: astropy.modeling
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy import units as u

def gaussian_model(xaxis, amplitude, offset, width):
    amplitude = u.Quantity(amplitude, u.K)
    offset = u.Quantity(offset, u.km/u.s)
    width = u.Quantity(width, u.km/u.s)
    return amplitude*np.exp(-(xaxis-offset)**2/(2.*width**2))
from specutils.io import fits
spec = fits.read_fits_spectrum1d('gbt_1d.fits')
from astropy import units as u
%%bash
which conda
import specutils
import numpy
import astropy
specutils.__version__, astropy.__version__, numpy.__version__, astropy.__path__
spec.velocity
model = gaussian_model(spec.velocity, amplitude=5*u.K, offset=20*u.km/u.s, width=5*u.km/u.s)
%matplotlib inline
import pylab as pl
pl.plot(spec.velocity, spec.flux, 'k-')
pl.plot(spec.velocity, model)
def cost_function(params):
return ((spec.flux*u.K-gaussian_model(spec.velocity, *params))**2).sum().value
from scipy.optimize import curve_fit, minimize
result = minimize(cost_function, (-5, 20, 20))
result
best_fit_parameters = result.x
best_fit_model = gaussian_model(spec.velocity, *best_fit_parameters)
best_fit_model
pl.plot(spec.velocity, spec.flux, 'k-')
pl.plot(spec.velocity, best_fit_model)
pl.xlim(-30,50)
from scipy.optimize import curve_fit
# curve_fit does not play well with units
#result_curve_fit = curve_fit(gaussian_model, spec.velocity, spec.flux*u.K, p0=(-5, 20, 20))
import pyspeckit
sp = pyspeckit.Spectrum('gbt_1d.fits')
sp.plotter(xmin=-40, xmax=50)
sp.specfit(guesses=(-5, 20, 20))
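    # Sketch of the equivalent fit with astropy.modeling (hypothetical initial
    # guesses mirroring the pyspeckit call above; depending on how the spectrum
    # was read, you may need .value to strip units from the flux as well):
    from astropy.modeling import models, fitting
    g_init = models.Gaussian1D(amplitude=-5, mean=20, stddev=20)
    fitter = fitting.LevMarLSQFitter()
    g_fit = fitter(g_init, spec.velocity.value, spec.flux)
    print(g_fit)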
<END_TASK>
|