<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Create raw dataset
Step3: Create dataset for WALS
Step4: Creating rows and columns datasets
Step5: To summarize, you created the following data files from collab_raw.csv
Step6: This code is helpful in developing the input function. You don't need it in production.
Step7: Run as a Python module
Step8: Run on Cloud
Step9: This will take <b>10 minutes</b> to complete. Rerun the above command until the job gets submitted.
Step10: You can visualize the embedding vectors using dimensional reduction techniques such as PCA.
<ASSISTANT_TASK:>
Python Code:
import os
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.15"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
import tensorflow as tf
print(tf.__version__)
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
sql = """
WITH CTE_visitor_page_content AS (
SELECT
# Schema: https://support.google.com/analytics/answer/3437719?hl=en
# For a completely unique visit-session ID, combine fullVisitorId and visitNumber:
CONCAT(fullVisitorID,'-',CAST(visitNumber AS STRING)) AS visitorId,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId,
(LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) - hits.time) AS session_duration
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
GROUP BY
fullVisitorId,
visitNumber,
latestContentId,
hits.time )
-- Aggregate web stats
SELECT
visitorId,
latestContentId as contentId,
SUM(session_duration) AS session_duration
FROM
CTE_visitor_page_content
WHERE
latestContentId IS NOT NULL
GROUP BY
visitorId,
latestContentId
HAVING
session_duration > 0
"""
df = bq.query(sql).to_dataframe()
df.head()
stats = df.describe()
stats
df[["session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
# The rating is the session_duration scaled to be in the range 0-1. This will help with training.
median = stats.loc["50%", "session_duration"]
df["rating"] = 0.3 * df["session_duration"] / median
df.loc[df["rating"] > 1, "rating"] = 1
df[["rating"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
del df["session_duration"]
%%bash
rm -rf data
mkdir data
# TODO 1: Write object to a comma-separated values (csv) file.
df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False)
!head data/collab_raw.csv
import pandas as pd
import numpy as np
def create_mapping(values, filename):
with open(filename, 'w') as ofp:
value_to_id = {value:idx for idx, value in enumerate(values.unique())}
for value, idx in value_to_id.items():
ofp.write("{},{}\n".format(value, idx))
return value_to_id
df = pd.read_csv(filepath_or_buffer = "data/collab_raw.csv",
header = None,
names = ["visitorId", "contentId", "rating"],
dtype = {"visitorId": str, "contentId": str, "rating": np.float})
df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False)
user_mapping = create_mapping(df["visitorId"], "data/users.csv")
item_mapping = create_mapping(df["contentId"], "data/items.csv")
!head -3 data/*.csv
df["userId"] = df["visitorId"].map(user_mapping.get)
df["itemId"] = df["contentId"].map(item_mapping.get)
mapped_df = df[["userId", "itemId", "rating"]]
mapped_df.to_csv(path_or_buf = "data/collab_mapped.csv", index = False, header = False)
mapped_df.head()
import pandas as pd
import numpy as np
mapped_df = pd.read_csv(filepath_or_buffer = "data/collab_mapped.csv", header = None, names = ["userId", "itemId", "rating"])
mapped_df.head()
NITEMS = np.max(mapped_df["itemId"]) + 1
NUSERS = np.max(mapped_df["userId"]) + 1
mapped_df["rating"] = np.round(mapped_df["rating"].values, 2)
print("{} items, {} users, {} interactions".format( NITEMS, NUSERS, len(mapped_df) ))
grouped_by_items = mapped_df.groupby("itemId")
count = 0
for item, grouped in grouped_by_items:
print(item, grouped["userId"].values, grouped["rating"].values)
count = count + 1
if count > 5:
break
import tensorflow as tf
grouped_by_items = mapped_df.groupby("itemId")
with tf.python_io.TFRecordWriter("data/users_for_item") as ofp:
for item, grouped in grouped_by_items:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [item])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["userId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
grouped_by_users = mapped_df.groupby("userId")
with tf.python_io.TFRecordWriter("data/items_for_user") as ofp:
for user, grouped in grouped_by_users:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [user])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["itemId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
!ls -lrt data
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.contrib.factorization import WALSMatrixFactorization
def read_dataset(mode, args):
# TODO 2: Decode the example
def decode_example(protos, vocab_size):
features = {
"key": tf.FixedLenFeature(shape = [1], dtype = tf.int64),
"indices": tf.VarLenFeature(dtype = tf.int64),
"values": tf.VarLenFeature(dtype = tf.float32)}
parsed_features = tf.parse_single_example(serialized = protos, features = features)
values = tf.sparse_merge(sp_ids = parsed_features["indices"], sp_values = parsed_features["values"], vocab_size = vocab_size)
# Save key to remap after batching
# This is a temporary workaround to assign correct row numbers in each batch.
# You can ignore details of this part and remap_keys().
key = parsed_features["key"]
decoded_sparse_tensor = tf.SparseTensor(indices = tf.concat(values = [values.indices, [key]], axis = 0),
values = tf.concat(values = [values.values, [0.0]], axis = 0),
dense_shape = values.dense_shape)
return decoded_sparse_tensor
def remap_keys(sparse_tensor):
# Current indices of your SparseTensor that you need to fix
bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Current values of your SparseTensor that you need to fix
bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),)
# Since batch is ordered, the last value for a batch index is the user
# Find where the batch index changes to extract the user rows
# 1 where user, else 0
user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Mask out the user rows from the values
good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 1))[:, 1] # shape = (current_batch_size,)
good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],)
# User and item indices are rank 1; expand them to rank 2 so they can be concatenated
good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2)
remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape)
return remapped_sparse_tensor
def parse_tfrecords(filename, vocab_size):
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
else:
num_epochs = 1 # end-of-input after this
files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size))
dataset = dataset.repeat(count = num_epochs)
dataset = dataset.batch(batch_size = args["batch_size"])
dataset = dataset.map(map_func = lambda x: remap_keys(x))
return dataset.make_one_shot_iterator().get_next()
def _input_fn():
features = {
WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]),
WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]),
WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)
}
return features, None
return _input_fn
def try_out():
with tf.Session() as sess:
fn = read_dataset(
mode = tf.estimator.ModeKeys.EVAL,
args = {"input_path": "data", "batch_size": 4, "nitems": NITEMS, "nusers": NUSERS})
feats, _ = fn()
print(feats["input_rows"].eval())
print(feats["input_rows"].eval())
try_out()
def find_top_k(user, item_factors, k):
all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors))
topk = tf.nn.top_k(input = all_items, k = k)
return tf.cast(x = topk.indices, dtype = tf.int64)
def batch_predict(args):
import numpy as np
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
# This is how you would get the row factors for out-of-vocab user data
# row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args)))
# user_factors = tf.convert_to_tensor(np.array(row_factors))
# But for in-vocab data, the row factors are already in the checkpoint
user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds)
# In either case, you have to assume the catalog doesn't change, so col_factors are read in
item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds)
# For each user, find the top K items
topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64))
with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f:
for best_items_for_user in topk.eval():
f.write(",".join(str(x) for x in best_items_for_user) + '\n')
def train_and_evaluate(args):
train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"])
steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"])
print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch))
def experiment_fn(output_dir):
return tf.contrib.learn.Experiment(
tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"]),
train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args),
eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args),
train_steps = train_steps,
eval_steps = 1,
min_eval_frequency = steps_in_epoch
)
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"])
batch_predict(args)
import shutil
shutil.rmtree(path = "wals_trained", ignore_errors=True)
train_and_evaluate({
"output_dir": "wals_trained",
"input_path": "data/",
"num_epochs": 0.05,
"nitems": NITEMS,
"nusers": NUSERS,
"batch_size": 512,
"n_embeds": 10,
"topk": 3
})
!ls wals_trained
!head wals_trained/batch_pred.txt
os.environ["NITEMS"] = str(NITEMS)
os.environ["NUSERS"] = str(NUSERS)
%%bash
rm -rf wals.tar.gz wals_trained
gcloud ai-platform local train \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
-- \
--output_dir=${PWD}/wals_trained \
--input_path=${PWD}/data \
--num_epochs=0.01 --nitems=${NITEMS} --nusers=${NUSERS} \
--job-dir=./tmp
%%bash
gsutil -m cp data/* gs://${BUCKET}/wals/data
%%bash
OUTDIR=gs://${BUCKET}/wals/model_trained
JOBNAME=wals_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--output_dir=$OUTDIR \
--input_path=gs://${BUCKET}/wals/data \
--num_epochs=10 --nitems=${NITEMS} --nusers=${NUSERS}
def get_factors(args):
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
row_factors = estimator.get_row_factors()[0]
col_factors = estimator.get_col_factors()[0]
return row_factors, col_factors
args = {
"output_dir": "gs://{}/wals/model_trained".format(BUCKET),
"nitems": NITEMS,
"nusers": NUSERS,
"n_embeds": 10
}
user_embeddings, item_embeddings = get_factors(args)
print(user_embeddings[:3])
print(item_embeddings[:3])
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
pca.fit(user_embeddings)
# TODO 3: Apply the mapping (transform) to user embeddings
user_embeddings_pca = pca.transform(user_embeddings)
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(111, projection = "3d")
xs, ys, zs = user_embeddings_pca[::150].T
ax.scatter(xs, ys, zs)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The # option of print_dot() can be used to display the internal number of each transition
Step2: Using these numbers you can selectively highlight some transitions. The second argument is a color number (from a list of predefined colors).
Step3: Note that these highlight_ functions work for edges and states, and come with both singular (changing the color of single state or edge) and plural versions.
Step4: The plural version can take a list or tuple of state numbers (as above) or of Booleans (as below). In the latter case the indices of the True values give the states to highlight.
Step5: Saving to HOA 1.1
Step6: Highlighting a run
Step7: The call of highlight(5) on the accepting run r modified the original automaton b
Step8: Highlighting from a product
Step9: The projection also works for products generated on-the-fly, but the on-the-fly product itself cannot be highlighted (it does not store states or transitions).
Step10: Highlighting nondeterminism
Step11: Disappearing highlights
Step12: Highlighting with partial output
Step13: Highlighting languages
<ASSISTANT_TASK:>
Python Code:
import spot
from IPython.display import display
a = spot.translate('a U b U c')
a.show('.#')
a.highlight_edges([2, 4, 5], 1)
a.highlight_edge(6, 2).highlight_states((0, 1), 0)
a.highlight_states([False, True, True], 5)
print(a.to_str('HOA', '1'))
print()
print(a.to_str('HOA', '1.1'))
b = spot.translate('X (F(Ga <-> b) & GF!b)'); b
r = b.accepting_run(); r
r.highlight(5) # the parameter is a color number
b
left = spot.translate('a U b')
right = spot.translate('GFa')
display(left, right)
prod = spot.product(left, right); prod
run = prod.accepting_run(); run
run.highlight(5)
# Note that by default project() needs to know on which side you project, but it cannot
# guess it. The left-side is assumed unless you pass True as a second argument.
run.project(left).highlight(5)
run.project(right, True).highlight(5)
display(prod, left, right)
left2 = spot.translate('!b & FG a')
right2 = spot.translate('XXXb')
prod2 = spot.otf_product(left2, right2) # Note "otf_product()"
run2 = prod2.accepting_run()
run2.project(left2).highlight(5)
run2.project(right2, True).highlight(5)
display(run2, prod2, left2, right2)
b = spot.translate('X (F(Ga <-> b) & GF!b)')
spot.highlight_nondet_states(b, 5)
spot.highlight_nondet_edges(b, 4)
b
spot.randomize(b); b
spot.highlight_nondet_edges(b, 4) # let's get those highlighted edges back
display(b, b.show('.<4'), b.show('.<2'))
aut = spot.translate('(b W Xa) & GF(c <-> Xb) | a', 'generic', 'det')
spot.highlight_languages(aut)
aut.show('.bas')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Players selected into All-NBA teams
Step2: Players Drafted
Step3: Merge both data tables
<ASSISTANT_TASK:>
Python Code:
import sys
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
import html5lib
from bs4 import BeautifulSoup
import seaborn.apionly as sns
from pandas.io import data, wb
from plotly.offline import iplot, iplot_mpl
import plotly.graph_objs as go
import plotly
import cufflinks as cf
cf.set_config_file(offline=True, offline_show_link=False)
%matplotlib inline
plotly.offline.init_notebook_mode()
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('Plotly version: ', plotly.__version__)
print('Today: ', dt.date.today())
# Code reading allnba team selections since the 1988-1989 season.
url = 'http://www.basketball-reference.com/awards/all_league.html'
allnba = pd.read_html(url)
allnba = allnba[0]
allnba.columns = ['Year','League','All-NBA Team','Player1','Player2','Player3','Player4','Player5']
allnba = allnba.drop('League',1)
allnba = allnba.head(106)
allnba
# Combine all 'Player' columns into one and remove extra information at the end of the names such as C, F, and G
allnbaM = pd.melt(allnba, id_vars=['All-NBA Team'],value_vars=['Player1','Player2','Player3','Player4','Player5'],
value_name='Player')
allnbaM['Player'] = allnbaM['Player'].str.replace("(C|F|G)\s*$", "")
allnbaM['Player'] = allnbaM['Player'].str.strip()
allnbaM = allnbaM.drop('variable',1)
allnbaM.tail()
# Code for reading players drafted in every first round of the nba draft since 1989
draft = []
for number in range (27):
Year = str(2015 - (number))
url1 = 'http://basketball.realgm.com/nba/draft/past_drafts/'
thisdraft = pd.read_html(url1 + Year)[0]
print('Number:', number)
print('Type: ', type(thisdraft))
print(thisdraft.head())
draft.append(thisdraft)
# Code for placing all players drafted in the first round into a pandas dataframe
alldrafts = pd.concat(draft)
alldrafts = alldrafts.drop('Team',1)
alldrafts.drop(alldrafts.columns[[2]], axis=1, inplace=True)
alldrafts.loc[alldrafts.Class.str.contains("DOB"), "Class"] = "N/A"
alldrafts['Class'] = alldrafts['Class'].str.replace("*", "")
alldrafts['Class'] = alldrafts['Class'].str.strip()
alldrafts.tail(10)
# Get means of draft positions based on college class
print('Average draft position for Freshmen in the first round is:',
alldrafts[alldrafts['Class'].str.contains('Fr')]['Pick'].mean())
print('Average draft position for Sophomores in the first round is:',
alldrafts[alldrafts['Class'].str.contains('So')]['Pick'].mean())
print('Average draft position for Juniors in the first round is:',
alldrafts[alldrafts['Class'].str.contains('Jr')]['Pick'].mean())
print('Average draft position for Seniors in the first round is:',
alldrafts[alldrafts['Class'].str.contains('Sr')]['Pick'].mean())
print("Average draft position for those who didn't attend college in the first round is:",
alldrafts[alldrafts['Class'].str.contains('N/A')]['Pick'].mean())
# Gives us the distribution of selections by class ordered by pick number
# But first we must reorder the classes by Fr, So, Jr, Sr, N/A
Classes = ['Fr', 'So', 'Jr', 'Sr', 'N/A']
mapping = {Class: i for i, Class in enumerate(Classes)}
key = alldrafts['Class'].map(mapping)
alldrafts_in_order = alldrafts.iloc[key.argsort()]
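# (key.argsort() yields row positions ordered by class rank, so .iloc reorders
# the frame as Fr, So, Jr, Sr, N/A for the plot below.)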
ax = sns.swarmplot(x="Class", y="Pick", data=alldrafts_in_order)
ax.set_title('Number of Draft Selections Per Class by Pick Number')
ax.set_ylim(0)
#code for number of players drafted in each class => Seniors are most prevalent followed by juniors
clAss = ['Fr','So','Jr','Sr','N/A']
alldraftsC = alldrafts.copy()
grades = []
for x in clAss:
grades.append(x)
alldraftsC[x] = alldraftsC['Class'].str.contains(x)*1
classes = alldraftsC[grades]
classes_counts = classes.sum()
print(classes_counts)
fig, ax = plt.subplots()
classes_counts.plot(ax=ax, legend=False, kind = 'bar', color=['blue','green','red','turquoise','purple'])
ax.set_xlabel("Player's Class")
ax.set_ylabel('Number of Players Drafted')
ax.set_title('Players Drafted by College Class')
# Merge on Player (Add an 'All-NBA Team' column)
draftallnba = pd.merge(alldrafts, allnbaM,
how='left',
on='Player')
draftallnba.tail(30)
# Count the number of All-NBA selections by time spent in college
counts = draftallnba['All-NBA Team'].groupby([draftallnba['Class'], draftallnba['All-NBA Team']]).count()
counts = pd.DataFrame(counts)
counts.columns = ['Number of Selections']
counts = counts.unstack(level=0)
counts = counts['Number of Selections']
counts = counts[['Fr', 'So', 'Jr', 'Sr', 'N/A']]
countsT = counts.T
countsT['Total'] = [24,51,45,56,87]
counts = countsT.transpose()
counts
# Plot All-NBA selections by time spent in college
fig, ax = plt.subplots()
counts.plot(ax=ax, legend=True, kind = 'barh')
ax.set_xlabel('Number of All-NBA Selections')
ax.set_title('All-NBA Selections by College Class')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementing a Neural Network
Step2: The neural network parameters will be stored in a dictionary (model below), where the keys are the parameter names and the values are numpy arrays. Below, we initialize toy data and a toy model that we will use to verify your implementations.
Step3: Forward pass
Step4: Forward pass
Step5: Backward pass
Step6: Train the network
Step7: Now fill in the momentum update in the first missing code block inside the train function, and run the same optimization as above but with the momentum update. You should see a much better result in the final obtained loss
Step8: Now also implement the RMSProp update rule inside the train function and rerun the optimization
Step10: Load the data
Step11: Train a network
Step12: Debug the training
Step13: Tune your hyperparameters
Step14: Run on the test set
<ASSISTANT_TASK:>
Python Code:
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Create some toy data to check your implementations
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
model = {}
model['W1'] = np.linspace(-0.2, 0.6, num=input_size*hidden_size).reshape(input_size, hidden_size)
model['b1'] = np.linspace(-0.3, 0.7, num=hidden_size)
model['W2'] = np.linspace(-0.4, 0.1, num=hidden_size*num_classes).reshape(hidden_size, num_classes)
model['b2'] = np.linspace(-0.5, 0.9, num=num_classes)
return model
def init_toy_data():
X = np.linspace(-0.2, 0.5, num=num_inputs*input_size).reshape(num_inputs, input_size)
y = np.array([0, 1, 2, 2, 1])
return X, y
model = init_toy_model()
X, y = init_toy_data()
from cs231n.classifiers.neural_net import two_layer_net
scores = two_layer_net(X, model)
print (scores)
correct_scores = [[-0.5328368, 0.20031504, 0.93346689],
[-0.59412164, 0.15498488, 0.9040914 ],
[-0.67658362, 0.08978957, 0.85616275],
[-0.77092643, 0.01339997, 0.79772637],
[-0.89110401, -0.08754544, 0.71601312]]
# the difference should be very small. We get 3e-8
print ('Difference between your scores and correct scores:')
print (np.sum(np.abs(scores - correct_scores)))
reg = 0.1
loss, _ = two_layer_net(X, model, y, reg)
correct_loss = 1.38191946092
print loss
# should be very small, we get 5e-12
print 'Difference between your loss and correct loss:'
print np.sum(np.abs(loss - correct_loss))
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = two_layer_net(X, model, y, reg)
# these should all be less than 1e-8 or so
for param_name in grads:
param_grad_num = eval_numerical_gradient(lambda W: two_layer_net(X, model, y, reg)[0], model[param_name], verbose=False)
print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))
from cs231n.classifier_trainer import ClassifierTrainer
model = init_toy_model()
trainer = ClassifierTrainer()
# call the trainer to optimize the loss
# Notice that we're using sample_batches=False, so we're performing Gradient Descent (no sampled batches of data)
best_model, loss_history, _, _ = trainer.train(X, y, X, y,
model, two_layer_net,
reg=0.001,
learning_rate=1e-1, momentum=0.0, learning_rate_decay=1,
update='sgd', sample_batches=False,
num_epochs=100,
verbose=False)
print 'Final loss with vanilla SGD: %f' % (loss_history[-1], )
model = init_toy_model()
trainer = ClassifierTrainer()
# call the trainer to optimize the loss
# Notice that we're using sample_batches=False, so we're performing Gradient Descent (no sampled batches of data)
best_model, loss_history, _, _ = trainer.train(X, y, X, y,
model, two_layer_net,
reg=0.001,
learning_rate=1e-1, momentum=0.9, learning_rate_decay=1,
update='momentum', sample_batches=False,
num_epochs=100,
verbose=False)
correct_loss = 0.494394
print 'Final loss with momentum SGD: %f. We get: %f' % (loss_history[-1], correct_loss)
model = init_toy_model()
trainer = ClassifierTrainer()
# call the trainer to optimize the loss
# Notice that we're using sample_batches=False, so we're performing Gradient Descent (no sampled batches of data)
best_model, loss_history, _, _ = trainer.train(X, y, X, y,
model, two_layer_net,
reg=0.001,
learning_rate=1e-1, momentum=0.9, learning_rate_decay=1,
update='rmsprop', sample_batches=False,
num_epochs=100,
verbose=False)
correct_loss = 0.439368
print 'Final loss with RMSProp: %f. We get: %f' % (loss_history[-1], correct_loss)
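# For reference, hedged sketches of the two update rules implemented inside
# ClassifierTrainer.train (classifier_trainer.py itself is not shown here):
#   momentum: v = momentum * v - learning_rate * dx;  x += v
#   rmsprop:  cache = decay_rate * cache + (1 - decay_rate) * dx**2
#             x -= learning_rate * dx / (np.sqrt(cache) + 1e-8)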
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
"""
Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
it for the two-layer neural net classifier. These are the same steps as
we used for the SVM, but condensed to a single function.
"""
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
# Reshape data to rows
X_train = X_train.reshape(num_training, -1)
X_val = X_val.reshape(num_validation, -1)
X_test = X_test.reshape(num_test, -1)
return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
from cs231n.classifiers.neural_net import init_two_layer_model
model = init_two_layer_model(32*32*3, 50, 10) # input size, hidden size, number of classes
trainer = ClassifierTrainer()
best_model, loss_history, train_acc, val_acc = trainer.train(X_train, y_train, X_val, y_val,
model, two_layer_net,
num_epochs=5, reg=1.0,
momentum=0.9, learning_rate_decay = 0.95,
learning_rate=1e-5, verbose=True)
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(loss_history)
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(train_acc)
plt.plot(val_acc)
plt.legend(['Training accuracy', 'Validation accuracy'], loc='lower right')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(model):
plt.imshow(visualize_grid(model['W1'].T.reshape(-1, 32, 32, 3), padding=3).astype('uint8'))
plt.gca().axis('off')
plt.show()
show_net_weights(model)
best_model = None # store the best model into this
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_model. #
# #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative #
# differences from the ones we saw above for the poorly tuned network. #
# #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters #
# automatically like we did on the previous assignment. #
#################################################################################
best_model = None
best_val_acc = 0
best_hyperparameters = {}
import itertools
hidden_sizes = [10, 50, 100, 160, 320, 640][3:4]
regs = [0.01, 0.1, 1][:1]
learning_rates = [1e-5, 1e-4, 1e-3][1:2]
num_epoches = [20]
momentums = [0.9]
learning_rate_decays = [0.95, 0.65]
input_size = 32*32*3
output_size = 10
for (
hidden_size,
reg,
lr,
num_epochs,
momentum,
lr_decay
) in itertools.product(hidden_sizes,
regs,
learning_rates,
num_epoches,
momentums,
learning_rate_decays):
model = init_two_layer_model(input_size, hidden_size, output_size)
trainer = ClassifierTrainer()
model_trained, loss_history, train_acc, val_acc = trainer.train(X_train, y_train, X_val, y_val,
model, two_layer_net,
num_epochs=num_epochs, reg=reg,
momentum=momentum, learning_rate_decay = lr_decay,
learning_rate=lr, verbose=True)
if val_acc[-1] > best_val_acc:
best_model = model_trained
best_val_acc = val_acc[-1]
best_hyperparameters = (hidden_size, reg, lr, num_epochs, momentum, lr_decay)
print('Model val_acc: %f, hidden_size: %d, reg: %f, lr: %f, num_epochs: %d, momentum: %f, lr_decay: %f'
%(val_acc[-1], hidden_size, reg, lr, num_epochs, momentum, lr_decay))
print('Best model val_acc: %f, hidden_size: %d, reg: %f, lr: %f, num_epochs: %d, momentum: %f, lr_decay: %f'
% ((best_val_acc,) + best_hyperparameters))
#################################################################################
# END OF YOUR CODE #
#################################################################################
# visualize the weights
show_net_weights(best_model)
scores_test = two_layer_net(X_test, best_model)
print 'Test accuracy: ', np.mean(np.argmax(scores_test, axis=1) == y_test)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also set up the backend and load the data.
Step2: Now it's your turn! Set up the branch nodes and layer structure above. Some tips
Step3: Now let's fit our model! First, set up multiple costs for each of the three branches using MultiCost
Step4: To test that your model was constructed properly, we first initialize the model with a dataset (so that it configures the layer shapes appropriately) and a cost, then print the model.
Step5: Then, we set up the remaining components and run fit!
|
<ASSISTANT_TASK:>
Python Code:
from neon.callbacks.callbacks import Callbacks
from neon.initializers import Gaussian
from neon.layers import GeneralizedCost, Affine, Multicost, SingleOutputTree
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, Logistic, Softmax
from neon.transforms import CrossEntropyBinary, CrossEntropyMulti, Misclassification
from neon.backends import gen_backend
be = gen_backend(batch_size=128)
from neon.data import MNIST
mnist = MNIST(path='data/')
train_set = mnist.train_iter
valid_set = mnist.valid_iter
from neon.layers import BranchNode
BranchNode.instances = dict()
# define common parameters as dictionary (see above)
init_norm = Gaussian(loc=0.0, scale=0.01)
normrelu = dict(init=init_norm, activation=Rectlin())
normsigm = dict(init=init_norm, activation=Logistic(shortcut=True))
normsoft = dict(init=init_norm, activation=Softmax())
# define your branch nodes
# branch nodes need to have a unique name
b1 = BranchNode(name="b1")
b2 = BranchNode(name="b2")
# define the main trunk (cost1 above)
...
# define the branch (cost2)
...
# define the branch (cost3)
...
# build the model as a SingleOutputTree
...
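# One possible completion (a sketch -- the layer widths and branch weights are
# assumptions, not taken from the exercise):
trunk = [Affine(nout=100, name="main1", **normrelu),
         b1,
         Affine(nout=64, name="main2", **normrelu),
         b2,
         Affine(nout=10, name="main_out", **normsoft)]
branch1 = [b1,
           Affine(nout=32, name="branch1_1", **normrelu),
           Affine(nout=10, name="branch1_out", **normsoft)]
branch2 = [b2,
           Affine(nout=32, name="branch2_1", **normrelu),
           Affine(nout=10, name="branch2_out", **normsoft)]
model = Model(layers=SingleOutputTree([trunk, branch1, branch2], alphas=[1.0, 0.25, 0.25]))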
cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti()),
GeneralizedCost(costfunc=CrossEntropyMulti()),
GeneralizedCost(costfunc=CrossEntropyMulti())])
model.initialize(train_set, cost)
print model
# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9)
# setup standard fit callbacks
callbacks = Callbacks(model, eval_set=valid_set, eval_freq=1)
model.fit(train_set, optimizer=optimizer, num_epochs=20, cost=cost, callbacks=callbacks)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extract tags
Step2: Load tags back in
Step3: Extract nodes
Step4: Extract ways and relations
Step5: Load back node data and recompress
Step6: Load back way data
Step7: Process California data in one go
|
<ASSISTANT_TASK:>
Python Code:
import osmdigest.pythonify as pythonify
import os
basedir = os.path.join("/media/disk", "OSM_Data")
filename = "illinois-latest.osm.xz"
tags = pythonify.Tags(os.path.join(basedir, filename))
pythonify.pickle(tags, "illinois_tags.pic.xz")
os.stat("illinois_tags.pic.xz").st_size / 1024**2
tags.nodes_from_key("name")[:5]
tags_by_id = pythonify.TagsById(tags)
tags_by_id.node(701092)
tags = pythonify.unpickle("illinois_tags.pic.xz")
list(tags.all_relation_tag_keys)[:5]
tags.relations_from_key("tower:type")
pythonify.TagsById(tags).relation(5813084)
nodes = pythonify.Nodes(os.path.join(basedir, filename))
pythonify.pickle(nodes, "illinois_nodes.pic.xz")
os.stat("illinois_nodes.pic.xz").st_size / 1024**2
ways = pythonify.Ways(os.path.join(basedir, filename))
pythonify.pickle(ways, "illinois_ways.pic.xz")
os.stat("illinois_ways.pic.xz").st_size / 1024**2
relations = pythonify.Relations(os.path.join(basedir, filename))
pythonify.pickle(relations, "illinois_relations.pic.xz")
os.stat("illinois_relations.pic.xz").st_size / 1024**2
nodes = pythonify.unpickle("illinois_nodes.pic.xz")
nodes = pythonify.NodesPacked.from_Nodes(nodes)
i = iter(nodes)
for j in range(10):
print(next(i))
nodes[700732]
pythonify.pickle(nodes, "illinois_nodes_packed.pic.xz")
os.stat("illinois_nodes_packed.pic.xz").st_size / 1024**2
ways = pythonify.unpickle("illinois_ways.pic.xz")
print(next(iter(ways)))
nodes[20326165], nodes[33235915]
import osmdigest.pythonify as pythonify
import os
basedir = os.path.join("/media/disk", "OSM_Data")
filename = "california-latest.osm.xz"
pythonify.pythonify_and_pickle(os.path.join(basedir, filename), os.path.join(basedir, "california"))
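# pythonify_and_pickle writes the pickled Tags/Nodes/Ways/Relations files with
# the "california" prefix; they can be loaded back with pythonify.unpickle, the
# same way as the Illinois files above (the exact filename suffixes produced
# are an assumption here).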
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Looks like there aren't too many ppm m/z overlaps </h2>
Step2: <h2> So, about 1/4 of the mass-matches have potential isomers in the other dataset...? </h2>
Step3: <h2> So, we've got 6,315,000 pairs of molecules that could be isomers at 1 ppm </h2>
Step4: <h2> Looks like there are more isomers than 1
<ASSISTANT_TASK:>
Python Code:
### import two datasets
# Imports for this excerpt (the original import cell is not shown)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from lxml import etree
def reindex_xcms_by_mzrt(df):
df.index = (df.loc[:,'mz'].astype('str') +
':' + df.loc[:, 'rt'].astype('str'))
return df
# alzheimers
local_path = '/home/irockafe/Dropbox (MIT)/Alm_Lab/'\
'projects'
alzheimers_path = local_path + '/revo_healthcare/data/processed/MTBLS72/positive_mode/'\
'mtbls_no_retcor_bw2.csv'
## Import the data and remove extraneous columns
df_alzheimers = pd.read_csv(alzheimers_path, index_col=0)
df_alzheimers = reindex_xcms_by_mzrt(df_alzheimers)
# malaria
malaria_path = local_path + ('/revo_healthcare/data/processed/MTBLS315/'+
'uhplc_pos/xcms_result_4.csv')
df_malaria = pd.read_csv(malaria_path, index_col=0)
df_malaria = reindex_xcms_by_mzrt(df_malaria)
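# ppm_matrix and pairwise_difference come from an earlier cell that is not
# shown in this excerpt; minimal sketches consistent with the .dropna() usage
# below (so they return labeled DataFrames). Two caveats flagged as
# assumptions: the serum-DB cell further down multiplies by 10**6 again, and
# its (matrix < i).sum() line expects a scalar, so the excerpt itself is not
# fully consistent about the return type and scaling.
def ppm_matrix(mz1, mz2):
    # pairwise |m/z| difference relative to the first mass, in parts per million
    diff = np.abs(mz1.values[:, None] - mz2.values[None, :]) / mz1.values[:, None]
    return pd.DataFrame(diff * 1e6, index=mz1.index, columns=mz2.index)
def pairwise_difference(v1, v2):
    # pairwise signed differences (e.g. retention times), as a labeled DataFrame
    return pd.DataFrame(v1.values[:, None] - v2.values[None, :],
                        index=v1.index, columns=v2.index)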
ppm_alz_v_malaria = ppm_matrix(df_malaria['mz'],
df_alzheimers['mz'])
rt_alz_v_malaria = pairwise_difference(df_malaria['rt'],
df_alzheimers['rt'])
sns.heatmap(np.log10(ppm_alz_v_malaria))
plt.title('Log10 ppm difference')
plt.show()
# How many for differences at 30ppm?
ppm_window = 30
within_ppm = (ppm_alz_v_malaria[ppm_alz_v_malaria < 30]
.dropna(axis=0, how='all')
.dropna(axis=1, how='all')
)
print 'shape', ppm_alz_v_malaria.shape
print ('ppm within {ppm} ppm: '.format(ppm=ppm_window) +
'{num}'.format(num=(ppm_alz_v_malaria < 30).sum().sum()))
# Get indexes
print 'shape of htose within 30ppm:, ', within_ppm.shape
# How many m/z from one dataset could be m/z isomers from
# other dataset?
print ('\n\nMass matches between datasets (isomers and 1:1 matches)',
(within_ppm < 30).sum().sum())
print '\nAlzheimers "isomers" in other dataset that match >1 feature in other set', ((within_ppm < 30).sum(axis=0)>1).sum()
print 'Alzheimers total', df_alzheimers['rt'].shape
print '\n\nMalaria "isomers" in other dataset that match >1 feature in other set', ((within_ppm < 30).sum(axis=1) > 1).sum()
print 'Malaria total', df_malaria['rt'].shape
# Show distribution of # of isomers per feature in both malaria and alzheimers datasets
print (within_ppm < 30).sum(axis=0).hist(bins=30)
plt.title('Alzheimers isomers in malaria dataset')
plt.show()
(within_ppm < 30).sum(axis=1).hist(bins=30)
plt.title('Malaria isomers in alzheimers dataset')
plt.show()
local = '/home/irockafe/Dropbox (MIT)/Alm_Lab/projects/'
xml_file = local + 'revo_healthcare/data/external/toy_database.xml'
xml_file = local + 'revo_healthcare/data/external/serum_metabolites.xml'
#xml_tree = etree.iterparse(xml_file, tag='metabolite')
#
# namespace - at the top of file. fucks with every tag.
# very annoying, so name all tags ns + tag
ns = '{http://www.hmdb.ca}'
nsmap = {None : ns}
# If you're within a metabolite tag
count = 0
seen_mass = 0
d = {}
for event, element in etree.iterparse(xml_file, tag=ns+'metabolite'):
tree = etree.ElementTree(element)
# Aggregate info into a dictionary of
# {HMDB_ID: iso_mass}
accession = []
# Get accession number and masses for each metabolite
# Could be multiple accessions. Grab all of them,
# sort to make unique identifier
for elem in tree.iter():
if elem.tag == ns+'accession':
accession.append(elem.text)
# If you just saw a 'mono_mass' entry,
# get the mass value and reset, saying you
# haven't seen 'mono_mass' in the text of the next metabolite
if (elem.tag == ns+'value') & (seen_mass == 1):
mass = float(elem.text)
seen_mass = 0
if elem.text == 'mono_mass':
seen_mass = 1
elem.clear()
# sort accession numbers and join with '_'
accession_key = '_'.join(sorted(accession))
# add to dictionary
if mass:
d[accession_key] = mass
# reset mass - only add feature if mass listed
mass = None
# reset accession numbers
accession = []
element.clear()
count += 1
if count % 1000 == 0:
print('Made it through ' + str(count) + ' metabolites')
#pickle.dump(d, open('serumdb_dict.p', 'wb'))
print 'Number of metabolites: %s' % len(d.keys())
serumdb_masses = pd.Series(d, dtype='float32')
serumdb_ppm_matrix = ppm_matrix(serumdb_masses, serumdb_masses)*10**6
#df = pd.DataFrame(serumdb_ppm_matrix, index=serumdb_masses.index,
# columns=serumdb_masses.index)*10**6
# Forget about using a dataframe - uses too much memory
top_ppm = 30
pairs = np.full((top_ppm), np.nan)
print(pairs)
for i in range(1,top_ppm):
# div by two, b/c half matrix is redundant
# subtract length of diagonal of matrix, too
num = ((serumdb_ppm_matrix < i).sum() / 2) - serumdb_ppm_matrix.shape[0]
pairs[i] = num
plt.scatter(x=range(1,30), y=pairs[1:])
plt.title('Number of pairs of molecules that could overlap in human serum database\n')
plt.show()
# how to plot the number of overlaps per molecule?
num_below_1ppm = (serumdb_ppm_matrix < 1).sum(axis=1) - 1
plt.hist((serumdb_ppm_matrix < 1).sum(axis=1) - 1 )
plt.title('Pairs of overlapping mz at ppm 1')
plt.show()
num_below_1ppm
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example, we will read in a reflectance tile from the site SERC (Smithsonian Ecological Research Center) since this has a variety of landcover types (water, buildings, trees, etc.). This data is in the ENVI format (.hdr, .dat); NEON provides an h5 plugin for ENVI, since it is a popular software program for hyperspectral data visualization and analysis. You can also use the aop_h5refl2array function introduced in earlier tutorials, and use the spectral functions on the array, but some the geographic information is not automatically applied.
Step2: Note that the information is stored differently when read in with envi.open, since we're now using a different Python package. We can find the wavelength information in img.bands.centers. Let's take a look at the first and last wavelengths values
Step3: We'll set the Water Vapor Band windows to NaN so we don't include them in the classification.
Step4: To get a quick look at the img data, use the params method
Step5: Metadata information is stored in img.metadata, a dictionary. Let's look at the metadata contents
Step6: To access any of these metadata items, use the syntax eg. md['description'] or md['map info']
Step7: You can also use type and len to look at the type and length (or number) of some of the metadata contents
Step8: Let's look at the data using imshow, a wrapper around matplotlib's imshow for multi-band images
Step9: When dealing with NEON hyperspectral data, we first want to remove the water vapor & noisy bands, keeping only the valid bands. To speed up the classification algorithms for demonstration purposes, we'll look at a subset of the data using read_subimage, a built in method in the spectral package to subset by area and bands. Type help(img.read_subimage) to see how it works.
Step10: Plot the subsetted image for reference
Step11: Now that we have the image subsetted, lets run the k-means algorithm. Type help(kmeans) to show how the function works. To run the k-means algorithm on the image and create 5 clusters, using a maximum of 50 iterations, use the following syntax
Step12: Note that the algorithm terminated afte 14 iterations, when the pixels stopped being reassigned.
Step13: c contains 5 groups of spectral curves with 360 bands (the # of bands we've kept after removing the water vapor windows and the last 10 noisy bands). Let's plot these spectral classes
Step14: On your own
Step15: In the covariance matrix display, lighter values indicate strong positive covariance, darker values indicate strong negative covariance, and grey values indicate covariance near zero.
<ASSISTANT_TASK:>
Python Code:
from spectral import *
import spectral.io.envi as envi
import numpy as np
import matplotlib
ls ../../data/D02_SERC
img = envi.open('../../data/D02_SERC/NEON_D02_SERC_DP3_368000_4306000_reflectance.hdr',
'../../data/D02_SERC/NEON_D02_SERC_DP3_368000_4306000_reflectance.dat')
print('First 3 Band Center Wavelengths:',img.bands.centers[:3])
print('Last 3 Band Center Wavelengths:',img.bands.centers[-3:])
# Mark the water-vapor windows and the last 10 (noisy) bands as NaN
img.bands.centers[191:211] = [np.nan] * 20
img.bands.centers[281:314] = [np.nan] * 33
img.bands.centers[-10:] = [np.nan] * 10
img.params
md = img.metadata
print('Metadata Contents:')
for item in md:
print('\t',item)
print('description:',md['description'])
print('map info:',md['map info'])
print(type(md['wavelength']))
print('Number of Bands:',len(md['wavelength']))
view = imshow(img,bands=(58,34,19),stretch=0.05,title="RGB Image of 2017 SERC Tile")
print(view)
valid_band_range = [i for j in (range(0,191), range(212, 281), range(315,415)) for i in j] #remove water vapor bands
img_subset = img.read_subimage(range(400,600),range(400,600),bands=valid_band_range) #subset image by area and bands
view = imshow(img_subset,bands=(58,34,19),stretch=0.01,title="RGB Image of 2017 SERC Tile Subset")
(m,c) = kmeans(img_subset,5,50)
print(c.shape)
%matplotlib inline
import pylab
pylab.figure()
for i in range(c.shape[0]):
pylab.plot(c[i])
pylab.show()
pylab.title('Spectral Classes from K-Means Clustering')
pylab.xlabel('Bands (with Water Vapor Windows Removed)')
pylab.ylabel('Reflectance')
#%matplotlib notebook
view = imshow(img_subset, bands=(58,34,19),stretch=0.01, classes=m)
view.set_display_mode('overlay')
view.class_alpha = 0.5 #set transparency
view.show_data
pc = principal_components(img_subset)
pc_view = imshow(pc.cov)
xdata = pc.transform(img_subset)
pcdata = pc.reduce(num=10).transform(img_subset)
pc_0999 = pc.reduce(fraction=0.999)
# How many eigenvalues are left?
print(len(pc_0999.eigenvalues))
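# Quick check (sketch): fraction of total variance the retained components
# capture, computed from the eigenvalues of the full decomposition.
print(sum(pc_0999.eigenvalues) / sum(pc.eigenvalues))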
img_pc = pc_0999.transform(img_subset)
print(img_pc.shape)
v = imshow(img_pc[:,:,:5], stretch_all=True)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dropout
Step2: Dropout forward pass
Step3: Dropout backward pass
Step4: Fully-connected nets with Dropout
Step5: Regularization experiment
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in data.iteritems():
print '%s: ' % k, v.shape
x = np.random.randn(500, 500) + 10
for p in [0.3, 0.6, 0.75]:
out, _ = dropout_forward(x, {'mode': 'train', 'p': p})
out_test, _ = dropout_forward(x, {'mode': 'test', 'p': p})
print 'Running tests with p = ', p
print 'Mean of input: ', x.mean()
print 'Mean of train-time output: ', out.mean()
print 'Mean of test-time output: ', out_test.mean()
print 'Fraction of train-time output set to zero: ', (out == 0).mean()
print 'Fraction of test-time output set to zero: ', (out_test == 0).mean()
print
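# For reference, a sketch of inverted dropout consistent with the checks above
# (the real implementation lives in cs231n/layers.py, which is not shown;
# reading p as the keep probability is an assumption suggested by the
# mean/zero-fraction tests):
#   train: mask = (np.random.rand(*x.shape) < p) / p;  out = x * mask
#   test:  out = x
#   backward (train mode): dx = dout * mask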
x = np.random.randn(10, 10) + 10
dout = np.random.randn(*x.shape)
dropout_param = {'mode': 'train', 'p': 0.8, 'seed': 123}
out, cache = dropout_forward(x, dropout_param)
dx = dropout_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda xx: dropout_forward(xx, dropout_param)[0], x, dout)
print 'dx relative error: ', rel_error(dx, dx_num)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for dropout in [0, 0.25, 0.5]:
print 'Running check with dropout = ', dropout
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
weight_scale=5e-2, dtype=np.float64,
dropout=dropout, seed=123)
loss, grads = model.loss(X, y)
print 'Initial loss: ', loss
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
print
# Train two identical nets, one with dropout and one without
num_train = 500
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
dropout_choices = [0, 0.75]
for dropout in dropout_choices:
model = FullyConnectedNet([500], dropout=dropout)
print dropout
solver = Solver(model, small_data,
num_epochs=25, batch_size=100,
update_rule='adam',
optim_config={
'learning_rate': 5e-4,
},
verbose=True, print_every=100)
solver.train()
solvers[dropout] = solver
# Plot train and validation accuracies of the two models
train_accs = []
val_accs = []
for dropout in dropout_choices:
solver = solvers[dropout]
train_accs.append(solver.train_acc_history[-1])
val_accs.append(solver.val_acc_history[-1])
plt.subplot(3, 1, 1)
for dropout in dropout_choices:
plt.plot(solvers[dropout].train_acc_history, 'o', label='%.2f dropout' % dropout)
plt.title('Train accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3, 1, 2)
for dropout in dropout_choices:
plt.plot(solvers[dropout].val_acc_history, 'o', label='%.2f dropout' % dropout)
plt.title('Val accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(ncol=2, loc='lower right')
plt.gcf().set_size_inches(15, 15)
plt.show()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imagenet pretrained models
Step2: Imagenet 1000 classes
Step3: Using a pretrained CNN as a feature extractor
<ASSISTANT_TASK:>
Python Code:
import dogs_vs_cats as dvc
all_files = dvc.image_files()
from keras.applications.nasnet import NASNetMobile
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
# https://keras.io/applications/#vgg16
model = NASNetMobile(weights='imagenet')
input_image_shape = (224,224,3)
img_path = all_files[10]
img = image.load_img(img_path, target_size=input_image_shape[:2])
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
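# Note: keras also ships a NASNet-specific preprocess_input
# (keras.applications.nasnet.preprocess_input). The VGG16 version imported
# above runs, but NASNet weights were trained with different input scaling,
# so predictions may be degraded.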
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
# decode the results into a list of tuples (class, description, probability)
model.summary()
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=5))
from IPython.display import Image
Image(img_path)
print(preds.shape)
from keras.applications.imagenet_utils import CLASS_INDEX
# Imagenet 1000 classes
CLASS_INDEX
# predict a set of images
n_images = 10
x = np.ndarray((n_images,224,224,3))  # channels-last, matching img_to_array's default output
for i,img_path in enumerate(all_files[0:n_images]):
img = image.load_img(img_path, target_size=(224, 224))
x[i] = image.img_to_array(img)
# preprocess and predict all together
x_preprocessed = preprocess_input(x)
preds = model.predict(x_preprocessed,verbose=1)
print("")
print(preds.shape)
dec_preds = decode_predictions(preds,top=5)
dec_preds
from IPython.display import Image,display
for img_path,dec_pred in zip(all_files[0:n_images],dec_preds):
display(Image(img_path,width="120px",height="120px"))
print(" ".join(["%s (prob: %.3f)"%(elm[1],elm[2]) for elm in dec_pred]))
# load model without top layer
n_images_train=500
n_images_test=500
input_image_shape = (3,224,224)
train_features, train_labels,train_files, \
test_features, test_labels, test_files = dvc.training_test_datasets(all_files,
n_images_train,n_images_test,
input_image_shape)
# load_img from keras.preprocessing loads the images in [0,255] scale
train_features = preprocess_input(train_features)
test_features = preprocess_input(test_features)
from keras.models import Model
from keras.applications.vgg16 import VGG16
base_model = VGG16(weights='imagenet')
model = Model(input=base_model.input, output=base_model.get_layer('fc2').output)
print("Predicting train images")
train_features_cnn = model.predict(train_features,verbose=1)
print("Predicting test images")
test_features_cnn = model.predict(test_features,verbose=1)
train_features_cnn.shape
from sklearn import svm
from sklearn.model_selection import GridSearchCV
tuned_parameters = {'kernel': ['linear'],
'C': [1, 10, 100, 1000]}
clf = GridSearchCV(svm.SVC(C=1), tuned_parameters, cv=5,n_jobs=7)
clf.fit(train_features_cnn, train_labels)
clf.best_estimator_
print("Train score: {}".format(clf.score(train_features_cnn,train_labels)))
print("Test score: {}".format(clf.score(test_features_cnn,test_labels)))
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Write your hypotheses here
Step3: b. Two tailed test.
Step4: Exercise 2
Step5: b. Mean T-Test
Step6: c. Mean p-value test
Step7: Exercise 3
Step8: b. Hypothesis Testing on Variances.
<ASSISTANT_TASK:>
Python Code:
# Useful Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t
import scipy.stats
prices1 = get_pricing('TSLA', start_date = '2015-01-01', end_date = '2016-01-01', fields = 'price')
returns_sample_tsla = prices1.pct_change()[1:]
print 'Tesla return sample mean', returns_sample_tsla.mean()
print 'Tesla return sample standard deviation', returns_sample_tsla.std()
print 'Tesla return sample size', len(returns_sample_tsla)
# Testing
## Your code goes here
## Sample mean difference:
## Z- Statistic:
print 't-statistic is:', test_stat
## Finding the p-value for one tail test
print 'p-value is: ', p_val
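# A possible completion (a sketch). The hypotheses are an assumption here,
# since the notebook leaves "Write your hypotheses here" blank:
# H0: mean daily TSLA return = 0  vs  H1: mean > 0 (one-tailed).
n = len(returns_sample_tsla)
test_stat = (returns_sample_tsla.mean() - 0) / (returns_sample_tsla.std() / np.sqrt(n))
print 't-statistic is:', test_stat
p_val = 1 - t.cdf(test_stat, n - 1)
print 'p-value is: ', p_val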
## Your code goes here
## Sample mean difference:
## Z- Statistic:
print 't-statistic is:', test_stat
## Finding the p-value for the two-tailed test
print 'p-value is: ', p_val
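# Two-tailed sketch: same statistic as above; the p-value now covers both tails.
p_val = 2 * (1 - t.cdf(abs(test_stat), n - 1))
print 'p-value is: ', p_val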
## Your code goes here
# For alpha = 10%
alpha = 0.1
f = ## Your code goes here
print 'alpha = 10%: f = ', f
# For alpha = 5%
alpha = 0.05
f = ## Your code goes here
print 'alpha = 5%: f = ', f
# For alpha = 1%
alpha = 0.01
f = ## Your code goes here
print 'alpha = 1%: f = ', f
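# Sketch: the blanks above (and the shading bounds in the plot below) are the
# two-tailed standard-normal critical values:
for a in [0.1, 0.05, 0.01]:
    print 'alpha = {0}: f = '.format(a), scipy.stats.norm.ppf(1 - a / 2.0)
# alpha = 0.10 -> ~1.645, alpha = 0.05 -> ~1.960, alpha = 0.01 -> ~2.576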
# Plot a standard normal distribution and mark the critical regions with shading
x = np.linspace(-3, 3, 100)
norm_pdf = lambda x: (1/np.sqrt(2 * np.pi)) * np.exp(-x * x / 2)
y = norm_pdf(x)
fig, ax = plt.subplots(1, 1, sharex=True)
ax.plot(x, y)
# Value for alpha = 10%
ax.fill_between(x, 0, y, where = x > ## Your code goes here
, label = 'alpha = 10%')
ax.fill_between(x, 0, y, where = x < ) ## Your code goes here
# Value for alpha = 5%
ax.fill_between(x, 0, y, where = x > ## Your code goes here
, color = 'red', label = 'alpha = 5%')
ax.fill_between(x, 0, y, where = x < ## Your code goes here
, color = 'red')
# Value for alpha = 1%
ax.fill_between(x, 0, y, where = x > ## Your code goes here
, facecolor='green', label = 'alpha = 1%')
ax.fill_between(x, 0, y, where = x < ## Your code goes here
, facecolor='green')
plt.title('Rejection regions for a two-tailed hypothesis test at 90%, 95%, 99% confidence')
plt.xlabel('x')
plt.ylabel('p(x)')
plt.legend();
# Calculating Critical Values probability
alpha = 0.1
f = ## Your code goes here
print f
data = get_pricing('SPY', start_date = '2016-01-01', end_date = '2017-01-01', fields = 'price')
returns_sample = data.pct_change()[1:]
# Running the T-test.
n = len(returns_sample)
test_statistic = ## Your code goes here
print 't test statistic: ', test_statistic
# Running p-value test.
alpha = 0.1
p_val = ## Your code goes here
print 'p-value is: ', p_val
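# A possible completion (sketch); testing H0: mean daily SPY return = 0 is an
# assumption, since the null value is not stated in the excerpt.
test_statistic = (returns_sample.mean() - 0) / (returns_sample.std() / np.sqrt(n))
print 't test statistic: ', test_statistic
p_val = 2 * (1 - t.cdf(abs(test_statistic), n - 1))
print 'p-value is: ', p_val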
# Data Collection
alpha = 0.1
symbol_list = ['XLF', 'MCD']
start = '2015-01-01'
end = '2016-01-01'
pricing_sample = get_pricing(symbol_list, start_date = start, end_date = end, fields='price')
pricing_sample.columns = map(lambda x: x.symbol, pricing_sample.columns)
returns_sample = pricing_sample.pct_change()[1:]
# Sample mean values (note: the second ticker is MCD, so name it accordingly)
mu_xlf, mu_mcd = returns_sample.mean()
s_xlf, s_mcd = returns_sample.std()
n_xlf = len(returns_sample['XLF'])
n_mcd = len(returns_sample['MCD'])
# Welch's unequal-variance t-test (one possible solution; H0: equal means)
test_statistic = (mu_xlf - mu_mcd) / np.sqrt(s_xlf**2 / n_xlf + s_mcd**2 / n_mcd)
df = (s_xlf**2 / n_xlf + s_mcd**2 / n_mcd)**2 / ((s_xlf**2 / n_xlf)**2 / (n_xlf - 1) + (s_mcd**2 / n_mcd)**2 / (n_mcd - 1))
print 't test statistic: ', test_statistic
print 'Degrees of freedom (modified): ', df
print 'p-value: ', 2 * (1 - t.cdf(abs(test_statistic), df))
# Data
symbol_list = ['XLF', 'MCD']
start = "2015-01-01"
end = "2016-01-01"
pricing_sample = get_pricing(symbol_list, start_date = start, end_date = end, fields = 'price')
pricing_sample.columns = map(lambda x: x.symbol, pricing_sample.columns)
returns_sample = pricing_sample.pct_change()[1:]
# Take returns from above, MCD and XLF, and compare their variances
# (one possible solution)
xlf_std_dev = returns_sample['XLF'].std()
mcd_std_dev = returns_sample['MCD'].std()
print 'XLF standard deviation is: ', xlf_std_dev
print 'MCD standard deviation is: ', mcd_std_dev
# Calculate the F-test statistic with MCD.std > XLF.std (larger variance in the numerator)
test_statistic = (mcd_std_dev / xlf_std_dev)**2
print "F Test statistic: ", test_statistic
# Degrees of freedom
df1 = len(returns_sample['MCD']) - 1
df2 = len(returns_sample['XLF']) - 1
print df1
print df2
# Calculate critical values.
from scipy.stats import f
upper_crit_value = f.ppf(0.975, df1, df2)
lower_crit_value = f.ppf(0.025, df1, df2)
print 'Upper critical value at a = 0.05 with df1 = {0} and df2 = {1}: '.format(df1, df2), upper_crit_value
print 'Lower critical value at a = 0.05 with df1 = {0} and df2 = {1}: '.format(df1, df2), lower_crit_value
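# --- Added cross-check (not part of the original exercise): scipy ships a
# one-line equivalent of the manual Welch test above, handy for sanity-checking.
t_stat, p_two = scipy.stats.ttest_ind(returns_sample['XLF'], returns_sample['MCD'], equal_var=False)
print 'Welch t-test cross-check -- t:', t_stat, 'two-tailed p:', p_two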
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up the port as a PWM port with a 50Hz frequency
Step2: Start at the 0 degree position (for most motors, this is a 2.5% duty cycle).
Step3: Move that motor around.
|
<ASSISTANT_TASK:>
Python Code:
import RPi.GPIO as GPIO
import time
# Use BCM pin numbering and configure GPIO 18 as an output
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
# Drive the pin with a 50 Hz PWM signal (the standard hobby-servo frame rate)
p = GPIO.PWM(18, 50)
# Start at the 0-degree position (a 2.5% duty cycle for most servos)
p.start(2.5)
p.ChangeDutyCycle(5.0)
p.ChangeDutyCycle(7.0)
p.ChangeDutyCycle(4.0)
time.sleep(0.2)
p.ChangeDutyCycle(10.0)
time.sleep(0.2)
p.ChangeDutyCycle(5.0)
time.sleep(0.2)
p.ChangeDutyCycle(7.0)
time.sleep(0.2)
p.stop()
GPIO.cleanup()
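# --- Added helper (a sketch, not in the original): map a servo angle in
# degrees to a duty cycle, assuming the common 2.5%-12.5% range over 0-180 deg.
def angle_to_duty(angle):
    return 2.5 + (angle / 180.0) * 10.0
# e.g. p.ChangeDutyCycle(angle_to_duty(90)) would center a typical servo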
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load some house sales data
Step2: Exploring the data for housing sales
Step3: Create a simple regression model of sqft_living to price
Step4: Build the regression model using only sqft_living as a feature and
Step5: Evaluate the simple model
Step6: RMSE of about \$255,170!
Step7: Plot a graph of price against sqft_living
Step8: Above
Step9: Explore other features in the data
Step10: Pull the bar at the bottom to view more of the data.
Step11: Comparing the results of the simple model with adding more features
Step12: The RMSE goes down from \$255,170 to \$179,508 with more features.
Step13: (image removed)
Step14: In this case, the model with more features provides a worse prediction than the simpler model with only 1 feature. However, on average, the model with more features is better.
Step15: (image removed)
Step16: In this case, the model with more features provides a better prediction. This behavior is expected here, because this house is more differentiated by features that go beyond its square feet of living space, especially the fact that it's a waterfront house.
Step17: (image removed)
Step18: The model predicts a price of over $13M for this house! But we expect the house to cost much more. (There are very few samples in the dataset of houses that are this fancy, so we don't expect the model to capture a perfect prediction here.)
Step19: Here you can see that there is no difference between the my_features_model and the advanced_features_model
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('home_data.gl/')
sales.head(5)
graphlab.canvas.set_target('ipynb')
sales.show(view="Scatter Plot", x="sqft_living", y="price")
train_data,test_data = sales.random_split(.8,seed=123)
sqft_model = graphlab.linear_regression.create(train_data, target='price', features=['sqft_living'],validation_set=None)
print test_data['price'].mean()
print sqft_model.evaluate(test_data)
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(test_data['sqft_living'],test_data['price'],'.',
test_data['sqft_living'],sqft_model.predict(test_data),'-')
sqft_model.get('coefficients')
my_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors', 'zipcode']
sales[my_features].show()
sales.show(view='BoxWhisker Plot', x='zipcode', y='price')
my_features_model = graphlab.linear_regression.create(train_data,target='price',features=my_features,validation_set=None)
print my_features
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
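# --- Added sketch (assumes evaluate() returns a dict with an 'rmse' key, as
# GraphLab Create's regression toolkit documents): compare the RMSEs directly.
print 'sqft model RMSE: ', sqft_model.evaluate(test_data)['rmse']
print 'my_features model RMSE: ', my_features_model.evaluate(test_data)['rmse']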
house1 = sales[sales['id']=='5309101200']
house1
print house1['price']
print sqft_model.predict(house1)
print my_features_model.predict(house1)
house2 = sales[sales['id']=='1925069082']
house2
print sqft_model.predict(house2)
print my_features_model.predict(house2)
bill_gates = {'bedrooms':[8],
'bathrooms':[25],
'sqft_living':[50000],
'sqft_lot':[225000],
'floors':[4],
'zipcode':['98039'],
'condition':[10],
'grade':[10],
'waterfront':[1],
'view':[4],
'sqft_above':[37500],
'sqft_basement':[12500],
'yr_built':[1994],
'yr_renovated':[2010],
'lat':[47.627606],
'long':[-122.242054],
'sqft_living15':[5000],
'sqft_lot15':[40000]}
print my_features_model.predict(graphlab.SFrame(bill_gates))
advanced_features = ['bedrooms', 'bathrooms', 'sqft_living',
'sqft_lot', 'floors', 'zipcode',
'condition','grade', 'waterfront',
'view','sqft_above','sqft_basement',
'yr_built','yr_renovated', 'lat', 'long',
'sqft_living15','sqft_lot15'
]
advanced_features_model = graphlab.linear_regression.create(train_data,target='price',features=advanced_features,validation_set=None)
print advanced_features
print advanced_features_model.evaluate(test_data)
print sqft_model.evaluate(test_data)
print my_features_model.evaluate(test_data)
print my_features_model.predict(house2)
print advanced_features_model.predict(house2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rank list of images by share rates with Microsoft Image Tagging API output
Step2: Generate rank list of tags by share rate.
|
<ASSISTANT_TASK:>
Python Code:
import csv
import json
import JobsMapResultsFilesToContainerObjs as ImageMap
import DeriveFinalResultSet as drs
import DataStructsHelper as DS
import importlib
import pandas as pd
import htmltag as HT
from collections import OrderedDict
#import matplotlib.pyplot as plt
import plotly.plotly as py
import cufflinks as cf # this is necessary to link pandas to plotly
cf.go_online()
flName = "../data/All_Zebra_Count_Tag_Output_Results.txt"
pd.set_option('display.max_colwidth', -1)
imgAlbumDict = ImageMap.genImgAlbumDictFromMap(drs.imgJobMap)
master = ImageMap.createResultDict(1,100)
imgShareNotShareList,noResponse = ImageMap.imgShareCountsPerAlbum(imgAlbumDict,master)
importlib.reload(ImageMap)
importlib.reload(DS)
header,rnkFlLst = DS.genlstTupFrmCsv("../FinalResults/rankListImages_expt2.csv")
rnkListDf = pd.DataFrame(rnkFlLst,columns=header)
rnkListDf['Proportion'] = rnkListDf['Proportion'].astype('float')
rnkListDf.sort_values(by="Proportion",ascending=False,inplace=True)
# create an overall giant csv
gidFtrs = ImageMap.genMSAIDataHighConfidenceTags("../data/GZC_data_tagged.json",0.5)
gidFtrsLst = DS.cnvrtDictToLstTup(gidFtrs)
df = pd.DataFrame(gidFtrsLst,columns=['GID','tags'])
shrPropsTags = pd.merge(rnkListDf,df,left_on='GID',right_on='GID')
shrPropsTags.to_csv("../FinalResults/resultsExpt2RankList_Tags.csv",index=False)
shrPropsTags['URL'] = '<img src = "https://socialmediabias.blob.core.windows.net/wildlifephotos/All_Zebra_Count_Images/' + shrPropsTags['GID'] + '.jpeg" width = "350">'
shrPropsTags.sort_values(by=['Proportion','GID'],ascending=False,inplace=True)
fullFl = HT.html(HT.body(HT.HTML(shrPropsTags.to_html(bold_rows = False,index=False))))
fullFl
outputFile = open("../FinalResults/resultsExpt2RankList_Tags.html","w")
outputFile.write(fullFl)
outputFile.close()
tgsShrNoShrCount = {}
# accumulate per-tag share / not-share / total counts across all ranked images
for lst in rnkFlLst:
tgs = gidFtrs[lst[0]]
tmpDict = {'share': int(lst[1]), 'not_share': int(lst[2]), 'total' : int(lst[3])}
for tag in tgs:
oldDict ={}
oldDict = tgsShrNoShrCount.get(tag,{'share' : 0,'not_share' : 0,'total' : 0})
oldDict['share'] = oldDict.get('share',0) + tmpDict['share']
oldDict['not_share'] = oldDict.get('not_share',0) + tmpDict['not_share']
oldDict['total'] = oldDict.get('total',0) + tmpDict['total']
tgsShrNoShrCount[tag] = oldDict
## Append data into data frames and build visualizations
tgsShrCntDf = pd.DataFrame(tgsShrNoShrCount).transpose()
tgsShrCntDf['proportion'] = tgsShrCntDf['share'] * 100 / tgsShrCntDf['total']
tgsShrCntDf.sort_values(by=['proportion','share'],ascending=False,inplace=True)
tgsShrCntDf = tgsShrCntDf[['share','not_share','total','proportion']]
tgsShrCntDf.to_csv("../FinalResults/RankListTags.csv")
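# --- Added filter (a sketch, not in the original analysis): proportions based
# on only a handful of images are noisy, so keep tags with enough support.
frequent_tags = tgsShrCntDf[tgsShrCntDf['total'] >= 20]
frequent_tags.head()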
fullFl = HT.html(HT.body(HT.HTML(tgsShrCntDf.to_html(bold_rows = False))))
outputFile = open("../FinalResults/RankListTags.html","w")
outputFile.write(fullFl)
outputFile.close()
iFrameBlock = []
fig = tgsShrCntDf['proportion'].iplot(kind='line',filename="All_Tags",title="Distribution of Tags")
iFrameBlock.append(fig.embed_code)
#plt.savefig("../FinalResults/RankListTags.png",bbox_inches='tight')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
import tensorflow as tf
from tensorflow.keras.losses import MeanSquaredError
y_true = [1., 0.]
y_pred = [2., 3.]
mse_loss = MeanSquaredError()
loss = mse_loss(y_true, y_pred).numpy()  # ((1-2)**2 + (0-3)**2) / 2 == 5.0
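# --- Added example (hedged): the loss object also accepts per-sample weights.
weighted_loss = mse_loss(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()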
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this post, I will demonstrate how you can use emcee to sample models defined using PyMC3.
Step2: Then, we can code up the model in PyMC3 following Jake VanderPlas' notation, and sample it using PyMC3's NUTS[sic] sampler
Step3: And we can take a look at the corner plot
Step4: Sampling the PyMC3 model using emcee
Step5: And now we can run the sampler
Step6: And we can use this to make the same corner plot as above
Step7: The last thing that we might want to look at is the integrated autocorrelation time for each method.
Step8: And, the autocorrelation for emcee is about 40 steps
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(42)
true_params = np.array([0.5, -2.3, -0.23])
N = 50
t = np.linspace(0, 10, 2)  # two endpoints are enough to draw the straight truth line
x = np.random.uniform(0, 10, 50)
y = x * true_params[0] + true_params[1]
y_obs = y + np.exp(true_params[-1]) * np.random.randn(N)
plt.plot(x, y_obs, ".k", label="observations")
plt.plot(t, true_params[0]*t + true_params[1], label="truth")
plt.xlabel("x")
plt.ylabel("y")
plt.legend(fontsize=14);
import pymc3 as pm
import theano.tensor as tt
with pm.Model() as model:
logs = pm.Uniform("logs", lower=-10, upper=10)
alphaperp = pm.Uniform("alphaperp", lower=-10, upper=10)
theta = pm.Uniform("theta", -2*np.pi, 2*np.pi, testval=0.0)
# alpha_perp = alpha * cos(theta)
alpha = pm.Deterministic("alpha", alphaperp / tt.cos(theta))
# beta = tan(theta)
beta = pm.Deterministic("beta", tt.tan(theta))
# The observation model
mu = alpha * x + beta
pm.Normal("obs", mu=mu, sd=tt.exp(logs), observed=y_obs)
trace = pm.sample(draws=2000, tune=2000)
import corner
samples = np.vstack([trace[k] for k in ["alpha", "beta", "logs"]]).T
corner.corner(samples, truths=true_params);
import theano
with model:
f = theano.function(model.vars, [model.logpt] + model.deterministics)
def log_prob_func(params):
dct = model.bijection.rmap(params)
args = (dct[k.name] for k in model.vars)
results = f(*args)
return tuple(results)
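# --- Added sanity check (a sketch): evaluate the wrapper once at the model's
# test point to confirm the theano function round-trips through the bijection.
print(log_prob_func(model.bijection.map(model.test_point))[0])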
import emcee
with model:
# First we work out the shapes of all of the deterministic variables
res = pm.find_MAP()
vec = model.bijection.map(res)
initial_blobs = log_prob_func(vec)[1:]
dtype = [(var.name, float, np.shape(b)) for var, b in zip(model.deterministics, initial_blobs)]
# Then sample as usual
coords = vec + 1e-5 * np.random.randn(25, len(vec))
nwalkers, ndim = coords.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob_func, blobs_dtype=dtype)
sampler.run_mcmc(coords, 5000, progress=True)
import pandas as pd
df = pd.DataFrame.from_records(sampler.get_blobs(flat=True, discard=100, thin=30))
corner.corner(df[["alpha", "beta", "logs"]], truths=true_params);
[float(emcee.autocorr.integrated_time(np.array(trace.get_values(var.name, combine=False)).T)) for var in model.free_RVs]
sampler.get_autocorr_time(discard=100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a simple resource
Step2: Define a mock for the resource
Step3: Call the mocked resource
Step4: What is going on here?
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('/opt/rhc')
import rhc.micro as micro
import rhc.async as async
import logging
logging.basicConfig(level=logging.DEBUG)
p=micro.load_connection([
'CONNECTION placeholder http://jsonplaceholder.typicode.com',
'RESOURCE document /posts/{id}',
])
async.wait(micro.connection.placeholder.document(1))
class MyMock(object):
def document(self, method, path, headers, body):
print('method', method)
print('path', path)
print('headers', headers)
print('body', body)
return 'foo'
micro.connection.placeholder.mock = MyMock()
async.wait(micro.connection.placeholder.document(1))
async.wait(micro.connection.placeholder.document(1, test='value'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out: tanh output of the generator (logits are not returned)
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits: sigmoid output and the raw logits of the discriminator
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
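# --- Added illustration (not in the original): what label smoothing does to a
# confident logit, using the numerically stable sigmoid cross-entropy formula
# max(z, 0) - z * t + log(1 + exp(-|z|)) that TensorFlow implements.
z = 5.0
bce = lambda z, t: np.maximum(z, 0) - z * t + np.log(1 + np.exp(-abs(z)))
print('target 1.0:', bce(z, 1.0), ' target 0.9:', bce(z, 0.9))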
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A graph
Step2: Part 1
Step3: Q4
Step4: Q6
Step5: Q7
Step6: Let $d(X_1,X_2)$ denote the Euclidean distance between two points $X_1$ and $X_2$. We build the following Laplacian from a set of points $(X_i)_i$ in the plane.
Step7: Q1
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# tutoriel_graphe
noeuds = {0: 'le', 1: 'silences', 2: 'quelques', 3: '\xe9crit', 4: 'non-dits.', 5: 'Et', 6: 'risque', 7: '\xe0', 8: "qu'elle,", 9: 'parfois', 10: 'aim\xe9', 11: 'lorsque', 12: 'que', 13: 'plus', 14: 'les', 15: 'Minelli,', 16: "n'oublierai", 17: 'je', 18: 'prises', 19: 'sa', 20: 'la', 21: 'jeune,', 22: "qu'elle,", 23: '\xe0', 24: 'ont', 25: "j'ai", 26: 'chemin', 27: '\xe9tranger', 28: 'lente', 29: 'de', 30: 'voir', 31: 'quand', 32: 'la', 33: 'recul,', 34: 'de', 35: 'trop', 36: 'ce', 37: 'Je', 38: 'Il', 39: "l'extr\xeame", 40: "J'ai", 41: 'silences,', 42: "qu'elle,", 43: 'le', 44: 'trace,', 45: 'avec', 46: 'seras', 47: 'dire,', 48: 'femme', 49: 'soit'}
arcs = {(3, 15): None, (46, 47): None, (42, 33): None, (35, 45): None, (1, 14): None, (22, 26): None, (26, 28): None, (43, 29): None, (40, 41): None, (29, 44): None, (17, 3): None, (32, 37): None, (24, 19): None, (46, 34): None, (11, 19): None, (34, 49): None, (22, 2): None, (37, 48): None, (14, 12): None, (3, 10): None, (5, 18): None, (12, 24): None, (34, 32): None, (45, 39): None, (37, 26): None, (33, 45): None, (34, 47): None, (36, 31): None, (29, 47): None, (13, 11): None, (12, 21): None, (2, 16): None, (5, 4): None, (33, 35): None, (28, 49): None, (25, 49): None, (21, 0): None, (3, 13): None, (18, 24): None, (12, 7): None, (13, 15): None, (11, 1): None, (16, 23): None, (37, 45): None, (27, 32): None, (32, 41): None, (8, 24): None, (10, 1): None, (2, 24): None, (24, 11): None, (2, 14): None, (47, 36): None, (48, 39): None, (30, 25): None, (30, 43): None, (15, 14): None, (26, 27): None, (6, 8): None, (20, 10): None, (19, 17): None, (5, 7): None, (44, 25): None, (27, 38): None, (2, 0): None, (3, 18): None, (3, 9): None, (25, 33): None, (42, 48): None, (2, 15): None, (26, 48): None, (26, 38): None, (7, 8): None, (8, 4): None}
from mlstatpy.graph.graphviz_helper import draw_graph_graphviz
draw_graph_graphviz(noeuds, arcs, "image.png")
from IPython.display import Image
Image("image.png", width=400)
import numpy
def Laplacien (edges) :
mat = {}
for k in edges :
i,j = k
if i != j :
mat [i,j] = -1
mat [j,i] = -1
if (i,i) not in mat : mat [i,i] = 0
if (j,j) not in mat : mat [j,j] = 0
mat [i,i] += 1
mat [j,j] += 1
maxi = max(max(_) for _ in mat) + 1
nmat = numpy.zeros((maxi, maxi))
for (i, j), v in mat.items():
nmat[i,j] = v
return nmat
mat = Laplacien(arcs)
mat
def eigen (mat, sort = True) :
# note: for a symmetric Laplacian, numpy.linalg.eigh would be the safer choice
l, v = numpy.linalg.eig(mat)
if sort :
li = list (l)
li = [ (_,i) for i,_ in enumerate (li) ]
li.sort ()
pos = [ _[1] for _ in li ]
l = numpy.array ( [ _[0] for _ in li ] )
mat = v.copy()
for i in range (0, len (pos)) :
mat [ :,i] = v [ :,pos[i] ]
return l,mat
else :
return l,v
val, vec = eigen(mat)
val
vec[:, 1]
from pyquickhelper.helpgen import NbImage
NbImage("images/tutgraphcl.png")
points = [(0.84737386691659533, 0.95848816613228727), (0.28893525107454354, 0.66073249195336492), (0.60382037086559148, 0.13747945088383384), (0.21951613156582261, 0.040905525433785228), (0.21613062123493632, 0.096875623632852625), (0.99787588721497178, 0.79337171783327132), (0.18576957348508683, 0.78396225027633837), (0.23875443625588322, 0.35497638429086975), (0.8713637939628045, 0.22983756618811024), (0.28301724069085921, 0.99408996134013161), (0.39792684083973429, 0.77105362865540716), (0.75452041353842147, 0.330325155167562), (0.24824845436118537, 0.95998690078041737), (0.92318434139996397, 0.38115765401571988), (0.54660304309415886, 0.62093667623480242), (0.58899996464290505, 0.9017292023892568), (0.60541336358687847, 0.28929082523865812), (0.87925379747840293, 0.94834058131858756), (0.61449632813730748, 0.94264237081849722), (0.13119804743502139, 0.44158556198130949), (0.20660796942108339, 0.915599021810789), (0.3097131996826511, 0.81979953110332837), (0.89711055197298928, 0.7298496710091944), (0.22499060312661545, 0.072786594549671291), (0.012604758185058018, 0.36199484670070914), (0.92050750708863993, 0.91447248587261709), (0.26304069827339327, 0.026058147250910935), (0.59289937178711172, 0.86673111722782969), (0.70640070176443837, 0.64096733852134291), (0.049399266565914535, 0.54027723332288746), (0.26450585597978316, 0.50883097182669357), (0.91987410679455195, 0.97753050553942622), (0.5618293073273094, 0.27688371997865069), (0.91241761244784847, 0.090310675429991605), (0.90925789663628509, 0.40628594240956295), (0.3832814495252409, 0.66221025722485627), (0.74928785967005418, 0.32840192750838815), (0.25478832731446643, 0.70269825611412617), (0.54293534537395793, 0.87800254191632932), (0.89603330911109724, 0.77106655965183546), (0.29830084404349644, 0.97117954065316903), (0.075137754060910056, 0.086473140735377596), (0.120307047737505, 0.073651360408690802), (0.87835916829742444, 0.34622147871872355), (0.20567119579830373, 0.42658381934346423), (0.27715586337053655, 0.87999487046170488), (0.16364186693234739, 0.98604111274325335), (0.31830209002283116, 0.36372930495109934), (0.73434680601907532, 0.65926820980026724), (0.9830474686174655, 0.12246834322318068), (4.0293130665095358, -3.0529459366329164), (-3.7755737603387041, 2.2685053357046323), (-2.1926920625846602, 2.4857321786911326), (5.1445647965531025, 4.8943143876324848), (0.87403644639763023, 5.6464000746270226), (-3.5545355219233219, 3.8988261206085766), (2.0785612031685732, -1.2948920530351256), (-3.4682717483474708, 2.2364561845005868), (2.0695530720860349, -2.9439062757612424), (3.9563571060210054, -2.0678946581365616), (3.2485209278176157, -2.6386418932454814), (-3.4800728241977779, 0.72646452125011518), (-1.8341241854718167, 3.3482541467971951), (-4.5558692651012178, 3.5624030818263908), (-4.6768285328272157, -1.0106699901361971), (3.9175303893386597, 0.1087117017596031), (-3.9111941479785823, 2.70001353796486), (-5.5501953466420737, -3.8544512068951891), (1.9246058344257151, 4.123740240481137), (-4.110657752575519, -2.0774760107085393), (2.6547967574269418, 4.6868873425221045), (-1.9308254017076039, -2.9448006865754279), (-3.0788555249744247, 3.396205767032443), (-4.0516249434348621, 0.42035392996461629), (-2.2989465364173602, -3.2706795830191275), (4.651698949077459, 1.1364194264447973), (3.3637257964296152, -2.5082040184760555), (-3.2502121678035314, 4.5383631321594571), (4.5274668721202556, 4.473426056956777), (3.400114365788911, -3.0434200740148363), (3.513062501300436, 2.718209259961025), (-2.3986743034356737, 
-4.0590996420222467), (2.6632346815268289, 4.8894243587379433), (4.2802341564965607, -3.4921791441653762), (-1.5297912885016269, 5.5780900056883569), (4.0634598983096293, -2.2904478604819776), (1.0857595813036722, 5.6366192967000295), (-4.5596385297232223, -1.3177709282351766), (-2.1361714943468244, -3.9107871995830976), (3.7240531749202161, -4.8033709892679886), (-4.1017624989859351, -0.54374796617700816), (2.3715344477591818, -3.2387553898801391), (3.8187172884547076, -5.1522284671097314), (1.0454193728074506, 3.1688190599740418), (-3.9848808505730315, -3.5176013894081675), (-4.1965918931505275, 2.0248869962483522), (3.4535361867324776, 3.4437155145638751), (-3.2171776428648808, -2.0867326734388021), (-3.5763512667620065, -3.785293447306691), (-3.2489915323631275, -4.6589505137265448), (1.2817385669950028, -4.0553290947191964), (-5.5481507299407191, 5.2080477057573553), (-2.2817876881965624, 0.12512408298772948), (-3.4831125975271719, 1.7834950195462245), (-4.2064606598908139, -3.2421411165648886), (-5.3461204499811092, 0.65966593807378215), (-0.36559473517464181, -3.9248327086099932), (-4.4223418217602317, 4.790875007038224), (-3.9026572243192548, -0.21621909226838504), (0.16100173690141428, -4.8875278273011942), (-4.2792213808538602, 1.9041297697847308), (-4.4298318748123444, -3.8717874765920124), (3.2660121035644738, 3.8922848961161609), (4.4724681658043082, -1.8875314666371643), (-3.1337207059785208, 7.2290596706950154), (5.0970619686963916, 2.4188864705446997), (-1.824501293502089, 0.87811217547665232), (2.6141377553638456, 2.4736768016729647), (-3.9646033676482686, 1.7291507868196327), (-5.6494860793108481, -1.1744278681124489), (3.3291564189715617, 3.1892910878432268), (2.6260111359196396, -4.8029748349762125), (-4.1110554486386404, 0.0087017311510849682), (-3.812034605848817, 1.8310006567642712), (-4.0643824785110239, 4.7806635726760689), (-3.8724397920934015, 0.65927045141188367), (-3.6202135060380289, -0.18281430910806151), (1.8134764145891591, -4.0328054369849538), (4.0315824591034124, -3.5339867923196042), (-3.0906912982614791, -3.8390710019489158), (0.77019164393866146, 4.0099320163703895), (-3.2239134319849398, 2.5227757084315567), (-2.5342615497190861, -4.5402720724503229), (0.52313297572359074, 5.8268409663350287), (-2.0896974241486603, -0.83931337455192145), (5.9824769771009292, 1.8062615072223389), (-1.7151819974072808, -4.6553638508191835), (-0.94296691141453703, -4.3332773280899097), (-2.9080659785364102, 3.8017876981653527), (-4.146797854411842, -2.4943345068020939), (-1.6135304662636716, -4.5968234340599352), (-5.2240732422979015, -0.40050907128273239), (3.0003615064702411, 4.3564534485947091), (1.5251603471425388, 5.3602495377614252), (0.70829180528117897, 4.8705912438690024), (-1.9857439387875215, 4.3495410597763557), (-1.7415118623160484, -2.8482449535792851), (3.1227029816875906, -3.943690794192229), (2.5533372938495322, 0.23654193364300019), (4.9320538122814632, 0.27398085527961841), (3.5379571426787906, 3.5479478416595258), (-3.9952197756192462, 0.9519866242123729), (-0.63418929807710789, 4.9714021509147459), (3.7514419719026835, -3.7952656655539831), (5.8168652955867248, -5.8059389896821614), (-3.86083201462211, 1.6763339473293351), (5.2346287443442741, -2.0049022214331869), (3.0159172780756807, -4.6747832401686313), (1.9625789720275502, 0.21332969214064601), (-5.4459656516053521, 1.8490131071943328), (5.4887755131556295, 1.0537691340713213), (4.1214658457920255, 1.8180419262808878), (1.0417225435808637, 6.4876076903545457), (5.2056831059665383, 
3.4403227294912879), (-3.29183542445509, 1.1299087065549616), (-4.6894950904308068, 0.67877427899602139), (-4.2334935303450196, 0.66692066781151726), (6.918359229911677, -0.43825691963852248), (5.0912552685819197, 5.9256467457380193), (3.9995400634925016, -4.2633779062253305), (-1.3270510253578853, -2.8998811026998816), (-3.4372749748248483, -2.800876689538256), (2.5720483206059228, -4.5479241832525954), (3.5107697954439923, -5.6063323885377114), (3.45355690226015, -1.3924594206301864), (4.8170391803389006, -1.3343907023480963), (1.1592191821861308, 4.551692003143347), (-2.2147820707711716, 0.55930561729387951), (-3.2364813901253862, -1.7059292544869302), (3.5980046177747229, 3.0606302788023871), (3.0235041652892747, -0.27015781708378661), (2.4303330714757383, 3.3989583334332432), (2.4649562148782955, -4.3524552397826168), (-3.3322237797463616, 1.6813558717119386), (4.3359544685337736, 2.7104894884469877), (3.350410042767797, -3.8412188670946792), (-2.8993273426849919, 5.5101185505218293), (3.3563537615645282, 5.0439247587050282), (3.3738404946436238, -0.43277784903448813), (1.6236691719193734, -4.8192122194763103), (-4.3000303214498619, 2.7045156595962521), (3.2036876689968699, -0.22379027409222038), (5.0078193725337679, -0.33061456656172339), (1.3173753727230917, 2.3292728936983247), (0.17305051546078376, -2.3708524146324814), (0.18920570140751003, 2.7288547711089577), (4.5559793038807355, 2.4460955268542377), (-0.65537111745445098, 4.3024274811626642), (0.32733974310015845, -2.6653194005399481), (-4.3495524342659682, 0.50620561077402126), (3.6859406925109957, 1.0042337939426813), (-5.4168309661540643, -2.3784247121303279), (2.0873449293614152, 3.8206900404120345), (3.3397623772131446, -2.2347446764630474), (2.8720948774765485, 2.6955132035521556), (5.9472576652843694, -3.3542922693748149), (1.030233796538444, 1.6199282129862145), (-1.7351581782776853, -5.5709314373179808), (0.14607908112131446, 2.79251837326064), (0.37002429983216167, 4.3653059393186942), (3.8616789948811956, 3.6100436336617339), (-4.8019087210485418, -3.5911421188072357), (1.6953052292111459, -4.3928959775316905), (-1.049532260408768, -2.9169000088107522), (-4.8042700374731648, -2.6636201843555991), (2.2856117402115821, -4.497386564362329), (-1.1085015582769402, -4.1635806015318408), (-0.51764720743541925, 3.3207617687324866), (2.6552485122750968, 1.9457154950840061), (4.4574030967957459, 0.13220998701481373), (4.1064026703010086, -4.6992062016898437), (3.6218017958370492, 2.4171784152426357), (2.1893570148164336, -0.53987360896641756), (-0.62289304323418893, 5.6377915319211773), (0.95656595366184183, -3.5482370903224183), (4.6552715153624238, -0.42419842122106877), (3.9138981541477369, 1.5211086418661788), (-5.7643908686171743, 3.3462875243179644), (4.4001664954474204, 1.8715548148469952), (3.7209034976257116, -4.3132712976844925), (2.0077653108424371, -3.8044349295045858), (-2.7004396541700451, 3.6313151291578776), (2.7805282578575432, -1.3496033840422226), (2.5149407509344646, -4.4491799573779538), (-3.4969549443875327, 0.59052341158001964), (2.5871839418980924, -2.8626995345211439), (4.530084220131168, 0.73947783901217035), (-4.2278934560638541, -1.4480933790189707), (-3.6638968948801822, -1.8603129450393652), (1.0034748779660814, 4.3783603559660618), (-0.24711046251746965, 5.0245225170472958), (-0.75233017871629115, -3.4003624728787472), (-5.3204808270534789, 0.8530050107548528), (-0.66555456366565435, -3.210607962975542), (4.4312598575388913, -1.8510534338146063), (-1.0579141292803367, -3.8599892658343156), 
(5.1580465239922022, -1.6376354853614972), (-2.6525127599513731, 2.9406618825179196), (3.3353268107001339, 4.5193520805659642), (4.9838132614191322, -4.5937246171656669)]
import math
def distance (p1, p2) :
# despite its name this returns a Gaussian similarity: larger means closer
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
return math.exp (- (dx**2 + dy**2) )
def CreateProximityMatrix (points) :
n = len (points)
mat = [ [ 0.0 for i in range (0,n)] for j in range (0,n) ]
for i in range (0, n) :
for j in range (0, n) :
if i != j :
mat [i][j] = - distance (points [i], points [j])
for i in range (0, n) :
mat [i][i] = -sum (mat [i])
return numpy.matrix(mat)
mat = CreateProximityMatrix(points)
mat
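# --- Added sketch (not part of the original answer): the sign of the Fiedler
# vector (second-smallest eigenvector of the Laplacian) splits the points into
# two clusters. eigh is used because the similarity Laplacian is symmetric.
l2, v2 = numpy.linalg.eigh(mat)            # eigenvalues come back sorted ascending
fiedler = numpy.asarray(v2[:, 1]).ravel()  # second-smallest eigenvector
labels = (fiedler > 0).astype(int)         # cluster assignment for each point
labels[:20]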
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: value noise
Step2: We can visualize r by plotting it.
Step3: We can use this table to define a function noise that given an integer value x will return a value in the range of 0 to 1. The r vector only has a limited amount of values. In order to make it work for any value of x we can perform a modulo operation with the length of the r vector.
Step4: By default we only have 8 values in the r vector so if we plot the noise function over a range 0 to 16 we can see that it repeats itself halfway.
Step5: Our noise function works for integer values of x ideally it works for real values of x as well. We can do this by interpolation.
Step6: Now let's create some noise in two-dimensions. We'll implement a naive solution at first and revisit it later. Let's start by re-seeding the r table in two dimensions.
Step7: And now we redefine our noise function so it will interpolate between the x and y value as well.
Step8: We can see clearly see a pattern here. That's because the dimensions of our r table are very small, only 4 in each axis. We'll get much better noise if we make it bigger.
Step9: There's still a pattern but that is to be expected with our still reasonably small r vector. It does like a lot better already but another thing that is noticeable is that it appears to be a bit blocky. This is because of the linear interpolation we're using to calculate the final noise values. We can fix this by introducing the smoothstep function.
Step10: And now we have to adjust our noise function to incorporate the smoothstep function.
Step11: Note that instead of interpolating on tx and ty directly we are using the smoothstep-ed values of sx and sy instead.
|
<ASSISTANT_TASK:>
Python Code:
img = np.random.ranf((128,128))
plt.imshow(img, cmap=plt.cm.ocean)
def seed(n, shape=(8,)):
global r
np.random.seed(n)
r = np.random.ranf(shape)
seed(0)
x = np.arange(0, len(r), 1)
plt.plot(x, r[x], 'bo')
plt.axis('tight')
def noise(x):
x = int(x % len(r))
return r[x]
x = np.arange(0, 16, 1)
y = [noise(x) for x in x]
plt.plot(x, y, 'bo')
def noise(x):
xi = int(x)
x0 = xi % len(r)
x1 = 0 if x0 == (len(r) - 1) else (x0 + 1)
v0, v1 = r[x0], r[x1]
t = x - xi
return np.interp(t, [0, 1], [v0, v1])
x = np.arange(0, 16, 1/10)
y = [noise(x) for x in x]
plt.plot(x, y)
seed(0, (4,4))
def noise(x, y):
xi, yi = int(x), int(y)
tx, ty = x - xi, y - yi
x0 = xi % len(r)
x1 = 0 if x0 == (len(r) - 1) else x0 + 1
y0 = yi % len(r[0])
y1 = 0 if y0 == (len(r[0]) - 1) else y0 + 1  # wrap on the y-dimension of r
c00 = r[x0][y0]
c01 = r[x1][y0]
c10 = r[x0][y1]
c11 = r[x1][y1]
n0 = np.interp(tx, [0, 1], [c00, c01])
n1 = np.interp(tx, [0, 1], [c10, c11])
return np.interp(ty, [0, 1], [n0, n1])
img = np.array([[noise(x, y) for x in range(16)] for y in range(16)])
plt.imshow(img, plt.cm.ocean)
seed(0, (16,16))
img = np.array([[noise(x, y) for x in range(32)] for y in range(32)])
plt.imshow(img, cmap=plt.cm.ocean)
def smoothstep(t):
return 3 * t**2 - 2 * t**3
x = np.linspace(0, 1, 1000)
y = smoothstep(x)
plt.plot(x, y)
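# --- Added variant (hedged): Perlin's later "smootherstep" also has zero
# second derivatives at the endpoints, reducing visible grid artifacts further.
def smootherstep(t):
    return 6 * t**5 - 15 * t**4 + 10 * t**3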
def noise(x, y, c = lambda t: t):
xi, yi = int(x), int(y)
tx, ty = x - xi, y - yi
x0 = xi % len(r)
x1 = 0 if x0 == (len(r) - 1) else x0 + 1
y0 = yi % len(r[0])
y1 = 0 if y0 == (len(r[0]) - 1) else y0 + 1  # wrap on the y-dimension of r
c00 = r[x0][y0]
c01 = r[x1][y0]
c10 = r[x0][y1]
c11 = r[x1][y1]
sx = c(tx)
sy = c(ty)
n0 = np.interp(sx, [0, 1], [c00, c01])
n1 = np.interp(sx, [0, 1], [c10, c11])
return np.interp(sy, [0, 1], [n0, n1])
def gen_im(size=(64,64), c = lambda x: x):
w, h = size
pixels = [[noise(x, y, c) for x in range(w)] for y in range(h)]
return np.array(pixels)
seed(1, (32, 32))
img = gen_im(c = smoothstep)
plt.imshow(img, cmap=plt.cm.ocean)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that we have loaded our file, we can begin to work on it. <i>Python</i> offers us a lot of pre-built tools to make the task of coding easier. Some of the most commonly used tools are known as <b>functions</b>.
Step2: Now we can call the function for any word we choose. The next example shows that there are <u>392</u> occurrences of the word <code>privacy</code> contained in the transcripts for the 39th sitting of Parliament.
Step3: Unfortunately, there are two distinct problems here, centred around the fact that our function is only counting the string <code>privacy</code> exactly as it appears.
Step4: Here is a more extreme example to illustrate the point.
Step5: The second problem is that of punctuation. Much like words are case-sensitive, they are also punctuation-sensitive. If a piece of punctuation has been included in the string, it will be included in the search. Here we count the occurrences of <code>privacy,</code> shown here with a comma after the word.
Step6: And here we count <code>privacy.</code>, with the word followed by a period.
Step7: We could comb through the text to find all of the different instantiations of <code>privacy</code>, and then run the code for each one and add together all of the numbers, but that would be time consuming and potentially inaccurate. Instead, we must process the text further to make the text uniform. In this case we want to make all of the characters lowercase, and remove all of the punctuation.
Step8: Now, when we count the instances of <code>privacy</code>, we are presented with a total of <u>846</u> instances.
Step9: Part 2
Step10: Although we can display the contents of a directory by using the <code>listdir</code> function, <i>Python</i> needs those names stored in a list in order to iterate over it. We also want to specify that only files with the extension <code>.txt</code> are included. Here we create another function called <code>list_textfiles</code>.
Step11: Rather than writing code to open each file individually, we can create another custom function to open the file we pass to it. We'll call this one read_file.
Step12: Now we can open all of the files in our directory, strip each file of uppercase letters and punctuation, split the whole of each text into tokens, and store all the data as separate lists in our variable <code>corpus</code>.
Step13: Let's check to make sure the code worked by using the <code>len</code> function to count the number of items in our <code>corpus</code> list.
Step14: Let's create a function to make the names of the files more readable. First we'll have to strip the the file extension <code>.txt</code>.
Step15: Now let's make a function to remove the <code>data/</code>.
Step16: And finally, we'll write a function to tie the two functions together.
Step17: Now we can display a readable list of the files within our directory.
Step18: The next step involves iterating through both lists
Step19: What's exciting about this code is that we can now search the entire corpus for any word we choose. Let's search for <code>information</code>.
Step20: How about <code>ethics</code>?
|
<ASSISTANT_TASK:>
Python Code:
# 1. open the text file
infile = open('data/39.txt')
# 2. read the file and assign it to the variable 'text'
text = infile.read()
# 3. close the text file
infile.close()
# 4. split the variable 'text' into distinct word strings
words = text.split()
# 5. define the'count_in_list' function
def count_in_list(item_to_count, list_to_search):
"Counts the number of a specified word within a list of words"
number_of_hits = 0
for item in list_to_search:
if item == item_to_count:
number_of_hits += 1
return number_of_hits
# 6. here the function counts the instances of the word 'privacy'
print "Instances of the word \'privacy\':", (count_in_list("privacy", words))
print "Instances of the word \'Privacy\':", (count_in_list("Privacy", words))
print "Instances of the word \'pRiVaCy\':",(count_in_list("pRiVaCy", words))
print "Instances of the word \'privacy,\':", (count_in_list("privacy,", words))
print "Instances of the word \'privacy.\':",(count_in_list("privacy.", words))
infile = open('data/39.txt')
text = infile.read()
infile.close()
tokens = text.split()
# keep only purely alphabetic tokens, lowercased (tokens carrying punctuation are dropped)
words = [w.lower() for w in tokens if w.isalpha()]
print "Instances of the word \'privacy\':", (count_in_list("privacy", words))
# imports the os module
from os import listdir
# calls the listdir function to list the files in a specific directory
listdir("data")
def list_textfiles(directory):
"Return a list of filenames ending in '.txt'"
textfiles = []
for filename in listdir(directory):
if filename.endswith(".txt"):
textfiles.append(directory + "/" + filename)
return textfiles
def read_file(filename):
"Read the contents of FILENAME and return as a string."
infile = open(filename)
contents = infile.read()
infile.close()
return contents
corpus = []
for filename in list_textfiles("data"):
# reads the file
text = read_file(filename)
# splits the text into tokens
tokens = text.split()
# removes the punctuation and changes Uppercase to lower
words = [w.lower() for w in tokens if w.isalpha()]
# creates a set of word lists for each file
corpus.append(words)
print"There are", len(corpus), "files in the list, named: ", ', '.join(list_textfiles('data'))
from os.path import splitext
def remove_ext(filename):
"Removes the file extension, such as .txt"
name, extension = splitext(filename)
return name
for files in list_textfiles('data'):
remove_ext(files)
from os.path import basename
def remove_dir(filepath):
"Removes the path from the file name"
name = basename(filepath)
return name
for files in list_textfiles('data'):
remove_dir(files)
def get_filename(filepath):
"Removes the path and file extension from the file name"
filename = remove_ext(filepath)
name = remove_dir(filename)
return name
filenames = []
for files in list_textfiles('data'):
files = get_filename(files)
filenames.append(files)
print"There are", len(corpus), "files in the list, named:", ', '.join(filenames),"."
for words, names in zip(corpus, filenames):
print"Instances of the word \'privacy\' in",names, ":", count_in_list("privacy", words)
for words, names in zip(corpus, filenames):
print"Instances of the word \'information\' in",names, ":", count_in_list("information", words)
for words, names in zip(corpus, filenames):
print"Instances of the word \'ethics\' in",names, ":", count_in_list("ethics", words)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this case, we have multiple datasets, thus we have to iterate through the list of files.
Step2: Filtering
Step3: Visualisation
Step4: Comparison of single-cell and bulk datasets
Step5: Meta
Step6: Topologically associating domains (TADs)
Step7: The problem of the variable parameter for TADs calling might be resolved via parameter optimization. For example, the best parameter could be selected based on mean TADs size fitting expectations (~ 500 Kb in this case).
|
<ASSISTANT_TASK:>
Python Code:
import os
from hiclib import mapping
from mirnylib import h5dict, genome
bowtie_path = '/opt/conda/bin/bowtie2'
enzyme = 'DpnII'
bowtie_index_path = '/home/jovyan/GENOMES/HG19_IND/hg19_chr1'
fasta_path = '/home/jovyan/GENOMES/HG19_FASTA/'
chrms = ['1']
genome_db = genome.Genome(fasta_path, readChrms=chrms)
min_seq_len = 120
len_step = 5
nthreads = 2
temp_dir = 'tmp'
bowtie_flags = '--very-sensitive'
experiment_ids = ['72-sc-1', '54-sc-1', '58-sc-1']
for exp_id in experiment_ids:
infile1 = '/home/jovyan/DATA/FASTQ1/K562_{}_R1.fastq'.format(exp_id)
infile2 = '/home/jovyan/DATA/FASTQ1/K562_{}_R2.fastq'.format(exp_id)
out1 = '/home/jovyan/DATA/SAM/K562_{}_R1.chr1.sam'.format(exp_id)
out2 = '/home/jovyan/DATA/SAM/K562_{}_R2.chr1.sam'.format(exp_id)
mapping.iterative_mapping(
bowtie_path = bowtie_path,
bowtie_index_path = bowtie_index_path,
fastq_path = infile1,
out_sam_path = out1,
min_seq_len = min_seq_len,
len_step = len_step,
nthreads = nthreads,
temp_dir = temp_dir,
bowtie_flags = bowtie_flags)
mapping.iterative_mapping(
bowtie_path = bowtie_path,
bowtie_index_path = bowtie_index_path,
fastq_path = infile2,
out_sam_path = out2,
min_seq_len = min_seq_len,
len_step = len_step,
nthreads = nthreads,
temp_dir = temp_dir,
bowtie_flags = bowtie_flags)
out = '/home/jovyan/DATA/HDF5/K562_{}.fragments.hdf5'
mapped_reads = h5dict.h5dict(out.format(exp_id))
mapping.parse_sam(
sam_basename1 = out1,
sam_basename2 = out2,
out_dict = mapped_reads,
genome_db = genome_db,
enzyme_name = enzyme,
save_seqs = False,
keep_ids = False)
import h5py
import numpy as np
from hiclib import fragmentHiC
from hiclib.fragmentHiC import HiCdataset as HiCdatasetorig
from mirnylib.numutils import uniqueIndex
class HiCdataset(HiCdatasetorig):
"Modification of HiCDataset to include all filters"
def filterLessThanDistance(self):
# This is the old function used to filter "duplicates".
#After the final submission of the manuscript, It was replaced by a better function that does the same,
#but at bp resolution, not 100 bp.
M = self.N
for i in range(5):
for j in range(5):  # note: j is unused; the original script simply repeats the filtering pass
chrStrandID = 10000000 * 10000000 * (np.array(self.chrms1 * (self.strands1 + 1), dtype = np.int64) * 100 + self.chrms2 * (self.strands2 + 1))
print(len(np.unique(chrStrandID)))
posid = np.array((self.cuts1 + i * 100) // 500, dtype = np.int64) * 10000000 + (self.cuts2 + i * 100) // 500
N = self.N
self.maskFilter(uniqueIndex(posid + chrStrandID))
print(N, "filtered to", self.N)
self.metadata["321_quasiDuplicatesRemoved"] = M - self.N
output = []
for exp_id in experiment_ids:
inp = '/home/jovyan/DATA/HDF5/K562_{}.fragments.hdf5'.format(exp_id)
out = '/home/jovyan/DATA/HDF5/K562_{}.tmp.hdf5'.format(exp_id)
outstat = '/home/jovyan/DATA/HDF5/K562_{}.stat.txt'.format(exp_id)
fragments = HiCdataset(
filename = out,
genome = genome_db,
maximumMoleculeLength= 500,
enzymeName = 1000,
mode = 'w')
fragments.parseInputData(
dictLike=inp)
fragments.filterLessThanDistance()
fs = fragments.fragmentSum()
fragments.fragmentFilter(fs < 9)
output.append(list(fragments.metadata.items()))
out_bin = '/home/jovyan/DATA/HDF5/K562_{}.binned_{}.hdf5'
res_kb = [100, 20]
for res in res_kb:
print(res)
outmap = out_bin.format(exp_id, str(res)+'kb')
fragments.saveHeatmap(outmap, res*1000)
del fragments
output
from hiclib.binnedData import binnedDataAnalysis
res = 100
data_hic = binnedDataAnalysis(resolution=res*1000, genome=genome_db)
for exp_id in experiment_ids:
data_hic.simpleLoad('/home/jovyan/DATA/HDF5/K562_{}.binned_{}.hdf5'.format(exp_id, str(res)+'kb'), exp_id)
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
%matplotlib inline
plt.figure(figsize=[10,10])
plt.imshow(data_hic.dataDict['54-sc-1'][200:500, 200:500], cmap='jet', interpolation='None')
data_hic.simpleLoad('/home/jovyan/DATA/HDF5/K562_B-bulk.binned_{}.hdf5'.format(str(res)+'kb'),'bulk')
data_hic.removeDiagonal()
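# --- Optional normalization (a hedged aside, not part of this tutorial's flow;
# both methods exist on hiclib's binnedData but would change the values plotted below):
# data_hic.removePoorRegions(cutoff=2)
# data_hic.iterativeCorrectWithoutSS()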
mtx1 = data_hic.dataDict['bulk']
mtx2 = data_hic.dataDict['54-sc-1']
mtx_tmp = np.triu(mtx1)/np.mean(mtx1) + np.tril(mtx2)/np.mean(mtx2)
plt.figure(figsize=[10,10])
plt.imshow(mtx_tmp[200:500, 200:500], cmap='Blues', interpolation='None', vmax=900)
mtx_merged = sum([data_hic.dataDict[exp_id] for exp_id in experiment_ids])
mtx1 = data_hic.dataDict['bulk']
mtx2 = mtx_merged
mtx_tmp = np.triu(mtx1)/np.mean(mtx1) + np.tril(mtx2)/np.mean(mtx2)
plt.figure(figsize=[10,10])
plt.imshow(mtx_tmp[200:500, 200:500], cmap='Blues', interpolation='None', vmax=800)
from matplotlib import gridspec
eig = np.loadtxt('/home/jovyan/DATA/ANNOT/comp_K562_100Kb_chr1.tsv')
bgn = 0
end = 500
fig = plt.figure(figsize=(10,10))
gs = gridspec.GridSpec(2, 1, height_ratios=[20,2])#width_ratios=[2,20],
gs.update(wspace=0.0, hspace=0.0)
ax = plt.subplot(gs[0,0])
ax.matshow(mtx_tmp[bgn:end, bgn:end], cmap='jet', origin='lower', aspect='auto')
ax.set_xticks([])
ax.set_yticks([])
axl = plt.subplot(gs[1,0])#, sharey=ax)
plt.plot(range(end-bgn), eig[bgn:end] )
plt.xlim(0, end-bgn)
plt.xlabel('Eigenvector values')
ticks = range(bgn, end+1, 100)
ticklabels = ['{} Kb'.format(x) for x in ticks]
plt.xticks(ticks, ticklabels)
import lavaburst
mtx = data_hic.dataDict['54-sc-1']
good_bins = mtx.astype(bool).sum(axis=0) > 1 # We have to mask rows/cols if data is missing
gammas=[0.1, 1.0, 5.0, 10.0] # set of parameters gamma for TADs calling
segments_dict = {}
for gam_current in gammas:
print(gam_current)
S = lavaburst.scoring.armatus_score(mtx, gamma=gam_current, binmask=good_bins)
model = lavaburst.model.SegModel(S)
segments = model.optimal_segmentation() # Positions of TADs for input matrix
segments_dict[gam_current] = segments.copy()
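# --- Added summary (a sketch): mean called TAD size per gamma, in Kb
for gam, seg in sorted(segments_dict.items()):
    tad_sizes = (seg[:, 1] - seg[:, 0]) * res
    print(gam, '-> mean TAD size: {:.0f} Kb'.format(tad_sizes.mean()))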
# TADs at different parameters for particular cell (54-sc-1)
A = mtx.copy()
good_bins = A.astype(bool).sum(axis=0) > 0
At = lavaburst.utils.tilt_heatmap(mtx, n_diags=100)
start_tmp = 0
end_tmp = 500
f = plt.figure(figsize=(20, 6))
ax = f.add_subplot(111)
blues = sns.cubehelix_palette(0.4, gamma=0.5, rot=-0.3, dark=0.1, light=0.9, as_cmap=True)
ax.matshow(np.log(At[start_tmp: end_tmp]), cmap=blues)
cmap = mpl.cm.get_cmap('brg')
gammas = segments_dict.keys()
for n, gamma in enumerate(gammas):
segments = segments_dict[gamma]
for a in segments[:-1]:
if a[1]<start_tmp or a[0]>end_tmp:
continue
ax.plot([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp], [0, -(a[1]-a[0])], c=cmap(n/len(gammas)), alpha=0.5)
ax.plot([a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [-(a[1]-a[0]), 0], c=cmap(n/len(gammas)), alpha=0.5)
a = segments[-1]
ax.plot([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp], [0, -(a[1]-a[0])], c=cmap(n/len(gammas)), alpha=0.5, label=gamma)
ax.plot([a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [-(a[1]-a[0]), 0], c=cmap(n/len(gammas)), alpha=0.5)
ax.set_xlim([0,end_tmp-start_tmp])
ax.set_ylim([100,-100])
ax.legend(bbox_to_anchor=(1.1, 1.05))
ax.set_aspect(0.5)
optimal_gammas = {}
for exp_id in experiment_ids:
mtx = data_hic.dataDict[exp_id][0:1000, 0:1000]
good_bins = mtx.astype(bool).sum(axis=0) > 1 # We have to mask rows/cols if data is missing
gammas = np.arange(2, 24, 1)*1000/3250 # Desired set of gammas for testing
means = []
for gam_current in gammas:
S = lavaburst.scoring.armatus_score(mtx, gamma=gam_current, binmask=good_bins)
model = lavaburst.model.SegModel(S)
segments = model.optimal_segmentation() # Positions of TADs for input matrix
tad_lens = segments[:,1]-segments[:,0]
good_lens = (tad_lens>=200/res)&(tad_lens<900/res) # We do not consider too large or too small segments as TADs (900/res bins = 900 Kb; the bare 900 looked like a slip)
means.append(np.mean(tad_lens[good_lens]))
idx = np.argmin(np.abs(np.array(means)-500/res))
opt_mean, opt_gamma = means[idx], gammas[idx]
print(exp_id, opt_mean*res, opt_gamma)
optimal_gammas[exp_id] = opt_gamma
# TADs in single cells compared with merged single-cell data
A = mtx.copy()
good_bins = A.astype(bool).sum(axis=0) > 0
At = lavaburst.utils.tilt_heatmap(mtx_merged, n_diags=100)
start_tmp = 0
end_tmp = 500
f = plt.figure(figsize=(20, 6))
ax = f.add_subplot(111)
ax.matshow(np.log(At[start_tmp: end_tmp]), cmap='Reds')
for n, exp in enumerate(experiment_ids):
A = data_hic.dataDict[exp][bgn:end, bgn:end].copy()
good_bins = A.astype(bool).sum(axis=0) > 0
gamma = optimal_gammas[exp]
S = lavaburst.scoring.modularity_score(A, gamma=gamma, binmask=good_bins)
model = lavaburst.model.SegModel(S)
segments = model.optimal_segmentation()
for a in segments[:-1]:
if a[1]<start_tmp or a[0]>end_tmp:
continue
tad_len = a[1]-a[0]
if (tad_len<200/res)|(tad_len>=900/res):
continue
ax.fill_between([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [0, -(a[1]-a[0]), 0], 0,
facecolor='#6100FF', interpolate=True, alpha=0.2)
a = segments[-1]
tad_len = a[1]-a[0]
if (tad_len<200/res)|(tad_len>=900/res):
continue
ax.fill_between([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [0, -(a[1]-a[0]), 0], 0,
facecolor='#6100FF', interpolate=True, alpha=0.2)
ax.set_xlim([start_tmp,end_tmp])
ax.set_ylim([100,-100])
ax.set_aspect(0.5)
# TADs in single cells compared with bulk Hi-C data
A = mtx.copy()
good_bins = A.astype(bool).sum(axis=0) > 0
At = lavaburst.utils.tilt_heatmap(data_hic.dataDict['bulk'], n_diags=100)
start_tmp = 0
end_tmp = 300
f = plt.figure(figsize=(20, 6))
ax = f.add_subplot(111)
ax.matshow(np.log(At[start_tmp: end_tmp]), cmap='Reds')
for n, exp in enumerate(experiment_ids):
A = data_hic.dataDict[exp][bgn:end, bgn:end].copy()
good_bins = A.astype(bool).sum(axis=0) > 0
gamma = optimal_gammas[exp]
S = lavaburst.scoring.modularity_score(A, gamma=gamma, binmask=good_bins)
model = lavaburst.model.SegModel(S)
segments = model.optimal_segmentation()
for a in segments[:-1]:
if a[1]<start_tmp or a[0]>end_tmp:
continue
tad_len = a[1]-a[0]
if (tad_len<200/res)|(tad_len>=900/res):
continue
ax.fill_between([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [0, -(a[1]-a[0]), 0], 0,
facecolor='#6100FF', interpolate=True, alpha=0.2)
a = segments[-1]
tad_len = a[1]-a[0]
if (tad_len<200/res)|(tad_len>=900/res):
continue
ax.fill_between([a[0]-start_tmp, a[0]+(a[1]-a[0])/2-start_tmp, a[1]-start_tmp], [0, -(a[1]-a[0]), 0], 0,
facecolor='#6100FF', interpolate=True, alpha=0.2)
ax.set_xlim([start_tmp,end_tmp])
ax.set_ylim([100,-100])
ax.set_aspect(0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
Step2: We'll just add an 'lc' dataset
Step3: Relevant Parameters
Step4: ld_mode_bol = 'lookup'
Step5: To access the interpolated values that will be used under-the-hood, we can call b.compute_ld_coeffs.
Step6: ld_mode_bol = 'manual'
Step7: All other limb-darkening parameters (ld_mode, ld_func, ld_coeffs_source, and ld_coeffs) are per-component and per-dataset parameters with context='dataset'.
Step8: ld_mode = 'interp'
Step9: Note that ld_func, ld_coeffs_source, and ld_coeffs aren't visible (relevant) if ld_mode is set to 'interp' (which it is by default).
Step10: ld_mode = 'lookup'
Step11: To manually select an available atmosphere table, you can choose some other value ('ck2004', for example).
Step12: Although not necessary, we can access the interpolated coefficients by calling b.compute_ld_coeffs.
Step13: ld_mode = 'manual'
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.2,<2.3"
%matplotlib inline
import phoebe
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('lc', times=phoebe.linspace(0,1,101), dataset='lc01')
print(b['ld_mode_bol@primary'])
print(b['ld_func_bol@primary'])
print(b['ld_func_bol@primary'].choices)
print(b['ld_coeffs_source_bol@primary'])
b.compute_ld_coeffs(dataset='bol')
b.set_value_all('ld_mode_bol', value='manual')
print(b['ld_coeffs_bol@primary'])
print(b.filter(qualifier='ld_mode', dataset='lc01'))
print(b.get_parameter(qualifier='ld_mode', dataset='lc01', component='primary').choices)
print(b.filter(qualifier='ld*', dataset='lc01'))
b.set_value(qualifier='ld_mode', dataset='lc01', component='primary', value='lookup')
print(b.filter(qualifier='ld*', dataset='lc01', component='primary'))
print(b.get_parameter(qualifier='ld_coeffs_source', dataset='lc01', component='primary'))
print(b.compute_ld_coeffs(dataset='lc01'))
b.set_value(qualifier='ld_mode', dataset='lc01', component='primary', value='manual')
print(b.filter(qualifier='ld*', dataset='lc01', component='primary'))
print(b.get_parameter(qualifier='ld_coeffs', dataset='lc01', component='primary'))
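# (Illustrative) with ld_mode='manual' the coefficients are supplied by hand;
# the values below are placeholders, assuming the default two-coefficient
# 'logarithmic' ld_func shown above.
b.set_value(qualifier='ld_coeffs', dataset='lc01', component='primary', value=[0.5, 0.5])
print(b.get_value(qualifier='ld_coeffs', dataset='lc01', component='primary'))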
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: File Paths and Communities
Step2: Import Reference Databases
Step3: Amplicon and Read Extraction
Step4: Find Class Weights
Step5: Classifier Parameter Sweeps
Step6: Classifier fitting scripts
Step7: Additional files for tax-credit
|
<ASSISTANT_TASK:>
Python Code:
import glob
from os.path import join
import os
import csv
import shutil
import json
from itertools import product
from qiime2 import Artifact
from qiime2.plugins import feature_classifier
from q2_types.feature_data import DNAIterator
from q2_feature_classifier.classifier import \
spec_from_pipeline, pipeline_from_spec, _register_fitter
from pandas import DataFrame, Series
from tax_credit.framework_functions import \
generate_per_method_biom_tables, move_results_to_repository
project_dir = join('..', '..')
analysis_name = 'mock-community'
data_dir = join(project_dir, 'data', analysis_name)
precomputed_dir = join(project_dir, 'data', 'precomputed-results', analysis_name)
ref_db_dir = join(project_dir, 'data', 'ref_dbs')
gg_db = join(ref_db_dir, 'gg_13_8_otus/99_otus.fasta')
gg_tax = join(ref_db_dir, 'gg_13_8_otus/99_otu_taxonomy.txt')
unite_db = join(ref_db_dir, 'unite_20.11.2016/sh_refs_qiime_ver7_99_20.11.2016_dev_clean.fasta')
unite_tax = join(ref_db_dir, 'unite_20.11.2016/sh_taxonomy_qiime_ver7_99_20.11.2016_dev_clean.tsv')
results_dir = join(project_dir, 'temp_results')
if not os.path.exists(results_dir):
os.makedirs(results_dir)
mock_dirs = ['mock-' + str(m) for m in (3, 12, 18, 22, 24, '26-ITS1', '26-ITS9')]
ref_dest = 'ref_dbs'
refs = {m: join(ref_dest, '99_gg_seq.qza') for m in mock_dirs[:4]}
refs.update({m: join(ref_dest, '99_unite_seq.qza') for m in mock_dirs[4:]})
taxs = {m: join(ref_dest, '99_gg_tax.qza') for m in mock_dirs[:4]}
taxs.update({m: join(ref_dest, '99_unite_tax.qza') for m in mock_dirs[4:]})
if not os.path.exists(join(results_dir, ref_dest)):
os.makedirs(join(results_dir, ref_dest))
ref = Artifact.import_data('FeatureData[Sequence]', gg_db)
ref.save(join(results_dir, refs['mock-3']))
with open(gg_tax) as fh:
reader = csv.reader(fh, delimiter='\t')
data = {k:v for k, v in reader}
data = Series(data, name='Taxon')
data.index.name='Feature ID'
tax = Artifact.import_data('FeatureData[Taxonomy]', data)
tax.save(join(results_dir, taxs['mock-3']))
ref = Artifact.import_data('FeatureData[Sequence]', unite_db)
ref.save(join(results_dir, refs['mock-24']))
with open(unite_tax) as fh:
reader = csv.reader(fh, delimiter='\t')
data = {k:v for k, v in reader}
data = Series(data, name='Taxon')
data.index.name='Feature ID'
tax = Artifact.import_data('FeatureData[Taxonomy]', data)
tax.save(join(results_dir, taxs['mock-24']));
def guess_read_length(seqs):
seqs = Artifact.load(seqs)
lengths = [len(s) for s in seqs.view(DNAIterator)]
lengths.sort()
return lengths[len(lengths)//2]
def load_primers(primer_file):
with open(primer_file) as csvfile:
data = next(csv.DictReader(csvfile, delimiter='\t'))
return data['LinkerPrimerSequence'], data['ReversePrimer']
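# Example (illustrative): load_primers pulls the primer pair from the first row
# of a QIIME-style mapping file, e.g.
# f_primer, r_primer = load_primers(join(data_dir, 'mock-3', 'sample-metadata.tsv'))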
ref_dbs = {}
for mock in mock_dirs:
mockdir = join(data_dir, mock)
repseqs = join(mockdir, 'rep_seqs.qza')
if 'gg' in refs[mock]:
db_name = 'gg_13_8_otus_full'
else:
db_name = 'unite_20.11.2016_clean_full'
ref_dbs[mock] = [(db_name, refs[mock])]
if not os.path.exists(repseqs):
continue
primerfile = join(mockdir, 'sample-metadata.tsv')
primers = list(load_primers(primerfile))
if primers[0] == 'CCGTGCCAGCMGCCGCGGTAA':
primers[0] = 'GTGCCAGCMGCCGCGGTAA'
elif 'I' in primers[0]:
primers[0] = primers[0].replace('I', 'N')
readlength = guess_read_length(repseqs)
print(os.path.basename(mockdir), str(readlength), *primers)
ref = Artifact.load(join(results_dir, refs[mock]))
db_file = '_'.join(
[refs[mock].rsplit('.',1)[0], str(readlength)] +
list(primers)) + '.qza'
if 'gg' in refs[mock]:
db_name = 'gg_13_8_otus_read'
else:
db_name = 'unite_20.11.2016_clean_read'
ref_dbs[mock].append((db_name, db_file))
db_file = join(results_dir, db_file)
if not os.path.exists(db_file):
trimmed = feature_classifier.methods.extract_reads(
sequences=ref, trunc_len=readlength,
f_primer=primers[0], r_primer=primers[1]).reads
trimmed.save(db_file)
db_file = '_'.join(
[refs[mock].rsplit('.',1)[0]] +
list(primers)) + '.qza'
if 'gg' in refs[mock]:
db_name = 'gg_13_8_otus_amplicon'
else:
db_name = 'unite_20.11.2016_clean_amplicon'
ref_dbs[mock].append((db_name, db_file))
db_file = join(results_dir, db_file)
if not os.path.exists(db_file):
trimmed = feature_classifier.methods.extract_reads(
sequences=ref, f_primer=primers[0], r_primer=primers[1]).reads
trimmed.save(db_file)
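# Quick summary (illustrative) of which reference databases were prepared per mock community:
for mock_id in sorted(ref_dbs):
    print(mock_id, [name for name, _ in ref_dbs[mock_id]])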
weights_dest = 'weights'
if not os.path.exists(join(results_dir, weights_dest)):
os.makedirs(join(results_dir, weights_dest))
priors_files = {}
for mock in mock_dirs:
print(mock)
mockdir = join(data_dir, mock)
for db_name, db_file in ref_dbs[mock]:
print(db_name)
tax_weights = Artifact.load(join(results_dir, taxs[mock]))
seq_ids = Artifact.load(join(results_dir, db_file))
seq_ids = {s.metadata['id'] for s in seq_ids.view(DNAIterator)}
tax_weights = tax_weights.view(Series)
tax_weights = {tax_weights[sid]:0. for sid in tax_weights.index
if sid in seq_ids}
weights = Artifact.load(join(mockdir, 'feature_table.qza'))
weights = weights.view(DataFrame)
if len(weights.index) > 1:
weights = {s:sum(weights.loc[s]) for s in weights.index}
total = sum(weights.values())
weights = {s:w/total for s, w in weights.items()}
else:
weights = {weights.index[0]: 1.}
et_path = join(precomputed_dir, mock)
if db_name.startswith('gg_13_8_otus'):
et_path = join(et_path, 'gg_13_8_otus')
else:
et_path = join(et_path, 'unite_20.11.2016_clean_fullITS')
et_path = join(et_path, 'expected', 'expected-taxonomy.tsv')
with open(et_path) as tf:
reader = csv.DictReader(tf, delimiter='\t')
for row in reader:
tax = row['Taxonomy']
weight = sum(weights[s]*float(row[s]) for s in weights)
try:
tax_weights[tax] += weight
except KeyError:
species = {t for t in tax_weights if t.startswith(tax)}
if len(species) == 0:
print(tax)
else:
for s in species:
tax_weights[s] += weight/len(species)
for tax in tax_weights:
if tax_weights[tax] < 1e-9:
tax_weights[tax] = 1e-9
total = sum(tax_weights.values())
weights = [tax_weights[t]/total for t in sorted(tax_weights)]
filename = mock + '-' + db_name + '-weights.json'
weights_file = join(weights_dest, filename)
priors_files[mock] = priors_files.get(mock, {})
priors_files[mock][db_name] = weights_file
weights_file = join(results_dir, weights_file)
with open(weights_file, 'w') as wf:
json.dump(weights, wf)
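# Sanity check (illustrative): the most recently written weights file should
# hold a probability vector summing to ~1 after the normalization above.
with open(weights_file) as wf:
    check = json.load(wf)
assert abs(sum(check) - 1.0) < 1e-6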
nb_extra_sweep = \
{'feat-ext--n-features': [1024, 8192, 65536],
'feat-ext--ngram-range': [[4,4], [8, 8], [16, 16], [4,16]],
'norm--norm': ['l1', 'l2', None],
'norm--use-idf': [True, False],
'classify--alpha': [0.001, 0.01, 0.1],
'classify--class-prior': ['uniform', 'prior']}
classifier_params = {'nb-extra': nb_extra_sweep}
confidences = [0., 0.2, 0.4, 0.6, 0.8]
def get_classifier_command(method, inputs, params, priors):
cmd = ['qiime feature-classifier fit-classifier-' + method]
cls = [method]
for param in sorted(inputs):
value = inputs[param]
cmd.extend(['--i-' + param, value])
cls.append(os.path.basename(value).split('.')[0])
for param in sorted(params):
value = params[param]
if value == 'prior':
cls.append(os.path.basename(priors).split('.')[0])
else:
cls.append(str(value).replace(' ',''))
if type(value) is bool:
cmd.append('--p-' + ('' if value else 'no-') + param)
continue
if 'class-prior' not in param:
value = json.dumps(value)
if value[0] != '"' or value[-1] != '"':
value = '"' + value + '"'
cmd.extend(['--p-' + param, value])
continue
if value == 'uniform':
continue
cmd.extend(['--p-' + param, '"`cat ' + priors + '`"'])
cls = ':'.join(cls) + '.qza'
cls = os.path.sep + join('state', 'partition1', 'tmp', 'classifiers', cls)
cmd.extend(['--o-classifier', '"' + cls + '"'])
cmd = ' '.join(cmd)
return cls, cmd
def get_classify_command(classifier, reads, params,
confidence, directory, results_dir):
cmd = ['qiime feature-classifier classify-sklearn']
cmd.extend(['--i-classifier', '"' + classifier + '"'])
cmd.extend(['--i-reads', reads])
cmd.extend(['--p-confidence', str(confidence)])
parameters = [str(params[p]).replace(' ', '') for p in sorted(params)]
parameters.append(str(confidence))
output_directory = join(directory, ':'.join(parameters))
if not os.path.exists(join(results_dir, output_directory)):
os.makedirs(join(results_dir, output_directory))
output = join(output_directory, 'rep_seqs_tax_assignments.qza')
cmd.extend(['--o-classification', '"' + output + '"'])
return output, ' '.join(cmd)
def get_combinations(params):
params, values = zip(*params.items())
for combination in product(*values):
yield dict(zip(params, combination))
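# Example (illustrative): get_combinations expands a sweep dict into individual
# parameter settings, e.g.
# list(get_combinations({'a': [1, 2], 'b': ['x']}))
# -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]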
if not os.path.exists(join(results_dir, 'classifiers')):
os.makedirs(join(results_dir, 'classifiers'))
classifier_commands = set()
classify_commands = []
classifiers = set()
classifications = []
for mock in mock_dirs:
reads = join('..', 'data', 'mock-community', mock, 'rep_seqs.qza')
mock_directory = join('classifications', mock)
inputs = {'reference-taxonomy': taxs[mock]}
for db_name, db_file in ref_dbs[mock]:
db_directory = join(mock_directory, db_name)
inputs['reference-reads'] = db_file
for method in classifier_params:
method_directory = join(db_directory, method)
for params in get_combinations(classifier_params[method]):
priors = priors_files[mock][db_name]
classifier, command = get_classifier_command(method, inputs, params, priors)
classifier_commands.add(command)
classifiers.add(classifier)
for confidence in confidences:
classification, command = get_classify_command(
classifier, reads, params, confidence,
method_directory, results_dir)
classifications.append(classification)
classify_commands.append(command)
# write out the commands
with open(join(results_dir, 'classifier_commands.sh'), 'w') as cmds:
for cmd in classifier_commands:
cmds.write(cmd + '\n')
with open(join(results_dir, 'classify_commands.sh'), 'w') as cmds:
for cmd in classify_commands:
cmds.write(cmd + '\n')
bad_classifications = []
for classification in classifications:
full_classification = join(results_dir, classification)
output_dir = os.path.dirname(full_classification)
taxonomy_map_fp = join(output_dir, 'taxonomy.tsv')
if not os.path.exists(taxonomy_map_fp):
try:
Artifact.load(full_classification).export_data(output_dir)
except ValueError:
bad_classifications.append(classification)
print(len(bad_classifications), "bad classifications")
bc_combinations = None
for bc in bad_classifications:
if '[4,16]' not in bc:
print(bc)
continue
sbc = []
for tbc in bc.split(os.path.sep):
sbc.extend(tbc.split(':'))
if bc_combinations is None:
bc_combinations = [{tbc} for tbc in sbc]
else:
for tbc, bcc in zip(sbc, bc_combinations):
bcc.add(tbc)
print(bc_combinations)
taxonomy_glob = join(results_dir, 'classifications', 'mock-*', '*', 'nb-extra', '*', 'taxonomy.tsv')
generate_per_method_biom_tables(taxonomy_glob, data_dir)
precomputed_results_dir = join(project_dir, "data", "precomputed-results", analysis_name)
method_dirs = glob.glob(join(results_dir, 'classifications', 'mock-*', '*', 'nb-extra', '*'))
move_results_to_repository(method_dirs, precomputed_results_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the example data files if we don't already have them.
Step2: The XMM MOS2 image
Step3: imfits is a FITS object, containing multiple data structures. The image itself is an array of integer type, and size 648x648 pixels, stored in the primary "header data unit" or HDU.
Step4: Let's look at this with ds9.
Step5: If you don't have the image viewing tool ds9, you should install it - it's very useful astronomical software. You can download it (later!) from this webpage.
Step6: Exercise
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import astropy.io.fits as pyfits
import numpy as np
import os
try:
    from urllib.request import urlretrieve  # Python 3
except ImportError:
    from urllib import urlretrieve  # Python 2
import astropy.visualization as viz
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 10.0)
targdir = 'a1835_xmm'
if not os.path.isdir(targdir):
os.mkdir(targdir)
filenames = ('P0098010101M2U009IMAGE_3000.FTZ',
'P0098010101M2U009EXPMAP3000.FTZ',
'P0098010101M2X000BKGMAP3000.FTZ')
remotedir = 'http://heasarc.gsfc.nasa.gov/FTP/xmm/data/rev0/0098010101/PPS/'
for filename in filenames:
path = os.path.join(targdir, filename)
url = os.path.join(remotedir, filename)
if not os.path.isfile(path):
urlretrieve(url, path)
imagefile, expmapfile, bkgmapfile = [os.path.join(targdir, filename) for filename in filenames]
for filename in os.listdir(targdir):
print('{0:>10.2f} KB {1}'.format(os.path.getsize(os.path.join(targdir, filename))/1024.0, filename))
imfits = pyfits.open(imagefile)
imfits.info()
im = imfits[0].data
!ds9 -log "$imagefile"
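# The savefig call below writes into a "figures" directory; create it first if
# it does not exist (assuming one is not already present).
if not os.path.isdir("figures"):
    os.mkdir("figures")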
plt.imshow(viz.scale_image(im, scale='log', max_cut=40), cmap='gray', origin='lower');
plt.savefig("figures/cluster_image.png")
im[350:359,350:359]
index = np.unravel_index(im.argmax(), im.shape)
print("image dimensions:",im.shape)
print("location of maximum pixel value:",index)
print("maximum pixel value: ",im[index])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Select file(s) to be processed
Step2: Setup a cluster
Step3: Setup some parameters
Step4: Motion Correction
Step5: Load memory mapped file
Step6: Parameter setting for CNMF-E
Step7: Inspect summary images and set parameters
Step8: You can inspect the correlation and PNR images to select the threshold values for min_corr and min_pnr. The algorithm will look for components only in places where these value are above the specified thresholds. You can adjust the dynamic range in the plots shown above by choosing the selection tool (third button from the left) and selecting the desired region in the histogram plots on the right of each panel.
Step9: Run the CNMF-E algorithm
Step10: Alternate way to run the pipeline at once
Step11: Component Evaluation
Step12: Do some plotting
Step13: View traces of accepted and rejected components. Note that if you get a data rate error you can start Jupyter notebooks using 'jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10'
Step14: Stop cluster
Step15: Some instructive movies
|
<ASSISTANT_TASK:>
Python Code:
try:
get_ipython().magic(u'load_ext autoreload')
get_ipython().magic(u'autoreload 2')
get_ipython().magic(u'matplotlib qt')
except:
pass
import logging
import matplotlib.pyplot as plt
import numpy as np
logging.basicConfig(format=
"%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
# filename="/tmp/caiman.log",
level=logging.DEBUG)
import caiman as cm
from caiman.source_extraction import cnmf
from caiman.utils.utils import download_demo
from caiman.utils.visualization import inspect_correlation_pnr, nb_inspect_correlation_pnr
from caiman.motion_correction import MotionCorrect
from caiman.source_extraction.cnmf import params as params
from caiman.utils.visualization import plot_contours, nb_view_patches, nb_plot_contour
import cv2
try:
cv2.setNumThreads(0)
except:
pass
import bokeh.plotting as bpl
import holoviews as hv
bpl.output_notebook()
hv.notebook_extension('bokeh')
fnames = ['data_endoscope.tif'] # filename to be processed
fnames = [download_demo(fnames[0])]
#%% start a cluster for parallel processing (if a cluster already exists it will be closed and a new session will be opened)
if 'dview' in locals():
cm.stop_server(dview=dview)
c, dview, n_processes = cm.cluster.setup_cluster(
backend='local', n_processes=None, single_thread=False)
# dataset dependent parameters
frate = 10 # movie frame rate
decay_time = 0.4 # length of a typical transient in seconds
# motion correction parameters
motion_correct = True # flag for performing motion correction
pw_rigid = False # flag for performing piecewise-rigid motion correction (otherwise just rigid)
gSig_filt = (3, 3) # size of high pass spatial filtering, used in 1p data
max_shifts = (5, 5) # maximum allowed rigid shift
strides = (48, 48) # start a new patch for pw-rigid motion correction every x pixels
overlaps = (24, 24) # overlap between pathes (size of patch strides+overlaps)
max_deviation_rigid = 3 # maximum deviation allowed for patch with respect to rigid shifts
border_nan = 'copy' # replicate values along the boundaries
mc_dict = {
'fnames': fnames,
'fr': frate,
'decay_time': decay_time,
'pw_rigid': pw_rigid,
'max_shifts': max_shifts,
'gSig_filt': gSig_filt,
'strides': strides,
'overlaps': overlaps,
'max_deviation_rigid': max_deviation_rigid,
'border_nan': border_nan
}
opts = params.CNMFParams(params_dict=mc_dict)
if motion_correct:
# do motion correction rigid
mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
mc.motion_correct(save_movie=True)
fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig
if pw_rigid:
bord_px = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)),
np.max(np.abs(mc.y_shifts_els)))).astype(int)
else:
bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int)
plt.subplot(1, 2, 1); plt.imshow(mc.total_template_rig) # % plot template
plt.subplot(1, 2, 2); plt.plot(mc.shifts_rig) # % plot rigid shifts
plt.legend(['x shifts', 'y shifts'])
plt.xlabel('frames')
plt.ylabel('pixels')
bord_px = 0 if border_nan == 'copy' else bord_px
fname_new = cm.save_memmap(fname_mc, base_name='memmap_', order='C',
border_to_0=bord_px)
else: # if no motion correction just memory map the file
fname_new = cm.save_memmap(fnames, base_name='memmap_',
order='C', border_to_0=0, dview=dview)
# load memory mappable file
Yr, dims, T = cm.load_memmap(fname_new)
images = Yr.T.reshape((T,) + dims, order='F')
# parameters for source extraction and deconvolution
p = 1 # order of the autoregressive system
K = None # upper bound on number of components per patch, in general None
gSig = (3, 3) # gaussian width of a 2D gaussian kernel, which approximates a neuron
gSiz = (13, 13) # average diameter of a neuron, in general 4*gSig+1
Ain = None # possibility to seed with predetermined binary masks
merge_thr = .7 # merging threshold, max correlation allowed
rf = 40 # half-size of the patches in pixels. e.g., if rf=40, patches are 80x80
stride_cnmf = 20 # amount of overlap between the patches in pixels
# (keep it at least large as gSiz, i.e 4 times the neuron size gSig)
tsub = 2 # downsampling factor in time for initialization,
# increase if you have memory problems
ssub = 1 # downsampling factor in space for initialization,
# increase if you have memory problems
# you can pass them here as boolean vectors
low_rank_background = None # None leaves background of each patch intact,
# True performs global low-rank approximation if gnb>0
gnb = 0 # number of background components (rank) if positive,
# else exact ring model with following settings
# gnb= 0: Return background as b and W
# gnb=-1: Return full rank background B
# gnb<-1: Don't return background
nb_patch = 0 # number of background components (rank) per patch if gnb>0,
# else it is set automatically
min_corr = .8 # min peak value from correlation image
min_pnr = 10 # min peak to noise ration from PNR image
ssub_B = 2 # additional downsampling factor in space for background
ring_size_factor = 1.4 # radius of ring is gSiz*ring_size_factor
opts.change_params(params_dict={'method_init': 'corr_pnr', # use this for 1 photon
'K': K,
'gSig': gSig,
'gSiz': gSiz,
'merge_thr': merge_thr,
'p': p,
'tsub': tsub,
'ssub': ssub,
'rf': rf,
'stride': stride_cnmf,
'only_init': True, # set it to True to run CNMF-E
'nb': gnb,
'nb_patch': nb_patch,
'method_deconvolution': 'oasis', # could use 'cvxpy' alternatively
'low_rank_background': low_rank_background,
'update_background_components': True, # sometimes setting to False improve the results
'min_corr': min_corr,
'min_pnr': min_pnr,
'normalize_init': False, # just leave as is
'center_psf': True, # leave as is for 1 photon
'ssub_B': ssub_B,
'ring_size_factor': ring_size_factor,
'del_duplicates': True, # whether to remove duplicates from initialization
'border_pix': bord_px}) # number of pixels to not consider in the borders)
# compute some summary images (correlation and peak to noise)
cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False) # change swap dim if output looks weird, it is a problem with tiffile
# inspect the summary images and set the parameters
nb_inspect_correlation_pnr(cn_filter, pnr)
# print parameters set above, modify them if necessary based on summary images
print(min_corr) # min correlation of peak (from correlation image)
print(min_pnr) # min peak to noise ratio
cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts)
cnm.fit(images)
# cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview)
# cnm1.fit_file(motion_correct=motion_correct)
#%% COMPONENT EVALUATION
# the components are evaluated in three ways:
# a) the shape of each component must be correlated with the data
# b) a minimum peak SNR is required over the length of a transient
# c) each shape passes a CNN based classifier
min_SNR = 3 # adaptive way to set threshold on the transient size
r_values_min = 0.85 # threshold on space consistency (if you lower more components
# will be accepted, potentially with worst quality)
cnm.params.set('quality', {'min_SNR': min_SNR,
'rval_thr': r_values_min,
'use_cnn': False})
cnm.estimates.evaluate_components(images, cnm.params, dview=dview)
print(' ***** ')
print('Number of total components: ', len(cnm.estimates.C))
print('Number of accepted components: ', len(cnm.estimates.idx_components))
#%% plot contour plots of accepted and rejected components
cnm.estimates.plot_contours_nb(img=cn_filter, idx=cnm.estimates.idx_components)
# accepted components
cnm.estimates.hv_view_components(img=cn_filter, idx=cnm.estimates.idx_components,
denoised_color='red', cmap='gray')
# rejected components
cnm.estimates.hv_view_components(img=cn_filter, idx=cnm.estimates.idx_components_bad,
denoised_color='red', cmap='gray')
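# (Optional, illustrative) persist the fitted model and its estimates for later reuse:
cnm.save('analysis_results.hdf5')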
cm.stop_server(dview=dview)
# with background
cnm.estimates.play_movie(images, q_max=99.5, magnification=2,
include_bck=True, gain_res=10, bpx=bord_px)
# without background
cnm.estimates.play_movie(images, q_max=99.9, magnification=2,
include_bck=False, gain_res=4, bpx=bord_px)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: naive
Step2: even use pandas
|
<ASSISTANT_TASK:>
Python Code:
import tables as tb
from pandas import DataFrame
# index
# index1 = table.cols.date.create_index()
# read the whole table into a NumPy structured array (the df.head-style view comes later)
tbf = tb.open_file("/global/scratch/ryee/symbol_count/agg_count.h5", "a")
table = tbf.root.count_table.read()
table['count'].sum()
df = DataFrame(table)
df.head()
df["count"].sum()
cdf = DataFrame(df["date"].value_counts(), columns=['c'])
cdf['ts']= cdf.index
cdf.head()
%matplotlib inline
cdf.plot(x='ts', y='c', kind='scatter')
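# Close the HDF5 file now that we are done with it (it was opened in append mode above).
tbf.close()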
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we load the data, in this case a DICOM image set and a GE P-file. For the DICOM volume we pass a single file and the containing folder is processed to collect any other files from the same series, these are then collected into a 3D volume in a suspect.ImageBase class, which provides access to a lot of useful functionality around transforms and co-registering. In this case the exam is a single voxel PRESS in the hippocampus, which is convenient as it is a double oblique and very oblong voxel.
Step2: We start by constructing a voxel mask, passing in the spectroscopy data which defines the excitation region and an image volume which defines the coordinate system for the mask. For display purposes in Matplotlib we then create a "masked" mask using the Numpy MaskedArray class, so that the mask will only be displayed where the mask is true.
Step3: To draw the voxel outline we must define a set of vertices defining a path which will trace all the edges of the voxel. The list below is the most efficient ordering I can come up with, but feel free to raise an issue on GitHub if you can think of a better one. I am planning to bring this inside Suspect as a property of the MRSBase class so that it is readily available. Note that because this is in the MRS coordinates, the corners of the voxel are universal, irrespective of the size and shape of the voxel.
Step4: Here we transform the coordinate system into the coordinates of the image volume. First we use the to_scanner() method of the MRS data to move the locations of the voxel corners into the scanner space, then use image.from_scanner() to bring them into the coordinates of the image volume, which are the voxel indices.
Step5: The final bit of preparation we are doing here is to calculate where the centre of the MRS voxel is located in the image volume so we know which slice (in each plane) we need to render at the centre of the voxel. Note that coordinates are a bit tricky here - data is organised on the scanner with slices as the outer loop, then columns and then rows, conceptually that means that the z coordinate is the first index into the array, then y and then x. However all manipulation of coordinates in transforms assumes the more conventional ordering of (x, y, z). Thus we find that the centre of the voxel is at slice 58, for example.
Step6: Now we are ready to actually render the voxels overlaid with the masks and/or voxel outlines. The anatomical scan we started with was a sagittal volume, but of course we can easily extract orthogonal slices either axially or coronally, and in this case the scan is isotropic so we don't even have to worry about rectangular pixels. We do however need to transpose the axial and coronal images to display them in the conventional orientation.
Step7: Because the voxel in this case is double oblique it can be difficult to get a good sense of its orientation from either the outline or the mask through a single slice. To help with this problem we can use the resample() function to adjust the orientation of the image so that the image direction vectors align with those of the spectroscopy voxel.
Step8: We could also resample the mask but that would introduce partial volume effects, so instead we create a new mask from the resampled image. We also get the centre of the voxel in this new, resampled image volume.
Step9: We now replot the same 3 orthogonal slices as before, but using the resampled image, which puts the spectroscopy voxel perfectly aligned with the image planes.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import suspect
image = suspect.image.load_dicom_volume("00004_MPRAGE/i5881167.MRDC.15.img")
data, wref = suspect.io.load_pfile("P75264.e02941.s00007.7")
voxel_mask = suspect.image.create_mask(data, image)
masked_mask = np.ma.masked_where(voxel_mask == False, voxel_mask)
corner_coords_voxel = [[-0.5, -0.5, -0.5],
[0.5, -0.5, -0.5],
[0.5, 0.5, -0.5],
[-0.5, 0.5, -0.5],
[-0.5, -0.5, -0.5],
[-0.5, -0.5, 0.5],
[0.5, -0.5, 0.5],
[0.5, -0.5, -0.5],
[0.5, -0.5, 0.5],
[0.5, 0.5, 0.5],
[0.5, 0.5, -0.5],
[0.5, 0.5, 0.5],
[-0.5, 0.5, 0.5],
[-0.5, 0.5, -0.5],
[-0.5, 0.5, 0.5],
[-0.5, -0.5, 0.5]]
corner_coords = np.array([image.from_scanner(*data.to_scanner(*coord)) for coord in corner_coords_voxel])
voxel_centre_in_image = image.from_scanner(data.position).round().astype(int)
print(voxel_centre_in_image)
fig, (ax_sag, ax_ax, ax_cor) = plt.subplots(1, 3, figsize=(15, 5))
ax_sag.axis('off')
ax_ax.axis('off')
ax_cor.axis('off')
ax_sag.imshow(image[voxel_centre_in_image[2]], cmap=plt.cm.gray, vmax=1400)
ax_ax.imshow(image[:, voxel_centre_in_image[1]].T, cmap=plt.cm.gray, vmax=1400)
ax_cor.imshow(image[:, :, voxel_centre_in_image[0]].T, cmap=plt.cm.gray, vmax=1400)
ax_sag.imshow(masked_mask[voxel_centre_in_image[2]], cmap=plt.cm.viridis_r, alpha=0.5)
ax_ax.imshow(masked_mask[:, voxel_centre_in_image[1]].T, cmap=plt.cm.viridis_r, alpha=0.5)
ax_cor.imshow(masked_mask[:, :, voxel_centre_in_image[0]].T, cmap=plt.cm.viridis_r, alpha=0.5)
ax_sag.plot(corner_coords[:, 0], corner_coords[:, 1])
ax_ax.plot(corner_coords[:, 2], corner_coords[:, 0])
ax_cor.plot(corner_coords[:, 2], corner_coords[:, 1])
res_image = image.resample(data.coronal_vector,
data.axial_vector,
image.shape,
image.centre,
(1, -1, 1),
)
mask_res = suspect.image.create_mask(data, res_image)
masked_mask_res = np.ma.masked_where(mask_res == False, mask_res)
voxel_centre_in_res_image = res_image.from_scanner(data.position).round().astype(int)
print(voxel_centre_in_res_image)
fig, (ax_sag, ax_ax, ax_cor) = plt.subplots(1, 3, figsize=(15, 5))
ax_sag.axis('off')
ax_ax.axis('off')
ax_cor.axis('off')
ax_sag.imshow(res_image[voxel_centre_in_res_image[2]], cmap=plt.cm.gray, vmax=1400)
ax_ax.imshow(res_image[:, voxel_centre_in_res_image[1]].T, cmap=plt.cm.gray, vmax=1400)
ax_cor.imshow(res_image[:, :, voxel_centre_in_res_image[0]].T, cmap=plt.cm.gray, vmax=1400)
ax_sag.imshow(masked_mask_res[voxel_centre_in_res_image[2]], cmap=plt.cm.viridis_r, alpha=0.5)
ax_ax.imshow(masked_mask_res[:, voxel_centre_in_res_image[1]].T, cmap=plt.cm.viridis_r, alpha=0.5)
ax_cor.imshow(masked_mask_res[:, :, voxel_centre_in_res_image[0]].T, cmap=plt.cm.viridis_r, alpha=0.5)
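# (Illustrative) the boolean mask can also pull image statistics from inside the
# spectroscopy voxel, e.g. the mean anatomical intensity (assuming the image
# volume supports NumPy-style boolean indexing):
print(res_image[mask_res].mean())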
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-1', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
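# Example (hypothetical; boolean properties take an unquoted value):
# DOC.set_value(True)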
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: WQP
Step2: Read the CSV data into Python.
Step3: Rename columns to match with other data later.
Step4: Define unneeded columns that will be dropped to save memory.
Step5: Define a function to fix funky dates found in the WQP database. This includes impossible dates or dates with too many numbers.
Step6: Drop unwanted fields
Step7: Convert the result and MDL values from string fields to numeric fields.
Step8: Remove station duplicates by removing the preceding 'WQX' found in the station id field.
Step9: Standardize all ug/l data to mg/l by changing out the unit descriptor and dividing by 1000.
Step10: Normalize common nutrients so that they are all in the same type of units. For example, sometimes nitrate is reported "as nitrogen" and sometimes it is reported "as nitrate". The difference between the two types of reporting is a factor of 4.427!
Step11: WQP Stations
Step12: Rename and simplify station data column names for later compilation.
Step13: Define the fields to drop to save memory.
Step14: Make station types in the StationType field consistent for easier summary and compilation later on.
Step15: Remove the preceding WQX from the StationId field to eliminate duplicate station records created by the legacy database.
Step16: SDWIS
Step17: Normalize units and nutrient data so that they are consistent with the WQP data. This includes standardizing ug/l to mg/l
Step18: Drop unneeded SDWIS fields to save memory and reduce confusion.
Step19: Rename chemical parameters in the SDWIS Param field to match those of the WQP data.
Step20: Make station types consistent with the WQP data.
Step21: SDWIS facility type code (FacTypeCode)
Step22: Create the SDWIS results file from the SDWIS data. Drop unneeded fields from the results file; these are station attributes that already live in the stations table.
Step23: Create a sample media field and populate it with the value Groundwater.
Step24: UDAF
Step25: Drop unneeded fields to save memory.
Step26: Add UDAF prefix to the station identification field (StationId) to make station ids unique.
Step27: UDAF Results
Step28: Create a detection condition field and populate it based on values in the imported data.
Step29: Fill null result values with zeros when data are reported as below detection limit.
Step30: Make data consistent by cleaning up parameter descriptions.
Step31: STORET Legacy
Step32: Parse choppy text data from the STORET Legacy database.
Step33: UGS Data
Step34: State Lab
Step35: Combine Data
Step36: Clean Up Non Detects
Step37: Pivot Data
Step38: Add GIS Information
Step39: Convert and Balance Samples
Step40: Subset Data
Step41: Summarize & Plot Data
Step42: Export data
|
<ASSISTANT_TASK:>
Python Code:
#rootname = "/media/p/5F5B-8FCB/PROJECTS/UMAR/Data/chem/" #thumb on ubuntu
rootname = "E:\\PROJECTS\\UMAR\\Data\\chem\\" #thumb on windows
WQPResultsFile = rootname + "result.csv"
WQPStationFile = rootname + "station.csv"
SDWISFile = rootname + "SDWIS_Cache.txt"
AGStationsFile = rootname + "AG_Stations_Cache.csv"
AGResultsFile = rootname + "AG_byparam.csv"
UGSFile = rootname + "UGS_Cache.txt"
STORLegStatFile = rootname + "UT_Cache_sta_001.txt"
STORLegResFile = rootname + "UT_Cache_res_001.txt"
STORParamFile = rootname + "parameter.txt"
fielddata = rootname + "FieldData.xlsx"
statelabresults0 = rootname + "utgs1402.txt"
statelabresults1 = rootname + "utgs1403.txt"
statelabresults2 = rootname + "utgs1501.txt"
statelabstations = rootname + "UtahStateLabStations.xlsx"
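# `wc` below is assumed to be a project-specific helper module (imported
# elsewhere) whose WQP class wraps download/cleanup of WQP result files;
# the cells after this preview re-read the raw csv directly with pandas.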
df = wc.WQP.WQPimportRes(WQPResultsFile)
df = wc.WQP.WQPmassageResults(df)
df
Rdtypes = {"OrganizationIdentifier":np.str_, "OrganizationFormalName":np.str_, "ActivityIdentifier":np.str_,
"ActivityStartTime/Time":np.str_,
"ActivityTypeCode":np.str_, "ActivityMediaName":np.str_, "ActivityMediaSubdivisionName":np.str_,
"ActivityStartDate":np.str_, "ActivityStartTime/Time":np.str_, "ActivityStartTime/TimeZoneCode":np.str_,
"ActivityEndDate":np.str_, "ActivityEndTime/Time":np.str_, "ActivityEndTime/TimeZoneCode":np.str_,
"ActivityDepthHeightMeasure/MeasureValue":np.float16, "ActivityDepthHeightMeasure/MeasureUnitCode":np.str_,
"ActivityDepthAltitudeReferencePointText":np.str_, "ActivityTopDepthHeightMeasure/MeasureValue":np.float16,
"ActivityTopDepthHeightMeasure/MeasureUnitCode":np.str_,
"ActivityBottomDepthHeightMeasure/MeasureValue":np.float16,
"ActivityBottomDepthHeightMeasure/MeasureUnitCode":np.str_,
"ProjectIdentifier":np.str_, "ActivityConductingOrganizationText":np.str_,
"MonitoringLocationIdentifier":np.str_, "ActivityCommentText":np.str_,
"SampleAquifer":np.str_, "HydrologicCondition":np.str_, "HydrologicEvent":np.str_,
"SampleCollectionMethod/MethodIdentifier":np.str_, "SampleCollectionMethod/MethodIdentifierContext":np.str_,
"SampleCollectionMethod/MethodName":np.str_, "SampleCollectionEquipmentName":np.str_,
"ResultDetectionConditionText":np.str_, "CharacteristicName":np.str_, "ResultSampleFractionText":np.str_,
"ResultMeasureValue":np.str_, "ResultMeasure/MeasureUnitCode":np.str_, "MeasureQualifierCode":np.str_,
"ResultStatusIdentifier":np.str_, "StatisticalBaseCode":np.str_, "ResultValueTypeName":np.str_,
"ResultWeightBasisText":np.str_, "ResultTimeBasisText":np.str_, "ResultTemperatureBasisText":np.str_,
"ResultParticleSizeBasisText":np.str_, "PrecisionValue":np.str_, "ResultCommentText":np.str_,
"USGSPCode":np.str_, "ResultDepthHeightMeasure/MeasureValue":np.float16,
"ResultDepthHeightMeasure/MeasureUnitCode":np.str_, "ResultDepthAltitudeReferencePointText":np.str_,
"SubjectTaxonomicName":np.str_, "SampleTissueAnatomyName":np.str_,
"ResultAnalyticalMethod/MethodIdentifier":np.str_, "ResultAnalyticalMethod/MethodIdentifierContext":np.str_,
"ResultAnalyticalMethod/MethodName":np.str_, "MethodDescriptionText":np.str_, "LaboratoryName":np.str_,
"AnalysisStartDate":np.str_, "ResultLaboratoryCommentText":np.str_,
"DetectionQuantitationLimitTypeName":np.str_, "DetectionQuantitationLimitMeasure/MeasureValue":np.str_,
"DetectionQuantitationLimitMeasure/MeasureUnitCode":np.str_, "PreparationStartDate":np.str_,
"ProviderName":np.str_}
dt = [6,56,61]
WQP = pd.read_csv(WQPResultsFile, dtype=Rdtypes, parse_dates=dt)
ResFieldDict = {"AnalysisStartDate":"AnalysisDate", "ResultAnalyticalMethod/MethodIdentifier":"AnalytMeth",
"ResultAnalyticalMethod/MethodName":"AnalytMethId", "ResultDetectionConditionText":"DetectCond",
"ResultLaboratoryCommentText":"LabComments", "LaboratoryName":"LabName",
"DetectionQuantitationLimitTypeName":"LimitType", "DetectionQuantitationLimitMeasure/MeasureValue":"MDL",
"DetectionQuantitationLimitMeasure/MeasureUnitCode":"MDLUnit", "MethodDescriptionText":"MethodDescript",
"OrganizationIdentifier":"OrgId", "OrganizationFormalName":"OrgName", "CharacteristicName":"Param",
"ProjectIdentifier":"ProjectId", "MeasureQualifierCode":"QualCode", "ResultCommentText":"ResultComment",
"ResultStatusIdentifier":"ResultStatus", "ResultMeasureValue":"ResultValue",
"ActivityCommentText":"SampComment", "ActivityDepthHeightMeasure/MeasureValue":"SampDepth",
"ActivityDepthAltitudeReferencePointText":"SampDepthRef",
"ActivityDepthHeightMeasure/MeasureUnitCode":"SampDepthU", "SampleCollectionEquipmentName":"SampEquip",
"ResultSampleFractionText":"SampFrac", "ActivityStartDate":"SampleDate", "ActivityIdentifier":"SampleId",
"ActivityStartTime/Time":"SampleTime", "ActivityMediaSubdivisionName":"SampMedia",
"SampleCollectionMethod/MethodIdentifier":"SampMeth", "SampleCollectionMethod/MethodName":"SampMethName",
"ActivityTypeCode":"SampType", "MonitoringLocationIdentifier":"StationId",
"ResultMeasure/MeasureUnitCode":"Unit", "USGSPCode":"USGSPCode",
"ActivityStartDate":"StartDate","ActivityStartTime/Time":"StartTime"}
WQP.rename(columns=ResFieldDict,inplace=True)
resdroplist = ["ActivityBottomDepthHeightMeasure/MeasureUnitCode", "ActivityBottomDepthHeightMeasure/MeasureValue",
"ActivityConductingOrganizationText", "ActivityEndDate", "ActivityEndTime/Time",
"ActivityEndTime/TimeZoneCode", "ActivityMediaName", "ActivityStartTime/TimeZoneCode",
"ActivityTopDepthHeightMeasure/MeasureUnitCode", "ActivityTopDepthHeightMeasure/MeasureValue",
"HydrologicCondition", "HydrologicEvent", "PrecisionValue", "PreparationStartDate", "ProviderName",
"ResultAnalyticalMethod/MethodIdentifierContext", "ResultDepthAltitudeReferencePointText",
"ResultDepthHeightMeasure/MeasureUnitCode", "ResultDepthHeightMeasure/MeasureValue",
"ResultParticleSizeBasisText", "ResultTemperatureBasisText",
"ResultTimeBasisText", "ResultValueTypeName", "ResultWeightBasisText", "SampleAquifer",
"SampleCollectionMethod/MethodIdentifierContext", "SampleTissueAnatomyName", "StatisticalBaseCode",
"SubjectTaxonomicName","StartTime","StartDate","StartTime","StartDate"]
def datetimefix(x,format):
'''
This script cleans date-time errors
input
x = date-time string
format = format of date-time string
output
formatted datetime type
'''
d = str(x[0]).lstrip().rstrip()[0:10]
t = str(x[1]).lstrip().rstrip()[0:5].zfill(5)
try:
int(d[0:2])
except(ValueError,TypeError,NameError):
return np.nan
try:
int(t[0:2])
int(t[3:5])
except(ValueError,TypeError,NameError):
t = "00:00"
if int(t[0:2])>23:
t = "00:00"
elif int(t[3:5])>59:
t = "00:00"
else:
t = t[0:2].zfill(2) + ":" + t[3:5]
return datetime.datetime.strptime(d + " " + t, format)
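# Quick sanity checks (hypothetical inputs) showing the fallback behavior:
# datetimefix(("2005-06-01", "14:30"), "%Y-%m-%d %H:%M") -> 2005-06-01 14:30
# datetimefix(("2005-06-01", "99:99"), "%Y-%m-%d %H:%M") -> 2005-06-01 00:00 (bad time resets to midnight)
# datetimefix(("nan", "14:30"), "%Y-%m-%d %H:%M")        -> nan (unparseable date)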
WQP["SampleDate"] = WQP[["StartDate","StartTime"]].apply(lambda x: datetimefix(x,"%Y-%m-%d %H:%M"),1)
WQP.drop(resdroplist,inplace=True,axis=1)
# convert_objects is deprecated; pd.to_numeric with errors='coerce' performs
# the same string-to-float coercion (unparseable entries become NaN).
WQP['ResultValue'] = pd.to_numeric(WQP['ResultValue'], errors='coerce')
WQP['MDL'] = pd.to_numeric(WQP['MDL'], errors='coerce')
WQP['StationId'] = WQP['StationId'].str.replace('_WQX-','-')
#standardize all ug/l data to mg/l
def unitfix(x):
z = str(x).lower()
if z == "ug/l":
return "mg/l"
elif z == "mg/l":
return "mg/l"
else:
return x
WQP.Unit = WQP.Unit.apply(lambda x: str(x).rstrip(), 1)
WQP.ResultValue = WQP[["ResultValue","Unit"]].apply(lambda x: x[0]/1000 if str(x[1]).lower()=="ug/l" else x[0], 1)
WQP.Unit = WQP.Unit.apply(lambda x: unitfix(x),1)
def parnorm(x):
p = str(x[0]).rstrip().lstrip().lower()
u = str(x[2]).rstrip().lstrip().lower()
if p == 'nitrate' and u == 'mg/l as n':
return 'Nitrate', x[1]*4.427, 'mg/l'
elif p == 'nitrite' and u == 'mg/l as n':
return 'Nitrite', x[1]*3.285, 'mg/l'
elif p == 'ammonia-nitrogen' or p == 'ammonia-nitrogen as n' or p == 'ammonia and ammonium':
return 'Ammonium', x[1]*1.288, 'mg/l'
elif p == 'ammonium' and u == 'mg/l as n':
return 'Ammonium', x[1]*1.288, 'mg/l'
elif p == 'sulfate as s':
return 'Sulfate', x[1]*2.996, 'mg/l'
elif p in ('phosphate-phosphorus', 'phosphate-phosphorus as p','orthophosphate as p'):
return 'Phosphate', x[1]*3.066, 'mg/l'
elif (p == 'phosphate' or p == 'orthophosphate') and u == 'mg/l as p':
return 'Phosphate', x[1]*3.066, 'mg/l'
elif u == 'ug/l':
return x[0], x[1]/1000, 'mg/l'
else:
return x[0], x[1], str(x[2]).rstrip()
WQP['Param'], WQP['ResultValue'], WQP['Unit'] = zip(*WQP[['Param','ResultValue','Unit']].apply(lambda x: parnorm(x),1))
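# Where the conversion factors above come from (molar-mass ratios, using
# standard atomic weights N=14.007, O=15.999, H=1.008, S=32.06, P=30.974):
# NO3/N = (14.007 + 3*15.999)/14.007  # ~4.427
# NO2/N = (14.007 + 2*15.999)/14.007  # ~3.285
# NH4/N = (14.007 + 4*1.008)/14.007   # ~1.288
# SO4/S = (32.06 + 4*15.999)/32.06    # ~2.996
# PO4/P = (30.974 + 4*15.999)/30.974  # ~3.066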
WQPStat = pd.read_csv(WQPStationFile)
StatFieldDict = {"MonitoringLocationIdentifier":"StationId", "AquiferName":"Aquifer", "AquiferTypeName":"AquiferType",
"ConstructionDateText":"ConstDate", "CountyCode":"CountyCode", "WellDepthMeasure/MeasureValue":"Depth",
"WellDepthMeasure/MeasureUnitCode":"DepthUnit", "VerticalMeasure/MeasureValue":"Elev",
"VerticalAccuracyMeasure/MeasureValue":"ElevAcc", "VerticalAccuracyMeasure/MeasureUnitCode":"ElevAccUnit",
"VerticalCollectionMethodName":"ElevMeth", "VerticalCoordinateReferenceSystemDatumName":"ElevRef",
"VerticalMeasure/MeasureUnitCode":"ElevUnit", "FormationTypeText":"FmType",
"WellHoleDepthMeasure/MeasureValue":"HoleDepth", "WellHoleDepthMeasure/MeasureUnitCode":"HoleDUnit",
"HorizontalAccuracyMeasure/MeasureValue":"HorAcc", "HorizontalAccuracyMeasure/MeasureUnitCode":"HorAccUnit",
"HorizontalCollectionMethodName":"HorCollMeth", "HorizontalCoordinateReferenceSystemDatumName":"HorRef",
"HUCEightDigitCode":"HUC8", "LatitudeMeasure":"Lat_Y", "LongitudeMeasure":"Lon_X",
"OrganizationIdentifier":"OrgId", "OrganizationFormalName":"OrgName", "StateCode":"StateCode",
"MonitoringLocationDescriptionText":"StationComment", "MonitoringLocationName":"StationName",
"MonitoringLocationTypeName":"StationType"}
WQPStat.rename(columns=StatFieldDict,inplace=True)
statdroplist = ["ContributingDrainageAreaMeasure/MeasureUnitCode", "ContributingDrainageAreaMeasure/MeasureValue",
"DrainageAreaMeasure/MeasureUnitCode", "DrainageAreaMeasure/MeasureValue", "CountryCode", "ProviderName",
"SourceMapScaleNumeric"]
WQPStat.drop(statdroplist,inplace=True,axis=1)
TypeDict = {"Stream: Canal":"Stream", "River/Stream":"Stream",
"Stream: Canal":"Stream", "Well: Test hole not completed as a well":"Well"}
WQPStat.StationType = WQPStat["StationType"].apply(lambda x: TypeDict.get(x,x),1)
WQPStat.Elev = WQPStat.Elev.apply(lambda x: np.nan if x==0.0 else round(x,1), 1)
WQPStat['StationId'] = WQPStat['StationId'].str.replace('_WQX-','-')
WQPStat.drop_duplicates(subset=['StationId'],inplace=True)
SDWIS = pd.read_csv(SDWISFile)
def sampid(x):
return "SDWIS" + str(x[0]) + str(x[1]) + str(x[2])[:-7]
def statid(x):
return "SDWIS" + str(x[0]) + str(x[1])
def statnm(x):
return str(str(x[0]) + " " + str(x[1])).title()
SDWIS["StationId"] = SDWIS[["FED_NM","ST_ID"]].apply(lambda x: statid(x),1)
SDWIS["StationName"] = SDWIS[["SYS_NM","FAC_NM"]].apply(lambda x: statnm(x),1)
SDWIS["SampleId"] = SDWIS[["FED_NM","ST_ID","DTE"]].apply(lambda x: sampid(x),1)
SDWIS["OrgId"] = "UDDW"
SDWIS["OrgName"] = "Utah Division of Drinking Water"
SDWIS["Elev"] = SDWIS["Z"].apply(lambda x: round(x*3.2808,1),1)
SDWIS["Unit"] = SDWIS["UOM_CODE"].apply(lambda x: str(x).lower(),1)
SDWIS["MDLUnit"] = SDWIS["DET_UOM"].apply(lambda x: str(x).lower(),1)
SDWIS["Param"] = SDWIS["ANLY_NM"].apply(lambda x: str(x).title().rstrip(),1)
SDWISFields ={"DTE":"SampleDate", "TYPE_CODE":"StationType",
"CONC_MSR":"ResultValue", "DET_LIM":"MDL",
"Y":"Lat_Y", "X":"Lon_X"}
SDWIS.rename(columns=SDWISFields,inplace=True)
def datetimefixSDWIS(x):
    '''Parse SDWIS date strings, which arrive in two different formats.'''
    d = str(x).lstrip().rstrip()
    try:
        return datetime.datetime.strptime(d, "%m/%d/%Y %H:%M:%S")
    except(ValueError):
        return datetime.datetime.strptime(d, "%Y-%m-%d %H:%M:%S")
SDWIS["SampleDate"] = SDWIS["SampleDate"].apply(lambda x: datetimefixSDWIS(x),1)
print sorted(list(SDWIS.Param.unique()))
def parnormSDWIS(x):
p = str(x[0]).rstrip().lstrip().lower()
u = str(x[2]).rstrip().lstrip().lower()
if p == 'nitrate':
return 'Nitrate', x[1]*4.427, 'mg/l'
elif p == 'nitrite':
return 'Nitrite', x[1]*3.285, 'mg/l'
elif p == 'nitrogen-ammonia as (n)':
return 'Ammonium', x[1]*1.288, 'mg/l'
elif u == 'ug/l':
return x[0], x[1]/1000, 'mg/l'
else:
return x[0], x[1], str(x[2]).rstrip()
SDWIS['Param'], SDWIS['ResultValue'], SDWIS['Unit'] = zip(*SDWIS[['Param','ResultValue','Unit']].apply(lambda x: parnormSDWIS(x),1))
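# Same molar-mass ratios as in the WQP normalization above (4.427 = NO3/N, etc.).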
SDWIS.drop(["FED_NM", "DET_UOM", "UOM_CODE","ANLY_NM", "FAC_NM", "ST_ID",
"SYS_NM", "COUNTY", "SY_NBR", "Z", "ID_NUM"],inplace=True, axis=1)
SDWISPmatch = {"Ph":"pH","Tds":"TDS","Nitrogen-Ammonia As (N)":"Nitrogen-Ammonia as (N)",
"Hydroxide As Calcium Carbonate":"Hydroxide as Calcium Carbonate",
"Bicarbonate As Hco3":"Bicarbonate as HCO3"}
SDWIS["Param"] = SDWIS["Param"].apply(lambda x: SDWISPmatch.get(x,x))
SDWIS["StationName"] = SDWIS["StationName"].apply(lambda x: x.replace("Wtp","WTP"))
SDWIS["ResultValue"] = SDWIS[["ResultValue","Unit"]].apply(lambda x: x[0]/1000 if x[1]=="ug/L" else x[0], 1)
SDWISType = {"SP":"Spring","WL":"Well","TP":"Facility Other","IN":"Stream","CC":"Connection","WH":"Well"}
SDWIS.StationType = SDWIS.StationType.apply(lambda x: SDWISType.get(x,x),1)
SDWISSta = SDWIS.drop([u'SampleDate', u'ResultValue', u'MDL', u'SampleId', u'Unit', u'MDLUnit', u'Param'], axis=1)
SDWISSta.drop_duplicates(inplace=True)
SDWISRes = SDWIS.drop([u'StationType', u'Lat_Y', u'Lon_X', u'StationName', u'Elev'], axis=1)
SDWISRes["SampMedia"] = "Groundwater"
AGStat = pd.read_csv(AGStationsFile)
AGStat["StationType"] = "Well"
AGStatFields = {"SITEID":"StationId","FINISHEDDE":"Depth","POINT_Y":"Lat_Y",
"POINT_X":"Lon_X","ELEV_FT":"Elev","ACCURACY":"HorAcc"}
AGStat.rename(columns=AGStatFields,inplace=True)
AGStat.drop(["OBJECTID_1", "OBJECTID", "PUB_YR", "SAMPLENO", "WLDATE", "WLDEPTH"], inplace=True, axis=1)
AGStat.StationId = AGStat.StationId.apply(lambda x: "UDAF-"+str(int(x)).zfill(5),1)
names = ["SampleId","ResultValue", "ParAbb", "Unit", "Param", "MDL","BelowLim","TestNo",
"StationId","SampleDate","SampYear"]
AGRes = pd.read_csv(AGResultsFile, names=names, index_col=10)
AGRes["DetectCond"] = AGRes["BelowLim"].apply(lambda x: 'Not Detected' if x=='Y' else np.nan,1)
AGRes.ResultValue = AGRes[["BelowLim","ResultValue"]].apply(lambda x: np.nan if x[0]=="Y" or x[1] == 0.0 else x[1], 1)
def parnormAG(x):
p = str(x[0]).rstrip().lstrip().lower()
u = str(x[2]).rstrip().lstrip().lower()
if p == 'nitrate-n':
return 'Nitrate', x[1]*4.427, 'mg/l'
elif u == 'ug/l':
return x[0], x[1]/1000, 'mg/l'
else:
return x[0], x[1], str(x[2]).rstrip()
AGRes['Param'], AGRes['ResultValue'], AGRes['Unit'] = zip(*AGRes[['Param','ResultValue','Unit']].apply(lambda x: parnormAG(x),1))
AGRes.Unit.unique()
AGRes.dropna(subset=["StationId","ResultValue"], how="any", inplace=True)
AGRes.StationId = AGRes.StationId.apply(lambda x: "UDAF-"+str(int(x)).zfill(5),1)
AGStAv = list(AGStat.StationId.values)
AGRes = AGRes[AGRes.StationId.isin(AGStAv)]
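# Inner-join-style filter: keep only AG results whose StationId has a
# matching record in the AG stations table.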
AGRes["SampMedia"] = "Groundwater"
AGStat['OrgId']='UDAF'
STORLegSta = pd.read_table(STORLegStatFile, skiprows=[1])
STORLegRes = pd.read_table(STORLegResFile, skiprows=[1])
STORParam = pd.read_table(STORParamFile)
rescol = list(STORLegRes.columns)
j = []
for i in rescol:
j.append(i.rstrip("\t").rstrip().lstrip().replace(" ",""))
resdict = dict(zip(rescol,j))
STORLegRes.rename(columns=resdict,inplace=True)
statcol = list(STORLegSta.columns)
k = []
for i in statcol:
k.append(i.rstrip("\t").rstrip().lstrip().replace(" ",""))
statdict = dict(zip(statcol,k))
STORLegSta.rename(columns=statdict,inplace=True)
STORLegRes["SampleDate"] = STORLegRes[["StartDate","StartTime"]].apply(lambda x: datetimefix(x,"%Y-%m-%d %H:%M"),1)
STORLegRes = STORLegRes[STORLegRes.SecondaryActivityCategory.isin(['Water',np.nan])]
STORParamDict = dict(zip(STORParam['Parameter No.'].values, STORParam['Full Name'].values))
STORLegRes.Param = STORLegRes.Param.apply(lambda x: STORParamDict.get(x,x),1)  # fall back to the raw code if unmapped
STORResField = {"Agency":"OrgId","AgencyName":"OrgName","Station":"StationId","SampleDepth":"SampDepth"}
STORLegRes.rename(columns=STORResField,inplace=True)
STORLegRes.drop(["StateName", "CountyName", "HUC", "EndDate", "UMK", "CS", "ReplicateNumber",
"COMPOSITE_GRAB_NUMBER","CM","PrimaryActivityCategory","PrimaryActivityCategory",
"SecondaryActivityCategory",
"EndTime", "StartDate", "StartTime", "Latitude", "Longitude"],inplace=True,axis=1)
STORLegRes["SampleId"] = STORLegRes[["StationId","SampleDate"]].apply(lambda x: str(x[0]) + "-" + str(x[1]),1 )
STORLegRes["StationId"] = STORLegRes["StationId"].apply(lambda x: "EPALeg-" + x, 1)
STORLegRes.Param = STORLegRes.Param.apply(lambda x: str(x).title(),1)
STORLegRes.columns
def parnormSTOR(x):
    # p is lower-cased, so the comparison literals below must be fully
    # lower-case to match (e.g. 'mg/l', never 'mg/L').
    p = str(x[0]).rstrip().lstrip().lower()
    if p == 'nitrate nitrogen, total (mg/l as n)' or p == 'nitrate nitrogen, total':
        return 'Nitrate', x[1]*4.427, 'mg/l'
    elif p == 'nitrite nitrogen, total (mg/l as n)':
        return 'Nitrite', x[1]*3.285, 'mg/l'
    elif p == 'nitrogen, ammonia, total (mg/l as n)':
        return 'Ammonium', x[1]*1.288, 'mg/l'
    elif p == 'sulfate (as s) whole water, mg/l':
        return 'Sulfate', x[1]*2.996, 'mg/l'
    elif p == 'phosphorus, dissolved orthophosphate (mg/l as p)':
        return 'Phosphate', x[1]*3.066, 'mg/l'
    else:
        return x[0], x[1], np.nan
STORLegRes['Param'], STORLegRes['ResultValue'], STORLegRes['Unit'] = zip(*STORLegRes[['Param','ResultValue']].apply(lambda x: parnormSTOR(x),1))
STORKeepers = ['Temperature, Water (Degrees Centigrade)',
'Temperature, Water (Degrees Fahrenheit)',
'Specific Conductance,Field (Umhos/Cm @ 25C)',
'Specific Conductance (Umhos/Cm @ 25C)',
'Sulfate (As S) Whole Water, Mg/L',
'Oxygen, Dissolved Mg/L',
'Oxygen, Dissolved, Percent Of Saturation %',
'Bod, 5 Day, 20 Deg C Mg/L',
'Ph (Standard Units)', 'Ph, Lab, Standard Units Su',
'Carbon Dioxide (Mg/L As Co2)', 'Alkalinity,Total,Low Level Gran Analysis Ueq/L',
'Alkalinity, Total (Mg/L As Caco3)', 'Bicarbonate Ion (Mg/L As Hco3)', 'Carbonate Ion (Mg/L As Co3)',
'Nitrogen, Ammonia, Total (Mg/L As N)', 'Ammonia, Unionzed (Mg/L As N)',
'Nitrite Nitrogen, Total (Mg/L As N)', 'Ammonia, Unionized (Calc Fr Temp-Ph-Nh4) (Mg/L)',
'Nitrate Nitrogen, Total (Mg/L As N)', 'Nitrogen, Kjeldahl, Total, (Mg/L As N)',
'Nitrite Plus Nitrate, Total 1 Det. (Mg/L As N)', 'Phosphorus (P), Water, Total Recoverable Ug/L',
'Phosphorus, Total (Mg/L As P)', 'Phosphorus, Dissolved Orthophosphate (Mg/L As P)',
'Carbon, Dissolved Organic (Mg/L As C)',
'Carbon, Dissolved Inorganic (Mg/L As C)',
'Hardness, Total (Mg/L As Caco3)', 'Calcium (Mg/L As Caco3)',
'Calcium, Dissolved (Mg/L As Ca)',
'Magnesium, Dissolved (Mg/L As Mg)',
'Sodium, Dissolved (Mg/L As Na)',
'Potassium, Dissolved (Mg/L As K)',
'Chloride, Dissolved In Water Mg/L',
'Sulfate, Dissolved (Mg/L As So4)',
'Fluoride, Dissolved (Mg/L As F)',
'Silica, Dissolved (Mg/L As Si02)',
'Arsenic, Dissolved (Ug/L As As)', 'Arsenic, Total (Ug/L As As)',
'Barium, Dissolved (Ug/L As Ba)', 'Barium, Total (Ug/L As Ba)',
'Beryllium, Total (Ug/L As Be)', 'Boron, Dissolved (Ug/L As B)',
'Boron, Total (Ug/L As B)', 'Cadmium, Dissolved (Ug/L As Cd)',
'Cadmium, Total (Ug/L As Cd)', 'Chromium, Dissolved (Ug/L As Cr)',
'Chromium, Hexavalent (Ug/L As Cr)', 'Chromium, Total (Ug/L As Cr)',
'Copper, Dissolved (Ug/L As Cu)', 'Copper, Total (Ug/L As Cu)',
'Iron, Dissolved (Ug/L As Fe)', 'Lead, Dissolved (Ug/L As Pb)',
'Lead, Total (Ug/L As Pb)', 'Manganese, Total (Ug/L As Mn)',
'Manganese, Dissolved (Ug/L As Mn)', 'Thallium, Total (Ug/L As Tl)',
'Nickel, Dissolved (Ug/L As Ni)', 'Nickel, Total (Ug/L As Ni)',
'Silver, Dissolved (Ug/L As Ag)', 'Silver, Total (Ug/L As Ag)',
'Zinc, Dissolved (Ug/L As Zn)', 'Zinc, Total (Ug/L As Zn)',
'Antimony, Total (Ug/L As Sb)', 'Aluminum, Total (Ug/L As Al)',
'Selenium, Dissolved (Ug/L As Se)', 'Selenium, Total (Ug/L As Se)',
'Tritium (1H3),Total (Picocuries/Liter)',
'Hardness, Ca Mg Calculated (Mg/L As Caco3)',
'Chlorine, Total Residual (Mg/L)',
'Residue,Total Filtrable (Dried At 180C),Mg/L',
'Nitrate Nitrogen, Dissolved (Mg/L As No3)', 'Iron (Ug/L As Fe)',
'Phosphorus, Total, As Po4 - Mg/L', 'Mercury, Total (Ug/L As Hg)']
# parnormSTOR renames matched nutrient parameters before this filter runs,
# so keep the normalized names alongside the raw STORET names listed above.
STORLegRes = STORLegRes[STORLegRes.Param.isin(STORKeepers + ['Nitrate', 'Nitrite', 'Ammonium', 'Sulfate', 'Phosphate'])]
def parsplit(x,p):
x = str(x).rstrip().lstrip()
if p == "Un":
z = -1
x = str(x).replace("Mg/L", "mg/l")
x = str(x).replace("Ug/L", "ug/l")
x = str(x).replace("o", "O")
x = str(x).replace("c", "C")
x = str(x).replace("TOtal ReCOverable","Total Recoverable")
x = str(x).replace("UmhOs", "umhos")
x = str(x).replace("TOtal","Total")
elif p== "Par":
z = 0
x = str(x).replace(", Standard Units","")
x = str(x).replace(", Unionized","")
x = str(x).replace(", Unionzed","")
x = str(x).replace(",Low Level Gran Analysis","")
x = str(x).replace(" Ion","")
x = str(x).replace(",Total",", Total")
if x == "Ph" or x == "Ph, Lab":
x = str(x).replace("Ph","pH")
if "(" in x:
x = str(x).replace(" As ", " as ")
return str(x).split(" (")[z].rstrip(")").rstrip().lstrip()
else:
return str(x).split(" ")[z].rstrip().lstrip()
def splitmore(x):
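    # The oddly capitalized literals below (e.g. "WhOle") are intentional:
    # they match strings produced by parsplit's o->O and c->C character swaps.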
if "NO3" in x:
return x
elif " as " in x:
return x.split(" as ")[0]
elif x == "As S) WhOle Water, mg/l" or x == "Dried At 180C),mg/l" or x=="PhOsphOrus, Total, As PO4 - mg/l":
return "mg/l"
elif x == "P), Water, Total Recoverable ug/l":
return "ug/l"
else:
return x
def unitconv(x):
if x[1]=="ug/l":
return x[0]/1000
elif x[1]=="Degrees Fahrenheit":
return (float(x[0])-32.0)*(5.0/9.0)
else:
return x[0]
STORLegRes["Unit"] = STORLegRes["Param"].apply(lambda x: parsplit(x,"Un"), 1)
STORLegRes["Param"] = STORLegRes["Param"].apply(lambda x: parsplit(x,"Par"), 1)
STORLegRes["Unit"] = STORLegRes["Unit"].apply(lambda x: splitmore(x), 1)
STORLegRes["ResultValue"] = STORLegRes[["ResultValue","Unit"]].apply(lambda x: unitconv(x), 1)
STORLegRes["Unit"] = STORLegRes["Unit"].apply(lambda x: "mg/l" if x=="ug/l" else x, 1)
STORLegRes["Unit"] = STORLegRes["Unit"].apply(lambda x: "Degrees Centigrade" if x=="Degrees Fahrenheit" else x, 1)
STORStaField = {"Agency":"OrgId","AgencyName":"OrgName","Station":"StationId", "DepthUnits":"DepthUnit",
"Latitude":"Lat_Y", "Longitude":"Lon_X", "HUC":"HUC8", "StationDepth":"Depth"}
STORLegSta.rename(columns=STORStaField,inplace=True)
STORLegSta.columns
STORLegSta.drop(["RchmileSegment", "MilesUpReach", "Rchonoff", "Description", "G", "S", "StationAlias",
"Rchname", "StateName", "CountyName"], inplace=True, axis=1)
# str.strip takes a character set, not a substring, so strip("/SUPPLY") would
# mangle values such as "SPRING" into "RING"; remove the suffix explicitly.
STORLegSta.StationType = STORLegSta.StationType.apply(lambda x: str(x).rstrip(" ").replace("/SUPPLY","").split("/")[-1].title(),1)
LegTypeDict = {"We":"Well"}
STORLegSta.StationType = STORLegSta.StationType.apply(lambda x: LegTypeDict.get(x,x),1)
STORLegSta.StationId = STORLegSta["StationId"].apply(lambda x: "EPALeg-" + x, 1)
UGSfield = pd.read_excel(fielddata,"FieldChem") #Field data
UGSNO3 = pd.read_excel(fielddata,"Nitrate") #Nitrate data provided by Millville City
UGS = pd.read_csv(UGSFile, engine="python")
UGS["StationId"] = UGS["SITE"].apply(lambda x:"UGS-"+str(x).zfill(4),1)
UGSSta = UGS.drop([u'OBJECTID_1',u'SITE', u'TDS', u'Temp', u'Cond', u'CO2', u'HCO3',
u'CO3',u'Na', u'pH', u'Ca', u'SO4', u'NO3', u'As_', u'Cl', u'K',
u'Mg', u'Hard', u'NH4'], axis=1)
UGSRe = UGS.drop([u'OBJECTID_1',u'SITE',u'StationType', u'Geology', u'Elev', u'Lat_Y', u'Lon_X', u'StationName',
u'OrgId', u'WRNUM', u'SITE', u'UTM_X', u'UTM_Y', u'Depth_ft'], axis=1)
UGSRe["SampleId"] = UGSRe.index
UGSRe.reset_index(inplace=True)
UGSRe.set_index(["StationId","SampleId"], inplace=True)
UGSRe.drop(UGSRe.columns[0],inplace=True,axis=1)
UGSStack = UGSRe.stack().to_frame()
UGSStack.columns = ["ResultValue"]
UGSStack.reset_index(inplace=True)
UGSStack.columns=["StationId","SampleId","Param","ResultValue"]
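# Toy illustration of the wide-to-long reshape above: a row indexed
# ('UGS-0001', 0) with columns TDS=500 and pH=7.8 stacks into two rows,
# ('UGS-0001', 0, 'TDS', 500.0) and ('UGS-0001', 0, 'pH', 7.8).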
def unitcon(x):
if x=="pH":
return ""
elif x=="Temp":
return "C"
elif x=="Cond":
return "uS/cm"
else:
return "mg/l"
UGSStack["Unit"] = UGSStack["Param"].apply(lambda x: unitcon(x),1)
UGSStack["ParAbb"] = UGSStack["Param"]
UGSStack["OrgId"] = "UGS"
UGSStack["OrgName"] = "Utah Geological Survey"
UGSStack["ResultValue"] = UGSStack[['Param','ResultValue']].apply(lambda x: x[1]*1.288 if x[0]=='Ammonia as N' else x[1],1)
UGSStack["Param"] = UGSStack['Param'].apply(lambda x: 'Ammonia' if x=='Ammonia as N' else x, 1)
UGSStack["ResultValue"] = UGSStack[['Param','ResultValue']].apply(lambda x: x[1]*3.066 if x[0]=='Phosphate, Tot. Dig. (as P)' else x[1],1)
UGSStack["Param"] = UGSStack['Param'].apply(lambda x: 'Phosphate' if x=='Phosphate, Tot. Dig. (as P)' else x, 1)
SLSampMatch = pd.read_excel(fielddata,"StateLabMatch")
SLStat = pd.read_excel(fielddata,"Stations")
#SLStat = pd.merge(SLSampMatch, SLStations, on='StationId', how='outer')
#SLStat.reset_index(inplace=True)
SLStat
SL0 = pd.read_table(statelabresults0, sep="\t", lineterminator="\n", error_bad_lines=False)
SL0 = SL0[SL0['Collector']=='PI']
SL1 = pd.read_table(statelabresults1, sep="\t", lineterminator="\n", error_bad_lines=False)
SL1 = SL1[SL1['Collector']=='PI']
SL2 = pd.read_table(statelabresults2, sep="\t", lineterminator="\n", error_bad_lines=False)
SL2 = SL2[SL2['Collector']=='PI']
SL = pd.concat([SL0,SL1,SL2])
SL["OrgId"] = "UGS"
SL["OrgName"] = "Utah Geological Survey"
SL['DetectCond'] = SL['Problem#Identifier'].apply(lambda x: 'Not Detected' if str(x).rstrip()=='<' else np.nan,1)
SL['SampleDate'] = SL[['Sample#Date','Sample#Time']].apply(lambda x: datetimefix(x,"%m/%d/%y %H:%M"),1)
SLHead = {'Sample#Number':'SampleId', 'Param#Description':'Param', 'Result#Value':'ResultValue','Units':'Unit',
'Lower#Report#Limit':'MDL','Method#ID':'SampMeth','Analysis#Date':'AnalysisDate'}
SL.rename(columns=SLHead,inplace=True)
SL['Sample#Description'].unique()
SL.drop([u'Lab#Code', u'Station#ID', u'Source#Code', u'Sample#Date',
u'Sample#Time', u'Sample#Type', u'Cost#Code', u'Billing#Code',
u'Agency#Bill#Code', u'Trip#ID', u'Sample#Description', u'Collector',
u'Sample#Recieved#Date', u'Chain#of#Custody#Ind.', u'Replicate#Number',
u'Sample#Comment', u'Method#Number', u'Method#Agency',
u'Method#Description', u'Param#Number', u'CAS#Number',
u'Matrix#Number', u'Matrix#Description', u'Preparation#Date',
u'Problem#Identifier', u'Result#Code',
u'Upper#Quant#Limit', u'Method#Detect#Limit',
u'Confidence#Limit', u'%#Confidence#Limit',u'Dilution#Factor',
u'Batch#Number',u'Comment#Number', u'Comment#Text'], inplace=True, axis=1)
SL.columns
SLRes = pd.merge(SL, SLSampMatch, on='SampleId', how='left')
SLStat.drop_duplicates(inplace=True)
def SLparnorm(x):
    p = str(x[0]).rstrip().lstrip().lower()
    u = str(x[2]).rstrip().lstrip().lower()
    # As in parnormSTOR, the comparison literals must be fully lower-case.
    if p == 'nitrate nitrogen, total (mg/l as n)':
        return 'Nitrate', x[1]*4.427, 'mg/l'
    elif p == 'nitrite nitrogen, total (mg/l as n)':
        return 'Nitrite', x[1]*3.285, 'mg/l'
    elif p == 'ammonia as n':
        return 'Ammonium', x[1]*1.288, 'mg/l'
    elif p == 'sulfate (as s) whole water, mg/l':
        return 'Sulfate', x[1]*2.996, 'mg/l'
    elif p in ('phosphate, tot. dig. (as p)', 'phosphate-phosphorus as p', 'orthophosphate as p'):
        return 'Phosphate', x[1]*3.066, 'mg/l'
    elif u == 'ug/l':
        return x[0], x[1]/1000, 'mg/l'
    else:
        return x[0], x[1], str(x[2]).rstrip()
def MDLfix(x):
u = str(x[1]).rstrip().lstrip().lower()
if np.isfinite(x[2]):
return x[0]
elif u=='ug/l':
return x[0]/1000
else:
return x[0]
SLRes['MDL'] = SLRes[['MDL','Unit','ResultValue']].apply(lambda x: MDLfix(x),1)
# Apply the state-lab-specific normalizer defined above (SLparnorm), not the WQP parnorm.
SLRes['Param'], SLRes['ResultValue'], SLRes['Unit'] = zip(*SLRes[['Param','ResultValue','Unit']].apply(lambda x: SLparnorm(x),1))
SLRes.StationId.unique()
Res = pd.concat([STORLegRes,AGRes,SDWISRes,WQP,UGSStack,SLRes,UGSfield,UGSNO3])
Res = Res[~Res["Unit"].isin(['ueq/L','Ueq/L','ueq/l','tons/ac ft','tons/day','meq/L'])]
Res = Res[~Res["Param"].isin(["Heptachlorobiphenyl", "Hydrocarbons", "Hydroxide", "Ionic strength",
"Floating debris, severity", "Carbon Tetrachloride", "Trichlorobiphenyl",
"Vinyl Chloride", "True color", "Color", "Trash, Debris, Floatables",
"Total volatile solids", "Temperature, air", "Residue, Total Filtrable",
"Pentachlorobiphenyl", "Odor threshold number", "Odor, atmospheric",
"Instream features, est. stream width", "Hydroxide",
"Light, transmissivity","Algae, floating mats (severity)"])]
len(Res)
Res[["Param","Unit","USGSPCode"]].drop_duplicates(subset=["Param","Unit"]).sort_values(by=["Param"]).to_clipboard()
Stat = pd.concat([STORLegSta, AGStat, SDWISSta, WQPStat, SLStat, UGSSta])
parmatch = pd.read_excel(rootname + "Aquachem.xlsx")
parmatchdict = dict(zip(parmatch.Param.values, parmatch.ParrAbb.values))
Res["ParAbb"] = Res[["ParAbb","Param"]].apply(lambda x: parmatchdict.get(x[1],x[0]),1)
Stat.loc[:,"StationName"] = Stat["StationName"].apply(lambda x: str(x).strip(),1)
Stat.loc[:,"StationId"] = Stat["StationId"].apply(lambda x: str(x).strip(),1)
Res.loc[:,"StationId"] = Res["StationId"].apply(lambda x: str(x).strip(),1)
# Build the results frame after the id cleanup so the trimmed ids carry through.
results = Res.dropna(subset=["StationId","Param","SampleId"], how="any")
results.loc[:,"Unit"] = results[["ParAbb","Unit"]].apply(lambda x: "C" if x[0]=="Temp" else x[1],1)
results.loc[:,"Unit"] = results[["ParAbb","Unit"]].apply(lambda x: "umhos/cm" if x[0]=="Cond" else x[1],1)
results.loc[:,"Unit"] = results[["ParAbb","Unit"]].apply(lambda x: "" if x[0]=="pH" else x[1],1)
results.drop(["AnalysisDate","AnalytMeth","SampType","AnalytMethId", "BelowLim", "StationName",
"MethodDescript", "LabComments", "LabName", "LimitType", "ProjectId", "QualCode",
"OrgName","R", "ResultComment","ResultStatus","SampComment", "SampEquip",
"SampDepthRef", "SampDepthU","SampDepth","SampType", "USGSPCode",
"SampMeth", "SampMethName","SampYear","TestNo"],inplace=True,axis=1)
NDs = {'Not Detected':'<', 'Present Above Quantification Limit':'>', 'ND ':'<', '*Present >QL ':'>',
'Present Below Quantification Limit':'<', '*Non-detect ':'<', 'Detected Not Quantified':'<',
'Systematic Contamination':'<'}
results.DetectCond = results.DetectCond.apply(lambda x: NDs.get(x,np.nan),1)
def is_nan(x):
'''
this function identifies nan values
Source: http://stackoverflow.com/questions/944700/how-to-check-for-nan-in-python
'''
try:
return math.isnan(x)
except:
return False
def detected(x):
'''
Finds nondetects and fixes units and values
'''
if x[1]=='<' and np.isfinite(x[0]):
return x[1]+str(x[0])
elif x[1]=='<' and np.isfinite(x[2]):
if str(x[3]).rstrip().lower() == 'ug/l':
return x[1]+str(x[2]/1000)
else:
return x[1]+str(x[2])
else:
return x[0]
results.ResultValue = results[['ResultValue','DetectCond','MDL','MDLUnit']].apply(lambda x: detected(x),1)
def MDLfill(x):
if x[0] <= 0 and x[1]>0:
return 0
elif x[2] == '<':
return 0
elif x[0] < x[1]:
return 0
else:
return 1
results.loc[:,'ResValue'] = pd.to_numeric(results['ResultValue'], errors='coerce')
results.loc[:,'Censored'] = results[['ResValue','MDL','DetectCond']].apply(lambda x: MDLfill(x),1)
matchDict = {'414143111495501':'USGS-414143111495501','414115111490301':'USGS-414115111490301',
'SDWIS3117.0WS004':'USGS-414115111490301',
'EPALeg-0301203':'414029111483501','SDWIS3116.0WS003':'414029111483501',
'EPALeg-0301201':'USGS-414024111481101','SDWIS5435.0WS001':'USGS-414024111481101',
'414024111481101':'USGS-414024111481101','EPALeg-0300101':'SDWIS5411.0WS001',
'EPALeg-0300102':'SDWIS5412.0WS002', 'EPALeg-0300103':'SDWIS5413.0WS003',
'UGS-107.5':'SDWIS3143.0WS001','UDAF-01492':'UGS-0412', 'UDAF-03165':'UGS-106.5',
'SDWIS3126.0WS002':'USGS-414216111485201', 'EPALeg-0301702':'USGS-414216111485201',
'EPALeg-0301901':'USGS-414328111493001', 'SDWIS3131.0WS001':'USGS-414328111493001',
'EPALeg-0301005':'SDWIS3112.0WS005', 'EPALeg-0301002':'USGS-414417111484301',
'SDWIS3109.0WS002':'USGS-414417111484301', 'SDWIS3113.0WS006':'USGS-414459111493601',
'SDWIS3127.0WS003':'414213111493101', 'SDWIS3159.0WS003':'SDWIS3157.0WS001','UDAF-01500':'UGS-63.5',
'SDWIS3111.0WS004':'USGS-414441111490701', 'EPALeg-0301904':'SDWIS3133.0WS004',
'EPALeg-0301004':'USGS-414441111490701','EPALeg-0301502':'SDWIS3118.0WS002',
'UDAF-01589':'UDAF-01568','UDAF-01586':'UDAF-01566','UGS-0050':'UDAF-01566',
'EPALeg-0300104':'SDWIS3088.0WS004', 'UDAF-01585':'UGS-0032', 'UDAF-01565':'UGS-0032',
'EPALeg-0300201':'SDWIS3091.0WS001', 'EPALeg-0300204':'SDWIS3094.0WS004',
'EPALeg-0301803':'SDWIS3129.0WS003','EPALeg-0300405':'SDWIS5418.0WS005',
'EPALeg-0300404':'SDWIS5417.0WS004', 'EPALeg-0300403':'SDWIS5416.0WS003',
'SDWIS5439.0WS003':'SDWIS5416.0WS003', 'SDWIS5460.0WS003':'SDWIS5416.0WS003',
'SDWIS5414.0WS001':'SDWIS5458.0WS001', 'SDWIS5437.0WS001':'SDWIS5458.0WS001',
'EPALeg-0308601':'USGS-415828111460001', 'SDWIS5487.0WS001':'USGS-415828111460001',
'SDWIS5430.0WS002':'USGS-415828111460001', 'SDWIS5423.0WS003':'USGS-415828111460001',
'SDWIS5421.0WS001':'USGS-415836111464701', 'EPALeg-0304901':'SDWIS5479.0WS001',
'EPALeg-0303201':'SDWIS5470.0WS001', 'SDWIS5432.0WS001':'USGS-414535111423001',
'EPALeg-0303001':'SDWIS5469.0WS001','EPALeg-0307701':'SDWIS5485.0WS001',
'EPALeg-0308301':'SDWIS5486.0WS001', 'EPALeg-0301501':'SDWIS5445.0WS001',
'EPALeg-0300701':'USGS-415120111440001', 'SDWIS5424.0WS001':'USGS-415120111440001',
'EPALeg-0302001':'SDWIS5455.0WS001', 'EPALeg-0301101':'SDWIS5433.0WS001',
'EPALeg-0301102':'SDWIS5434.0WS002'}
Stat.loc[:,'StationId'] = Stat['StationId'].apply(lambda x: matchDict.get(x,x),1)
results.loc[:,'StationId'] = results['StationId'].apply(lambda x: matchDict.get(x,x),1)
results.loc[:,'SampleDate'] = pd.to_datetime(results.SampleDate)
def depthFill(x):
if x > 0:
return x
def depthUnitFill(x):
if x > 0:
return 'ft'
Stat.Depth = Stat['Depth_ft'].apply(lambda x: depthFill(x),1)
Stat.DepthUnit = Stat['Depth_ft'].apply(lambda x: depthUnitFill(x),1)
WINdict = {'SDWIS3180.0WS001':435116, 'UGS-47.5':32700, 'UDAF-01566':30211, 'UGS-46.5':12420,
'USGS-414525111503705':427268, 'UDAF-01569':28327, 'SDWIS3112.0WS005':2694,
'UDAF-03162':434818, 'USGS-414328111493001':2823, 'USGS-414332111491001':2836,
'SDWIS3133.0WS004':2848, 'UGS-91.5':28647, 'UGS-95.5':35814, 'SDWIS3128.0WS004':18590,
'UGS-0102':426853, 'USGS-414115111490301':2722, 'UT4140521114843201':32975, '414029111483501':2721,
'SDWIS3088.0WS004':2741,'UGS-63.5':7126, 'UGS-0084':9639, 'USGS-414134111544701':434098, 'UGS-0070':35061,
'UGS-0029':32851,'UGS-0030':26663,'UGS-0034':29110, 'UGS-0055':3728, 'UGS-61.5':9280,
'SDWIS3129.0WS003':2816, 'UGS-0043':29329, 'UGS-0889':24493, 'UGS-44.5':28333}
Stat.WIN = Stat['StationId'].apply(lambda x: WINdict.get(x,x),1)
results.SampleId = results.SampleId.apply(lambda x: str(x).replace(' ',''),1)
results.StationId = results.StationId.apply(lambda x: str(x).replace(' ',''),1)
Stat.StationId = Stat.StationId.apply(lambda x: str(x).replace(' ',''),1)
results.drop_duplicates(subset = ['SampleId','ParAbb'],inplace=True)
Stat.drop_duplicates(subset = ['StationId'],inplace=True)
resultsNoND = results[(~results['DetectCond'].isin(['<','>']))]
datap = resultsNoND.pivot(index='SampleId', columns='ParAbb', values='ResValue')
datap.dropna(subset=['SO4','Cond','Temp','TDS','pH_field'],how='all',inplace=True)
datap.drop(datap.columns[[0]], axis=1, inplace=True)
results.columns
resdrop = [ 'DetectCond', u'Comment#Number.1', u'Comment#Text.1', 'ResultValue', 'ResValue',
'MDL', 'MDLUnit', 'OrgId', 'Param', 'ResultValue', 'SampFrac',
'SampMedia', 'Unit', 'ParAbb']
resPivot = results.drop(resdrop, axis=1)
datapiv = pd.merge(datap, resPivot, left_index=True, right_on='SampleId',how='left')
datapiv.drop_duplicates(subset=['SampleId'],inplace=True)
def projy(x):
inProj = Proj(init='epsg:4326') #WGS84
outProj = Proj(init='epsg:2152') #NAD83(CSRS98) / UTM zone 12N
x2,y2 = transform(inProj,outProj,x[0],x[1])
return y2
def projx(x):
inProj = Proj(init='epsg:4326') #WGS84
outProj = Proj(init='epsg:2152') #NAD83(CSRS98) / UTM zone 12N
x2,y2 = transform(inProj,outProj,x[0],x[1])
return x2
def getelev(x):
elev = "http://ned.usgs.gov/epqs/pqs.php?x="+str(x[0])+"&y="+str(x[1])+"&units=Meters&output=xml"
response = urllib2.urlopen(elev)
html = response.read()
d = xmltodict.parse(html)
return float(d['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation'])
Stat.loc[:,'UTM_X'] = Stat[['Lon_X','Lat_Y']].apply(lambda x: projx(x),1)
Stat.loc[:,'UTM_Y'] = Stat[['Lon_X','Lat_Y']].apply(lambda x: projy(x),1)
Stat.loc[:,'Elev'] = Stat[['Lon_X','Lat_Y']].apply(lambda x: getelev(x),1)
pivStats = Stat.drop(['Aquifer', 'ConstDate', 'Depth', 'DepthUnit','AquiferType', 'HorCollMeth', 'Geology',
'HoleDUnit', 'HoleDepth', 'HUC8', 'HorAccUnit', 'HoleDUnit', 'SCREENDEPT',
'ElevUnit', 'ElevRef', 'ElevAcc', 'ElevMeth','CountyCode', 'ElevAccUnit',
'HorAcc', 'StateCode', 'HorRef',
'OrgId', 'StationComment'], axis=1)
pivStats.reset_index(inplace=True)
pivStats.set_index("StationId",inplace=True)
pivdata = pd.merge(datapiv, pivStats, left_on="StationId", right_index=True, how='left')
pivdata.drop_duplicates(subset=['SampleId'],inplace=True)
alkmatch = pivdata[(pivdata['Meas_Alk']>0)&(pivdata['HCO3']>0)]
x = [np.float64(i) for i in alkmatch['Meas_Alk'].values]
y = [np.float64(i) for i in alkmatch['HCO3'].values]
X = sm.add_constant(x)
res = sm.RLM(y,X).fit()
b = res.params[0]
m = res.params[1]
print(m)
print(b)
plt.figure()
plt.scatter(x,y)
plt.plot(x, res.fittedvalues, color='red')
def HCO3fix(x):
if x[0]>0:
return x[0]
elif x[1]>0:
return x[1]*m+b
else:
pass
pivdata['HCO3'] = pivdata[['HCO3','Meas_Alk']].apply(lambda x: HCO3fix(x),1)
parlist = ['Ca','Mg','Na','K','Cl','HCO3','CO3','SO4','NO3','NO2','CO2','TDS','Si','Zn_tot','As_tot']
def removeInf(x):
if x <= 0:
return np.nan
else:
return np.log(x)
for i in parlist:
if i in pivdata.columns:
pivdata[i+'Ln'] = pivdata[i].apply(lambda x: removeInf(x),1)
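# The factors below convert mg/L to meq/L (valence / molar mass, i.e. the
# reciprocal equivalent weight); e.g. Ca: 2/40.08 = 0.0499, Cl: 1/35.45 = 0.0282.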
d = {'Ca':0.04990269, 'Mg':0.082287595, 'Na':0.043497608, 'K':0.02557656, 'Cl':0.028206596, 'HCO3':0.016388838, 'CO3':0.033328223, 'SO4':0.020833333, 'NO2':0.021736513, 'NO3':0.016129032}
chemlist = ['Ca','Mg','Na','K','Cl','HCO3','CO3','SO4','NO3','NO2']
for i in chemlist:
if i in pivdata.columns:
pivdata[i+'Meq'] = pivdata.loc[:,i] * d[i]
pivdata.drop_duplicates(subset = ['StationId','SampleDate'], inplace=True)
def sumIons(x):
b = 0
for i in x:
if i>0:
b = i + b
else:
b = b
return b
pivdata['Anions'] = pivdata[['ClMeq','HCO3Meq','SO4Meq','CO3Meq']].apply(lambda x: sumIons(x),1)
pivdata['Cations'] = pivdata[['KMeq','MgMeq','NaMeq','CaMeq']].apply(lambda x: sumIons(x),1)
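# Charge-balance error (CBE): percent imbalance between total cation and
# anion charge in meq/L; samples with |CBE| <= 5% are kept below as the
# quality-control cutoff.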
pivdata['EC'] = pivdata['Anions'] - pivdata['Cations']
pivdata['CBE'] = ((pivdata['Cations']-np.abs(pivdata['Anions']))/(pivdata['Cations']+np.abs(pivdata['Anions'])))*100
#piperdata = pivdata.dropna(subset = ['Ca','Na','Cl','Mg','SO4','HCO3'], how='any')
#piperdata.drop_duplicates(subset=['SampleId'], inplace=True)
print(len(pivdata))
pivgrps = pivdata.groupby(['StationId']).median()
pivGoodData = pivdata[abs(pivdata.CBE)<=5]
pipergrps = pivGoodData.groupby(['StationId']).median()
pipergrps['sampCount'] = pivGoodData.groupby(['StationId'])['CBE'].agg(lambda x: np.count_nonzero(~np.isnan(x)))
pivgrp = pd.merge(pivgrps, pivStats, left_index=True, right_index=True, how='left')
pipergrp = pd.merge(pipergrps, pivStats, left_index=True, right_index=True, how='left')
pipergrp.drop_duplicates(inplace=True)
pivgrp = pivgrp.reset_index().drop_duplicates(subset=['StationId']).set_index('StationId')
princpiv = pivGoodData[(pivGoodData.SampleDate < datetime.datetime(2014,3,10))&(pivGoodData.UTM_X < 435000) & (pivGoodData.UTM_X > 422000) \
& (pivGoodData.UTM_Y > 4608000) & (pivGoodData.UTM_Y < 4634000) & (pivGoodData.StationType=='Well')]
princpiv.drop_duplicates(subset = ['SampleId'],inplace=True)
ResOldPrinc = resultsNoND[(resultsNoND.SampleId.isin(princpiv.SampleId))]
GWStat = Stat[Stat.StationType.isin(['Well','Spring'])]
GWRes = results[results.StationId.isin(list(GWStat.StationId))]
Nitrate = GWRes[GWRes['ParAbb'].isin(['N','NO2','NO3','NH4'])]
NitrateStat = GWStat[GWStat.StationId.isin(list(Nitrate.StationId))]
ParrAbbSummary = ResOldPrinc.groupby('ParAbb')['ResValue'].agg({'min':np.min, 'mean':np.mean,
'qrt5':(lambda x: np.percentile(x,q=5)),
'qrt95':(lambda x: np.percentile(x,q=95)),
'range':(lambda x: np.max(x)-np.min(x)),
'lqrt':(lambda x: np.percentile(x,q=25)),
'median':np.median,
'uqrt':(lambda x: np.percentile(x,q=75)),
'max':np.max, 'std':np.std,
'cnt':(lambda x: np.count_nonzero(~np.isnan(x)))}).reset_index()
ParrAbbSummary
manyPars = list(ParrAbbSummary[ParrAbbSummary['cnt'] >= 30]['ParAbb'])
ResOldPrinc = ResOldPrinc[ResOldPrinc['ParAbb'].isin(manyPars)]
summaryStats = ParrAbbSummary[ParrAbbSummary['ParAbb'].isin(manyPars)]
summaryStats
from pylab import rcParams
rcParams['figure.figsize'] = 15, 10
parLabCounts = ParrAbbSummary.reset_index()
parLabCounts = parLabCounts.set_index(['ParAbb'])
plt.figure()
boxres= ResOldPrinc[ResOldPrinc['ParAbb'].isin(['pH_lab','pH_field'])]
boxres.boxplot(column='ResValue', by='ParAbb',vert=False)
plt.title('Boxplot of Principal Aquifer pH')
plt.yticks([1,2],['Field pH (n = %s)'%(parLabCounts.loc['pH_field','cnt']),'Lab pH (n = %s)'%(parLabCounts.loc['pH_lab','cnt'])])
plt.xlim(6,9)
plt.xticks(np.arange(6,9.25,0.25))
plt.xlabel('pH')
plt.savefig(rootname+"pHBoxplot.svg")
plt.savefig(rootname+"pHBoxplot.pdf")
plt.figure()
boxres= ResOldPrinc[ResOldPrinc['ParAbb'].isin(['Temp'])]
boxres.boxplot(column='ResValue', by='ParAbb',vert=False)
plt.title('Boxplot of Principal Aquifer Temperature')
plt.yticks([1],['Temperature (deg. C) (n = %s)'%(parLabCounts.loc['Temp','cnt'])])
plt.xticks(np.arange(5,30,1))
plt.xlabel('Temp. (deg. C)')
plt.savefig(rootname+"TempBoxplot.pdf")
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
rcParams['figure.figsize'] = 15, 20
parLabCounts = ParrAbbSummary.reset_index()
parLabCounts = parLabCounts.set_index(['ParAbb'])
parlist = ['Mg','Ca','Na','Cl','SO4','HCO3','Si','K','NO3','TDS','N']
boxres = ResOldPrinc[ResOldPrinc['ParAbb'].isin(parlist)]
plt.figure()
sns.violinplot(x="ResValue", y='ParAbb', data=boxres, palette="Set3", scale='width', cut=0)
plt.xlabel('mg/L')
plt.xlim(0,1200)
plt.ylabel('Chemical Constituent')
plt.savefig(rootname+'violinMajor.pdf')
parLabCounts = ParrAbbSummary.reset_index()
parLabCounts = parLabCounts.set_index(['ParAbb'])
def parboxplot(parlist):
plt.figure()
boxres= ResOldPrinc[ResOldPrinc['ParAbb'].isin(parlist)]
boxres.boxplot(column='ResValue', by='ParAbb',vert=False)
#labs = [str(parlist[i]) + " (n= %s)"%(parLabCounts.loc[parlist[i],'cnt']) for i in range(len(parlist))]
#tickloc = [b+1 for b in range(len(parlist))]
#plt.yticks(tickloc,labs)
parlist = ['pH_lab','pH_field']
parboxplot(parlist)
plt.xlabel('pH')
plt.savefig(rootname+'pHBoxplot.pdf')
parlist = ['Mg','Ca','Na','Cl','SO4','HCO3','Si','K','NO3','TDS','N']
parboxplot(parlist)
plt.title('Major Ions')
plt.xlabel('mg/L')
plt.grid(which='both',axis='both')
plt.xscale('log')
plt.xlim(0.1,1000)
plt.savefig(rootname+'MajorIonsBoxplot.pdf')
#plt.xlim(0.00001,1000)
#plt.xscale('log')
pipergrps.to_csv(rootname+'avgpiper.csv',index_label='StationId')
pivdata.to_csv(rootname+'pivotdata.csv',index_label='OBJECTID')
princpiv.to_csv(rootname+'PrincAquiferData.csv',index_label='OBJECTID')
pivgrp.to_csv(rootname+'pivgrps.csv',index_label='StationId')
Nitrate.to_csv(rootname+'NitrateResults.csv')
NitrateStat.to_csv(rootname+'NitrateStations.csv')
summaryStats.to_csv(rootname+'PrincAquifStats.csv')
summaryStats.to_clipboard()
GWStat.to_csv(rootname+'GWStations.csv',index_label='ObjectID')
GWRes.to_csv(rootname+'GWResults.csv',index_label='ObjectID')
writer = pd.ExcelWriter(rootname + "combined_out.xlsx", engine="xlsxwriter")
Stat.to_excel(writer, "stations", index=False)
results.to_excel(writer, "results", index=False)
GWStat.to_excel(writer, 'GWStations',index=False)
GWRes.to_excel(writer, 'GWResults',index=False)
pipergrps.to_excel(writer,'avgpiper')
pivdata.to_excel(writer,'pivotdata')
summaryStats.to_excel(writer,'princaquifstats')
writer.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We've seen finite automata, pushdown automata, and Turing machines, but many other kinds of automata can be created by instantiating a Machine directly.
Step2: The first argument is required. This machine has four stores, all of type BASE (to be explained below).
Step3: This is something like a 2-stack PDA that recognizes the language $\{w\#\#\#w\}$. It works by transferring the first half of the input to the first stack, transferring the first stack to the second stack (reversing it), then checking the second half of the input against it.
Step4: This looks just like a pushdown automaton.
Step5: Low-level interface
Step6: start_config specifies an initial value for every store (not just the state). The initial value for the input is ignored, as it will be replaced by the input string.
Step7: accept_configs is a set of configurations, each of which specifies a pattern for every store (not just the state).
Step8: This is how STREAM stores are able to require that the input is fully consumed -- by making the accept configuration for the input store default to a blank (_).
|
<ASSISTANT_TASK:>
Python Code:
from tock import *
m1 = Machine([BASE, BASE, BASE, BASE], state=0, input=1)
m1.set_start_state('q1')
m1.add_transition('q1, &, &, & -> q2, &, $, $')
m1.add_transition('q2, a, &, & -> q2, &, a, &')
m1.add_transition('q2, b, &, & -> q2, &, b, &')
m1.add_transition('q2, # # #, &, & -> q3, &, &, &')
m1.add_transition('q3, &, a, & -> q3, &, &, a')
m1.add_transition('q3, &, b, & -> q3, &, &, b')
m1.add_transition('q3, &, $, & -> q4, &, &, &')
m1.add_transition('q4, a, &, a -> q4, &, &, &')
m1.add_transition('q4, b, &, b -> q4, &, &, &')
m1.add_transition('q4, _, &, $ -> q5, &, &, &')
m1.add_accept_state('q5')
m1
run(m1, 'a a b # # # a a b').shortest_path()
run(m1, 'a a b # # # b a a').has_path()
m2 = Machine([BASE, STREAM, BASE], state=0, input=1)
m2.set_start_state('q1')
m2.add_transition('q1, &, & -> q2, $')
m2.add_transition('q2, a, & -> q2, a')
m2.add_transition('q2, &, & -> q3, &')
m2.add_transition('q3, b, a -> q3, &')
m2.add_transition('q3, &, $ -> q4, &')
m2.add_accept_state('q4')
m2
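# Quick hedged check (not in the original notebook): m2 should accept the
# matched-count strings a^n b^n it was built for.
print(run(m2, 'a a b b').has_path())  # expect True
print(run(m2, 'a a b').has_path())    # expect False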
m3 = Machine([BASE, TAPE], state=0, input=1)
m3.set_start_state('q1')
m3.add_transition('q1, a -> q2, b, R')
m3.add_accept_state('q2')
m3
for t in m3.transitions: print(t)
m3.transitions.append(machines.Transition('q1, b -> q2, c ^'))
for t in m3.transitions: print(t)
m3.start_config
m3.start_config = machines.Configuration('q2, &')
m3.start_config
for c in m3.accept_configs: print(c)
# accept in state q3 if current symbol is a
m3.accept_configs.add(machines.Configuration('q3, a'))
for c in m2.accept_configs: print(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some useful numpy references
Step2: Question 2
Step3: Potentially useful links
Step4: Potentially useful links
Step6: Assignment wrapup
|
<ASSISTANT_TASK:>
Python Code:
# Imports the functionality that we need to display YouTube videos in
# a Jupyter Notebook.
# You need to run this cell before you run ANY of the YouTube videos.
from IPython.display import YouTubeVideo
# Don't forget to watch the video in full-screen mode!
YouTubeVideo("BTXyE3KLIOs",width=640,height=360) # numpy
import numpy as np
# put your code here!
%matplotlib inline
import matplotlib.pyplot as plt
# put your code here!
# Don't forget to watch the video in full-screen mode!
YouTubeVideo("cozbOliNwSs",width=640,height=360) # Boolean logic and if statements
# write your program here, using multiple cells if necessary (adding extra cells using
# the 'Cell' menu at the top of this notebook). Don't forget that you can execute
# your program by holding down 'shift' and pressing 'enter' in each cell!
# Don't forget to watch the video in full-screen mode!
YouTubeVideo("VnTN5sFIPD0",width=640,height=360) # loops in python
# Program 1 here
# Program 2 here
# Program 3 here
from IPython.display import HTML
HTML(
<iframe
src="https://goo.gl/forms/F1MvFMDpIWPScchr2?embedded=true"
width="80%"
height="1200px"
frameborder="0"
marginheight="0"
marginwidth="0">
Loading...
</iframe>
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 2 - Outline of the Assignment
Step4: Expected Output
Step6: Expected Output
Step8: Expected Output
Step10: Expected Output
Step12: Expected Output
Step14: Expected Output
Step16: Expected Output
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import h5py
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
# GRADED FUNCTION: zero_pad
def zero_pad(X, pad):
Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
as illustrated in Figure 1.
Argument:
X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
pad -- integer, amount of padding around each image on vertical and horizontal dimensions
Returns:
X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
### START CODE HERE ### (≈ 1 line)
X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0) ), 'constant', constant_values = 0)
### END CODE HERE ###
return X_pad
np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)
x_pad = zero_pad(x, 2)
print ("x.shape =", x.shape)
print ("x_pad.shape =", x_pad.shape)
print ("x[1,1] =", x[1,1])
print ("x_pad[1,1] =", x_pad[1,1])
fig, axarr = plt.subplots(1, 2)
axarr[0].set_title('x')
axarr[0].imshow(x[0,:,:,0])
axarr[1].set_title('x_pad')
axarr[1].imshow(x_pad[0,:,:,0])
# GRADED FUNCTION: conv_single_step
def conv_single_step(a_slice_prev, W, b):
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Do not add the bias yet.
s = np.multiply(a_slice_prev, W)
# Sum over all entries of the volume s.
Z = np.sum(s)
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
Z = Z + float(b)
### END CODE HERE ###
return Z
np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)
W = np.random.randn(4, 4, 3)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print("Z =", Z)
# GRADED FUNCTION: conv_forward
def conv_forward(A_prev, W, b, hparameters):
Implements the forward propagation for a convolution function
Arguments:
A_prev -- output activations of the previous layer, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
W -- Weights, numpy array of shape (f, f, n_C_prev, n_C)
b -- Biases, numpy array of shape (1, 1, 1, n_C)
hparameters -- python dictionary containing "stride" and "pad"
Returns:
Z -- conv output, numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward() function
### START CODE HERE ###
# Retrieve dimensions from A_prev's shape (≈1 line)
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape (≈1 line)
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters" (≈2 lines)
stride = hparameters['stride']
pad = hparameters['pad']
# Compute the dimensions of the CONV output volume using the formula given above. Hint: use int() to floor. (≈2 lines)
n_H = int((n_H_prev + 2*pad - f) /stride + 1)
n_W = int((n_W_prev + 2*pad - f) /stride + 1)
# Initialize the output volume Z with zeros. (≈1 line)
Z = np.zeros((m, n_H, n_W, n_C))
# Create A_prev_pad by padding A_prev
A_prev_pad = zero_pad(A_prev, pad)
for i in range(m): # loop over the batch of training examples
a_prev_pad = A_prev_pad[i] # Select ith training example's padded activation
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over channels (= #filters) of the output volume
# Find the corners of the current "slice" (≈4 lines)
vert_start = h * stride
vert_end = f + vert_start
horiz_start = w * stride
horiz_end = f + horiz_start
# Use the corners to define the (3D) slice of a_prev_pad (See Hint above the cell). (≈1 line)
a_slice_prev = a_prev_pad[vert_start: vert_end, horiz_start: horiz_end, :]
# Convolve the (3D) slice with the correct filter W and bias b, to get back one output neuron. (≈1 line)
Z[i, h, w, c] = conv_single_step(a_slice_prev, W[..., c], b[..., c])
### END CODE HERE ###
# Making sure your output shape is correct
assert(Z.shape == (m, n_H, n_W, n_C))
# Save information in "cache" for the backprop
cache = (A_prev, W, b, hparameters)
return Z, cache
np.random.seed(1)
A_prev = np.random.randn(10,4,4,3)
W = np.random.randn(2,2,3,8)
b = np.random.randn(1,1,1,8)
hparameters = {"pad" : 2,
"stride": 2}
Z, cache_conv = conv_forward(A_prev, W, b, hparameters)
print("Z's mean =", np.mean(Z))
print("Z[3,2,1] =", Z[3,2,1])
print("cache_conv[0][1][2][3] =", cache_conv[0][1][2][3])
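# Added sanity check: the output spatial size should match the conv formula
# n_out = floor((n_in + 2*pad - f) / stride) + 1 = (4 + 2*2 - 2)//2 + 1 = 4
print("Z.shape =", Z.shape)  # expect (10, 4, 4, 8)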
# GRADED FUNCTION: pool_forward
def pool_forward(A_prev, hparameters, mode = "max"):
Implements the forward pass of the pooling layer
Arguments:
A_prev -- Input data, numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
hparameters -- python dictionary containing "f" and "stride"
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
A -- output of the pool layer, a numpy array of shape (m, n_H, n_W, n_C)
cache -- cache used in the backward pass of the pooling layer, contains the input and hparameters
# Retrieve dimensions from the input shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve hyperparameters from "hparameters"
f = hparameters["f"]
stride = hparameters["stride"]
# Define the dimensions of the output
n_H = int(1 + (n_H_prev - f) / stride)
n_W = int(1 + (n_W_prev - f) / stride)
n_C = n_C_prev
# Initialize output matrix A
A = np.zeros((m, n_H, n_W, n_C))
### START CODE HERE ###
for i in range(m): # loop over the training examples
for h in range(n_H): # loop on the vertical axis of the output volume
for w in range(n_W): # loop on the horizontal axis of the output volume
for c in range (n_C): # loop over the channels of the output volume
# Find the corners of the current "slice" (≈4 lines)
vert_start = h * stride
vert_end = f + vert_start
horiz_start = w * stride
horiz_end = f + horiz_start
# Use the corners to define the current slice on the ith training example of A_prev, channel c. (≈1 line)
a_prev_slice = A_prev[i, vert_start:vert_end, horiz_start:horiz_end, c]
# Compute the pooling operation on the slice. Use an if statment to differentiate the modes. Use np.max/np.mean.
if mode == "max":
A[i, h, w, c] = np.max(a_prev_slice)
elif mode == "average":
A[i, h, w, c] = np.mean(a_prev_slice)
### END CODE HERE ###
# Store the input and hparameters in "cache" for pool_backward()
cache = (A_prev, hparameters)
# Making sure your output shape is correct
assert(A.shape == (m, n_H, n_W, n_C))
return A, cache
np.random.seed(1)
A_prev = np.random.randn(2, 4, 4, 3)
hparameters = {"stride" : 2, "f": 3}
A, cache = pool_forward(A_prev, hparameters)
print("mode = max")
print("A =", A)
print()
A, cache = pool_forward(A_prev, hparameters, mode = "average")
print("mode = average")
print("A =", A)
def conv_backward(dZ, cache):
Implement the backward propagation for a convolution function
Arguments:
dZ -- gradient of the cost with respect to the output of the conv layer (Z), numpy array of shape (m, n_H, n_W, n_C)
cache -- cache of values needed for the conv_backward(), output of conv_forward()
Returns:
dA_prev -- gradient of the cost with respect to the input of the conv layer (A_prev),
numpy array of shape (m, n_H_prev, n_W_prev, n_C_prev)
dW -- gradient of the cost with respect to the weights of the conv layer (W)
numpy array of shape (f, f, n_C_prev, n_C)
db -- gradient of the cost with respect to the biases of the conv layer (b)
numpy array of shape (1, 1, 1, n_C)
### START CODE HERE ###
# Retrieve information from "cache"
(A_prev, W, b, hparameters) = cache
# Retrieve dimensions from A_prev's shape
(m, n_H_prev, n_W_prev, n_C_prev) = A_prev.shape
# Retrieve dimensions from W's shape
(f, f, n_C_prev, n_C) = W.shape
# Retrieve information from "hparameters"
stride = hparameters['stride']
pad = hparameters['pad']
# Retrieve dimensions from dZ's shape
(m, n_H, n_W, n_C) = dZ.shape
# Initialize dA_prev, dW, db with the correct shapes
dA_prev = np.zeros(A_prev.shape)
dW = np.zeros(W.shape)
db = np.zeros(b.shape)
# Pad A_prev and dA_prev
A_prev_pad = zero_pad(A_prev, pad)
dA_prev_pad = zero_pad(dA_prev, pad)
for i in range(m): # loop over the training examples
# select ith training example from A_prev_pad and dA_prev_pad
a_prev_pad = A_prev_pad[i]
da_prev_pad = dA_prev_pad[i]
for h in range(n_H): # loop over vertical axis of the output volume
for w in range(n_W): # loop over horizontal axis of the output volume
for c in range(n_C): # loop over the channels of the output volume
# Find the corners of the current "slice"
vert_start = h * stride
vert_end = f + vert_start
horiz_start = w * stride
horiz_end = f + horiz_start
# Use the corners to define the slice from a_prev_pad
a_slice = a_prev_pad[vert_start: vert_end, horiz_start:horiz_end, :]
# Update gradients for the window and the filter's parameters using the code formulas given above
da_prev_pad[vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c]
dW[:,:,:,c] += a_slice * dZ[i, h, w, c]
db[:,:,:,c] += dZ[i, h, w, c]
        # Set the ith training example's dA_prev to the unpadded da_prev_pad (Hint: use X[pad:-pad, pad:-pad, :])
dA_prev[i, :, :, :] = da_prev_pad[pad:-pad, pad:-pad, :]
### END CODE HERE ###
# Making sure your output shape is correct
assert(dA_prev.shape == (m, n_H_prev, n_W_prev, n_C_prev))
return dA_prev, dW, db
np.random.seed(1)
dA, dW, db = conv_backward(Z, cache_conv)
print("dA_mean =", np.mean(dA))
print("dW_mean =", np.mean(dW))
print("db_mean =", np.mean(db))
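# Optional hedged sanity check (not part of the original assignment): compare
# one entry of dW from conv_backward against a centered finite difference of
# the simple scalar cost J = sum(Z), for which dJ/dZ is all ones. Everything
# is recovered from cache_conv, so no earlier variables are clobbered.
A_prev_c, W_c, b_c, hp_c = cache_conv
eps = 1e-7
idx = (0, 0, 0, 0)  # the single weight we perturb
W_plus, W_minus = W_c.copy(), W_c.copy()
W_plus[idx] += eps
W_minus[idx] -= eps
Z_plus, _ = conv_forward(A_prev_c, W_plus, b_c, hp_c)
Z_minus, _ = conv_forward(A_prev_c, W_minus, b_c, hp_c)
grad_approx = (np.sum(Z_plus) - np.sum(Z_minus)) / (2 * eps)
_, dW_check, _ = conv_backward(np.ones(Z.shape), cache_conv)
print("finite difference:", grad_approx)
print("conv_backward:   ", dW_check[idx])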
def create_mask_from_window(x):
Creates a mask from an input matrix x, to identify the max entry of x.
Arguments:
x -- Array of shape (f, f)
Returns:
mask -- Array of the same shape as window, contains a True at the position corresponding to the max entry of x.
### START CODE HERE ### (≈1 line)
mask = x == np.max(x)
### END CODE HERE ###
return mask
np.random.seed(1)
x = np.random.randn(2,3)
mask = create_mask_from_window(x)
print('x = ', x)
print("mask = ", mask)
def distribute_value(dz, shape):
Distributes the input value in the matrix of dimension shape
Arguments:
dz -- input scalar
shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz
Returns:
a -- Array of size (n_H, n_W) for which we distributed the value of dz
### START CODE HERE ###
# Retrieve dimensions from shape (≈1 line)
(n_H, n_W) = shape
# Compute the value to distribute on the matrix (≈1 line)
average = dz/(n_H * n_W)
# Create a matrix where every entry is the "average" value (≈1 line)
a = np.ones(shape) * average
### END CODE HERE ###
return a
a = distribute_value(2, (2,2))
print('distributed value =', a)
def pool_backward(dA, cache, mode = "max"):
Implements the backward pass of the pooling layer
Arguments:
dA -- gradient of cost with respect to the output of the pooling layer, same shape as A
cache -- cache output from the forward pass of the pooling layer, contains the layer's input and hparameters
mode -- the pooling mode you would like to use, defined as a string ("max" or "average")
Returns:
dA_prev -- gradient of cost with respect to the input of the pooling layer, same shape as A_prev
### START CODE HERE ###
# Retrieve information from cache (≈1 line)
(A_prev, hparameters) = cache
# Retrieve hyperparameters from "hparameters" (≈2 lines)
stride = hparameters['stride']
f = hparameters['f']
# Retrieve dimensions from A_prev's shape and dA's shape (≈2 lines)
m, n_H_prev, n_W_prev, n_C_prev = A_prev.shape
m, n_H, n_W, n_C = dA.shape
# Initialize dA_prev with zeros (≈1 line)
dA_prev = np.zeros(A_prev.shape)
for i in range(m): # loop over the training examples
# select training example from A_prev (≈1 line)
a_prev = A_prev[i]
for h in range(n_H): # loop on the vertical axis
for w in range(n_W): # loop on the horizontal axis
for c in range(n_C): # loop over the channels (depth)
# Find the corners of the current "slice" (≈4 lines)
                    vert_start = h * stride
                    vert_end = vert_start + f
                    horiz_start = w * stride
                    horiz_end = horiz_start + f
# Compute the backward propagation in both modes.
if mode == "max":
# Use the corners and "c" to define the current slice from a_prev (≈1 line)
a_prev_slice = a_prev[vert_start:vert_end, horiz_start:horiz_end, c]
# Create the mask from a_prev_slice (≈1 line)
mask = create_mask_from_window(a_prev_slice)
# Set dA_prev to be dA_prev + (the mask multiplied by the correct entry of dA) (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += np.multiply(mask, dA[i, h, w, c])
elif mode == "average":
# Get the value a from dA (≈1 line)
da = dA[i, h, w, c]
# Define the shape of the filter as fxf (≈1 line)
shape = (f,f)
# Distribute it to get the correct slice of dA_prev. i.e. Add the distributed value of da. (≈1 line)
dA_prev[i, vert_start: vert_end, horiz_start: horiz_end, c] += distribute_value(da, shape)
### END CODE ###
# Making sure your output shape is correct
assert(dA_prev.shape == A_prev.shape)
return dA_prev
np.random.seed(1)
A_prev = np.random.randn(5, 5, 3, 2)
hparameters = {"stride" : 1, "f": 2}
A, cache = pool_forward(A_prev, hparameters)
dA = np.random.randn(5, 4, 2, 2)
dA_prev = pool_backward(dA, cache, mode = "max")
print("mode = max")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
print()
dA_prev = pool_backward(dA, cache, mode = "average")
print("mode = average")
print('mean of dA = ', np.mean(dA))
print('dA_prev[1,1] = ', dA_prev[1,1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ~mne.Annotations in MNE-Python are a way of storing short strings of
Step2: Notice that orig_time is None, because we haven't specified it. In
Step3: Since the example data comes from a Neuromag system that starts counting
Step4: If you know that your annotation onsets are relative to some other time, you
Step5: <div class="alert alert-info"><h4>Note</h4><p>If your annotations fall outside the range of data times in the
Step6: The three annotations appear as differently colored rectangles because they
Step7: The colored rings are clickable, and determine which existing label will be
Step8: Notice that it is possible to create overlapping annotations, even when they
Step9: You can also iterate over the annotations within an ~mne.Annotations
Step10: Note that iterating, indexing and slicing ~mne.Annotations all
Step11: Reading and writing Annotations to/from a file
|
<ASSISTANT_TASK:>
Python Code:
import os
from datetime import timedelta
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
my_annot = mne.Annotations(onset=[3, 5, 7], # in seconds
duration=[1, 0.5, 0.25], # in seconds, too
description=['AAA', 'BBB', 'CCC'])
print(my_annot)
raw.set_annotations(my_annot)
print(raw.annotations)
# meas_date is a datetime object; check that it matches the annotations' orig_time:
meas_date = raw.info['meas_date']
orig_time = raw.annotations.orig_time
print(meas_date == orig_time)
time_of_first_sample = raw.first_samp / raw.info['sfreq']
print(my_annot.onset + time_of_first_sample)
print(raw.annotations.onset)
time_format = '%Y-%m-%d %H:%M:%S.%f'
new_orig_time = (meas_date + timedelta(seconds=50)).strftime(time_format)
print(new_orig_time)
later_annot = mne.Annotations(onset=[3, 5, 7],
duration=[1, 0.5, 0.25],
description=['DDD', 'EEE', 'FFF'],
orig_time=new_orig_time)
raw2 = raw.copy().set_annotations(later_annot)
print(later_annot.onset)
print(raw2.annotations.onset)
fig = raw.plot(start=2, duration=6)
fig.canvas.key_press_event('a')
new_annot = mne.Annotations(onset=3.75, duration=0.75, description='AAA')
raw.set_annotations(my_annot + new_annot)
raw.plot(start=2, duration=6)
print(raw.annotations[0]) # just the first annotation
print(raw.annotations[:2]) # the first two annotations
print(raw.annotations[(3, 2)]) # the fourth and third annotations
for ann in raw.annotations:
descr = ann['description']
start = ann['onset']
end = ann['onset'] + ann['duration']
print("'{}' goes from {} to {}".format(descr, start, end))
# later_annot WILL be changed, because we're modifying the first element of
# later_annot.onset directly:
later_annot.onset[0] = 99
# later_annot WILL NOT be changed, because later_annot[0] returns a copy
# before the 'onset' field is changed:
later_annot[0]['onset'] = 77
print(later_annot[0]['onset'])
raw.annotations.save('saved-annotations.csv', overwrite=True)
annot_from_file = mne.read_annotations('saved-annotations.csv')
print(annot_from_file)
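# A hedged follow-on sketch (not in the original tutorial): annotations are
# often converted to an events array for epoching; events_from_annotations
# builds the mapping from our 'AAA'/'BBB'/'CCC' labels automatically.
events, event_id = mne.events_from_annotations(raw)
print(event_id)
print(events[:5])  # columns: sample index, previous value, event code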
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's do a simple example where we do a short initial integration to isolate the particles that interest us for a longer simulation
Step2: At this stage, we might be interested in particles that remained within some semimajor axis range, particles that were in resonance with a particular planet, etc. Let's imagine a simple (albeit arbitrary) case where we only want to keep particles that had $x > 0$ at the end of the preliminary integration. Let's first print out the particle ID and x position.
Step3: Next, let's use the remove() function to filter out particle. As an argument, we pass the corresponding index in the particles array.
Step4: By default, the remove() function removes the i-th particle from the particles array, and shifts all particles with higher indices down by 1. This ensures that the original order in the particles array is preserved (e.g., to help with output).
Step5: We see that the particles array is no longer sorted by ID. Note that the default keepSorted=1 only keeps things sorted (i.e., if they were sorted by ID to start with). If you custom-assign IDs out of order as you add particles, the default will simply preserve the original order.
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import numpy as np
def setupSimulation(Nbodies):
sim = rebound.Simulation()
sim.integrator = "ias15" # IAS15 is the default integrator, so we don't need this line
sim.add(m=1.,id=0)
for i in range(1,Nbodies):
sim.add(m=1e-5,x=i,vy=i**(-0.5),id=i)
sim.move_to_com()
return sim
Nbodies=10
sim = setupSimulation(Nbodies)
print([sim.particles[i].id for i in range(sim.N)])
Noutputs = 1000
xs = np.zeros((Nbodies, Noutputs))
ys = np.zeros((Nbodies, Noutputs))
times = np.linspace(0.,50*2.*np.pi, Noutputs, endpoint=False)
for i, time in enumerate(times):
sim.integrate(time)
xs[:,i] = [sim.particles[j].x for j in range(Nbodies)]
ys[:,i] = [sim.particles[j].y for j in range(Nbodies)]
%matplotlib inline
import matplotlib.pyplot as plt
fig,ax = plt.subplots(figsize=(15,5))
for i in range(Nbodies):
plt.plot(xs[i,:], ys[i,:])
ax.set_aspect('equal')
print("ID\tx")
for i in range(Nbodies):
print("{0}\t{1}".format(i, xs[i,-1]))
for i in reversed(range(1,Nbodies)):
if xs[i,-1] < 0:
sim.remove(i)
print("Number of particles after cut = {0}".format(sim.N))
print("IDs of remaining particles = {0}".format([p.id for p in sim.particles]))
sim.remove(2, keepSorted=0)
print("Number of particles after cut = {0}".format(sim.N))
print("IDs of remaining particles = {0}".format([p.id for p in sim.particles]))
sim.remove(id=9)
print("Number of particles after cut = {0}".format(sim.N))
print("IDs of remaining particles = {0}".format([p.id for p in sim.particles]))
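# A hedged follow-on sketch (not in the original notebook): continue the
# longer integration promised above, now that only the particles of
# interest remain.
sim.move_to_com()  # recenter after the removals
sim.integrate(sim.t + 100.*2.*np.pi)
print("t = {0:.1f}, N = {1}".format(sim.t, sim.N))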
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decoding DICOM files for medical imaging
Step2: Install the required packages, then restart the runtime
Step3: Decode the DICOM image
Step4: Decode DICOM metadata and work with tags
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!curl -OL https://github.com/tensorflow/io/raw/master/docs/tutorials/dicom/dicom_00000001_000.dcm
!ls -l dicom_00000001_000.dcm
try:
# Use the Colab's preinstalled TensorFlow 2.x
%tensorflow_version 2.x
except:
pass
!pip install tensorflow-io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_io as tfio
image_bytes = tf.io.read_file('dicom_00000001_000.dcm')
image = tfio.image.decode_dicom_image(image_bytes, dtype=tf.uint16)
skipped = tfio.image.decode_dicom_image(image_bytes, on_error='skip', dtype=tf.uint8)
lossy_image = tfio.image.decode_dicom_image(image_bytes, scale='auto', on_error='lossy', dtype=tf.uint8)
fig, axes = plt.subplots(1,2, figsize=(10,10))
axes[0].imshow(np.squeeze(image.numpy()), cmap='gray')
axes[0].set_title('image')
axes[1].imshow(np.squeeze(lossy_image.numpy()), cmap='gray')
axes[1].set_title('lossy image');
tag_id = tfio.image.dicom_tags.PatientsAge
tag_value = tfio.image.decode_dicom_data(image_bytes,tag_id)
print(tag_value)
print(f"PatientsAge : {tag_value.numpy().decode('UTF-8')}")
tag_id = tfio.image.dicom_tags.PatientsSex
tag_value = tfio.image.decode_dicom_data(image_bytes,tag_id)
print(f"PatientsSex : {tag_value.numpy().decode('UTF-8')}")
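# A minimal hedged sketch (not from the original tutorial): wrap the decoder
# in a tf.data pipeline so many DICOM files can be decoded lazily. The glob
# pattern below is a placeholder assumption.
def load_dicom(path):
    image_bytes = tf.io.read_file(path)
    image = tfio.image.decode_dicom_image(image_bytes, dtype=tf.uint16)
    return tf.squeeze(image)

ds = tf.data.Dataset.list_files('*.dcm')  # hypothetical pattern
ds = ds.map(load_dicom)
for img in ds.take(1):
    print(img.shape, img.dtype)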
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Flip the plot by assigning the data variable to the y axis
Step2: Plot distributions for each column of a wide-form dataset
Step3: Use less smoothing
Step4: Use more smoothing, but don't smooth past the extreme data points
Step5: Plot conditional distributions with hue mapping of a second variable
Step6: "Stack" the conditional distributions
Step7: Normalize the stacked distribution at each value in the grid
Step8: Estimate the cumulative distribution function(s), normalizing each subset
Step9: Estimate distribution from aggregated data, using weights
Step10: Map the data variable with log scaling
Step11: Use numeric hue mapping
Step12: Modify the appearance of the plot
Step13: Plot a bivariate distribution
Step14: Map a third variable with a hue semantic to show conditional distributions
Step15: Show filled contours
Step16: Show fewer contour levels, covering less of the distribution
Step17: Fill the axes extent with a smooth distribution, using a different colormap
|
<ASSISTANT_TASK:>
Python Code:
tips = sns.load_dataset("tips")
sns.kdeplot(data=tips, x="total_bill")
sns.kdeplot(data=tips, y="total_bill")
iris = sns.load_dataset("iris")
sns.kdeplot(data=iris)
sns.kdeplot(data=tips, x="total_bill", bw_adjust=.2)
ax = sns.kdeplot(data=tips, x="total_bill", bw_adjust=5, cut=0)
sns.kdeplot(data=tips, x="total_bill", hue="time")
sns.kdeplot(data=tips, x="total_bill", hue="time", multiple="stack")
sns.kdeplot(data=tips, x="total_bill", hue="time", multiple="fill")
sns.kdeplot(
data=tips, x="total_bill", hue="time",
cumulative=True, common_norm=False, common_grid=True,
)
tips_agg = (tips
.groupby("size")
.agg(total_bill=("total_bill", "mean"), n=("total_bill", "count"))
)
sns.kdeplot(data=tips_agg, x="total_bill", weights="n")
diamonds = sns.load_dataset("diamonds")
sns.kdeplot(data=diamonds, x="price", log_scale=True)
sns.kdeplot(data=tips, x="total_bill", hue="size")
sns.kdeplot(
data=tips, x="total_bill", hue="size",
fill=True, common_norm=False, palette="crest",
alpha=.5, linewidth=0,
)
geyser = sns.load_dataset("geyser")
sns.kdeplot(data=geyser, x="waiting", y="duration")
sns.kdeplot(data=geyser, x="waiting", y="duration", hue="kind")
sns.kdeplot(
data=geyser, x="waiting", y="duration", hue="kind", fill=True,
)
sns.kdeplot(
data=geyser, x="waiting", y="duration", hue="kind",
levels=5, thresh=.2,
)
sns.kdeplot(
data=geyser, x="waiting", y="duration",
fill=True, thresh=0, levels=100, cmap="mako",
)
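# A small hedged extra (not in the original gallery text): overlay a rug of
# the raw observations on a univariate KDE to show where the data actually sit.
ax = sns.kdeplot(data=tips, x="total_bill")
sns.rugplot(data=tips, x="total_bill", ax=ax)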
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A variable save_path is created which contains the path to the folder where the tweet files in json format will be stored. The folder name is saved in the format "tweet_MM_DD_YYYY".
Step2: 2. Tweet Extraction
Step3: 2.2. Search API
Step4: The hyperlink has details related to the structure of a tweet
|
<ASSISTANT_TASK:>
Python Code:
myvars = {}
with open("Twitter_keys.txt") as myfile:
for line in myfile:
name, var = line.partition("=")[::2]
myvars[name.strip()] = var
APP_KEY = myvars["APP_KEY"].rstrip()
APP_SECRET = myvars["APP_SECRET"].rstrip()
OAUTH_TOKEN = myvars["OAUTH_TOKEN"].rstrip()
OAUTH_TOKEN_SECRET = myvars["OAUTH_TOKEN_SECRET"].rstrip()
import os
import datetime
now=datetime.datetime.now()
day=int(now.day)
month=now.month
year=int(now.year)
current_dir=os.getcwd()
save_path=os.path.join(current_dir, r'tweet_%i_%i_%i' % (now.month, now.day, now.year))
if not os.path.exists(save_path):
    os.makedirs(save_path)
#import libraries
from twython import TwythonStreamer
from twython import Twython, TwythonError
import time
import sys
import os
#Configurable parameters
query_word="access-role"
max_tweets=5
searched_tweets_strm=[]
class MyStreamer(TwythonStreamer):
def on_success(self, data):
if len(searched_tweets_strm)<max_tweets:
if 'text' in data:
#print data['text'].encode('utf-8')
searched_tweets_strm.append(data['text'].encode('utf-8'))
else:
print ("No tweets found")
self.disconnect()
else:
print ("Max tweets extracted")
sys.exit()
def on_error(self, status_code, data):
print (status_code, data)
print ("Exception raised, waiting 15 minutes")
time.sleep(15*60)
# Requires Authentication as of Twitter API v1.1
stream = MyStreamer(APP_KEY, APP_SECRET,
OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
stream.statuses.filter(track=query_word)
print (searched_tweets_strm)
#import libraries
from twython import Twython, TwythonError
import time
from time import gmtime, strftime
import json
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import datetime
import os
#Configurable parameters
query_word="CVE"
max_tweets=800
tweet_cnt=0
# Requires Authentication as of Twitter API v1.1
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
searched_tweets_srch = []
while len(searched_tweets_srch) < max_tweets:
remaining_tweets = max_tweets - len(searched_tweets_srch)
try:
        search_results = twitter.search(q=query_word, count=min(100, remaining_tweets))
if not search_results:
print('no tweets found')
break
tweet_cnt=tweet_cnt+len(search_results["statuses"])
searched_tweets_srch.extend(search_results["statuses"])
except TwythonError as e:
print (e)
print ("exception raised, waiting 16 minutes")
print (strftime("%H:%M:%S"+ gmtime()))
time.sleep(16*60)
print ("Total tweets extracted for "+query_word+": "+str(tweet_cnt))
def save_tweets_json(save_path):
data=pd.DataFrame(data=[tweets['id'] for tweets in searched_tweets_srch],columns=['user_id'])
i=0
for tweets in searched_tweets_srch:
if(tweets['id']==data.iloc[i]['user_id']):
file=open(save_path+"/"+tweets['user']['screen_name']+'_'+ str(data.iloc[i]['user_id'])+".json", "w")
file.write(json.dumps(tweets,indent=4))
file.write("\n")
file.close()
i=i+1
save_tweets_json(save_path)
print (searched_tweets_srch)
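# A quick hedged sketch (not in the original notebook): peek at the fields of
# one returned status to see the tweet structure referenced above.
if searched_tweets_srch:
    first = searched_tweets_srch[0]
    print(sorted(first.keys()))
    print(first['user']['screen_name'], '->', first['text'][:80])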
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Bio.Entrez will then use this email address with each
Step2: The tool parameter will default to Biopython.
Step3: Since this is a fairly simple XML file, we could extract the information
Step4: Now record is a dictionary with exactly one key
Step5: The values stored in this key is the list of database names shown in the
Step6: For each of these databases, we can use EInfo again to obtain more
Step7: Try record["DbInfo"].keys() for other information stored in this
Step8: That’s a long list, but indirectly this tells you that for the PubMed
Step9: In this output, you see seven PubMed IDs (including 19304878 which is
Step10: Each of the IDs (126789333, 37222967, 37222966, …) is a GenBank
Step11: Again, we could use EFetch to obtain more information for each of these
Step12: The returned XML includes two important strings, QueryKey and WebEnv
Step13: The "History and WebEnv" section shows how to use the history feature.
Step14: EFetch
Step15: The arguments rettype="gb" and retmode="text" let us download this
Step16: Note that a more typical use would be to save the sequence data to a
Step17: To get the output in XML format, which you can parse using the
Step18: So, that dealt with sequences. For examples of parsing file formats
Step19: The record variable consists of a Python list, one for each database
Step20: The actual search results are stored under the "Link" key. In
Step21: This is the article we searched for, which doesn’t help us much, so
Step22: This paper, with PubMed ID 14630660, is about the Biopython PDB parser.
Step23: Now that was nice, but personally I am often more interested to find out
Step24: See the EGQuery help
Step25: See the ESpell help
Step26: Here, the parser didn’t find the <?xml ... tag with which an XML file
Step27: Of course, the information contained in the XML tags that are not in the
Step28: The record now contains the Medline record as a Python dictionary
Step29: The key names used in a Medline record can be rather obscure; use
Step30: for a brief summary.
Step31: Instead of parsing Medline records stored in files, you can also parse
Step32: We now use Bio.Entrez.efetch to download these Medline records
Step33: Here, we specify rettype="medline", retmode="text" to obtain the
Step34: For comparison, here we show an example using the XML format
Step35: Note that in both of these examples, for simplicity we have naively
Step36: You can search the “gds” database (GEO datasets) with ESearch
Step37: From the Entrez website, UID “200000016” is GDS16 while the other hit
Step38: The record returned by UniGene.read is a Python object with
Step39: The EXPRESS and RESTR_EXPR lines are stored as Python lists of
Step40: and similarly for the PROTSIM and SEQUENCE lines.
Step41: Using a proxy
Step42: Now we use the Bio.Entrez.efetch function to download the PubMed IDs
Step43: Now that we’ve got them, we obviously want to get the corresponding
Step44: NOTE - We’ve just done a separate search and fetch here, the NCBI much
Step45: Let’s now iterate over the records to print out some information about
Step46: The output for this looks like
Step47: Hopefully this section gave you an idea of the power and flexibility of
Step48: So, we expect to find 814 Entrez Nucleotide records (this is the number
Step49: Here, record is a Python dictionary containing the search results and
Step50: First, let’s check how many results were found
Step51: which is the number we expected. The 814 results are stored in
Step52: Let’s look at the first five results
Step54: Each of these records corresponds to one GenBank record.
Step55: You could use this to quickly set up searches – but for heavy usage, see
Step56: Now we download the list of GenBank identifiers
Step57: Now we use these GIs to download the GenBank records - note that with
Step58: If you want to look at the raw GenBank files, you can read from this
Step59: In this case, we are just getting the raw records. To get the records in
Step60: We can now step through the records and look at the information we are
Step61: Using these automated query retrieval functionality is a big plus over
Step62: Now, we use efetch to download this entry in the Taxonomy database,
Step63: Again, this record stores lots of information
Step64: We can get the lineage directly from this record
Step65: The record data contains much more than just the information shown here
Step66: When you get the XML output back, it will still include the usual search
Step67: Having stored these values in variables session_cookie and
Step68: Great - eleven articles. But why hasn’t the Biopython application note
|
<ASSISTANT_TASK:>
Python Code:
from Bio import Entrez
Entrez.email = "A.N.Other@example.com"
from Bio import Entrez
Entrez.tool = "MyLocalScript"
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.einfo()
result = handle.read()
print(result)
from Bio import Entrez
handle = Entrez.einfo()
record = Entrez.read(handle)
record.keys()
record["DbList"]
from Bio import Entrez
handle = Entrez.einfo(db="pubmed")
record = Entrez.read(handle)
record["DbInfo"]["Description"]
record['DbInfo'].keys()
handle = Entrez.einfo(db="pubmed")
record = Entrez.read(handle)
record["DbInfo"]["Description"]
record["DbInfo"]["Count"]
record["DbInfo"]["LastUpdate"]
for field in record["DbInfo"]["FieldList"]:
print("%(Name)s, %(FullName)s, %(Description)s" % field)
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="pubmed", term="biopython")
record = Entrez.read(handle)
record["IdList"]
record
handle = Entrez.esearch(db="nucleotide", term="Cypripedioideae[Orgn] AND matK[Gene]")
record = Entrez.read(handle)
record["Count"]
record["IdList"]
# nlmcatalog
# handle = Entrez.esearch(db="nlmcatalog", term="computational")
# record = Entrez.read(handle)
# record["Count"]
handle = Entrez.esearch(db="nlmcatalog", term="biopython[Journal]", RetMax='20')
record = Entrez.read(handle)
print("{} computational Journals found".format(record["Count"]))
print("The first 20 are\n{}".format(record['IdList']))
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
id_list = ["19304878", "18606172", "16403221", "16377612", "14871861", "14630660"]
print(Entrez.epost("pubmed", id=",".join(id_list)).read())
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
id_list = ["19304878", "18606172", "16403221", "16377612", "14871861", "14630660"]
search_results = Entrez.read(Entrez.epost("pubmed", id=",".join(id_list)))
webenv = search_results["WebEnv"]
query_key = search_results["QueryKey"]
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esummary(db="nlmcatalog", term="[journal]", id="101660833")
record = Entrez.read(handle)
info = record[0]['TitleMainList'][0]
print("Journal info\nid: {}\nTitle: {}".format(record[0]["Id"], info["Title"]))
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text")
print(handle.read())
from Bio import Entrez, SeqIO
handle = Entrez.efetch(db="nucleotide", id="186972394", rettype="gb", retmode="text")
record = SeqIO.read(handle, "genbank")
handle.close()
print(record)
import os
from Bio import SeqIO
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
filename = "gi_186972394.gbk"
if not os.path.isfile(filename):
# Downloading...
with Entrez.efetch(db="nucleotide",id="186972394",rettype="gb", retmode="text") as net_handle:
with open(filename, "w") as out_handle:
out_handle.write(net_handle.read())
print("Saved")
print("Parsing...")
record = SeqIO.read(filename, "genbank")
print(record)
from Bio import Entrez
handle = Entrez.efetch(db="nucleotide", id="186972394", retmode="xml")
record = Entrez.read(handle)
handle.close()
record[0]["GBSeq_definition"]
record[0]["GBSeq_source"]
from Bio import Entrez
Entrez.email = "A.N.Other@example.com"
pmid = "19304878"
record = Entrez.read(Entrez.elink(dbfrom="pubmed", id=pmid))
print(record[0].keys())
print('The record is from the {} database.'.format(record[0]["DbFrom"]))
print('The IdList is {}.'.format(record[0]["IdList"]))
print('There are {} search results'.format(len(record[0]["LinkSetDb"])))
for linksetdb in record[0]["LinkSetDb"]:
print(linksetdb["DbTo"], linksetdb["LinkName"], len(linksetdb["Link"]))
record[0]["LinkSetDb"][0]["Link"][0]
record[0]["LinkSetDb"][0]["Link"][1]
for link in record[0]["LinkSetDb"][0]["Link"]:
print(link["Id"])
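# A hedged extra example (the "cited by" direction the text alludes to): find
# papers citing PubMed 19304878 via the pubmed_pubmed_citedin link name.
record2 = Entrez.read(Entrez.elink(dbfrom="pubmed", db="pubmed",
                                   LinkName="pubmed_pubmed_citedin",
                                   id=pmid))
linksets = record2[0]["LinkSetDb"]
citing = linksets[0]["Link"] if linksets else []
print("%i articles cite PubMed %s" % (len(citing), pmid))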
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="biopython")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
print(row["DbName"], row["Count"])
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.espell(term="biopythooon")
record = Entrez.read(handle)
record["Query"]
record["CorrectedQuery"]
from Bio import Entrez
from Bio.Entrez.Parser import NotXMLError
handle = open("data/NC_005816.fna", 'rb') # a Fasta file
try:
record = Entrez.read(handle)
except NotXMLError as e:
print('We are expecting to get NotXMLError')
print(e)
from Bio import Entrez
handle = open("data/einfo3.xml", 'rb')
record = Entrez.read(handle, validate=False)
from Bio import Medline
with open("data/pubmed_result1.txt") as handle:
record = Medline.read(handle)
record["PMID"]
record["AB"]
help(record)
from Bio import Medline
with open("data/pubmed_result2.txt") as handle:
for record in Medline.parse(handle):
print(record["TI"])
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="pubmed", term="biopython")
record = Entrez.read(handle)
record["IdList"]
idlist = record["IdList"]
handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="text")
from Bio import Medline
records = Medline.parse(handle)
for record in records:
print(record["AU"])
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="pubmed", term="biopython")
record = Entrez.read(handle)
idlist = record["IdList"]
handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline", retmode="xml")
records = Entrez.read(handle)
for record in records:
print(record["MedlineCitation"]["Article"]["ArticleTitle"])
from Bio import Geo
handle = open("data/GSE16.txt")
records = Geo.parse(handle)
for record in records:
print(record)
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="gds", term="GSE16")
record = Entrez.read(handle)
record["Count"]
record["IdList"]
# from Bio import UniGene
# input = open("data/myunigenefile.data")
# record = UniGene.read(input)
# record.ID
# record.title
# record.sts[0].acc
# record.sts[0].unists
# from Bio import UniGene
# input = open("unigenerecords.data")
# records = UniGene.parse(input)
# for record in records:
# print(record.ID)
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="orchid")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
if row["DbName"]=="pubmed":
print(row["Count"])
handle = Entrez.esearch(db="pubmed", term="orchid", retmax=463)
record = Entrez.read(handle)
idlist = record["IdList"]
print("The first 10 Id's containing all of the PubMed IDs of articles related to orchids:\n {}".format(idlist[:10]))
from Bio import Medline
handle = Entrez.efetch(db="pubmed", id=idlist, rettype="medline")
records = Medline.parse(handle)
records = list(records)
for record in records:
print("title:", record.get("TI", "?"))
print("authors:", record.get("AU", "?"))
print("source:", record.get("SO", "?"))
print("")
search_author = "Waits T"
for record in records:
if not "AU" in record:
continue
if search_author in record["AU"]:
print("Author %s found: %s" % (search_author, record["SO"]))
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="Cypripedioideae")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
if row["DbName"]=="nuccore":
print(row["Count"])
from Bio import Entrez
handle = Entrez.esearch(db="nucleotide", term="Cypripedioideae", retmax=814)
record = Entrez.read(handle)
print(record.keys())
print(record["Count"])
len(record["IdList"])
record["IdList"][:5]
idlist = ",".join(record["IdList"][:5])
print(idlist)
handle = Entrez.efetch(db="nucleotide", id=idlist, retmode="xml")
records = Entrez.read(handle)
len(records)
print(records[0].keys())
print(records[0]["GBSeq_primary-accession"])
print(records[0]["GBSeq_other-seqids"])
print(records[0]["GBSeq_definition"])
print(records[0]["GBSeq_organism"])
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.egquery(term="Opuntia AND rpl16")
record = Entrez.read(handle)
for row in record["eGQueryResult"]:
if row["DbName"]=="nuccore":
print(row["Count"])
handle = Entrez.esearch(db="nuccore", term="Opuntia AND rpl16")
record = Entrez.read(handle)
gi_list = record["IdList"]
gi_list
gi_str = ",".join(gi_list)
handle = Entrez.efetch(db="nuccore", id=gi_str, rettype="gb", retmode="text")
text = handle.read()
print(text)
from Bio import SeqIO
handle = Entrez.efetch(db="nuccore", id=gi_str, rettype="gb", retmode="text")
records = SeqIO.parse(handle, "gb")
for record in records:
print("%s, length %i, with %i features" \
% (record.name, len(record), len(record.features)))
from Bio import Entrez
Entrez.email = "A.N.Other@example.com" # Always tell NCBI who you are
handle = Entrez.esearch(db="Taxonomy", term="Cypripedioideae")
record = Entrez.read(handle)
record["IdList"]
record["IdList"][0]
handle = Entrez.efetch(db="Taxonomy", id="158330", retmode="xml")
records = Entrez.read(handle)
records[0].keys()
records[0]["Lineage"]
from Bio import Entrez
Entrez.email = "history.user@example.com"
search_handle = Entrez.esearch(db="nucleotide",term="Opuntia[orgn] and rpl16", usehistory="y")
search_results = Entrez.read(search_handle)
search_handle.close()
gi_list = search_results["IdList"]
count = int(search_results["Count"])
assert count == len(gi_list)
print("The WebEnv is {}".format(search_results["WebEnv"]))
print("The QueryKey is {}".format(search_results["QueryKey"]))
from Bio import Entrez
Entrez.email = "A.N.Other@example.com"
pmid = "14630660"
results = Entrez.read(Entrez.elink(dbfrom="pubmed", db="pmc",
LinkName="pubmed_pmc_refs", from_uid=pmid))
pmc_ids = [link["Id"] for link in results[0]["LinkSetDb"][0]["Link"]]
pmc_ids
results2 = Entrez.read(Entrez.elink(dbfrom="pmc", db="pubmed", LinkName="pmc_pubmed",
from_uid=",".join(pmc_ids)))
pubmed_ids = [link["Id"] for link in results2[0]["LinkSetDb"][0]["Link"]]
pubmed_ids
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Top cities and countries posting jobs
Step2: Top technologies for a given city (London, Amsterdam and San Francisco)
Step3: Dumping out data to csv
Step4: Writing it to Excel
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from unidecode import unidecode
import time
from bokeh.charts import Bar, output_file, show
from bokeh.charts.attributes import CatAttr # used below for Bar(label=CatAttr(...))
from bokeh.sampledata.autompg import autompg as df
from bokeh.io import output_notebook, show
output_notebook()
jobs = pd.read_csv('../data/stackoverflow_jobs_enhanced.csv', thousands=',')
technologies = pd.read_csv('../data/technologies.csv')
# this is needed for excel export
jobs.country = jobs.country.astype(str)
jobs.city = jobs.city.astype(str)
jobs['city']=jobs['city'].apply( lambda x: unidecode(unicode(x, encoding = "utf-8")))
jobs['country']=jobs['country'].apply( lambda x: unidecode(unicode(x, encoding = "utf-8")))
top_cities = jobs.groupby(['city'])['jobid'].count().sort_values(ascending=False)
top_cities = top_cities.nlargest(15)
# create a data frame out of the series.
# found this trick at stackoverflow:
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
df_cities = pd.DataFrame({'count' : top_cities}).reset_index()
p = Bar(df_cities, values='count', title='Jobs posted by cities', legend='top_right', label=CatAttr(columns=['city'], sort=False),)
# Need to add different color for different cities
show(p)
top_countries = jobs.groupby(['country'])['jobid'].count().sort_values(ascending=False)
top_countries = top_countries.nlargest(15)
df_countries = top_countries.to_frame('count').reset_index()
top_countries
bar_countries = Bar(df_countries, values='count', title='Jobs posted by countries', legend='top_right', label=CatAttr(columns=['country'], sort=False),)
# Need to add different color for different cities
show(bar_countries)
# London top technologies
london_tech = technologies[technologies.city == 'London'].groupby(['city', 'tech'])['jobid'].count().sort_values(ascending=False)
london_tech.nlargest(10)
# Amsterdam top technologies
amsterdam_tech = technologies[technologies.city == 'Amsterdam'].groupby(['city', 'tech'])['jobid'].count().sort_values(ascending=False)
amsterdam_tech.nlargest(10)
# Berlin
berlin_tech = technologies[technologies.city == 'Berlin'].groupby(['city', 'tech'])['jobid'].count().sort_values(ascending=False)
berlin_tech.nlargest(10)
# Silicon Valley
cal_tech = technologies[technologies.state == 'CA'].groupby('tech')['jobid'].count().sort_values(ascending=False)
cal_tech.nlargest(10)
# Ordered view of which technologies are the most sought after in which city
all_tech = technologies.groupby(['city', 'tech'])['jobid'].count().sort_values(ascending=False)
all_tech.nlargest(100)
top_cities.to_frame('count').to_csv('../data/cities.csv', encoding = 'utf-8')
top_countries.to_frame('count').to_csv('../data/countries.csv', encoding = 'utf-8')
ew = pd.ExcelWriter('../data/stackjobs.xlsx',options={'encoding':'utf-8'})
df_cities.to_excel(ew, 'City')
df_countries.to_excel(ew, 'Country')
ew.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Layout of the corrugated structure insertion. Create Ocelot lattice <img src="4_layout.png" />
Step2: Load beam file
Step3: Initialization of the wakes and the places of their applying
Step4: Add the wakes in the lattice
Step5: Longitudinal beam distribution
Step6: Beam distribution
|
<ASSISTANT_TASK:>
Python Code:
# the output of plotting commands is displayed inline within frontends,
# directly below the code cell that produced it
%matplotlib inline
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
import time
# import from Ocelot main modules and functions
from ocelot import *
# import from Ocelot graphical modules
from ocelot.gui.accelerator import *
# load beam distribution
# this function convert Astra beam distribution to Ocelot format - ParticleArray. ParticleArray is designed for tracking.
# in order to work with converters we have to import specific module from ocelot.adaptors
from ocelot.adaptors.astra2ocelot import *
D00m25 = Drift(l = 0.25)
D01m = Drift(l = 1)
D02m = Drift(l = 2)
# Create markers that define where the wakes are applied
w1_start = Marker()
w1_stop = Marker()
w2_start = Marker()
w2_stop = Marker()
w3_start = Marker()
w3_stop = Marker()
w4_start = Marker()
w4_stop = Marker()
w5_start = Marker()
w5_stop = Marker()
w6_start = Marker()
w6_stop = Marker()
# quadrupoles
Q1 = Quadrupole(l = 0.5, k1 = 0.215)
# lattice
lattice = (D01m, w1_start, D02m, w1_stop, w2_start, D02m, w2_stop, w3_start, D02m, w3_stop, D00m25, Q1,
D00m25, w4_start, D02m, w4_stop, w5_start, D02m, w5_stop, w6_start, D02m, w6_stop, D01m)
# creation MagneticLattice
method = MethodTM()
method.global_method = SecondTM
lat = MagneticLattice(lattice, method=method)
# calculate twiss functions with initial twiss parameters
tws0 = Twiss()
tws0.E = 14 # in GeV
tws0.beta_x = 22.5995
tws0.beta_y = 22.5995
tws0.alpha_x = -1.4285
tws0.alpha_y = 1.4285
tws = twiss(lat, tws0, nPoints=None)
# plotting twiss parameters.
plot_opt_func(lat, tws, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
# load and convert ASTRA file to OCELOT beam distribution
# p_array_init = astraBeam2particleArray(filename='beam_chirper.ast')
# save ParticleArray to compresssed numpy array
# save_particle_array("chirper_beam.npz", p_array_init)
p_array_init = load_particle_array("chirper_beam.npz")
plt.plot(-p_array_init.tau()*1000, p_array_init.p(), "r.")
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
from ocelot.cpbd.wake3D import *
# load wake tables of corrugated structures
wk_vert = WakeTable('wake_vert_1m.txt')
wk_hor = WakeTable('wake_hor_1m.txt')
# creation of wake object with parameters
wake_v1 = Wake()
# w_sampling - defines the number of the equidistant sampling points for the one-dimensional
# wake coefficients in the Taylor expansion of the 3D wake function.
wake_v1.w_sampling = 500
wake_v1.wake_table = wk_vert
wake_v1.step = 1 # step in Navigator.unit_step, dz = Navigator.unit_step * wake.step [m]
wake_h1 = Wake()
wake_h1.w_sampling = 500
wake_h1.wake_table = wk_hor
wake_h1.step = 1
wake_v2 = deepcopy(wake_v1)
wake_h2 = deepcopy(wake_h1)
wake_v3 = deepcopy(wake_v1)
wake_h3 = deepcopy(wake_h1)
navi = Navigator(lat)
# add physics proccesses
navi.add_physics_proc(wake_v1, w1_start, w1_stop)
navi.add_physics_proc(wake_h1, w2_start, w2_stop)
navi.add_physics_proc(wake_v2, w3_start, w3_stop)
navi.add_physics_proc(wake_h2, w4_start, w4_stop)
navi.add_physics_proc(wake_v3, w5_start, w5_stop)
navi.add_physics_proc(wake_h3, w6_start, w6_stop)
# defining unit step in [m]
navi.unit_step = 0.2
# deep copy of the initial beam distribution
p_array = deepcopy(p_array_init)
print("tracking with Wakes .... ")
start = time.time()
tws_track, p_array = track(lat, p_array, navi)
print("\n time exec:", time.time() - start, "sec")
tau0 = p_array_init.tau()
p0 = p_array_init.p()
tau1 = p_array.tau()
p1 = p_array.p()
print(len(p1))
plt.figure(1)
plt.plot(-tau0*1000, p0, "r.", -tau1*1000, p1, "b.")
plt.legend(["before", "after"], loc=4)
plt.grid(True)
plt.xlabel(r"$\tau$, mm")
plt.ylabel(r"$\frac{\Delta E}{E}$")
plt.show()
tau = np.array([p.tau for p in p_array])
dp = np.array([p.p for p in p_array])
x = np.array([p.x for p in p_array])
y = np.array([p.y for p in p_array])
ax1 = plt.subplot(311)
ax1.plot(-tau*1000, x*1000, 'r.')
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel("x, mm")
plt.grid(True)
ax2 = plt.subplot(312, sharex=ax1)
ax2.plot(-tau*1000, y*1000, 'r.')
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel("y, mm")
plt.grid(True)
ax3 = plt.subplot(313, sharex=ax1)
ax3.plot(-tau*1000, dp, 'r.')
plt.ylabel("dp/p")
plt.xlabel("s, mm")
plt.grid(True)
# plotting twiss parameters.
plot_opt_func(lat, tws_track, top_plot=["Dx"], fig_name="i1", legend=False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helper methods for visualization
Step2: 1. Define the trial
Step3: Choose trial parameters
Step4: 2. Load incidence forecasts
Step5: 3. Simulate the trial
Step6: 4. Optimize the trial
Step7: Plot the resulting sites
Step8: Example
Step9: Plot the resulting sites
Step10: Example
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
import functools
import importlib.resources
import numpy as np
import os
import pandas as pd
pd.plotting.register_matplotlib_converters()
import xarray as xr
from IPython.display import display
# bsst imports
from bsst import demo_data
from bsst import io as bsst_io
from bsst import util
from bsst import optimization
from bsst import sim
from bsst import sim_scenarios
from bsst import public_data
def plot_participants(participants):
time = participants.time.values
util.sum_all_but_dims(['time'], participants).cumsum('time').plot()
plt.title('Participants recruited (both control and treatment arm)')
plt.xlim(time[0], time[-1])
plt.ylim(bottom=0)
plt.show()
def plot_events(events):
time = events.time.values
events.cumsum('time').plot.line(x='time', color='k', alpha=.02, add_legend=False)
for analysis, num_events in c.needed_control_arm_events.to_series().items():
plt.axhline(num_events, linestyle='--')
plt.text(time[0], num_events, analysis, ha='left', va='bottom')
plt.ylim(0, 120)
plt.xlim(time[0], time[-1])
plt.title(f'Control arm events\n{events.scenario.size} simulated scenarios')
plt.show()
def plot_success(c, events):
time = c.time.values
success_day = xr.DataArray(util.success_day(c.needed_control_arm_events, events),
coords=(events.scenario, c.analysis))
fig, axes = plt.subplots(c.analysis.size, 1, sharex=True)
step = max(1, int(np.timedelta64(3, 'D') / (time[1] - time[0])))
bins = mpl.units.registry[np.datetime64].convert(time[::step], None, None)
for analysis, ax in zip(c.analysis.values, axes):
        success_days = success_day.sel(analysis=analysis).values
        # keep the np.where result; it was computed but discarded before
        success_days = np.where(np.isnat(success_days), np.datetime64('2050-06-01'), success_days)
        ax.hist(success_days, bins=bins, density=True)
ax.yaxis.set_visible(False)
# subtract time[0] to make into timedelta64s so that we can take a mean/median
median = np.median(success_days - time[0]) + time[0]
median = pd.to_datetime(median).date()
ax.axvline(median, color='r')
ax.text(time[0], 0, f'{analysis}\n{median} median', ha='left', va='bottom')
plt.xlabel('Date when sufficient statistical power is achieved')
plt.xlim(time[0], time[-1])
plt.xticks(rotation=35)
plt.show()
with importlib.resources.path(demo_data, 'site_list1.csv') as p:
demo_data_file_path = os.fspath(p)
site_df = pd.read_csv(demo_data_file_path, index_col=0)
site_df.index.name = 'location'
site_df['start_date'] = pd.to_datetime(site_df['start_date'])
display(site_df)
# Add in information we have about each county.
site_df = pd.concat([site_df, public_data.us_county_data().loc[site_df.opencovid_key].set_index(site_df.index)], axis=1)
start_day = np.datetime64('2021-05-15')
end_day = np.datetime64('2021-10-01')
time_resolution = np.timedelta64(1, 'D')
time = np.arange(start_day, end_day + time_resolution, time_resolution)
c = xr.Dataset(coords=dict(time=time))
c['proportion_control_arm'] = 0.5
# Assume some intermediate analyses.
frac_control = float(c.proportion_control_arm)
efficacy = np.array([.55, .65, .75, .85, .95])
ctrl_events = util.needed_control_arm_events(efficacy, frac_control)
vaccine_events = (1 - efficacy) * ctrl_events * (1 - frac_control) / frac_control
ctrl_events, vaccine_events = np.round(ctrl_events), np.round(vaccine_events)
efficacy = 1 - (vaccine_events / ctrl_events)
total_events = ctrl_events + vaccine_events
analysis_names = [
f'{int(t)} total events @{int(100 * e)}% VE' for t, e in zip(total_events, efficacy)
]
c['needed_control_arm_events'] = xr.DataArray(
ctrl_events, dims=('analysis',)).assign_coords(analysis=analysis_names)
c['recruitment_type'] = 'default'
c['observation_delay'] = int(np.timedelta64(28, 'D') / time_resolution) # 28 days
c['trial_size_cap'] = 30000
# convert weekly capacity to capacity per time step
site_capacity = site_df.capacity.to_xarray() * time_resolution / np.timedelta64(7, 'D')
site_capacity = site_capacity.broadcast_like(c.time).astype('float')
# Can't recruit before the activation date
activation_date = site_df.start_date.to_xarray()
for l in activation_date.location.values:
date = activation_date.loc[l]
site_capacity.loc[site_capacity.time < date, l] = 0.0
c['site_capacity'] = site_capacity.transpose('location', 'time')
c['site_activation'] = xr.ones_like(c.site_capacity)
# For the sake of simplicity, this code assumes black and hisp_lat are
# non-overlapping, and that obese/smokers/diabetes are non-overlapping.
frac_and_scalar = util.fraction_and_incidence_scaler
fraction_scalers = [
frac_and_scalar(site_df, 'age', ['over_60'], [1], 'under_60'),
frac_and_scalar(site_df, 'ethnicity', ['black', 'hisp_lat'], [1, 1],
'other'),
frac_and_scalar(site_df, 'comorbidity', ['smokers', 'diabetes', 'obese'],
[1, 1, 1], 'none')
]
fractions, incidence_scalers = zip(*fraction_scalers)
# We assume that different categories are independent (e.g. the proportion of
# smokers over 60 is the same as the proportion of smokers under 60)
c['population_fraction'] = functools.reduce(lambda x, y: x * y, fractions)
# We assume the participants are drawn uniformly from the population.
c['participant_fraction'] = c['population_fraction']
# Assume some boosted incidence risk for subpopulations. We pick random numbers
# here, but in actual use you'd put your best estimate for the incidence risk
# of each demographic category.
# Since we assume participants are uniformly drawn from the county population,
# this actually doesn't end up affecting the estimated number of clinical events.
c['incidence_scaler'] = functools.reduce(lambda x, y: x * y,
incidence_scalers)
c.incidence_scaler.loc[dict(age='over_60')] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(comorbidity=['smokers', 'diabetes', 'obese'])] = 1 + 2 * np.random.random()
c.incidence_scaler.loc[dict(ethnicity=['black', 'hisp_lat'])] = 1 + 2 * np.random.random()
# We assume a constant incidence_to_event_factor.
c['incidence_to_event_factor'] = 0.6 * xr.ones_like(c.incidence_scaler)
util.add_empty_history(c)
# Extrapolate out a bit extra to ensure we're within bounds when we interpolate later.
full_pred = public_data.fetch_cdc_forecasts([('COVIDhub-ensemble', '2021-05-10'),
('COVIDhub-baseline', '2021-05-10')],
end_date=c.time.values[-1] + np.timedelta64(15, 'D'),
num_samples=50)
full_gt = public_data.fetch_opencovid_incidence()
# Suppose we only have ground truth through 2021-05-09.
full_gt = full_gt.sel(time=slice(None, np.datetime64('2021-05-09')))
# Include more historical incidence here for context. It will be trimmed off when
# we construct scenarios to simulate. The funny backwards range is to ensure that if
# we use weekly instead of daily resolution, we use the same day of the week as c.
time = np.arange(c.time.values[-1], np.datetime64('2021-04-01'), -time_resolution)[::-1]
incidence_model = public_data.assemble_forecast(full_gt, full_pred, site_df, time)
locs = np.random.choice(c.location.values, size=5, replace=False)
incidence_model.sel(location=locs).plot.line(x='time', color='k', alpha=.1, add_legend=False, col='location', row='model')
plt.ylim(0.0, 1e-3)
plt.suptitle('Forecast incidence at a sampling of sites', y=1.0)
pass
# incidence_flattened: rolls together all the models you've included in your ensemble, treating them as independent samples.
incidence_flattened = sim_scenarios.get_incidence_flattened(incidence_model, c)
# incidence_scenarios: chooses scenarios given the incidence curves and your chosen method of scenario-generation.
incidence_scenarios = sim_scenarios.generate_scenarios_independently(incidence_flattened, num_scenarios=100)
# compute the number of participants recruited under your trial rule
participants = sim.recruitment(c)
# compute the number of control arm events under your trial rules and incidence_scenarios.
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
# plot events and label different vaccine efficacies
plot_events(events)
# plot histograms of time to success
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_data
bsst_io.write_ville_to_netcdf(c, 'demo_data/site_list1_all_site_on.nc')
%time optimization.optimize_static_activation(c, incidence_scenarios)
all_sites = c.location.values
activated_sites = c.location.values[c.site_activation.mean('time') == 1]
# Simulate the results with this activation scheme.
print(f'\n\n{len(activated_sites)} of {len(all_sites)} activated')
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
df = (participants.sum(['location', 'time', 'comorbidity']) / participants.sum()).to_pandas()
display(df.style.set_caption('Proportion of participants by age and ethnicity'))
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_data
bsst_io.write_ville_to_netcdf(c, 'demo_data/site_list1_optimized_static.nc')
def loss_fn(c):
# sum over location, time, comorbidity
# remaining dimensions are [age, ethnicity]
participants = c.participants.sum(axis=0).sum(axis=0).sum(axis=-1)
total_participants = participants.sum()
return (
optimization.negative_mean_successiness(c) # demonstrate efficacy fast
+ 0.2 * c.site_activation.mean() # turning on sites is costly
- 0.5 * participants[1:, :].sum() / total_participants # we want people over 60
- 0.5 * participants[:, 1:].sum() / total_participants # we want blacks and hispanics
)
%time optimization.optimize_static_activation(c, incidence_scenarios, loss_fn)
all_sites = c.location.values
activated_sites = c.location.values[c.site_activation.mean('time') == 1]
# Simulate the results with this activation scheme.
print(f'\n\n{len(activated_sites)} of {len(all_sites)} activated')
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
df = (participants.sum(['location', 'time', 'comorbidity']) / participants.sum()).to_pandas()
display(df.style.set_caption('Proportion of participants by age and ethnicity'))
# We put all sites in on group. We also support prioritizing sites within groupings.
# For example, if you can activate 2 sites per state per week, sites would be grouped
# according to the state they're in.
site_to_group = pd.Series(['all_sites'] * len(site_df), index=site_df.index)
decision_dates = c.time.values[:70:7]
allowed_activations = pd.DataFrame([[20] * len(decision_dates)], index=['all_sites'], columns=decision_dates)
parameterizer = optimization.PivotTableActivation(c, site_to_group, allowed_activations, can_deactivate=False)
optimization.optimize_params(c, incidence_scenarios, parameterizer)
c['site_activation'] = c.site_activation.round() # each site has to be on or off at each time
df = c.site_activation.to_pandas()
df.columns = [pd.to_datetime(x).date() for x in df.columns]
sns.heatmap(df, cbar=False)
plt.title('Which sites are activated when')
plt.show()
participants = sim.recruitment(c)
events = sim.control_arm_events(c, participants, incidence_scenarios)
plot_participants(participants)
plot_events(events)
plot_success(c, events)
sim.add_stuff_to_ville(c, incidence_model, site_df, num_scenarios=100)
!mkdir -p demo_data
bsst_io.write_ville_to_netcdf(c, 'demo_data/site_list1_prioritized.nc')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>II. Preprocessing </h3>
Step2: The SVM is sensitive to feature scale so the first step is to center and normalize the data. The train and test sets are scaled separately using the mean and variance computed from the training data. This is done to estimate the ability of the model to generalize.
Step3: <h3>III. Undersampling </h3>
Step4: <h4> Cross-validation </h4>
Step5: <h5>CV Results</h5>
Step6: Based on initial experiments and the results of the CV above, undersampling with higher ratios such as 0.8, 0.9, and 1.0 gives higher MCC and true-positive values than lower ratios.
Step7: If an overall accuracy of 0.7 is acceptable, random undersampling yields a TPR of about 0.71 and an MCC of about 0.22. These are the highest values obtained so far from experiments with sampling and cost-sensitive learning.
Step8: <h4>2. Neighborhood Cleaning Rule</h4>
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split as tts
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NeighbourhoodCleaningRule
from imblearn.under_sampling import TomekLinks
from imblearn.pipeline import Pipeline as ImbPipe
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import confusion_matrix, matthews_corrcoef,\
accuracy_score, classification_report
from collections import Counter
from time import time
from __future__ import division
import warnings
warnings.filterwarnings("ignore")
# load the data
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom.data"
secom = pd.read_table(url, header=None, delim_whitespace=True)
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/secom/secom_labels.data"
y = pd.read_table(url, header=None, usecols=[0], squeeze=True, delim_whitespace=True)
print 'The dataset has {} observations/rows and {} variables/columns.' \
.format(secom.shape[0], secom.shape[1])
print 'The ratio of majority class to minority class is {}:1.' \
.format(int(y[y == -1].size/y[y == 1].size))
# dropping columns which have large number of missing entries
m = map(lambda x: sum(secom[x].isnull()), xrange(secom.shape[1]))
m_200thresh = filter(lambda i: (m[i] > 200), xrange(secom.shape[1]))
secom_drop_200thresh = secom.dropna(subset=[m_200thresh], axis=1)
dropthese = [x for x in secom_drop_200thresh.columns.values if \
secom_drop_200thresh[x].std() == 0]
secom_drop_200thresh.drop(dropthese, axis=1, inplace=True)
print 'The SECOM data set now has {} variables.'\
.format(secom_drop_200thresh.shape[1])
# imputing missing values for the random forest
imp = Imputer(missing_values='NaN', strategy='median', axis=0)
secom_imp = pd.DataFrame(imp.fit_transform(secom_drop_200thresh))
# use Random Forest to assess variable importance
rf = RandomForestClassifier(n_estimators=100, random_state=7)
rf.fit(secom_imp, y)
# sorting features according to their rank
importance = rf.feature_importances_
ranked_indices = np.argsort(importance)[::-1]
# split data into train and holdout sets
# stratify the sample used for modeling to preserve the class proportions
#X_train, X_test, y_train, y_test = tts(secom_imp[ranked_indices[:40]], y, \
X_train, X_test, y_train, y_test = tts(secom_imp[ranked_indices], y, \
test_size=0.2, stratify=y, random_state=5)
print 'Train data for each class: {} '\
.format(Counter(y_train))
print 'The maj/min class ratio is: {0:2.0f}' \
.format(round(y_train[y_train == -1].size/y_train[y_train == 1].size))
print 'Test data for each class: {} '\
.format(Counter(y_test))
print 'The maj/min class ratio for the holdout set is: {0:2.0f}' \
.format(round(y_test[y_test == -1].size/y_test[y_test == 1].size))
# scaling the split data. The holdout data uses scaling parameters
# computed from the training data
standard_scaler = StandardScaler()
X_train_scaled = pd.DataFrame(standard_scaler.fit_transform(X_train), \
index=X_train.index)
X_test_scaled = pd.DataFrame(standard_scaler.transform(X_test))
# Note: we convert to a DataFrame because the plot functions
# we will use need DataFrame inputs.
# undersampling numbers before/after
print 'Original dataset distribution: {}'.format(Counter(y_train))
ratio = 0.8
rus = RandomUnderSampler(ratio=ratio, random_state=7)
X_res, y_res = rus.fit_sample(X_train_scaled, y_train)
print 'Resampled dataset distribution: {}'.format(Counter(y_res))
# plotting majority class distribution after undersampling
# displaying column 4
from IPython.html.widgets import interact
@interact(ratio=[0.1,1.0])
def plot_dist(ratio):
sns.set(style="white", font_scale=1.3)
fig, ax = plt.subplots(figsize=(7,5))
rus = RandomUnderSampler(ratio=ratio, random_state=7)
X_res, y_res = rus.fit_sample(X_train_scaled, y_train)
X_res_df = pd.DataFrame(X_res)
ax = sns.distplot(X_train_scaled[4][y_train == -1], color='darkorange', \
kde=False, label='before')
ax = sns.distplot(X_res_df[4][y_res == -1], color='b', \
kde=False, label='after')
ax.set_ylim([0, 180])
ax.set(xlabel='')
ax.legend(title='Ratio = {}'.format(ratio))
plt.title('Majority class distribution before and after undersampling')
plt.show()
# defining the MCC metric to assess cross-validation
def tpr_score(y_true, y_pred):
    # compute the confusion matrix here rather than relying on an outer-scope cm
    cm = confusion_matrix(y_true, y_pred)
    tprate = float(cm[1][1])/np.sum(cm[1])
    return tprate
def mcc_score(y_true, y_pred):
mcc = matthews_corrcoef(y_true, y_pred)
return mcc
mcc_scorer = make_scorer(mcc_score, greater_is_better=True)
tpr_scorer = make_scorer(tpr_score, greater_is_better=True)
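# quick sanity check of the custom MCC scorer on a toy example
# (an illustrative addition; the toy labels below are made up)
y_true_toy = [-1, -1, 1, 1]
y_pred_toy = [-1, 1, 1, 1]
print 'toy MCC: {0:4.2f}'.format(mcc_score(y_true_toy, y_pred_toy))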
# print classification results
def test_results(y_test, y_predicted):
print '\nThe accuracy is: {0:4.2} ' \
.format(accuracy_score(y_test, y_predicted))
print '\nThe confusion matrix: '
cm = confusion_matrix(y_test, y_predicted)
print cm
print '\nThe True Positive rate is: {0:4.2}' \
.format(float(cm[1][1])/np.sum(cm[1]))
print '\nThe Matthews correlation coefficient: {0:4.2f} \n' \
.format(matthews_corrcoef(y_test, y_predicted))
print(classification_report(y_test, y_predicted))
# grid search cross-validation function
def sampling_gridcv(samp_method, nfeatures):
X_train_ = X_train_scaled.iloc[:,:nfeatures]
X_test_ = X_test_scaled.iloc[:,:nfeatures]
add_parameters = dict()
if samp_method == 'rus':
sampling = RandomUnderSampler(random_state=7)
#[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
add_parameters = dict(samp__ratio = np.arange(1,11)*0.1)
elif samp_method == 't1':
sampling = TomekLinks(random_state=7)
elif samp_method == 'ncr':
sampling = NeighbourhoodCleaningRule(random_state=7)
estimators = [('samp', sampling),
('clf', SVC(probability=True, random_state=7))]
parameters = dict(clf__C =[1, 10, 50, 100, 200],
clf__gamma=[.04, .05, .06, .07])
parameters.update(add_parameters)
pipe = ImbPipe(estimators)
print pipe
# stratified K-fold cross-validation
cv = GridSearchCV(pipe, param_grid = parameters, cv =5, scoring=mcc_scorer)
start = time()
cv.fit(X_train_, y_train)
print '\nGridSearchCV took {} seconds for {} candidate parameter settings.'\
.format(time() - start, len(cv.grid_scores_))
y_predicted = cv.predict(X_test_)
#probas_ = cv.predict_proba(X_test_)
print '\nThe best CV parameters are: {}' .format(cv.best_params_)
# print test results using best parameters
test_results(y_test, y_predicted)
# random undersampling with 40 features
sampling_gridcv('rus', 40)
sampling_gridcv('rus', 100)
sampling_gridcv('rus', 140)
# Second CV (using best CV parameters from previous run)
ratio = 1.0
X_train_ = X_train_scaled.iloc[:,:40]
X_test_ = X_test_scaled.iloc[:,:40]
rus = RandomUnderSampler(ratio=ratio, random_state=7)
X_res, y_res = rus.fit_sample(X_train_, y_train)
clf = SVC(random_state=7)
param_grid = {"C": [1, 5, 7, 10],
"gamma": [0.04, 0.05, 0.06]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5, scoring=mcc_scorer)
start = time()
grid_search.fit(X_res, y_res)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
print '\nThe best CV parameters are: {}' .format(grid_search.best_params_)
# using model with best parameters on test set
y_predicted = grid_search.predict(X_test_)
test_results(y_test, y_predicted)
# Number of elements before/after TL
print 'Original dataset distribution: {}'.format(Counter(y_train))
tl = TomekLinks(random_state=7)
X_res, y_res = tl.fit_sample(X_train_scaled, y_train)
print 'Resampled dataset distribution: {}'.format(Counter(y_res))
sampling_gridcv('t1', 40)
# Number of elements before/after NCL
print 'Original dataset distribution: {}'.format(Counter(y_train))
ncr = NeighbourhoodCleaningRule(random_state=7)
X_res, y_res = ncr.fit_sample(X_train_scaled, y_train)
print 'Resampled dataset distribution: {}'.format(Counter(y_res))
sampling_gridcv('ncr', 40)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph Config
Step2: Initial Data, Graph and Update
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import display, HTML
from string import Template
import pandas as pd
import json, random
HTML('<script src="lib/d3/d3.min.js"></script>')
html_template = Template('''
<style> $css_text </style>
<div id="graph-div"></div>
<script> $js_text </script>
''')
css_text = '''
.bar {
fill: steelblue;
}
.bar:hover {
fill: brown;
}
.axis {
font: 10px sans-serif;
}
.axis path,
.axis line {
fill: none;
stroke: #000;
shape-rendering: crispEdges;
}
.x.axis path {
display: none;
}
'''
js_text_template = Template('''
var margin = {top: 20, right: 20, bottom: 30, left: 40},
width = 500 - margin.left - margin.right,
height = 300 - margin.top - margin.bottom;
var x = d3.scale.ordinal()
.rangeRoundBands([0, width], .1);
var y = d3.scale.linear()
.range([height, 0]);
var xAxis = d3.svg.axis()
.scale(x)
.orient("bottom");
var yAxis = d3.svg.axis()
.scale(y)
.orient("left");
var svg = d3.select("#graph-div").append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
var data = $data ;
x.domain(data.map(function(d) { return d.letter; }));
y.domain([0, d3.max(data, function(d) { return d.y; })]);
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(xAxis);
svg.append("g")
.attr("class", "y axis")
.call(yAxis);
svg.selectAll(".bar")
.data(data)
.enter().append("rect")
.attr("class", "bar")
.attr("x", function(d) { return x(d.letter); })
.attr("width", x.rangeBand())
.attr("y", function(d) { return y(d.y); })
.attr("height", function(d) { return height - y(d.y); });
''')
js_text_template_2 = Template('''
var bars = svg.selectAll(".bar").data($data);
bars
.transition()
.attr("y", function(d) { return y(d.y); })
.attr("height", function(d) { return height - y(d.y); });
''')
data = pd.DataFrame({'letter': ['A','B','C','D'], 'y': [1,1,1,1]})
data.head()
js_text = js_text_template.substitute({'data': json.dumps(data.to_dict(orient='records'))})
HTML(html_template.substitute({'css_text': css_text, 'js_text': js_text}))
data['y'] = [random.uniform(0,1) for d in data['y']]
js_text = js_text_template_2.substitute({'data': json.dumps(data.to_dict(orient='records'))})
HTML('<script>' + js_text + '</script>')
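# (An illustrative helper we are adding; the name update_bars is our own.)
# Wrapping the update step lets later cells re-animate the bars with one call.
def update_bars(df):
    js = js_text_template_2.substitute({'data': json.dumps(df.to_dict(orient='records'))})
    return HTML('<script>' + js + '</script>')

data['y'] = [random.uniform(0, 1) for _ in data['y']]
update_bars(data)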
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining convergence diagnostics
Step2: Process samples
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def tracePlot(chains, labels=None, truths=None):
n_dim = chains.shape[2]
fig, ax = plt.subplots(n_dim, 1, figsize=(8., 27.), sharex=True)
ax[-1].set_xlabel('Iteration', fontsize=20.)
for i in range(len(ax)):
try:
ax[i].set_ylabel(labels[i], fontsize=20.)
except IndexError:
pass
ax[i].tick_params(which='major', axis='both', length=10., labelsize=16.)
for j in range(len(chains)):
try:
ax[i].plot([0, len(chains[j,:,i])+10], [truths[i], truths[i]], '-', lw=4, dashes=(20., 10.),
c='#B22222')
except:
pass
ax[i].plot(chains[j,:,i], '-', lw=1, c='#0473B3', alpha=0.5)
fig.tight_layout()
def GelmanRubin(chains, labels=None):
n_chains = chains.shape[0]
n_iter = chains.shape[1]/2
n_params = chains.shape[2]
# take last n samples if total was 2n
sample = chains[:,-n_iter:,:]
# compute mean of intra-chain (within) variances
W = np.mean(np.var(sample, axis=1), axis=0)
# compute mean of inter-chain (between) variances
chain_means = np.mean(sample, axis=1)
mean_of_chain_means = np.mean(chain_means, axis=0)
B = np.empty(n_params)
for i in range(n_params):
B[i] = np.sum((chain_means[:, i] - mean_of_chain_means[i])**2)*n_iter/(n_chains - 1.)
# estimated variance (likely an over-estimate)
Sigma_hat_2 = ((n_iter - 1.)*W + B)/n_iter
# pooled posterior variance
Var_hat = Sigma_hat_2 + B/(n_chains*n_iter)
# (a degrees-of-freedom correction is sometimes applied to Var_hat here; omitted for simplicity)
# compute potential scale reduction factor
PSRF = np.sqrt(Var_hat/W)
return W, B, Var_hat, PSRF
# test with Gl 876, the largest number of iterations
flatchain = np.genfromtxt('/Users/grefe950/Software/StarBay/interbay/chains/run06/GJ876_W0300_N0600_B0000.dat')
chains = flatchain.reshape(300, -1, 9)
labels=['Mass', '[Fe/H]', 'Y', 'log(Age)', 'Distance', 'alpha', 'log(Teff)', 'log(Fbol)', 'theta']
truths = [np.nan, 0.17, np.nan, np.nan, 1./0.21328, np.nan, np.log10(3189.), np.log10(1.9156e-8), 0.746]
tracePlot(chains, labels=labels, truths=truths)
GelmanRubin(chains, labels=labels)
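# A common rule of thumb (our assumption, not stated in the original notebook) is
# that PSRF < 1.1 for every parameter indicates approximate convergence.
W, B, Var_hat, PSRF = GelmanRubin(chains, labels=labels)
for name, psrf in zip(labels, PSRF):
    print('{:12s} PSRF = {:6.3f} {}'.format(name, psrf, 'OK' if psrf < 1.1 else 'not converged'))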
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to Regular Expressions
Step2: Now that we have a compiled regular expression, we can see if the pattern matches another string.
Step3: In the case above we found a match because 'Hello' is part of 'Hello World'.
Step4: So the match only works if the pattern matches the start of the other string. What if the case is different?
Step5: Doesn't work. By default, the match is case sensitive.
Step6: From what we have seen so far, matching with a string literal is pretty much functionally equivalent to the Python startswith(...) method that already comes as part of the String class.
Step7: Well, that isn't too exciting. But it does provide us with an opportunity for a valuable lesson
Step8: Zero or More
Step9: One or None
Step10: M
Step11: M or More
Step12: M through N
Step13: N or Fewer
Step14: Though we have illustrated these repetition operations on single characters, they actually apply to more complex combinations of characters, as we'll see soon.
Step15: Character sets can be bound to any of the repetition symbols that we have already seen. For example, if we wanted to match words that start with at least two vowels we could use the character set below.
Step16: Character sets can also be negated. Simply put a ^ symbol at the start of the character set.
Step17: Character Classes
Step18: These classes can have repetitions after them, just like character sets.
Step19: There are many common character classes.
Step20: You can mix these classes with repetitions.
Step21: But what if you want to find everything that isn't a digit? Or everything that isn't a space?
Step22: Placement
Step23: Though it might seem rather bland at first, the dot can be really useful when combined with repetition symbols.
Step24: As you can see, using the dot allows us to move past the start of the string we want to match and instead search deeper inside the target string.
Step25: Ending Anchor
Step26: Grouping
Step27: Grouping can also be done on a single item.
Step28: But why would you ever group a single item? It turns out that grouping is 'capture grouping' by default and allows you to extract items from a string.
Step29: In the case above, the entire string is considered group 0 because it matched the expression, but then the string 'dog' is group 1 because it was 'captured' by the parenthesis.
Step30: And capture groups can contain multiple values
Step31: Grouping can get even richer. For example
Step32: So far, we have compiled all of our regular expressions before using them. It turns out that many of the regular expression methods can accept a string and will compile that string for you.
Step33: sub is compiling the string "(cat|mouse)" into a pattern and then applying it to the input string.
Step34: You'll notice that the regular string containing \t printed a tab character. The raw string printed a literal \t. Likewise the regular string printed \ while the raw string printed \\.
Step35: Exercise 2
Step36: Exercise 3
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
pattern = re.compile('Hello')
type(pattern)
if pattern.match('Hello World'):
print("We found a match")
else:
print("No match found")
if pattern.match('I said Hello World'):
print("We found a match")
else:
print("No match found")
if pattern.match('HELLO'):
print("We found a match")
else:
print("No match found")
if pattern.match('He'):
print("We found a match")
else:
print("No match found")
if "Hello World".startswith("Hello"):
print("We found a match")
else:
print("No match found")
pattern = re.compile("ab+c")
for string in (
'abc',
'abbbbbbbc',
'ac',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab*c")
for string in (
'abc',
'abbbbbbbc',
'ac',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab?c")
for string in (
'abc',
'abbbbbbbc',
'ac',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab{7}c")
for string in (
'abc',
'abbbbbbc',
'abbbbbbbc',
'abbbbbbbbc',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab{2,}c")
for string in (
'abc',
'abbc',
'abbbbbbbbbbbbbbbbbbbbbbbbbbbbbbc',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab{4,6}c")
for string in (
'abbbc',
'abbbbc',
'abbbbbc',
'abbbbbbc',
'abbbbbbbc',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile("ab{,4}c")
for string in (
'abbbbbc',
'abbbbc',
'abbbc',
'abbc',
'abc',
'ac',
'a',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('[aeiou]')
for string in (
'a',
'e',
'i',
'o',
'u',
'x',
'ax',
'ex',
'ix',
'ox',
'ux',
'xa',
'xe',
'xi',
'xo',
'xu',
'xx',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('[aeiou]{2,}')
for string in (
'aardvark',
'earth',
'eat',
'oar',
'aioli',
'ute',
'absolutely',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('[^aeiou]')
for string in (
'aardvark',
'earth',
'ice',
'oar',
'ukulele',
'bathtub',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('\d')
for string in (
'abc',
'123',
'1a2b',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('\d{4,}')
for string in (
'a',
'123',
'1234',
'12345',
'1234a',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('\w\s\d')
for string in (
'a',
'1 3',
'_ 4',
'w 5',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('\d+\s\w+')
for string in (
'a',
'16 Candles',
'47 Hats',
'Number 5',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
print("Not a digit")
pattern = re.compile('\D')
for string in (
'a',
'1',
' ',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
print("\n")
print("Not a space")
pattern = re.compile('\S')
for string in (
'a',
'1',
' ',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
print("\n")
print("Not a word")
pattern = re.compile('\W')
for string in (
'a',
'1',
' ',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.')
for string in (
'a',
' ',
'4',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.*s')
for string in (
'as',
' oh no bees',
'does this match',
'maybe',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('^a.*s')
for string in (
'as',
'not as',
'a string that matches',
'a fancy string that matches',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.*s$')
for string in (
'as',
'beees',
'sa',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.*(cat|dog)')
for string in (
'cat',
'dog',
'fat cat',
'lazy dog',
'hog',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.*(dog)')
for string in (
'cat',
'dog',
'fat cat',
'lazy dog',
'hog',
):
print("'{}'".format(string), end=' ')
print('matches' if pattern.match(string) else 'does not match')
pattern = re.compile('.*(dog)')
match = pattern.match("hot diggity dog")
if match:
print(match.group(0))
print(match.group(1))
pattern = re.compile('.*(dog).*(cat)')
match = pattern.match("hot diggity dog barked at a scared cat")
if match:
print(match.group(0))
print(match.group(1))
print(match.group(2))
pattern = re.compile('.*(dog).*(mouse|cat)')
match = pattern.match("hot diggity dog barked at a scared cat")
if match:
print(match.group(0))
print(match.group(1))
print(match.group(2))
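# Groups can also be named via the (?P<name>...) syntax (an illustrative aside
# we are adding; named groups behave exactly like the numbered groups above).
pattern = re.compile(r'.*(?P<first>dog).*(?P<second>mouse|cat)')
match = pattern.match("hot diggity dog barked at a scared cat")
if match:
    print(match.group('first'))
    print(match.group('second'))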
pattern = re.compile('(cat|mouse)')
re.sub(pattern, 'whale', 'The dog is afraid of the mouse')
re.sub('(cat|mouse)', 'whale', 'The dog is afraid of the mouse')
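# Other module-level functions accept pattern strings the same way; for example
# (a small illustrative aside), findall returns every non-overlapping match:
re.findall('(cat|mouse)', 'The cat is afraid of the mouse') # ['cat', 'mouse']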
print('\tHello')
print(r'\tHello')
print('\\')
print(r'\\')
test_data = [
'apple',
'banana',
'grapefruit',
'apricot',
'orange'
]
# Create a pattern here
for test in test_data:
pass # Your pattern match goes here
test_data = [
'zoo',
'ZOO',
'bazooka',
'ZOOLANDER',
'kaZoo',
'ZooTopia',
'ZOOT Suit',
]
# Create a pattern here
for test in test_data:
pass # Your pattern match goes here
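# One plausible solution sketch (an assumption on our part: the goal here seems
# to be matching every string that contains 'zoo' regardless of letter case).
pattern = re.compile('zoo', re.IGNORECASE)
for test in test_data:
    print("'{}'".format(test), 'matches' if pattern.search(test) else 'does not match')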
test_data = [
'sing',
'talking',
'SCREAMING',
'NeVeReNdInG',
'ingeron',
]
# Create a pattern here
for test in test_data:
pass # Your pattern match goes here
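# One plausible solution sketch (our assumption: match strings that end in 'ing',
# case-insensitively, which excludes 'ingeron').
pattern = re.compile(r'.*ing$', re.IGNORECASE)
for test in test_data:
    print("'{}'".format(test), 'matches' if pattern.match(test) else 'does not match')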
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: also rest client possible
Step2: Set up a data collection graph
Step3: Data servers graph setup
Step4: Combine data set graph with server graph
Step5: Data servers provide different bandwidth to different regions / countries and end users belong to different regions (temporarily)
Step6: Data servers are sometimes down (not operational and thus do not serve data to users)
Step8: Interactive cells to play with graph
Step9: return operational servers for a specific file
Step10: switch off a server and rerun query
Step11: Simple cells to clean graphdb
Step12: simple graph visualizations
|
<ASSISTANT_TASK:>
Python Code:
import ENESNeoTools
from py2neo import Graph, Node, Relationship, authenticate
authenticate("localhost:7474", ENESNeoTools.user_name, ENESNeoTools.pass_word)
# connect to authenticated graph database
graph = Graph("http://localhost:7474/db/data/")
from neo4jrestclient.client import GraphDatabase
from neo4jrestclient.query import Q
gdb = GraphDatabase("http://localhost:7474/db/data/",username="neo4j",password="prolog16")
# collection organization reflects directory structure:
# e.g. cordex/output/EUR-11/MPI-CSC/MPI-M-MPI-ESM-LR/rcp85/r1i1p1/MPI-CSC-REMO2009/v1/day/tas
# generic structure: <activity>/<product>/<Domain>/<Institution>/<GCMModelName>/<CMIP5ExperimentName>
# /<CMIP5EnsembleMember>/<RCMModelName>/<RCMVersionID>/<Frequency>/<VariableName>.
# facets describing collection
facet_nodes = []
for key, value in ENESNeoTools.facet_list1.iteritems():
facet_node = Node("Collection",name=value[1], level=value[0])
facet_nodes.append(facet_node)
facet_chain = []
for i in range(1,len(facet_nodes)):
rel = Relationship(facet_nodes[i],"belongs_to",facet_nodes[i-1])
facet_chain.append(rel)
for rel in facet_chain:
graph.create(rel)
cordex_file_set1 = ENESNeoTools.get_files(ENESNeoTools.facet_list1)
#cordex_set1 = []
cordex_rel1 = []
for cordexfile in cordex_file_set1:
node = Node("File", name=cordexfile, group="file")
# cordex_set1.append(node)
cordex_rel1.append(Relationship(node,"belongs_to",facet_nodes[0]))
for rel in cordex_rel1:
graph.create(rel)
server_list = ENESNeoTools.get_servers()
service_rels = []
server_nodes = []
for (sname, surl) in server_list:
new_node = Node('data_server',name=sname, url=surl)
server_nodes.append(new_node)
data_services = ENESNeoTools.data_service_nodes(sname)
for data_service in data_services:
service_rels.append(Relationship(data_service,"service",new_node))
for rel in service_rels:
graph.create(rel)
orig1 = Relationship(facet_nodes[1],"served_by",server_nodes[0])
replica1 = Relationship(facet_nodes[1],"served_by",server_nodes[1])
graph.create(orig1)
graph.create(replica1)
region_germany = Node("country", name="Germany", provider="DFN")
region_australia = Node("country", name="Australia", provider="RNet")
region_sweden = Node("country", name="Sweden", provider="SweNet")
user1 = Node("user",name="Stephan Kindermann")
user2 = Node("user",name="Mr Spock")
user3 = Node("user",name="Michael Kolax")
home1 = Relationship(user1,"connects_to",region_germany)
home2 = Relationship(user2,"connects_to",region_australia)
home3 = Relationship(user3,"connects_to",region_sweden)
graph.create(home1, home2, home3) # persist the user-to-region relationships
link1 = Relationship(server_nodes[0],"nw_link",region_germany, bandwidth=2000000)
link2 = Relationship(server_nodes[0],"nw_link",region_sweden, bandwidth=1000000)
link3 = Relationship(server_nodes[0],"nw_link",region_australia,bandwidth=500000)
link4 = Relationship(server_nodes[1],"nw_link",region_germany, bandwidth=1500000)
link5 = Relationship(server_nodes[1],"nw_link",region_sweden, bandwidth=3000000)
link6 = Relationship(server_nodes[1],"nw_link",region_australia, bandwidth=400000)
graph.create(link1,link2,link3,link4,link5,link6)
server_nodes[0].properties["status"] = "UP"
server_nodes[1].properties["status"] = "UP"
server_nodes[0].push()
server_nodes[1].push()
server_nodes[0].properties
from IPython.display import HTML
HTML('<iframe src=http://localhost:7474/browser/ width=1000 height=800> </iframe>')
%load_ext cypher
statement = 'MATCH (myfile:File {name:"tas_EUR-11_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_MPI-CSC-REMO2009_v1_day_20660101-20701231.nc"}) RETURN myfile'
results = graph.cypher.execute(statement)
results
results = %cypher http://neo4j:prolog16@localhost:7474/db/data MATCH (myfile:File {name:"tas_EUR-11_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_MPI-CSC-REMO2009_v1_day_20660101-20701231.nc"}) RETURN myfile
results.get_dataframe()
graph.open_browser()
%%cypher http://neo4j:prolog16@localhost:7474/db/data
MATCH (a:File)-[:belongs_to*]-(b:Collection) -[:served_by]- (c:data_server)
WHERE c.status = 'UP' AND a.name = 'tas_EUR-11_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_MPI-CSC-REMO2009_v1_day_20760101-20801231.nc'
RETURN c
server_nodes[1].properties["status"] = "DOWN"
server_nodes[1].push()
%%cypher http://neo4j:prolog16@localhost:7474/db/data
MATCH (a:File)-[:belongs_to*]-(b:Collection) -[:served_by]- (c:data_server)
WHERE c.status = 'UP' AND a.name = 'tas_EUR-11_MPI-M-MPI-ESM-LR_rcp85_r1i1p1_MPI-CSC-REMO2009_v1_day_20760101-20801231.nc'
RETURN c
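# (Illustrative cleanup:) bring the second server back up so later cells start
# from a consistent state.
server_nodes[1].properties["status"] = "UP"
server_nodes[1].push()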
results = %cypher http://neo4j:prolog16@localhost:7474/db/data MATCH (a)-[r]-(b) RETURN a,r, b
%%bash
ls
%%cypher http://neo4j:prolog16@localhost:7474/db/data
MATCH (n)
OPTIONAL MATCH (n)-[r]-()
DELETE n,r
graph.delete_all()
%matplotlib inline
results.get_graph()
results.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This chapter introduces the Poisson process, which is a model used to describe events that occur at random intervals.
Step2: The result is an object that represents a "frozen" random variable and provides pmf, which evaluates the probability mass function of the Poisson distribution.
Step4: This result implies that if the average goal-scoring rate is 1.4 goals per game, the probability of scoring 4 goals in a game is about 4%.
Step5: make_poisson_pmf takes as parameters the goal-scoring rate, lam, and an array of quantities, qs, where it should evaluate the Poisson PMF. It returns a Pmf object.
Step6: And here's what it looks like.
Step7: The most likely outcomes are 0, 1, and 2; higher values are possible but increasingly unlikely.
Step8: The parameter, alpha, is the mean of the distribution.
Step9: The result is a discrete approximation of a gamma distribution.
Step10: This distribution represents our prior knowledge about goal scoring
Step11: As usual, reasonable people could disagree about the details of the prior, but this is good enough to get started. Let's do an update.
Step12: Now suppose we are have an array of possible values for $\lambda$; we can compute the likelihood of the data for each hypothetical value of lam, like this
Step14: And that's all we need to do the update.
Step15: The first parameter is the prior; the second is the number of goals.
Step16: Here's what the posterior distribution looks like, along with the prior.
Step17: The data, k=4, makes us think higher values of lam are more likely and lower values are less likely. So the posterior distribution is shifted to the right.
Step18: And here are the results.
Step19: Here are the posterior means for these distributions.
Step21: The mean of the prior distribution is about 1.4.
Step22: This is similar to the method we use in <<_Addends>> to compute the distribution of a sum.
Step23: Pmf provides a function that does the same thing.
Step24: The results are slightly different because Pmf.prob_gt uses array operators rather than for loops.
Step25: The following figure shows what these distributions look like for a few values of lam.
Step26: The predictive distribution is a mixture of these Pmf objects, weighted with the posterior probabilities.
Step27: Here's the predictive distribution for the number of goals France would score in a rematch.
Step28: This distribution represents two sources of uncertainty
Step29: We can use these distributions to compute the probability that France wins, loses, or ties the rematch.
Step30: Assuming that France wins half of the ties, their chance of winning the rematch is about 65%.
Step32: This is a bit lower than their probability of superiority, which is 75%. And that makes sense, because we are less certain about the outcome of a single game than we are about the goal-scoring rates.
Step33: To see what the exponential distribution looks like, let's assume again that lam is 1.4; we can compute the distribution of $t$ like this
Step34: And here's what it looks like
Step36: It is counterintuitive, but true, that the most likely time to score a goal is immediately. After that, the probability of each successive interval is a little lower.
Step38: Exercise
Step39: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py')
from utils import set_pyplot_params
set_pyplot_params()
from scipy.stats import poisson
lam = 1.4
dist = poisson(lam)
type(dist)
k = 4
dist.pmf(k)
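# Added sanity check: the Poisson PMF should sum to (almost) 1 over a wide
# enough range of k values.
import numpy as np
np.sum(dist.pmf(np.arange(100)))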
from empiricaldist import Pmf
def make_poisson_pmf(lam, qs):
    """Make a Pmf of a Poisson distribution."""
ps = poisson(lam).pmf(qs)
pmf = Pmf(ps, qs)
pmf.normalize()
return pmf
import numpy as np
lam = 1.4
goals = np.arange(10)
pmf_goals = make_poisson_pmf(lam, goals)
from utils import decorate
def decorate_goals(title=''):
decorate(xlabel='Number of goals',
ylabel='PMF',
title=title)
pmf_goals.bar(label=r'Poisson distribution with $\lambda=1.4$')
decorate_goals('Distribution of goals scored')
from scipy.stats import gamma
alpha = 1.4
qs = np.linspace(0, 10, 101)
ps = gamma(alpha).pdf(qs)
from empiricaldist import Pmf
prior = Pmf(ps, qs)
prior.normalize()
def decorate_rate(title=''):
decorate(xlabel='Goal scoring rate (lam)',
ylabel='PMF',
title=title)
prior.plot(ls='--', label='prior', color='C5')
decorate_rate(r'Prior distribution of $\lambda$')
prior.mean()
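# Added sanity check: with this parameterization the mean of the gamma
# distribution is alpha, so the discrete approximation should be close to 1.4.
gamma(alpha).mean()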
lam = 1.4
k = 4
poisson(lam).pmf(4)
lams = prior.qs
k = 4
likelihood = poisson(lams).pmf(k)
def update_poisson(pmf, data):
    """Update Pmf with a Poisson likelihood."""
k = data
lams = pmf.qs
likelihood = poisson(lams).pmf(k)
pmf *= likelihood
pmf.normalize()
france = prior.copy()
update_poisson(france, 4)
prior.plot(ls='--', label='prior', color='C5')
france.plot(label='France posterior', color='C3')
decorate_rate('Posterior distribution for France')
croatia = prior.copy()
update_poisson(croatia, 2)
prior.plot(ls='--', label='prior', color='C5')
croatia.plot(label='Croatia posterior', color='C0')
decorate_rate('Posterior distribution for Croatia')
print(croatia.mean(), france.mean())
def prob_gt(pmf1, pmf2):
    """Compute the probability of superiority."""
total = 0
for q1, p1 in pmf1.items():
for q2, p2 in pmf2.items():
if q1 > q2:
total += p1 * p2
return total
prob_gt(france, croatia)
Pmf.prob_gt(france, croatia)
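# Added sanity check: the three pairwise comparisons partition the outcomes,
# so their probabilities should sum to 1.
(Pmf.prob_gt(france, croatia) + Pmf.prob_lt(france, croatia) +
 Pmf.prob_eq(france, croatia))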
pmf_seq = [make_poisson_pmf(lam, goals)
for lam in prior.qs]
import matplotlib.pyplot as plt
for i, index in enumerate([10, 20, 30, 40]):
plt.subplot(2, 2, i+1)
lam = prior.qs[index]
pmf = pmf_seq[index]
pmf.bar(label=f'$\lambda$ = {lam}', color='C3')
decorate_goals()
from utils import make_mixture
pred_france = make_mixture(france, pmf_seq)
pred_france.bar(color='C3', label='France')
decorate_goals('Posterior predictive distribution')
pred_croatia = make_mixture(croatia, pmf_seq)
pred_croatia.bar(color='C0', label='Croatia')
decorate_goals('Posterior predictive distribution')
win = Pmf.prob_gt(pred_france, pred_croatia)
win
lose = Pmf.prob_lt(pred_france, pred_croatia)
lose
tie = Pmf.prob_eq(pred_france, pred_croatia)
tie
win + tie/2
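# Added sanity check: win, lose, and tie are exhaustive outcomes for the
# rematch, so they should sum to 1.
win + lose + tie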
def expo_pdf(t, lam):
    """Compute the PDF of the exponential distribution."""
return lam * np.exp(-lam * t)
lam = 1.4
qs = np.linspace(0, 4, 101)
ps = expo_pdf(qs, lam)
pmf_time = Pmf(ps, qs)
pmf_time.normalize()
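# Added sanity check: the mean of an exponential distribution is 1/lam, so the
# discretized version (truncated at t=4) should be a bit below 1/1.4 ~ 0.71.
pmf_time.mean()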
def decorate_time(title=''):
decorate(xlabel='Time between goals (games)',
ylabel='PMF',
title=title)
pmf_time.plot(label='exponential with $\lambda$ = 1.4')
decorate_time('Distribution of time between goals')
# Solution
# Here's a function that updates the distribution of lam
# with the given time between goals
def update_expo(pmf, data):
    """Update based on an observed interval.

    pmf: prior PMF
    data: time between goals in minutes
    """
t = data / 90
lams = pmf.qs
likelihood = expo_pdf(t, lams)
pmf *= likelihood
pmf.normalize()
# Solution
# Here are the updates for the first and second goals
germany = prior.copy()
update_expo(germany, 11)
germany2 = germany.copy()
update_expo(germany2, 12)
# Solution
# Here are the mean values of `lam` after each update
germany.mean(), germany2.mean()
# Solution
# Here's what the posterior distributions look like
prior.plot(ls='--', label='prior', color='C5')
germany.plot(color='C3', label='Posterior after 1 goal')
germany2.plot(color='C16', label='Posterior after 2 goals')
decorate_rate('Prior and posterior distributions')
# Solution
# Here's the predictive distribution for each possible value of `lam`
t = (90-23) / 90
pmf_seq = [make_poisson_pmf(lam*t, goals)
for lam in germany2.qs]
# Solution
# And here's the mixture of predictive distributions,
# weighted by the probabilities in the posterior distribution.
pred_germany2 = make_mixture(germany2, pmf_seq)
# Solution
# Here's what the predictive distribution looks like
pred_germany2.bar(color='C1', label='germany')
decorate_goals('Posterior predictive distribution')
# Solution
# Here's the probability of scoring exactly 5 more goals
pred_germany2[5]
# Solution
# And the probability of 5 or more
pred_germany2.prob_ge(5)
def make_expo_pmf(lam, high):
    """Make a PMF of an exponential distribution.

    lam: event rate
    high: upper bound on the interval `t`
    returns: Pmf of the interval between events
    """
qs = np.linspace(0, high, 101)
ps = expo_pdf(qs, lam)
pmf = Pmf(ps, qs)
pmf.normalize()
return pmf
# Solution
# Here are the predictive distributions for the
# time until the first goal
pmf_seq = [make_expo_pmf(lam, high=4) for lam in prior.qs]
# Solution
# And here are the mixtures based on the two posterior distributions
pred_france = make_mixture(france, pmf_seq)
pred_croatia = make_mixture(croatia, pmf_seq)
# Solution
# Here's what the posterior predictive distributions look like
pred_france.plot(label='France', color='C3')
pred_croatia.plot(label='Croatia', color='C0')
decorate_time('Posterior predictive distribution')
# Solution
# And here's the probability France scores first
Pmf.prob_lt(pred_france, pred_croatia)
# Solution
# When a team is winning or losing by an insurmountable margin,
# they might remove their best players from the game, which
# would affect their goal-scoring rate, violating the assumption
# that the goal scoring rate is constant.
# In this example, Boston won the third game 8-1, but scoring
# eight goals in a game might not reflect their true long-term
# goal-scoring rate.
# As a result, the analysis below might overestimate the chance
# that Boston wins.
# As it turned out, they did not.
# Solution
from scipy.stats import gamma
alpha = 2.8
qs = np.linspace(0, 15, 101)
ps = gamma.pdf(qs, alpha)
prior_hockey = Pmf(ps, qs)
prior_hockey.normalize()
# Solution
prior_hockey.plot(ls='--', color='C5')
decorate_rate('Prior distribution for hockey')
prior_hockey.mean()
# Solution
bruins = prior_hockey.copy()
for data in [0, 2, 8, 4]:
update_poisson(bruins, data)
bruins.mean()
# Solution
canucks = prior_hockey.copy()
for data in [1, 3, 1, 0]:
update_poisson(canucks, data)
canucks.mean()
# Solution
canucks.plot(label='Canucks')
bruins.plot(label='Bruins')
decorate_rate('Posterior distributions')
# Solution
goals = np.arange(15)
pmf_seq = [make_poisson_pmf(lam, goals) for lam in bruins.qs]
# Solution
pred_bruins = make_mixture(bruins, pmf_seq)
pred_bruins.bar(label='Bruins', color='C1')
decorate_goals('Posterior predictive distribution')
# Solution
pred_canucks = make_mixture(canucks, pmf_seq)
pred_canucks.bar(label='Canucks')
decorate_goals('Posterior predictive distribution')
# Solution
win = Pmf.prob_gt(pred_bruins, pred_canucks)
lose = Pmf.prob_lt(pred_bruins, pred_canucks)
tie = Pmf.prob_eq(pred_bruins, pred_canucks)
win, lose, tie
# Solution
# Assuming the Bruins win half of the ties,
# their chance of winning the next game is...
p = win + tie/2
p
# Solution
# Their chance of winning the series is their
# chance of winning k=2 or k=3 of the remaining
# n=3 games.
from scipy.stats import binom
n = 3
a = binom.pmf([2,3], n, p)
a.sum()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Steps of skill clustering
Step1: First, we try it on the count matrix, as the matrix is already available.
Step2: There are various choices for initializing NMF, including random initialization and initialization by SVD. We try random NMF, denoted as rnmf.
|
<ASSISTANT_TASK:>
Python Code:
import my_util
import cluster_skill_helpers
from cluster_skill_helpers import *  # assumed to re-export pd, mmread, NMF, time, etc.
import random as rd
HOME_DIR = 'd:/larc_projects/job_analytics/'
SKILL_DAT = HOME_DIR + 'data/clean/skill_cluster/'
SKILL_RES = HOME_DIR + 'results/' + 'skill_cluster/new/'
# Load count matrix
skill_df = pd.read_csv(SKILL_DAT + 'skill_index.csv')
skills = skill_df['skill']
doc_skill = mmread(SKILL_DAT + 'doc_skill.mtx')
ks = range(10, 60, 10)
rnmf = {k: NMF(n_components=k, random_state=0) for k in ks}
print( "Fitting NMF using random initialization..." )
print('No. of factors, Error, Running time')
rnmf_error = []
for k in ks:
t0 = time()
rnmf[k].fit(doc_skill)
elapsed = time() - t0
err = rnmf[k].reconstruction_err_
print('%d, %0.1f, %0.1fs' %(k, err, elapsed))
rnmf_error.append(err)
# end
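# Added sketch (assumes matplotlib is available in this environment):
# visualize how the reconstruction error falls as the number of skill
# clusters k grows.
import matplotlib.pyplot as plt
plt.plot(ks, rnmf_error, marker='o')
plt.xlabel('Number of factors k')
plt.ylabel('Reconstruction error')
plt.show()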
# Save learned factor-skill matrices
nmf_dir = SKILL_RES + 'nmf/'
for k in ks:
fname = '{}factor_skill.csv'.format(k)
pd.DataFrame(rnmf[k].components_).to_csv(nmf_dir + fname, index=False)
print('saved {}factor-skill matrix'.format(k))
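# Added sketch: inspect the ten most heavily weighted skills in the first
# latent factor of the k=20 model (assumes `skills` is aligned with the
# columns of doc_skill).
topic = rnmf[20].components_[0]
top_idx = topic.argsort()[::-1][:10]
print(skills[top_idx])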
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Note
Step3: CUSTOM_SERVICE_ACCOUNT - In the gcp console Click on the Navigation Menu and navigate to IAM & Admin, then to Service Accounts and use the service account starting with prefix - 'tfx-tuner-caip-service-account'. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please see the lab setup README for setup instructions.
Step4: Creating the TFX CLI builder
Step5: Build the image and push it to your project's Container Registry
Step7: Exercise
Step8: Hint
|
<ASSISTANT_TASK:>
Python Code:
import yaml
# Set `PATH` to include the directory containing TFX CLI.
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
%pip install --upgrade --user tfx==0.25.0
# Use the following command to identify the GCS bucket for metadata and pipeline storage.
!gsutil ls
# TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
GCP_REGION = "us-central1"
ARTIFACT_STORE_URI = (
"gs://qwiklabs-gcp-04-406b0039d298-kubeflowpipelines-default" # Change
)
CUSTOM_SERVICE_ACCOUNT = "tfx-tuner-caip-service-account@qwiklabs-gcp-04-406b0039d298.iam.gserviceaccount.com" # Change
ENDPOINT = (
"3b634710ada9ec03-dot-us-central1.pipelines.googleusercontent.com" # Change
)
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
!cat tfx-cli/Dockerfile
!cat tfx-cli/requirements.txt
IMAGE_NAME = "tfx-cli"
TAG = "latest"
IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
# TODO: Your gcloud command here to build tfx-cli and submit to Container Registry.
!gcloud builds submit --timeout=15m --tag {IMAGE_URI} tfx-cli
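# Added check (hypothetical): confirm the freshly built image is visible in
# the project's Container Registry.
!gcloud container images list --repository gcr.io/{PROJECT_ID}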
PIPELINE_NAME = "tfx_covertype_continuous_training"
MODEL_NAME = "tfx_covertype_classifier"
DATA_ROOT_URI = "gs://workshop-datasets/covertype/small"
TAG_NAME = "test"
TFX_IMAGE_NAME = "lab-tfx-cicd-image"
PIPELINE_FOLDER = "pipeline"
PIPELINE_DSL = "runner.py"
RUNTIME_VERSION = "2.3"
PYTHON_VERSION = "3.7"
USE_KFP_SA = "False"
ENABLE_TUNING = "True"
SUBSTITUTIONS = """\
_GCP_REGION={},\
_ARTIFACT_STORE_URI={},\
_CUSTOM_SERVICE_ACCOUNT={},\
_ENDPOINT={},\
_PIPELINE_NAME={},\
_MODEL_NAME={},\
_DATA_ROOT_URI={},\
_TFX_IMAGE_NAME={},\
TAG_NAME={},\
_PIPELINE_FOLDER={},\
_PIPELINE_DSL={},\
_RUNTIME_VERSION={},\
_PYTHON_VERSION={},\
_USE_KFP_SA={},\
_ENABLE_TUNING={}
""".format(
GCP_REGION,
ARTIFACT_STORE_URI,
CUSTOM_SERVICE_ACCOUNT,
ENDPOINT,
PIPELINE_NAME,
MODEL_NAME,
DATA_ROOT_URI,
TFX_IMAGE_NAME,
TAG_NAME,
PIPELINE_FOLDER,
PIPELINE_DSL,
RUNTIME_VERSION,
PYTHON_VERSION,
USE_KFP_SA,
ENABLE_TUNING,
).strip()
# TODO: write gcloud builds submit command to trigger manual pipeline run.
!gcloud builds submit . --timeout=15m --config cloudbuild.yaml --substitutions {SUBSTITUTIONS}
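# Added check (assumes the TFX CLI available in this environment supports it):
# once the Cloud Build run finishes, list the deployed pipelines on the
# hosted KFP endpoint.
!tfx pipeline list --engine kubeflow --endpoint {ENDPOINT}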
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We're using two params: t (for title) and Season. Change the values as you wish.
Step2: Now let's take a look at what data we have
Step3: Okay! As we can see, data are indexed by Episodes. What do we have about the first episode?
Step4: So now we know which attributes each episode has. imdbRating is quite interesting, let's work on that. If we want to find out the rating for the seventh episode, all we gotta do is
Step5: Since we want to analyze data statistically, let's create a list and keep all ratings grouped
Step6: What rating can be considered the mode for that season?
Step7: How many episodes have been rated like this?
|
<ASSISTANT_TASK:>
Python Code:
import json
import urllib.request as request
url = 'http://www.omdbapi.com/?t=Scandal&Season=3'
content = request.urlopen(url).read()
data = json.loads(content.decode('UTF-8'))
print(data)
print(data['Episodes'][0])
print(data['Episodes'][6]['imdbRating'])
ratings = []
for episode in data['Episodes']:
ratings.append(episode['imdbRating'])
print(ratings)
from scipy import stats
mode = stats.mode(ratings)
print(mode)
print(mode[0][0])
print(mode[1][0])
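# Added sketch: the API returns ratings as strings, so convert to float
# before computing a simple season average.
float_ratings = [float(r) for r in ratings if r != 'N/A']
print(sum(float_ratings) / len(float_ratings))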
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Arrays of type array have many useful built-in methods.
Step2: To create numeric sequences as array objects, use the arange function.
Step3: We can also change the shape (reshape) of the output of arange.
Step4: A function similar to arange is linspace, which fills a vector with a specified number of elements from an interval at equal, automatically computed spacings (with arange we have to supply the step size ourselves).
Step5: As with all Python functions, we can get additional information about NumPy functions with the command help(function.name)
Step6: Arrays can consist of different data types (but only one data type at a time, hence the homogeneity).
Step7: A few useful functions for creating specific arrays
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
x = np.array([[1,2,3],[4,5,6],[7,8,9]])
print(x)
x.shape
x.sum(axis=0)
x.sum(axis=1)
x.mean(axis=0)
x.mean(axis=1)
np.arange(10)
np.arange(5,10)
np.arange(5,10,0.5)
x = np.arange(1,10).reshape(3,3)
x
x = np.linspace(0,5,5)
x
help(np.linspace)
x = np.array([1,2,3])
print(x.dtype)
x = np.array([0.1,0.2,0.3])
print(x)
print(x.dtype)
x = np.array([1,2,3],dtype='float64')
print(x.dtype)
x = np.zeros([3,4])
x
x = np.ones([3,4])
x
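# Added sketch: two more handy constructors - an identity matrix and a
# constant-filled array.
np.eye(3)
np.full([3,4], 7)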
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: RDKit Enumeration Toolkit
Step2: Sanitizing Reaction Blocks
Step3: Preprocessing Reaction Blocks
Step4: So now, this scaffold will only match the specified halogens and a primary amine. Let's get some!
Step5: Basic Usage
Step6: If you only want each reactant to match once (and hence only produce one product per reactant set), you can adjust the parameters
Step7: Enumerating the library
Step8: Understanding results of enumerations
Step9: Note
Step10: Let's play with this enumerator.
Step11: What this means is make the product from reagents[0][111] and reagents[1][130]
Step12: This also appears to be the last product. So let's start over.
Step13: We can Skip to the 100th result
Step14: Let's advance by one here and see what happens. It's no surprise that for the CartesianProduct strategy the first index is increased by one.
Step15: Enumeration States
Step16: Now when we go back to this state, the next molecule should be the one we just saved.
Step17: Resetting the enumeration back to the beginning
Step18: Pickling Libraries
Step19: And the libraries are in lock step.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from rdkit.Chem import AllChem
from rdkit.Chem import rdChemReactions
from rdkit.Chem.AllChem import ReactionFromRxnBlock, ReactionToRxnBlock
from rdkit.Chem.Draw import IPythonConsole
IPythonConsole.ipython_useSVG=True
rxn_data = """$RXN
ISIS 090220091539
2 1
$MOL
-ISIS- 09020915392D
2 1 1 0 0 0 0 0 0 0999 V2000
-2.0744 0.1939 0.0000 L 0 0 0 0 0 0 0 0 0 0 0 0
-2.5440 -0.1592 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 F 2 17 35
V 1 halogen
M RGP 1 2 1
M ALS 1 2 F Cl Br
M END
$MOL
-ISIS- 09020915392D
2 1 0 0 0 0 0 0 0 0999 V2000
2.8375 -0.2500 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
3.3463 0.0438 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
1 2 1 0 0 0 0
V 2 amine.primary
M RGP 1 1 2
M END
$MOL
-ISIS- 09020915392D
3 2 0 0 0 0 0 0 0 0999 V2000
13.5792 0.0292 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
14.0880 0.3229 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0
13.0704 0.3229 0.0000 R# 0 0 0 0 0 0 0 0 0 2 0 0
1 2 1 0 0 0 0
1 3 1 0 0 0 0
M RGP 2 2 1 3 2
M END
"""
rxn = ReactionFromRxnBlock(rxn_data)
rxn
AllChem.SanitizeRxn(rxn)
rxn.Initialize()
nWarn, nError, nReactants, nProducts, labels = AllChem.PreprocessReaction(rxn)
print ("Number of warnings:", nWarn)
print ("Number of preprocessing errors:", nError)
print ("Number of reactants in reaction:", nReactants)
print ("Number of products in reaction:", nProducts)
print ("Preprocess labels added:", labels)
!wget http://www.sigmaaldrich.com/content/dam/sigma-aldrich/docs/Aldrich/General_Information/1/sdf-benzylic-primary-amines.sdf -O amines.sdf
!wget http://www.sigmaaldrich.com/content/dam/sigma-aldrich/docs/Aldrich/General_Information/1/sdf-alkyl-halides.sdf -O halides.sdf
reagents = [
[x for x in AllChem.SDMolSupplier("halides.sdf")],
[x for x in AllChem.SDMolSupplier("amines.sdf")]
]
print ("number of reagents per template:", [len(x) for x in reagents])
library = rdChemReactions.EnumerateLibrary(rxn, reagents)
params = rdChemReactions.EnumerationParams()
params.reagentMaxMatchCount = 1
library = rdChemReactions.EnumerateLibrary(rxn, reagents, params=params)
enumerator = library.GetEnumerator()
print (enumerator)
print ("Possible number of permutations:", enumerator.GetNumPermutations())
count = 0
totalMols = 0
for results in library:
for productSet in results:
for mol in productSet:
totalMols += 1
count += 1
print("Number of result sets", count)
print("Number of result molecules", totalMols)
import copy
enumerator = copy.copy(library.GetEnumerator())
print(enumerator)
test_enumerator = copy.copy(enumerator)
list(test_enumerator.GetPosition())
reagents[0][111]
reagents[1][130]
library = rdChemReactions.EnumerateLibrary(rxn, reagents, params=params)
test_enumerator = copy.copy(library.GetEnumerator())
list(test_enumerator.GetPosition())
test_enumerator.Skip(100)
pos = list(test_enumerator.GetPosition())
print(pos)
reagents[0][pos[0]]
reagents[0][pos[1]]
pos = test_enumerator.next()
print(list(pos))
library = rdChemReactions.EnumerateLibrary(rxn, reagents, params=params)
# skip the first 100 molecules
library.GetEnumerator().Skip(100)
# get the state
state = library.GetState()
print("State is:\n", repr(state))
result = library.next()
for productSet in result:
for mol in productSet:
smiles = AllChem.MolToSmiles(mol)
break
library.SetState(state)
result = library.next()
for productSet in result:
for mol in productSet:
assert AllChem.MolToSmiles(mol) == smiles
print(AllChem.MolToSmiles(mol), "==", smiles, "!")
library.ResetState()
print(list(library.GetPosition()))
s = library.Serialize() # XXX bug need default arg
library2 = rdChemReactions.EnumerateLibrary()
library2.InitFromString(s)
for i in range(10):
result = library.next()
for productSet in result:
for mol in productSet:
print("Result library1", AllChem.MolToSmiles(mol))
result = library2.next()
for productSet in result:
for mol in productSet:
print("Result library2", AllChem.MolToSmiles(mol))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: little matplotlib config trick
Step2: Now, this config dictionary is huge
Step3: Visualizing Multi-Dimensional Arrays
Step4: Q. What is the rank of x?
Step5: Notice that the first row of the array was plotted
Step6: To look up other interpolations, just use the help feature.
Step7: Q. And what should this yield?
Step8: Arrays can be indexed in one of two ways
Step9: Q. What should this be?
Step10: We can access x and y index information using numpy.indices
Step11: Q. How to isolate the element in xT corresponding to i = 1 and j = 2?
Step12: Q. How about the indices of all even elements in xT?
Step13: Note you only need this if you want to use these indices somewhere else, e.g. in another array of the same shape.
Step14: How to find particular elements in a 2-D array?
Step15: Array Computing
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import rcParams
rcParams.keys()
[key for key in rcParams.keys() if 'map' in key]
rcParams['image.cmap']
rcParams['image.cmap'] = 'viridis'
rcParams['image.interpolation'] = 'none'
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]])
x
x.shape
x.ndim
print(x) # for reference
pl.imshow(x)
pl.colorbar();
print(x) # for reference
pl.imshow(x, origin='lower')
pl.colorbar();
# Interpolation (by default) makes an image look
# smoother.
# Instead:
pl.imshow(x, origin='lower', interpolation='bilinear')
pl.colorbar()
pl.imshow?
x # for reference
print(x)
print()
print(x.T)
xT = x.T
pl.imshow(xT)
pl.colorbar()
xT.shape
xT # Reminder
xT[2][1]
xT[2,1]
xT
print(np.indices(xT.shape))
print("-" * 50)
for i in range(xT.shape[0]):
for j in range(len(xT[0])):
print(i, j)
i, j = np.indices(xT.shape)
i
j
xT
xT[1,2]
print(xT[np.logical_and(i == 1, j == 2)])
# Q. How did this work?
print(np.logical_and(i == 1, j == 2))
i == 1
xT # for reference
np.argwhere(xT % 2 == 0)
xT[xT % 2 == 0]
xT # for reference
np.argwhere(xT > 5)
xT
pl.imshow(xT)
pl.colorbar()
pl.clim(1, 12) # colorbar limits,
# analogous to xlim, ylim
print(xT + 5)
pl.imshow(xT+5)
pl.colorbar()
# pl.clim(1, 12)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training and Testing Split
Step2: Scaling
Step3: Text
Step5: Custom Feature Separator
Step6: Classification Models
Step7: Although tuning is not necessary for Naive Bayes, I pass the default parameters of those models to GridSearchCV anyway so that I can do a direct pair-wise comparison with the other models across the different steps of cross-validation.
Step8: Creating Pipelines
Step9: I plan on using imblearn classes for later iterations, so I use its pipeline in the beginning for convenience
Step10: Main Feature Union
Step11: Naive Bayes Estimators
Step12: QDA Estimators
Step13: Logistic Estimators
Step14: Random Forest Estimators
Step15: Fitting Estimators
Step16: Basic Estimators
Step17: Testing Estimators
Step18: Closer look at the variability of the best model
Step19: Importing bootstrapped scores from parts 1 & 2 for side-by-side comparison
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import numpy as np
import scipy as sp
import seaborn as sns
import matplotlib.pyplot as plt
import json
from IPython.display import Image
from IPython.core.display import HTML
retval=os.chdir("..")
clean_data=pd.read_pickle('./clean_data/clean_data.pkl')
clean_data.head()
kept_cols=['helpful', 'num_sents', 'num_words', 'readability', 'neg_senti',
'pos_senti', 'neu_senti', 'comp_senti', 'text_lemma',]
my_rand_state=0
test_size=0.25
from sklearn.model_selection import train_test_split
X = (clean_data[kept_cols].iloc[:,1:]).as_matrix()
y = (clean_data[kept_cols].iloc[:,0]).tolist()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
random_state=my_rand_state)
from sklearn.preprocessing import StandardScaler
std_scale=StandardScaler()
from sklearn.feature_extraction.text import TfidfVectorizer
#set max_features to minimize training time
#also, I didn't apply LDA-based dimensionality reduction
tfidf=TfidfVectorizer(lowercase=False,max_features=200)
from sklearn.base import BaseEstimator, TransformerMixin
class ExtractText(BaseEstimator, TransformerMixin):
    """Separates the features by numerical and text."""
def __init__(self, text,n_text=-1):
self.text = text
self.n_text=n_text
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if(self.text):
return X[:,self.n_text]
else:
return X[:,:self.n_text]
from sklearn.pipeline import FeatureUnion
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
nb_clf=GaussianNB()
priors=[None]
qda_clf=QuadraticDiscriminantAnalysis()
reg_param=[0.0, 0.25, 0.5, 0.75]
log_clf=LogisticRegression(penalty='l2')
C=[0.001 , 0.01, 10, 100,1000]
rf_clf=RandomForestClassifier()
n_estimators=[100,200]
max_features=[.1,.3,.5]
class_weight=['balanced']
class_weight.extend([{1: w} for w in [1, 2, 10]])
from imblearn import pipeline #needed if mixing imblearn with sklearn classes
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
n_jobs=4
n_folds=10
skfold = StratifiedKFold(n_splits=n_folds,random_state=my_rand_state, shuffle=False)
class DenseTransformer(BaseEstimator, TransformerMixin):
def transform(self, X, y=None, **fit_params):
return X.todense()
def fit_transform(self, X, y=None, **fit_params):
self.fit(X, y, **fit_params)
return self.transform(X)
def fit(self, X, y=None, **fit_params):
return self
ft_union=FeatureUnion(transformer_list=[('text_pipe',pipeline.Pipeline([('extract',ExtractText(text=True)),
('tfidf',tfidf),
('to_dense', DenseTransformer())])),
('numb_pipe',pipeline.Pipeline([('extract',ExtractText(text=False)),
('scale',std_scale)]))])
nb_clf_b = pipeline.Pipeline(steps=[('union',ft_union),('clf',nb_clf)])
nb_clf_est_b = GridSearchCV(estimator=nb_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(clf__priors=priors))
qda_clf_b = pipeline.Pipeline(steps=[('union',ft_union),('clf',qda_clf)])
qda_clf_est_b = GridSearchCV(estimator=qda_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(clf__reg_param=reg_param))
log_clf_b = pipeline.Pipeline(steps=[('union',ft_union),('clf',log_clf)])
log_clf_est_b = GridSearchCV(estimator=log_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(clf__C=C,
clf__class_weight=class_weight))
rf_clf_b = pipeline.Pipeline(steps=[('union',ft_union),('clf',rf_clf)])
rf_clf_est_b = GridSearchCV(estimator=rf_clf_b,cv=skfold,
scoring='roc_auc',n_jobs=n_jobs,
param_grid=dict(clf__n_estimators=n_estimators,
clf__max_features=max_features,
clf__class_weight=class_weight))
from sklearn.externals import joblib
nb_clf_est_b.fit(X_train,y_train)
joblib.dump(nb_clf_est_b, './other_output/merged/nb_clf_est_b.pkl')
qda_clf_est_b.fit(X_train,y_train)
joblib.dump(qda_clf_est_b, './other_output/merged/qda_clf_est_b.pkl')
log_clf_est_b.fit(X_train,y_train)
joblib.dump(log_clf_est_b, './other_output/merged/log_clf_est_b.pkl')
rf_clf_est_b.fit(X_train,y_train)
joblib.dump(rf_clf_est_b, './other_output/merged/rf_clf_est_b.pkl')
from sklearn.metrics import roc_curve, auc
nb_clf_est_b=joblib.load('./other_output/merged/nb_clf_est_b.pkl')
qda_clf_est_b=joblib.load('./other_output/merged/qda_clf_est_b.pkl')
log_clf_est_b=joblib.load('./other_output/merged/log_clf_est_b.pkl')
rf_clf_est_b=joblib.load('./other_output/merged/rf_clf_est_b.pkl')
nb_fpr, nb_tpr, _ = roc_curve(y_test,
nb_clf_est_b.predict_proba(X_test)[:,1])
nb_roc_auc = auc(nb_fpr, nb_tpr)
qda_fpr, qda_tpr, _ = roc_curve(y_test,
qda_clf_est_b.predict_proba(X_test)[:,1])
qda_roc_auc = auc(qda_fpr, qda_tpr)
log_fpr, log_tpr, _ = roc_curve(y_test,
log_clf_est_b.predict_proba(X_test)[:,1])
log_roc_auc = auc(log_fpr, log_tpr)
rf_fpr, rf_tpr, _ = roc_curve(y_test,
rf_clf_est_b.predict_proba(X_test)[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
plt.plot(nb_fpr, nb_tpr, color='cyan', linestyle='--',
label='NB (area = %0.2f)' % nb_roc_auc, lw=2)
plt.plot(qda_fpr, qda_tpr, color='indigo', linestyle='--',
label='QDA (area = %0.2f)' % qda_roc_auc, lw=2)
plt.plot(log_fpr, log_tpr, color='seagreen', linestyle='--',
label='LOG (area = %0.2f)' % log_roc_auc, lw=2)
plt.plot(rf_fpr, rf_tpr, color='blue', linestyle='--',
label='RF (area = %0.2f)' % rf_roc_auc, lw=2)
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k',
label='Luck')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curves of Basic Models Using Bag of Words and Macro-Text Stats')
plt.legend(loc="lower right")
plt.savefig('./plots/ROC_Basic_BOW_MERGED.png', bbox_inches='tight')
plt.show()
from scipy.stats import sem
len(y_test) #the sample is large enough that we can get away with 5% draws
y_test=np.array(y_test)
X_test=np.array(X_test)
#initialize
n_bootstraps = 2000
rng_seed = 1
sample_percent=0.05
min_index=0
max_index=len(y_test)-1
draw_size=int(len(y_test)*sample_percent)
bootstrapped_scores = []
rng = np.random.RandomState(rng_seed)
for i in range(n_bootstraps):
    # bootstrap by sampling with replacement from the test-set indices
    indices = rng.randint(min_index, max_index + 1, draw_size)
#calculate ROC from
rf_fpr, rf_tpr, _ = roc_curve(y_test[indices],
rf_clf_est_b.predict_proba(X_test[indices,:])[:,1])
rf_roc_auc = auc(rf_fpr, rf_tpr)
#save
bootstrapped_scores.append(rf_roc_auc)
import pickle
with open('./other_output/merged/rf_bootstrapped_scores.pkl', 'wb') as f:
pickle.dump(bootstrapped_scores, f)
plt.hist(bootstrapped_scores, bins=50)
plt.title('Histogram of Bootstrapped AUC ROC of\nRandom Forest Model Using Bag of Words and Macro-Text Stats')
plt.savefig('./plots/ROC_Histogram_Basic_BOW_MERGED.png', bbox_inches='tight')
plt.show()
pd.DataFrame({'auc':bootstrapped_scores}).auc.describe()
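# Added sketch: a simple 95% bootstrap confidence interval for the AUC.
lower, upper = np.percentile(bootstrapped_scores, [2.5, 97.5])
print('95% CI: [{:.3f}, {:.3f}]'.format(lower, upper))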
with open('./other_output/rf_bootstrapped_scores.pkl', 'rb') as f:
bootstrapped_scores_macrotext = pickle.load(f)
with open('./other_output/bow/rf_bootstrapped_scores.pkl', 'rb') as f:
bootstrapped_scores_bow = pickle.load(f)
bs_auc=pd.DataFrame({'Macro-Text Only':bootstrapped_scores_macrotext,
'Bag of Words Only':bootstrapped_scores_bow,
'Combined':bootstrapped_scores})
bs_auc=bs_auc[['Macro-Text Only','Bag of Words Only','Combined']]
bs_auc.describe()
pl_axes=bs_auc.boxplot(return_type='axes')
x=pl_axes.axes.set_ylabel('AUC ROC')
x=pl_axes.axes.set_title('Comparison of Bootstrapped AUC ROC of Random Forest Model\nUsing Macro-Text Stats, Bag of Words, and Combined Approaches')
plt.savefig('./plots/BoxPlot_ROC_MacText_BOX.png', bbox_inches='tight')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrieve fixed and moving images for registration
Step2: Prepare images for registration
Step3: Plot the MutualInformationImageToImageMetric surface
Step4: Follow gradient ascent
Step5: We see in the plot that the metric improves as transformation parameters are updated with each iteration. The value of the metric at each step generally increases, yielding a final value very close to the optimal position in the parameter space window.
Step6: Resample the moving image
Step7: Clean up
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from urllib.request import urlretrieve
import itk
from itkwidgets import compare, checkerboard
dim = 2
ImageType = itk.Image[itk.F, dim]
FixedImageType = ImageType
MovingImageType = ImageType
fixed_img_path = 'BrainT1SliceBorder20.png'
moving_img_path = 'BrainProtonDensitySliceShifted13x17y.png'
if not os.path.exists(fixed_img_path):
url = 'https://data.kitware.com/api/v1/file/5cad1ae88d777f072b18183d/download'
urlretrieve(url, fixed_img_path)
if not os.path.exists(moving_img_path):
url = 'https://data.kitware.com/api/v1/file/5cad1ae88d777f072b181831/download'
urlretrieve(url, moving_img_path)
fixed_img = itk.imread('BrainT1SliceBorder20.png', itk.F)
moving_img = itk.imread('BrainProtonDensitySliceShifted13x17y.png', itk.F)
checkerboard(fixed_img, moving_img)
fixed_normalized_image = itk.normalize_image_filter(fixed_img)
fixed_smoothed_image = itk.discrete_gaussian_image_filter(fixed_normalized_image, variance=2.0)
moving_normalized_image = itk.normalize_image_filter(moving_img)
moving_smoothed_image = itk.discrete_gaussian_image_filter(moving_normalized_image, variance=2.0)
compare(fixed_smoothed_image, moving_smoothed_image)
# Move at most 20 pixels away from the initial position
window_size = [20,20]
# Collect 100 steps of data along each axis
n_steps = [100,100]
TransformType = itk.TranslationTransform[itk.D,dim]
OptimizerType = itk.GradientDescentOptimizer;
ExhaustiveOptimizerType = itk.ExhaustiveOptimizer
MetricType = itk.MutualInformationImageToImageMetric[ImageType, ImageType]
RegistrationType = itk.ImageRegistrationMethod[ImageType, ImageType]
InterpolatorType = itk.LinearInterpolateImageFunction[ImageType, itk.D]
transform = TransformType.New()
metric = MetricType.New()
optimizer = ExhaustiveOptimizerType.New()
registrar = RegistrationType.New()
interpolator = InterpolatorType.New()
metric.SetNumberOfSpatialSamples(100)
metric.SetFixedImageStandardDeviation(0.4)
metric.SetMovingImageStandardDeviation(0.4)
optimizer.SetNumberOfSteps(n_steps)
# Initialize scales and set back to optimizer
scales = optimizer.GetScales()
scales.SetSize(2)
scales.SetElement(0,window_size[0] / n_steps[0])
scales.SetElement(1,window_size[1] / n_steps[1])
optimizer.SetScales(scales)
registrar.SetFixedImage(fixed_smoothed_image)
registrar.SetMovingImage(moving_smoothed_image)
registrar.SetOptimizer(optimizer)
registrar.SetTransform(transform)
registrar.SetInterpolator(interpolator)
registrar.SetMetric(metric)
registrar.SetFixedImageRegion(fixed_img.GetBufferedRegion())
registrar.SetInitialTransformParameters(transform.GetParameters())
# Collect data describing the parametric surface with an observer
surface = dict()
def print_iteration():
surface[tuple(optimizer.GetCurrentPosition())] = optimizer.GetCurrentValue()
optimizer.AddObserver(itk.IterationEvent(), print_iteration)
registrar.Update()
# Check the extreme positions within the observed window
max_position = list(optimizer.GetMaximumMetricValuePosition())
min_position = list(optimizer.GetMinimumMetricValuePosition())
max_val = optimizer.GetMaximumMetricValue()
min_val = optimizer.GetMinimumMetricValue()
print(max_position)
print(min_position)
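# Added sketch: report the metric extrema alongside their positions, using
# the values captured above.
print('Max metric value {:.4f} at {}'.format(max_val, max_position))
print('Min metric value {:.4f} at {}'.format(min_val, min_position))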
# Set up values for the plot
x_vals = [list(set([x[i]
for x in surface.keys()])) for i in range(0,2)]
for i in range(0,2):
x_vals[i].sort()
X, Y = np.meshgrid(x_vals[0], x_vals[1])
Z = np.array([[surface[(x0,x1)] for x1 in x_vals[0]]for x0 in x_vals[1]])
# Plot the surface as a 2D heat map
fig = plt.figure()
plt.gca().invert_yaxis()
ax = plt.gca()
surf = ax.scatter(X, Y, c=Z, cmap=cm.coolwarm)
ax.plot(max_position[0],max_position[1],'k^')
ax.plot(min_position[0],min_position[1],'kv')
# Plot the surface as a 3D scatter plot
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X,Y,Z,cmap=cm.coolwarm)
n_iterations = 200
transform = TransformType.New()
metric = MetricType.New()
optimizer = OptimizerType.New()
registrar = RegistrationType.New()
interpolator = InterpolatorType.New()
registrar.SetFixedImage(fixed_smoothed_image)
registrar.SetMovingImage(moving_smoothed_image)
registrar.SetOptimizer(optimizer)
registrar.SetTransform(transform)
registrar.SetInterpolator(interpolator)
registrar.SetMetric(metric)
registrar.SetFixedImageRegion(fixed_img.GetBufferedRegion())
registrar.SetInitialTransformParameters(transform.GetParameters())
metric.SetNumberOfSpatialSamples(100)
metric.SetFixedImageStandardDeviation(0.4)
metric.SetMovingImageStandardDeviation(0.4)
optimizer.SetLearningRate(15)
optimizer.SetNumberOfIterations(n_iterations)
optimizer.MaximizeOn()
descent_data = dict()
descent_data[0] = (0,0)
def log_iteration():
descent_data[optimizer.GetCurrentIteration() + 1] = tuple(optimizer.GetCurrentPosition())
optimizer.AddObserver(itk.IterationEvent(), log_iteration)
registrar.Update()
print(f'Its: {optimizer.GetCurrentIteration()}')
print(f'Final Value: {optimizer.GetValue()}')
print(f'Final Position: {list(registrar.GetLastTransformParameters())}')
x_vals = [descent_data[i][0] for i in range(0,n_iterations)]
y_vals = [descent_data[i][1] for i in range(0,n_iterations)]
fig = plt.figure()
# Note: We invert the y-axis to represent the image coordinate system
plt.gca().invert_yaxis()
ax = plt.gca()
surf = ax.scatter(X, Y, c=Z, cmap=cm.coolwarm)
for i in range(0,n_iterations-1):
plt.plot(x_vals[i:i+2],y_vals[i:i+2],'wx-')
plt.plot(descent_data[0][0], descent_data[0][1],'bo')
plt.plot(descent_data[n_iterations-1][0],descent_data[n_iterations-1][1],'ro')
plt.plot(max_position[0], max_position[1], 'k^')
plt.plot(min_position[0], min_position[1], 'kv')
max_position
ResampleFilterType = itk.ResampleImageFilter[MovingImageType,FixedImageType]
resample = ResampleFilterType.New(
Transform=transform,
Input=moving_img,
Size=fixed_img.GetLargestPossibleRegion().GetSize(),
OutputOrigin=fixed_img.GetOrigin(),
OutputSpacing=fixed_img.GetSpacing(),
OutputDirection=fixed_img.GetDirection(),
DefaultPixelValue=100)
resample.Update()
checkerboard(fixed_img, resample.GetOutput())
os.remove(fixed_img_path)
os.remove(moving_img_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Edit form information
Step2: Save your form
Step3: Officially submit your form
|
<ASSISTANT_TASK:>
Python Code:
from dkrz_forms import form_widgets
form_widgets.show_status('form-submission')
MY_LAST_NAME = "...." # e.g. MY_LAST_NAME = "schulz"
#-------------------------------------------------
from dkrz_forms import form_handler, form_widgets
form_info = form_widgets.check_pwd(MY_LAST_NAME)
sf = form_handler.init_form(form_info)
form = sf.sub.entity_out.form_info
form.myattribute = "myinformation"
form_handler.save_form(sf,"..my comment..") # edit my comment info
form_handler.email_form_info(sf)
form_handler.form_submission(sf)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the data
Step2: The decoding
Step3: Compute prediction scores using cross-validation
Step4: Retrieve the discriminating weights and save them
Step5: Visualize the discriminating weights over the mean EPI
Step6: Plot also the mask that was computed by the NiftiMasker
|
<ASSISTANT_TASK:>
Python Code:
from nilearn import datasets
# if you download these from python.
haxby_dataset = datasets.fetch_haxby()
# print basic information on the dataset
print('First subject anatomical nifti image (3D) is at: %s' %
haxby_dataset.anat[0])
print('First subject functional nifti images (4D) are at: %s' %
haxby_dataset.func[0]) # 4D data
# Load the behavioral labels
import numpy as np
# Load target information as string and give a numerical identifier to each
labels = np.recfromcsv(haxby_dataset.session_target[0], delimiter=" ")
# scikit-learn >= 0.14 supports text labels. You can replace this line by:
# target = labels['labels']
_, target = np.unique(labels['labels'], return_inverse=True)
# Keep only data corresponding to faces or cats
condition_mask = np.logical_or(labels['labels'] == b'face',
labels['labels'] == b'cat')
target = target[condition_mask]
from nilearn.input_data import NiftiMasker
# ventro-temporal mask -> it is a transformer -> 3D images => 2D feature spaces
mask_filename = haxby_dataset.mask_vt[0]
# For decoding, standardizing is often very important
nifti_masker = NiftiMasker(mask_img=mask_filename, standardize=True)
func_filename = haxby_dataset.func[0]
# We give the nifti_masker a filename and retrieve a 2D array ready
# for machine learning with scikit-learn
fmri_masked = nifti_masker.fit_transform(func_filename)
# Restrict the classification to the face vs cat discrimination
fmri_masked = fmri_masked[condition_mask]
print(fmri_masked.shape)
# it is a substantially reduced dataset.
print(fmri_masked)
print(target)
# Here we use a Support Vector Classification, with a linear kernel
from sklearn.svm import SVC
# set the kernel type
svc = SVC(kernel='linear')
# And we run it
print(target.shape)
print(fmri_masked.shape)
svc.fit(fmri_masked, target)
prediction = svc.predict(fmri_masked)
len(target)
print(prediction);
print(target - prediction)
from sklearn.cross_validation import KFold
cv = KFold(n=len(fmri_masked), n_folds=5)
cv_scores = []
for train, test in cv:
svc.fit(fmri_masked[train], target[train])
prediction = svc.predict(fmri_masked[test])
cv_scores.append(np.sum(prediction == target[test])
/ float(np.size(target[test])))
print(cv_scores)
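# Added sketch: summarize the fold accuracies with their mean.
print("Mean CV accuracy: {:.3f}".format(np.mean(cv_scores)))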
# Retrieve the SVC discriminating weights
coef_ = svc.coef_
# Reverse masking thanks to the Nifti Masker
coef_img = nifti_masker.inverse_transform(coef_)
# Save the coefficients as a Nifti image
coef_img.to_filename('haxby_svc_weights.nii')
from nilearn.image import mean_img
from nilearn.plotting import plot_roi, plot_stat_map, show
mean_epi = mean_img(func_filename)
plot_stat_map(coef_img, mean_epi, title="SVM weights", display_mode="yx")
plot_roi(nifti_masker.mask_img_, mean_epi, title="Mask", display_mode="yx")
show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE on notation
Step2: Q1. Complete this code.
Step3: Q2. Complete this code.
Step4: Q3-4. Complete this code.
Step5: Q5-8. Complete this code.
Step6: Variable helper functions
Step7: Saving and Restoring Variables
Step8: Sharing Variables
Step9: Q17. Predict the result of this code.
Step10: Q18. Complete this code.
Step11: Q19. Complete this code.
Step12: Q20. Complete this code.
Step13: Q21. Complete this code.
Step14: Exporting and Importing Meta Graphs
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from datetime import date
date.today()
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
tf.__version__
np.__version__
w = tf.Variable(1.0, name="weight")
with tf.Session() as sess:
sess.run(w.initializer)
print(sess.run(w))
# Create a variable w.
w = tf.Variable(1.0, name="Weight")
# Q. Add 1 to w and assign the value to w.
assign_op = w.assign(w + 1.0)
# Or assign_op = w.assign_add(1.0)
# Or assgin_op = tf.assgin(w, w + 1.0)
with tf.Session() as sess:
sess.run(w.initializer)
for _ in range(10):
print(sess.run(w), "=>", end="")
sess.run(assign_op)
w1 = tf.Variable(1.0)
w2 = tf.Variable(2.0)
w3 = tf.Variable(3.0)
out = w1 + w2 + w3
# Q. Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Initialize all variables.
print(sess.run(out))
V = tf.Variable(tf.truncated_normal([1, 10]))
# Q3. Initialize `W` with 2 * W
W = tf.Variable(V.initialized_value() * 2.0)
# Q4. Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op) # Initialize all variables.
_V, _W = sess.run([V, W])
print(_V)
print(_W)
assert np.array_equiv(_V * 2.0, _W)
# Compare the value of `W` with Z = 2 * V.
g = tf.Graph()
with g.as_default():
W = tf.Variable([[0,1],[2,3]], name="Weight", dtype=tf.float32)
# Q5. Print the name of `W`.
print("Q5.", W.name)
# Q6. Print the name of the op of `W`.
print("Q6.", W.op.name)
# Q7. Print the data type of `w`.
print("Q7.", W.dtype)
# Q8. Print the shape of `w`.
print("Q8.", W.get_shape().as_list())
# Q9. Print the rank (or ndims) of `W`.
print("Q9.", W.get_shape().ndims)
# Q10. Check if the graph of `W` is the same as `g`.
print("Q10.", W.graph == g)
tf.reset_default_graph()
w1 = tf.Variable(1.0, name="weight1")
w2 = tf.Variable(2.0, name="weight2", trainable=False)
w3 = tf.Variable(3.0, name="weight3")
with tf.Session() as sess:
# Q11. Initialize the variables w1 and w2.
sess.run(tf.variables_initializer([w1, w2]))
# Q12. Print the name of all global variables
for v in tf.global_variables():
print("global variable =>", v.name)
# Q13. Print the name of all trainable variables
for v in tf.trainable_variables():
print("trainable_variable =>", v.name)
tf.reset_default_graph()
w = tf.Variable(0.2, 'weight') # <- This is what we want to find. The true value is 2.
x = tf.random_uniform([1])
y = 2. * x # Let's pretend we don't know the coefficient 2 here.
y_hat = w * x
loss = tf.squared_difference(y, y_hat)
train_op = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
saver = tf.train.Saver()
for step in range(1, 10001):
sess.run(train_op)
import os
if not os.path.exists('model'): os.mkdir('model') # Let's make a folder in order not to discrupt our current folder.
if step % 1000 == 0:
print(sess.run(w), "=>", end="")
# Q14. Save the checkpoint file, giving it a base name of `model/my-model`
save_path = saver.save(sess, 'model/my-model', global_step=step)
print("Saved successfully")
print(os.listdir('model'))
# Q15. Find the filename of the latest saved checkpoint file.
# And if it exists, restore it.
ckpt = tf.train.latest_checkpoint('model')
print(ckpt)
if ckpt is not None:
saver.restore(sess, ckpt)
print("Restored succesfully!")
g = tf.Graph()
with g.as_default():
with tf.variable_scope("foo"):
v = tf.get_variable("vv", [1,]) # v.name == "foo/vv:0"
#Q. Get the existing variable `v`
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("vv") # The same as v above.
assert v1 == v
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("vv", [1])
print("v.name =", v.name)
value = [0, 1, 2, 3, 4, 5, 6, 7]
# Q. Create an initializer with `value`.
init = tf.constant_initializer(value)
tf.reset_default_graph()
x = tf.get_variable('x', shape=[2, 4], initializer=init)
with tf.Session() as sess:
sess.run(x.initializer)
print("x =\n", sess.run(x))
# Q. Create an initializer with a normal distribution of mean equals 0 and standard deviation equals 2.
init = tf.random_normal_initializer(mean=0, stddev=2)
tf.reset_default_graph()
x = tf.get_variable('x', shape=[10, 1000], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
print("Make sure the mean", np.mean(_x), "is close to 0" )
print("Make sure the standard deviation", np.std(_x), "is close to 2" )
# Q. Create an initializer with a truncated normal distribution of mean equals 0 and standard deviation equals 2.
init = tf.truncated_normal_initializer(mean=0, stddev=2)
tf.reset_default_graph()
x = tf.get_variable('x', shape=[1000,], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
plt.scatter(np.arange(1000), _x)
_avg = np.array([np.mean(_x)] * 1000)
_std = np.array([np.std(_x)] * 1000)
plt.plot(np.arange(1000), _avg, 'r-')
plt.plot(np.arange(1000), _avg + 2*_std, 'g-')
plt.plot(np.arange(1000), _avg - 2*_std, 'k-')
plt.legend(['mean', 'upper 2*std', 'lower 2*std'])
plt.show()
# Note that few data points exceed the boundaries.
# Q. Create an initializer with a random normal distribution of minimum 0 and maximum 1.
init = tf.random_uniform_initializer(0, 1)
tf.reset_default_graph()
x = tf.get_variable('x', shape=[5000,], initializer=init)
with tf.Session():
x.initializer.run()
_x = x.eval()
count, bins, ignored = plt.hist(_x, 20, normed=True)
plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
plt.show()
# Note that few data points exceed the boundaries.
# Check other initializers such as zeros_initializer, ones_initializer, or orthogonal_initializer, as well.
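# Added sketch: a quick look at the zeros/ones initializers mentioned above.
tf.reset_default_graph()
a = tf.get_variable('a', shape=[2, 2], initializer=tf.zeros_initializer())
b = tf.get_variable('b', shape=[2, 2], initializer=tf.ones_initializer())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(a))
    print(sess.run(b))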
tf.reset_default_graph()
print("Of course, there're no variables since we reset the graph. See", tf.global_variables())
with tf.Session() as sess:
# Q. Import the saved graph of `model/my-model-10000`.
new_saver = tf.train.import_meta_graph('model/my-model-10000.meta')
new_saver.restore(sess, 'model/my-model-10000')
for v in tf.global_variables():
print("Now we have a variable", v.name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Architecture
Step2: Training
Step3: Denoising
Step4: Checking out the performance
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 16, (3, 3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x16
conv2 = tf.layers.conv2d(maxpool1, 8, (3, 3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x8
conv3 = tf.layers.conv2d(maxpool2, 8, (3, 3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x8
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x8
conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x8
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14, 14))
# Now 14x14x8
conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x8
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28, 28))
# Now 28x28x8
conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x16
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name='decoded')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
imgs = batch[0].reshape((-1, 28, 28, 1))
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs')
targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets')
### Encoder
conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same')
# Now 14x14x32
conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same')
# Now 7x7x32
conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same')
# Now 4x4x16
### Decoder
upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7))
# Now 7x7x16
conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu)
# Now 7x7x16
upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14))
# Now 14x14x16
conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 14x14x32
upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28))
# Now 28x28x32
conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu)
# Now 28x28x32
logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None)
#Now 28x28x1
# Pass logits through sigmoid to get reconstructed image
decoded = tf.nn.sigmoid(logits, name='decoded')
# Pass logits through sigmoid and calculate the cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)
# Get cost and define the optimizer
cost = tf.reduce_mean(loss)
opt = tf.train.AdamOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
epochs = 100
batch_size = 200
# Set's how much noise we're adding to the MNIST images
noise_factor = 0.5
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images from the batch
imgs = batch[0].reshape((-1, 28, 28, 1))
# Add random noise to the input images
noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape)
# Clip the images to be between 0 and 1
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
# Noisy images as inputs, original images as targets
batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs,
targets_: imgs})
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape)
noisy_imgs = np.clip(noisy_imgs, 0., 1.)
reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))})
for images, row in zip([noisy_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
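# Added sketch: persist the comparison figure (hypothetical output path).
fig.savefig('denoised_comparison.png', bbox_inches='tight')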
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Number of vehicles belonging to each brand
Step2: Average vehicle price based on vehicle type as well as gearbox type
|
<ASSISTANT_TASK:>
Python Code:
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
# Imports
import os
import subprocess
import stat
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mat
import matplotlib.pyplot as plt
from datetime import datetime
sns.set(style="white")
%matplotlib inline
np.__version__
pd.__version__
sns.__version__
mat.__version__
# Dataset
clean_data_path = "dataset/autos.csv"
df = pd.read_csv(clean_data_path,encoding="latin-1")
# Crie um Plot que mostre o número de veículos pertencentes a cada marca
sns.set_style("whitegrid")
g = sns.catplot(y="brand", data=df, kind="count", palette="Reds_r", height=7, aspect=1.5)
g.ax.set_title("Veículos Por Marca",fontdict={'size':18})
g.ax.xaxis.set_label_text("Número de Veículos",fontdict= {'size':16})
g.ax.yaxis.set_label_text("Marca",fontdict= {'size':16})
plt.show()
# Salvando o plot
g.savefig(("plots/Analise2/brand-vehicleCount.png"))
# Crie um Plot com o Preço médio dos veículos com base no tipo de veículo, bem como no tipo de caixa de câmbio
fig, ax = plt.subplots(figsize=(8,5))
colors = ["#00e600", "#ff8c1a","#a180cc"]
sns.barplot(x="vehicleType", y="price",hue="gearbox", palette=colors, data=df)
ax.set_title("Preço médio dos veículos por tipo de veículo e tipo de caixa de câmbio",fontdict= {'size':12})
ax.xaxis.set_label_text("Tipo de Veículo",fontdict= {'size':12})
ax.yaxis.set_label_text("Preço Médio",fontdict= {'size':12})
plt.show()
# Salvando o plot
fig.savefig("plots/Analise2/vehicletype-gearbox-price.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Image As Greyscale
Step2: Load Image As RGB
Step3: View Image Data
|
<ASSISTANT_TASK:>
Python Code:
# Load library
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load image as grayscale
image = cv2.imread('images/plane.jpg', cv2.IMREAD_GRAYSCALE)
# Show image
plt.imshow(image, cmap='gray'), plt.axis("off")
plt.show()
# Load image in color
image_bgr = cv2.imread('images/plane.jpg', cv2.IMREAD_COLOR)
# Convert to RGB
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
# Show image
plt.imshow(image_rgb), plt.axis("off")
plt.show()
# Show image data
image
# Show dimensions
image.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Helper functions
Step2: Experiment 1
Step3: Original Lines
Step4: Rotated lines
Step5: Experiment 2
Step6: Compute rotation linear transformation
Step7: Success! We have eliminated the sample divergence!
Step8: Compute linear transformation
Step9: Experiment 4
Step10: Conclusion
Step11: Experiment 6
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import seaborn as sns
from random import randint as rand
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.linear_model import LinearRegression
from sklearn.metrics.pairwise import euclidean_distances
from scipy.linalg import svd
from seqpyplot.container.data_container import DataContainer
from seqpyplot.parsers.config_parser import config_parser
from pathlib import Path
from matplotlib import rcParams
rcParams['figure.figsize'] = (10, 10)
pd.options.mode.chained_assignment = None
def calc_theta(coef1, coef2):
"Returns an angle in radians"
return np.abs(
np.arctan(np.abs(coef1 - coef2) / (1. + (coef1 * coef2)))
)
def compute_rot_mat(rad, coef=.5):
" Compute a rotation matrix using rad for a given regression coefficient "
if coef < 1.0:
rotation_matrix = np.array([[np.cos(rad), -np.sin(rad)],
[np.sin(rad), np.cos(rad)]])
else:
rotation_matrix = np.array([[np.cos(rad), np.sin(rad)],
[-np.sin(rad), np.cos(rad)]])
return rotation_matrix
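# Quick illustrative sanity check of the two helpers above (added for clarity; the names
# _theta and _R exist only for this check). For the example slopes used below (1.1 and 2.0)
# the angle between the lines is small, and a proper rotation matrix is orthogonal with
# determinant 1.
_theta = calc_theta(1.1, 2.0)
_R = compute_rot_mat(_theta, coef=0.5)
print(_theta)
print(np.linalg.det(_R))                         # ~1.0 for a pure rotation
print(np.allclose(np.dot(_R.T, _R), np.eye(2)))  # True: R^T R = I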
slope1 = 1.1
slope2 = 2.0
line1 = np.array([slope1 * x for x in range(10)])
line2 = np.array([slope2 * x for x in range(10)])
xs = list(range(10))
# Plot lines
plt.plot(xs, line1, color='black', label='line to rotate');
plt.plot(xs, line2, color='red', linewidth=5, label='stationary');
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--');
plt.annotate(s=('<-- we\'ll rotate this line'), xy=(xs[3]+0.08, line1[3]), rotation=-36)
plt.annotate(s=('<-- Direction of rotation'), xy=(xs[3] + 0.75, line1[3]+3.5), rotation=-15)
plt.ylim((-1, 10))
plt.xlim((-1, 10))
plt.legend(loc='upper right');
# Compute angle
angle_diff = calc_theta(slope1, slope2)
angle_diff
# Compute rotation matrix
rot_matrix = compute_rot_mat(angle_diff)
rot_matrix
# rotate line 1 (black line)
new_line1 = list()
for x, y in zip(xs, line1):
# need shape [[#], [#]]
old_point = np.array([[x], [y]])
    new_point = np.dot(rot_matrix, old_point)  # rotate the column vector: R · p
new_line1.append(new_point)
new_line1 = np.squeeze(np.asarray(new_line1))
xs[6], line1[6]
plt.plot(xs, line2, color='red', linewidth=5, alpha=0.7, label='Stationary');
plt.plot(xs, line1, color='black', label='line to rotate')
plt.scatter(new_line1[:, 0], new_line1[:, 1], color='black', s=95)
plt.plot(new_line1[:, 0], new_line1[:, 1], color='black', linestyle='--', label='rotated line')
plt.annotate(s=('Original Line'), xy=(xs[6] + 0.7, line1[6]+0.3))
plt.annotate(s=('<-- Direction of rotation'), xy=(xs[6] - 1.7, line1[6]+0.8), rotation=-36)
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
for (x1, y1), (x2, y2) in zip(zip(xs, line1), new_line1):
plt.plot([x1, x2], [y1, y2], linestyle='--', color='black', alpha=0.4)
plt.ylim((-1, 10))
plt.xlim((-1, 10));
plt.legend(loc='upper right');
config = '../examples/example_config.ini'
config_obj = config_parser(config)
# load the data container_obj
container_obj = DataContainer(config_obj)
data, ercc_data = container_obj.parse_input()
cols = data.columns
cols
df = data[['D1_Cont', 'D1_Treat']]
df.loc[:, 'mean'] = df.mean(axis=1)
df.head()
d1 = df[['D1_Cont', 'mean']]
d2 = df[['D1_Treat', 'mean']]
fig, ax = plt.subplots()
d1.plot('mean', 'D1_Cont', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='blue', alpha=0.4)
d2.plot('mean', 'D1_Treat', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4);
plt.annotate('The bias between samples is clearly seen in this plot!', (500, 3900), fontsize=14)
ax.set_title("Raw unnormalized data");
# Quick reset for this cell
d1 = df[['D1_Cont', 'mean']]
d2 = df[['D1_Treat', 'mean']]
# define regression objects
regCont = LinearRegression(fit_intercept=True)
regTreat = LinearRegression(fit_intercept=True)
# fit regression
regCont.fit(d1['D1_Cont'].values.reshape(-1, 1), d1['mean'].values.reshape(-1, 1))
regTreat.fit(d2['D1_Treat'].values.reshape(-1, 1), d1['mean'].values.reshape(-1, 1))
print(regCont.coef_, regCont.intercept_)
print(regTreat.coef_, regTreat.intercept_)
# Correct bias
d1['D1_Cont'] = d1['D1_Cont'] - regCont.intercept_
d2['D1_Treat'] = d2['D1_Treat'] - regTreat.intercept_
# Plot regression lines
fig, ax = plt.subplots()
d1.plot('mean', 'D1_Cont', kind='scatter', ax=ax, color='blue', alpha=0.4)
d2.plot('mean', 'D1_Treat', kind='scatter', xlim=(0, 8000), ylim=(0, 8000), ax=ax, color='red', alpha=0.4)
plt.plot([0, 8000], [0.0, regCont.coef_ * 8000], linestyle='--', color='black')
plt.plot([0, 8000], [0.0, regTreat.coef_ * 8000], linestyle='--', color='black');
ax.set_title("bias corrected, with best fit lines");
plt.ylim((-500, 8000))
plt.xlim((-500, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
correction_theta # in radians
rotation_matrix = compute_rot_mat(correction_theta)
rotation_matrix
new_treat = np.array([np.dot(rotation_matrix, d2.values[i, :]) for i in range(len(d2.values))])
new_treat
d2_cor = d2.copy()
d2_cor.loc[:, 'D1_Treat'] = new_treat[:, 0]
d2_cor.loc[:, 'mean'] = new_treat[:, 1]
fig, ax = plt.subplots()
d1.plot('mean', 'D1_Cont', kind='scatter', xlim=(0, 20000), ylim=(0, 20000), ax=ax, color='blue', alpha=0.4)
d2_cor.plot('mean', 'D1_Treat', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4);
ax.set_title("No TMM, linearly transformed")
plt.ylim((-1000, 8000))
plt.xlim((-1000, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
# load the data container_obj
config = '../examples/example_config.ini'
config_obj = config_parser(config)
container_obj = DataContainer(config_obj)
data, ercc_data = container_obj.parse_input()
data = container_obj.normalize_file_pairs(data) # Single df of normalized data
normed_df = data[['D1_Cont', 'D1_Treat']].copy()
normed_df.loc[:, 'mean'] = normed_df.mean(axis=1)
regCont = LinearRegression(fit_intercept=True)
regTreat = LinearRegression(fit_intercept=True)
regCont.fit(normed_df['D1_Cont'].values.reshape(-1, 1), normed_df['mean'].values.reshape(-1, 1))
regTreat.fit(normed_df['D1_Treat'].values.reshape(-1, 1), normed_df['mean'].values.reshape(-1, 1))
normed_df['D1_Cont'] = normed_df['D1_Cont'] - regCont.intercept_
normed_df['D1_Treat'] = normed_df['D1_Treat'] - regTreat.intercept_
fig, ax = plt.subplots()
normed_df.plot('mean', 'D1_Cont', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='blue', alpha=0.4, label='Control')
normed_df.plot('mean', 'D1_Treat', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='red', alpha=0.4, label='Treated')
# plot regression lines, with color switch!
plt.plot([0, 8000], [0.0, regCont.coef_ * 8000], linestyle='--', color='black')
plt.plot([0, 4000], [0.0, regTreat.coef_ * 4000], linestyle='--', color='white');
plt.plot([4000, 8000], [regTreat.coef_[0] * 4000.0, regTreat.coef_[0] * 8000], linestyle='--', color='black');
ax.set_title("TMM normalized expression data");
plt.ylim((-500, 8000))
plt.xlim((-500, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
rotation_matrix = compute_rot_mat(correction_theta, regTreat.coef_)
new_treat = np.array([np.dot(rotation_matrix, normed_df[['D1_Treat', 'mean']].values[i, :]) for i in range(len(normed_df))])
corr_df = normed_df.copy()
corr_df.loc[:, 'D1_Treat'] = new_treat[:, 0]
# corr_df.loc[:, 'mean'] = normed_df['mean'].values
corr_df.loc[:, 'mean'] = new_treat[:, 1]
fig, ax = plt.subplots()
normed_df.plot('mean', 'D1_Cont', kind='scatter', xlim=(0, 20000), ylim=(0, 20000), ax=ax, color='blue', alpha=0.4)
corr_df.plot('mean', 'D1_Treat', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4, s=10);
ax.set_title("With TMM, linearly transformed");
plt.ylim((-500, 15000))
plt.xlim((-500, 15000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
data_copy = data.copy()[['D1_Cont', 'D1_Treat']]
percentiles = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 0.95, 0.99]
data_copy.describe(percentiles=percentiles)
data_copy2 = data_copy[(data_copy.D1_Cont != 0) & (data_copy.D1_Treat != 0)]
(data_copy2.D1_Cont - data_copy2.D1_Treat).abs().describe(percentiles=percentiles)
(data_copy2.D1_Cont - data_copy2.D1_Treat).abs().describe(percentiles=percentiles).loc['80%']
data_copy3 = (data_copy2.D1_Cont - data_copy2.D1_Treat).abs()
sns.boxplot(data_copy3[data_copy3 < 500]);
data_copy2.head()
# load the data container_obj
config = '../examples/example_config.ini'
config_obj = config_parser(config)
container_obj = DataContainer(config_obj)
data, ercc_data = container_obj.parse_input()
data = container_obj.normalize_file_pairs(data) # Single df of normalized data
#----------------------------------------------------------
data_copy = data.copy()
data_copy.loc[:, 'mean'] = data_copy.mean(axis=1)
data_copy = data_copy[(data_copy.D1_Cont != 0) & (data_copy.D1_Treat != 0)]
data_copy.loc[:, 'abs_diff'] = (data_copy.D1_Cont - data_copy.D1_Treat).abs()
cutoff = (data_copy.D1_Cont - data_copy.D1_Treat).abs().describe(percentiles=percentiles).loc['80%']
data_copy = data_copy[data_copy['abs_diff'] < cutoff]
regCont = LinearRegression(fit_intercept=True)
regTreat = LinearRegression(fit_intercept=True)
regCont.fit(data_copy['D1_Cont'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
regTreat.fit(data_copy['D1_Treat'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
#----------------------------------------------------------
normed_df = data.copy()
normed_df = normed_df[['D1_Cont', 'D1_Treat']].copy()
normed_df.loc[:, 'mean'] = normed_df.mean(axis=1)
normed_df['D1_Cont'] = normed_df['D1_Cont'] - regCont.intercept_
normed_df['D1_Treat'] = normed_df['D1_Treat'] - regTreat.intercept_
fig, ax = plt.subplots()
normed_df.plot('mean', 'D1_Cont', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='blue', alpha=0.4, label='Control')
normed_df.plot('mean', 'D1_Treat', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='red', alpha=0.4, label='Treated')
# plot regression lines, with color switch!
plt.plot([0, 8000], [0.0, regCont.coef_ * 8000], linestyle='--', color='black')
plt.plot([0, 4000], [0.0, regTreat.coef_ * 4000], linestyle='--', color='white');
plt.plot([4000, 8000], [regTreat.coef_[0] * 4000.0, regTreat.coef_[0] * 8000], linestyle='--', color='black');
ax.set_title("TMM normalized expression data");
plt.ylim((-500, 8000))
plt.xlim((-500, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
rotation_matrix = compute_rot_mat(correction_theta)
regCont.coef_, regTreat.coef_
normed_df.head()
new_treat = np.array([np.dot(rotation_matrix, normed_df[['D1_Cont', 'mean']].values[i, :]) for i in range(len(normed_df.values))])
rotated = pd.DataFrame(new_treat, columns=['Cont_cor', 'mean_cor'], index=normed_df.index)
fig, ax = plt.subplots()
rotated.plot('mean_cor', 'Cont_cor', kind='scatter', xlim=(0, 20000), ylim=(0, 20000), ax=ax, color='blue', alpha=0.4)
normed_df.plot('mean', 'D1_Treat', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4);
ax.set_title("No TMM, linearly transformed")
plt.ylim((-1000, 8000))
plt.xlim((-1000, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
# load the data container_obj
config = '../examples/example_config.ini'
config_obj = config_parser(config)
container_obj = DataContainer(config_obj)
data, ercc_data = container_obj.parse_input()
data = container_obj.normalize_file_pairs(data) # Single df of normalized data
#----------------------------------------------------------
data_copy = data.copy()
data_copy.loc[:, 'mean'] = data_copy.mean(axis=1)
data_copy = data_copy[(data_copy['mean'] > 100) & (data_copy['mean'] < 500)]
regCont = LinearRegression(fit_intercept=True)
regTreat = LinearRegression(fit_intercept=True)
regCont.fit(data_copy['D1_Cont'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
regTreat.fit(data_copy['D1_Treat'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
#----------------------------------------------------------
normed_df = data.copy()
normed_df = normed_df[['D1_Cont', 'D1_Treat']].copy()
normed_df.loc[:, 'mean'] = normed_df.mean(axis=1)
normed_df['D1_Cont'] = normed_df['D1_Cont'] - regCont.intercept_
normed_df['D1_Treat'] = normed_df['D1_Treat'] - regTreat.intercept_
fig, ax = plt.subplots()
normed_df.plot('mean', 'D1_Cont', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='blue', alpha=0.4, label='Control')
normed_df.plot('mean', 'D1_Treat', kind='scatter',
xlim=(0, 5000), ylim=(0, 8000), ax=ax, color='red', alpha=0.4, label='Treated')
# plot regression lines, with color switch!
plt.plot([0, 8000], [0.0, regCont.coef_ * 8000], linestyle='--', color='black')
plt.plot([0, 4000], [0.0, regTreat.coef_ * 4000], linestyle='--', color='white');
plt.plot([4000, 8000], [regTreat.coef_[0] * 4000.0, regTreat.coef_[0] * 8000], linestyle='--', color='black');
ax.set_title("TMM normalized expression data");
plt.ylim((-500, 8000))
plt.xlim((-500, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
rotation_matrix = compute_rot_mat(correction_theta)
np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_)
normed_df.head()
coefficients = [float(np.squeeze(regCont.coef_)), float(np.squeeze(regTreat.coef_))]
coefficients
new_treat = np.array([np.dot(rotation_matrix, normed_df[['D1_Treat', 'mean']].values[i, :]) for i in range(len(normed_df.values))])
rotated = pd.DataFrame(new_treat, columns=['Treat_cor', 'mean_cor'], index=normed_df.index)
fig, ax = plt.subplots()
rotated.plot('mean_cor', 'Treat_cor', kind='scatter', xlim=(0, 20000), ylim=(0, 20000), ax=ax, color='blue', alpha=0.4)
normed_df.plot('mean', 'D1_Cont', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4);
ax.set_title("TMM, linearly transformed, full corretion")
plt.ylim((-1000, 8000))
plt.xlim((-1000, 8000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
# load the data container_obj
config = '../examples/example_config.ini'
config_obj = config_parser(config)
container_obj = DataContainer(config_obj)
data, ercc_data = container_obj.parse_input()
data = container_obj.normalize_file_pairs(data) # Single df of normalized data
#----------------------------------------------------------
data_copy = data.copy()
data_copy.loc[:, 'mean'] = data_copy.mean(axis=1)
data_copy = data_copy[(data_copy['mean'] > 100) & (data_copy['mean'] < 500)]
regCont = LinearRegression(fit_intercept=True)
regTreat = LinearRegression(fit_intercept=True)
regCont.fit(data_copy['D1_Cont'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
regTreat.fit(data_copy['D1_Treat'].values.reshape(-1, 1), data_copy['mean'].values.reshape(-1, 1))
#----------------------------------------------------------
normed_df = data.copy()
normed_df = normed_df[['D1_Cont', 'D1_Treat']].copy()
normed_df.loc[:, 'mean'] = normed_df.mean(axis=1)
normed_df['D1_Cont'] = normed_df['D1_Cont'] - regCont.intercept_
normed_df['D1_Treat'] = normed_df['D1_Treat'] - regTreat.intercept_
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
rotation_matrix = compute_rot_mat(correction_theta)
new_treat = np.array([np.dot(rotation_matrix, normed_df[['D1_Treat', 'mean']].values[i, :]) for i in range(len(normed_df.values))])
rotated = pd.DataFrame(new_treat, columns=['Treat_cor', 'mean_cor'], index=normed_df.index)
rotated.loc[:, 'mean'] = normed_df['mean']
fig, ax = plt.subplots()
rotated.plot('mean', 'Treat_cor', kind='scatter', xlim=(0, 20000), ylim=(0, 20000), ax=ax, color='blue', alpha=0.4)
normed_df.plot('mean', 'D1_Cont', kind='scatter', xlim=(0, 5000), ylim=(0, 5000), ax=ax, color='red', alpha=0.4);
ax.set_title("TMM, linearly transformed")
plt.ylim((-1000, 18000))
plt.xlim((-1000, 18000));
plt.axvline(0, linestyle='--')
plt.axhline(0, linestyle='--')
plt.legend();
correction_theta = calc_theta(np.squeeze(regCont.coef_), np.squeeze(regTreat.coef_))
coefs = list(map(float, [np.squeeze(regTreat.coef_), np.squeeze(regCont.coef_)]))
coefs
calc_theta(*coefs)
normed_df.columns[np.argmin(coefs)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, collect data from a public email archive.
Step2: Let's check the number of threads in this mailing list corpus
Step3: We can plot the number of people participating in each thread.
Step4: The duration of a thread is the amount of elapsed time between its first and last message.
Step5: Let's take a look at the largest threads!
Step6: Export the content of each thread into a .csv file (named
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.archive import Archive
from bigbang.archive import load as load_archive
from bigbang.thread import Thread
from bigbang.thread import Node
from bigbang.utils import remove_quoted
import matplotlib.pyplot as plt
import datetime
import pandas as pd
import csv
from collections import defaultdict
import os
#Insert a list of archive names
archives_names = ["ietf"]
cwd = os.getcwd()
archives_paths = list()
for archive_name in archives_names:
archives_paths.append('../../archives/'+archive_name+'.csv')
archives_list = [load_archive(archive_path).data for archive_path in archives_paths]
archives = Archive(pd.concat(archives_list))
archives_data = archives.data
print len(archives.get_threads())
n_people = [t.get_num_people() for t in archives.get_threads()]
plt.hist(n_people, bins = 20)
plt.xlabel('number of email-address in a thread')
plt.show()
duration = [t.get_duration().days for t in archives.get_threads()]
plt.hist(duration, bins = (10))
plt.xlabel('duration of a thread(days)')
plt.show()
# Set how many thread subjects you want to display
n_top = 5
threads_emailcount = defaultdict(int)
for thread in archives.get_threads():
threads_emailcount[thread] = thread.get_num_messages()
for thread, count in sorted(threads_emailcount.iteritems(), reverse = True, key = lambda (k,v):(v,k))[:n_top]:
try:print thread.get_root().data['Subject'] +' ('+str(count)+' emails)'
except: pass
#Insert the participation threshold (number of people)
#(for no threeshold: 'min_participation = 0')
min_participation = 0
#Insert the duration threshold (number of days)
#(for no threeshold: 'min_duration = 0')
min_duration = 0
i = 0
for thread in archives.get_threads():
if thread.get_num_people() >= min_participation and thread.get_duration().days >= min_duration:
i += 1
f = open(cwd+'/thread_'+str(i)+'.csv', "wb")
f_w = csv.writer(f)
f_w.writerow(thread.get_content())
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now I'll add a column to represent the priors
Step2: And a column for the likelihoods
Step3: Here we see a difference from the previous method
Step4: I call the result unnorm because these values are the "unnormalized posteriors". Each of them is the product of a prior and a likelihood
Step5: Notice that we get 5/8, which is what we got by computing $P(D)$ directly.
Step6: The posterior probability for Bowl 1 is 0.6, which is what we got using Bayes's Theorem explicitly.
Step7: I'll use fractions to represent the prior probabilities and the likelihoods. That way they don't get rounded off to floating-point numbers.
Step9: Once you have priors and likelihoods, the remaining steps are always the same, so I'll put them in a function
Step10: And call it like this.
Step11: Here is the final Bayes table
Step12: The posterior probability of the 6-sided die is 4/9, which is a little more than the probabilities for the other dice, 3/9 and 2/9.
Step13: The data is that Monty opened Door 3 and revealed a goat. So let's
Step14: Now that we have priors and likelihoods, we can use update to compute the posterior probabilities.
Step15: After Monty opens Door 3, the posterior probability of Door 1 is $1/3$;
Step16: Exercise
Step17: Exercise
Step18: Exercise
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
table = pd.DataFrame(index=['Bowl 1', 'Bowl 2'])
table['prior'] = 1/2, 1/2
table
table['likelihood'] = 3/4, 1/2
table
table['unnorm'] = table['prior'] * table['likelihood']
table
prob_data = table['unnorm'].sum()
prob_data
table['posterior'] = table['unnorm'] / prob_data
table
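# Illustrative cross-check (not part of the original table workflow): applying Bayes's
# Theorem directly for Bowl 1 should reproduce the posterior computed above,
# P(Bowl 1 | vanilla) = P(Bowl 1) * P(vanilla | Bowl 1) / P(vanilla) = (1/2)(3/4)/(5/8) = 0.6
(1/2 * 3/4) / prob_data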
table2 = pd.DataFrame(index=[6, 8, 12])
from fractions import Fraction
table2['prior'] = Fraction(1, 3)
table2['likelihood'] = Fraction(1, 6), Fraction(1, 8), Fraction(1, 12)
table2
def update(table):
    """Compute the posterior probabilities."""
table['unnorm'] = table['prior'] * table['likelihood']
prob_data = table['unnorm'].sum()
table['posterior'] = table['unnorm'] / prob_data
return prob_data
prob_data = update(table2)
table2
table3 = pd.DataFrame(index=['Door 1', 'Door 2', 'Door 3'])
table3['prior'] = Fraction(1, 3)
table3
table3['likelihood'] = Fraction(1, 2), 1, 0
table3
update(table3)
table3
# Solution
table4 = pd.DataFrame(index=['Normal', 'Trick'])
table4['prior'] = 1/2
table4['likelihood'] = 1/2, 1
update(table4)
table4
# Solution
table5 = pd.DataFrame(index=['GG', 'GB', 'BG', 'BB'])
table5['prior'] = 1/4
table5['likelihood'] = 1, 1, 1, 0
update(table5)
table5
# Solution
# If the car is behind Door 1, Monty would always open Door 2
# If the car was behind Door 2, Monty would have opened Door 3
# If the car is behind Door 3, Monty would always open Door 2
table6 = pd.DataFrame(index=['Door 1', 'Door 2', 'Door 3'])
table6['prior'] = 1/3
table6['likelihood'] = 1, 0, 1
update(table6)
table6
# Solution
# If the car is behind Door 1, Monty would have opened Door 2
# If the car is behind Door 2, Monty would always open Door 3
# If the car is behind Door 3, Monty would have opened Door 2
table7 = pd.DataFrame(index=['Door 1', 'Door 2', 'Door 3'])
table7['prior'] = 1/3
table7['likelihood'] = 0, 1, 0
update(table7)
table7
# Solution
# Hypotheses:
# A: yellow from 94, green from 96
# B: yellow from 96, green from 94
table8 = pd.DataFrame(index=['A', 'B'])
table8['prior'] = 1/2
table8['likelihood'] = 0.2*0.2, 0.14*0.1
update(table8)
table8
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List all available magics
Step2: Specify the directory where you have put the Matlab routines CO2SYS.m, errors.m, and derivnum.m.
Step3: Note
Step4: Hint
Step5: 2.2 Partial derivatives with respect to par1 (specified above as ALK)
Step6: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step7: 2.2 Partial derivatives with respect to par2 (specified above as DIC)
Step8: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step9: 2.3 Partial derivatives with respect to Temperature
Step10: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step11: 2.4 Partial derivatives with respect to Salinity
Step12: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step13: 2.1 Partial derivatives with respect to $K_1$
Step14: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step15: 2.6 Partial derivatives with respect to $K_A$ (solubility product for aragonite)
Step16: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step17: 3. Compute sentitivities with derivnum for a 2nd input pair (pH-$A_\text{T}$)
Step18: 3.2 Partial derivatives with respect to H+ (since par1 is pH)
Step19: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step20: 3.3 Partial derivatives with respect to par2 (specified above as ALK)
Step21: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step22: 3.4 Partial derivatives with respect to T
Step23: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step24: 4. Compute sentitivities with derivnum for a 3rd input pair (pCO2-$C_\text{T}$)
Step25: 4.2 Partial derivatives with respect to par1 (specified as pCO2 above)
Step26: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step27: 4.3 Partial derivatives with respect to par2 (specified above as DIC)
Step28: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step29: 4.4 Partial derivatives with respect to T
Step30: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step31: 4.5 Partial derivatives with respect to S
Step32: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step33: 5. Compute sentitivities with derivnum for a 4th input pair (pCO2-$A_\text{T}$)
Step34: 5.2 Partial derivatives with respect to par1 (specified as pCO2 above)
Step35: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step36: 5.3 Partial derivatives with respect to par2 (specified above as ALK)
Step37: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step38: 5.4 Partial derivatives with respect to T
Step39: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step40: 5.5 Partial derivatives with respect to S
Step41: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step42: 6. Compute sentitivities with derivnum for a 4th input pair (pCO2-pH)
Step43: 6.2 Partial derivatives with respect to par1 (specified as pCO2 above)
Step44: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step45: 6.3 Partial derivatives with respect to par2 (specified above as pH)
Step46: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step47: 6.4 Partial derivatives with respect to T
Step48: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
Step49: 6.5 Partial derivatives with respect to S
Step50: Test to see if same results for OUT conditions when TEMPOUT = TEMPIN
|
<ASSISTANT_TASK:>
Python Code:
%load_ext oct2py.ipython
%lsmagic
%%octave
addpath ("~/Software/MATLAB/CO2SYS-MATLAB/src")
%%octave
help derivnum
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables:
PAR1 = 2300; % ALK
PAR2 = 2000; % DIC
PAR1TYPE = 1; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 2; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissoloved inorganic Phosphorus (Pt)
# SI = 0; % Total dissolved inorganic silicon (Sit)
# Input Parameters:
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15= Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('K1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('K1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('Kspa', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('Kspa', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables:
PAR1 = 8.1; % pH
PAR2 = 2300; % ALK
PAR1TYPE = 3; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 1; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
#TEMPOUT = TEMPIN;
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissoloved inorganic Phosphorus (Pt)
# Input Parameters:
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 15; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, PRESIN, PRESOUT,...
SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables:
PAR1 = 400; % pCO2
PAR2 = 2300; % DIC
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 2; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissoloved inorganic Phosphorus (Pt)
# Input Parameters:
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 15; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables:
PAR1 = 400; % pCO2
PAR2 = 2300; % ALK
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 1; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissoloved inorganic Phosphorus (Pt)
# Input Parameters:
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
# Standard input for CO2SYS:
# --------------------------
# Input Variables:
PAR1 = 400; % pCO2
PAR2 = 8.0; % pH
PAR1TYPE = 4; % 1=ALK, 2=DIC, 3=pH, 4=pCO2, 5=fCO2
PAR2TYPE = 3; % Same 5 choices as PAR1TYPE
SAL = 35; % Salinity
TEMPIN = 18; % Temperature (input)
TEMPOUT = 25; % Temperature (output)
PRESIN = 0; % Pressure (input)
PRESOUT = PRESIN; % Pressure (output)
SI = 60; % Total dissolved inorganic silicon (Sit)
PO4 = 2; % Total dissoloved inorganic Phosphorus (Pt)
# Input Parameters:
pHSCALEIN = 1; % pH scale (1=total, 2=seawater, 3=NBS, 4=Free)
K1K2CONSTANTS = 10; % set for K1 & K2: 10=Lueker et al. (2000); 14=Millero (2010); 15=Waters et al. (2014)
KSO4CONSTANTS = 1; % KSO4 of Dickson (1990a) & Total dissolved boron (Bt) from Uppstrom (1974)
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par1', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('par2', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN, ...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('T', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPOUT,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{1:9});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{1:9});
printf("%f %f %f %f %f %f %f %f %f \n", b(1:9));
%%octave
[b, bhead, bunits] = derivnum ('S', PAR1, PAR2, PAR1TYPE, PAR2TYPE, SAL, TEMPIN, TEMPIN,...
PRESIN, PRESOUT, SI, PO4,...
pHSCALEIN, K1K2CONSTANTS, KSO4CONSTANTS);
# Print (nicely formatted):
printf("%s %s %s %s %s %s %s %s %s \n", bhead{10:18});
printf("%s %s %s %s %s %s %s %s %s \n", bunits{10:18});
printf("%f %f %f %f %f %f %f %f %f \n", b(10:18));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Formats
Step2: Nearest Neighbors
Step3: To calculate the distance between a word and its neighbors, we can call the distances method
Step4: The word embeddings are not unit vectors; in fact, the more frequent a word is, the larger the norm of its vector.
Step5: This could be problematic for some applications and training algorithms.
Step6: Vocabulary Expansion
Step7: Not all the words are available in the dictionary defined by the word embeddings.
Step8: We would like to return the vector that represents the word Green; to do that, we apply a case expansion
Step9: Digit Expansion
Step10: To fix that, we apply another type of vocabulary expansion DigitExpander.
Step11: As expected, the neighbors of the new number 434 will be other numbers
|
<ASSISTANT_TASK:>
Python Code:
from polyglot.mapping import Embedding
embeddings = Embedding.load("/home/rmyeid/polyglot_data/embeddings2/en/embeddings_pkl.tar.bz2")
neighbors = embeddings.nearest_neighbors("green")
neighbors
embeddings.distances("green", neighbors)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
norms = np.linalg.norm(embeddings.vectors, axis=1)
window = 300
smooth_line = np.convolve(norms, np.ones(window)/float(window), mode='valid')
plt.plot(smooth_line)
plt.xlabel("Word Rank"); _ = plt.ylabel("$L_2$ norm")
embeddings = embeddings.normalize_words()
neighbors = embeddings.nearest_neighbors("green")
for w,d in zip(neighbors, embeddings.distances("green", neighbors)):
print("{:<8}{:.4f}".format(w,d))
from polyglot.mapping import CaseExpander, DigitExpander
"GREEN" in embeddings
embeddings.apply_expansion(CaseExpander)
"GREEN" in embeddings
embeddings.nearest_neighbors("GREEN")
"434" in embeddings
embeddings.apply_expansion(DigitExpander)
"434" in embeddings
embeddings.nearest_neighbors("434")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_induced_power
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
label_name = 'Aud-rh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
tmin, tmax, event_id = -0.2, 0.5, 2
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Picks MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
# Load epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject,
preload=True)
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(7, 30, 2) # define frequencies of interest
label = mne.read_label(fname_label)
n_cycles = frequencies / 3. # different number of cycle per frequency
# subtract the evoked response in order to exclude evoked activity
epochs_induced = epochs.copy().subtract_evoked()
plt.close('all')
for ii, (this_epochs, title) in enumerate(zip([epochs, epochs_induced],
['evoked + induced',
'induced only'])):
# compute the source space power and the inter-trial coherence
power, itc = source_induced_power(
this_epochs, inverse_operator, frequencies, label, baseline=(-0.1, 0),
baseline_mode='percent', n_cycles=n_cycles, n_jobs=1)
power = np.mean(power, axis=0) # average over sources
itc = np.mean(itc, axis=0) # average over sources
times = epochs.times
##########################################################################
# View time-frequency plots
plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 2, 2 * ii + 1)
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=30., cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % title)
plt.colorbar()
plt.subplot(2, 2, 2 * ii + 2)
plt.imshow(itc,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=0.7,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('ITC (%s)' % title)
plt.colorbar()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define helper functions
Step2: Network parameters
Step3: Training and validation images are now in tiny-imagenet-200/train and tiny-imagenet-200/val.
Step4: Plot some random batch
Step5: Define network
Step6: Instantiate it
Step7: Move to GPU (if necessary)
Step8: Restore if it is needed
Step9: Train network
Step10: Plot val loss, train loss and val acc, train acc
Step11: Do not run the cell below unless you know what to do!
Step12: When everything is done, please compute accuracy on the validation set and report it below.
Step13: Check test dataset
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torch.utils.data as data
from torchvision import transforms, utils
import torch.nn as nn
import torch.optim as optim
import time
%matplotlib inline
import tiny_imagenet
tiny_imagenet.download(".")
def show_batch(sample_batched):
plt.figure(figsize=(10,10))
grid = utils.make_grid(sample_batched[0], padding=2, normalize=True)
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.title('Batch from dataloader')
plt.axis('off')
plt.ioff()
plt.show()
def save_checkpoint(state, filename):
torch.save(state, filename)
def accuracy(model, images, labels):
with torch.no_grad():
        labels_pred = model(images)  # nn.Module instances are called directly; they have no .predict()
numbers = labels_pred.argmax(dim=-1)
return (numbers == labels).float().mean()
def make_layers(cfg, batch_norm=True):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
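# Small illustration (added for clarity; the cfg below is just a demo, not used later):
# in these cfg lists an integer means a 3x3 convolution with that many output channels
# (followed by BatchNorm and ReLU when batch_norm=True) and 'M' means a 2x2 max-pooling layer.
demo_layers = make_layers([8, 'M', 16, 'M'])
print(demo_layers)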
EPOCHS = 50
TRAIN_DATA_PATH = "tiny-imagenet-200/train/"
VAL_DATA_PATH = "tiny-imagenet-200/val/"
BATCH_SIZE = 64
TRANSFORM_IMG = transforms.Compose([
#transforms.Resize(64),
#transforms.CenterCrop(32),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(degrees=90),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] )
])
TRANSFORM_IMG_VAL = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] )
])
train_data = torchvision.datasets.ImageFolder(root=TRAIN_DATA_PATH,
transform=TRANSFORM_IMG)
train_data_loader = data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
val_data = torchvision.datasets.ImageFolder(root=VAL_DATA_PATH,
transform=TRANSFORM_IMG_VAL)
val_data_loader = data.DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
iterator = iter(train_data_loader)
show_batch(iterator.next())
class Net(nn.Module):
def __init__(self, features, num_classes=200, init_weights=True):
super(Net, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((2, 2))
self.classifier = nn.Sequential(
nn.Linear(512 * 2 * 2, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
layers = make_layers([64, 64, 'M',\
128, 128, 'M',\
256, 256, 256, 256, 'M',\
512, 512, 512, 512, 'M',\
512, 512, 512, 512, 'M'])
net = Net(layers)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
loss_history = np.zeros(EPOCHS)
acc_history = np.zeros(EPOCHS)
train_loss_history = np.zeros(EPOCHS)
train_acc_history = np.zeros(EPOCHS)
ngpu = 2
#device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print("Cuda is available")
net = net.cuda()
if (ngpu > 1):
net = nn.DataParallel(net, range(ngpu))
else:
print('Cuda is not available')
net = net.cpu()
WEIGHTS_PATH = './weights/model_best_lr01_sgd_50e.pth.tar'
if (torch.cuda.is_available()):
checkpoint = torch.load(f=WEIGHTS_PATH)
else:
    # wrap in DataParallel so the 'module.' prefix in the checkpoint keys matches
    net = nn.DataParallel(net)
    checkpoint = torch.load(map_location='cpu', f=WEIGHTS_PATH)
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
last_epoch = checkpoint['epoch']
acc_history = checkpoint['acc_history']
loss_history = checkpoint['loss_history']
train_loss_history = checkpoint['train_loss_history']
train_acc_history = checkpoint['train_acc_history']
count = 0
running_loss = 0
net.zero_grad()
for epoch in range(EPOCHS):
    # Skip epochs that have already been trained (e.g. restored from a checkpoint)
    if train_loss_history[epoch] > 0.1:
print("###############################################")
print("Epoch: " + str(epoch))
print("Train Loss: ", round(train_loss_history[epoch], 2))
if (loss_history[epoch] > 0.1):
print("Train Accuracy: {}%".format(round(train_acc_history[epoch], 2)))
print('Valid Loss: {}'.format(round(loss_history[epoch],2)))
print('Valid Accuracy: {}%'.format(acc_history[epoch]))
continue
# otherwise train
start = time.time()
net.train()
# Train on training dataset
for steps, (x_batch, y_batch) in enumerate(train_data_loader):
x_batch = x_batch.cuda()
y_batch = y_batch.cuda()
optimizer.zero_grad()
predicted = net(x_batch)
loss = criterion(predicted, y_batch)
loss.backward()
optimizer.step()
running_loss = loss.item()
end = time.time()
    train_loss_history[epoch] = loss.item()  # store the scalar value, not the tensor
print("###############################################")
print("Epoch: " + str(epoch) + ", Time: " + str(round(end-start, 2)) + "s")
print("Train loss: " + str(round(running_loss,2)))
    # Validate the model every fifth epoch
if (epoch % 5 != 0):
continue
with torch.no_grad():
# compute train acc
correct = 0
total = 0
for images, labels in train_data_loader:
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
train_acc_history[epoch] = 100 * correct / total
print('Train Accuracy: {}%'.format(100 * correct / total))
net.eval()
# compute val acc and loss
correct = 0
total = 0
for images, labels in val_data_loader:
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
print('Valid Loss: {}'.format(round(loss.item(), 2)))
print('Valid Accuracy: {}%'.format(100 * correct / total))
acc_history[epoch] = 100 * correct / total
        loss_history[epoch] = loss.item()  # store the scalar value, not the tensor
plt.figure(figsize=(14, 7))
plt.title("Training and validation loss", size=16)
plt.xlabel("Epoch", size=16)
plt.ylabel("Loss", size=16)
plt.plot(train_loss_history, 'b', label="training loss")
plt.plot(range(0, EPOCHS, 5), loss_history[0::5], 'g', label="validation loss")
plt.legend()
plt.grid()
plt.show()
plt.figure(figsize=(14, 7))
plt.title("Training and validation accuracy", size=16)
plt.xlabel("Epoch", size=16)
plt.ylabel("Accuracy", size=16)
plt.plot(range(0, EPOCHS, 5), train_acc_history[0::5], 'b', label="training accuracy")
plt.plot(range(0, EPOCHS, 5), acc_history[0::5], 'g', label="validation accuracy")
plt.legend()
plt.grid()
plt.show()
save_checkpoint({
'epoch': epoch,
'state_dict': net.state_dict(),
'best_acc': acc_history[epoch-1],
'best_loss': loss_history[epoch-1],
'optimizer': optimizer.state_dict(),
'acc_history' : acc_history,
'loss_history' : loss_history,
'train_loss_history' : train_loss_history,
'train_acc_history' : train_acc_history
}, './weights/model_best_lr01_sgd_rand.pth.tar')
net.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in val_data_loader:
if torch.cuda.is_available():
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
val_accuracy = correct / total
print("Validation accuracy: %.2f%%" % (val_accuracy * 100))
TEST_DATA_PATH = ""
test_data = torchvision.datasets.ImageFolder(root=TEST_DATA_PATH,
transform=TRANSFORM_IMG_VAL)
test_data_loader = data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)  # use test_data, not val_data
net.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_data_loader:
if torch.cuda.is_available():
images = images.cuda()
labels = labels.cuda()
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
loss = criterion(outputs, labels)
test_accuracy = correct / total
print("Test accuracy: %.2f%%" % (test_accuracy * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic math
Step2: Lists
Step3: Dictionaries
Step4: Commenting your code
Step6: Type coercion
|
<ASSISTANT_TASK:>
Python Code:
# variable assignment
# https://www.digitalocean.com/community/tutorials/how-to-use-variables-in-python-3
# strings -- enclose in single or double quotes, just make sure they match
# numbers
# the print function
# booleans
# addition
# subtraction
# multiplication
# division
# etc.
# create a list: name, hometown, age
# an item's position in the list is the key thing
# create another list of mixed data
# use len() to get the number of items in the list
# use square brackets [] to access items in a list
# (counting starts at zero in Python)
# get the first item
# you can do negative indexing to get items from the end of your list
# get the last item
# Use colons to get a range of items in a list
# get the first two items
# the last number in a list slice is the first list item that's ~not~ included in the result
# if you leave the last number off, it takes the item at the first number's index and everything afterward
# get everything from the third item onward
# Use append() to add things to a list
# Use pop() to remove items from the end of a list
# use join() to join items from a list into a string with a delimiter of your choosing
# Access items in a dictionary using square brackets and the key (typically a string)
# You can also use the `get()` method to retrieve values
# you can optionally provide a second argument as the default value
# if the key doesn't exist (otherwise defaults to `None`)
# Use the .keys() method to get the keys of a dictionary
# Use the .values() method to get the values
# add items to a dictionary using square brackets, the name of the key (typically a string)
# and set the value like you'd set a variable, with =
# delete an item from a dictionary with `del`
# date in m/d/yyyy format
# split out individual pieces of the date
# using a shortcut method to assign variables to the resulting list
# reshuffle as yyyy-mm-dd using .format()
# use a formatting option (:0>2) to left-pad month/day numbers with a zero
# construct a greeting template
# two strings of numbers
# what happens when you add them without coercing?
# coerce to integer, then add them
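# One possible fill-in for the date and coercion exercises above
# (a sketch; the sample date and variable names are our own choices):
date = '4/6/2019'
month, day, year = date.split('/')
iso_date = '{}-{:0>2}-{:0>2}'.format(year, month, day)
print(iso_date)  # => 2019-04-06

num_a, num_b = '4', '8'
print(num_a + num_b)  # => '48' -- string concatenation
print(int(num_a) + int(num_b))  # => 12 -- coerced to integers first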
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CGRtools has subpackage containers with data structures classes
Step2: 1.1. MoleculeContainer
Step3: Each structure has additional atoms attributes
Step4: Atom objects are dataclasses which store information about
Step5: Atomic attributes are assignable.
Step6: Bonds are Read-only
Step7: Method delete_atom removes atom from the molecule
Step8: Atoms and bonds objects can be converted into integer representation that could be used to classify their types.
Step9: Connected components.
Step10: Union of molecules
Step11: Substructures could be extracted from molecules.
Step12: augmented_substructure is a substructure consisting from atoms and a given number of shells of neighboring atoms around it.
Step13: Atoms Ordering.
Step14: Atom number can be changed by remap method.
Step15: 1.2. ReactionContainer
Step16: Reactions also has standardize, kekule, thiele, implicify_hydrogens, explicify_hydrogens, etc methods (see part 3). These methods are applied independently to every molecule in reaction.
Step17: For CGRContainer attributes charge, is_radical, neighbors and hybridization refer to atom state in reactant of reaction; arguments p_charge, p_is_radical, p_neighbors and p_hybridization could be used to extract atom state in product part in reaction.
Step18: Bonds has order and p_order attribute
Step19: CGR can be decomposed back to reaction, i.e. reactants and products.
Step20: For decomposition of CGRContainer back into ReactionContainer ReactionContainer.from_cgr constructor method can be used.
Step21: You can see that water absent in products initially was restored.
Step22: 1.4 Queries
Step23: CGRs also can be transformed into Query.
Step24: 1.5. Molecules, CGRs, Reactions construction
Step25: Reactions can be constructed from molecules.
Step26: QueryContainers can be constructed in the same way as MoleculeContainers.
Step27: 1.6. Extending CGRtools
|
<ASSISTANT_TASK:>
Python Code:
import pkg_resources
if pkg_resources.get_distribution('CGRtools').version.split('.')[:2] != ['4', '0']:
print('WARNING. Tutorial was tested on 4.0 version of CGRtools')
else:
print('Welcome!')
# load data for tutorial
from pickle import load
from traceback import format_exc
with open('molecules.dat', 'rb') as f:
molecules = load(f) # list of MoleculeContainer objects
with open('reactions.dat', 'rb') as f:
reactions = load(f) # list of ReactionContainer objects
m1, m2, m3, m4 = molecules # molecule
m7 = m3.copy()
m11 = m3.copy()
m11.standardize()
m7.standardize()
r1 = reactions[0] # reaction
m5 = r1.reactants[0]
m8 = m7.substructure([4, 5, 6, 7, 8, 9])
m10 = r1.products[0].copy()
from CGRtools.containers import * # import all containers
m1.meta # dictionary for molecule properties storage. For example, DTYPE/DATUM fields of SDF file are read into this dictionary
m1 # MoleculeContainer supports depiction and graphic representation in Jupyter notebooks.
m1.depict() # depiction returns SVG image in format string
with open('molecule.svg', 'w') as f: # saving image to SVG file
f.write(m1.depict())
m_copy = m1.copy() # copy of molecule
m_copy
len(m1) # get number of atoms in molecule
# or
m1.atoms_count
m1.bonds_count # number of bonds
m1.atoms_numbers # list of atoms numbers
# iterate over atoms using its numbers
list(m1.atoms()) # works the same as dict.items()
# iterate over bonds using adjacent atoms numbers
list(m1.bonds())
# access to atom by number
m1.atom(1)
try:
m1.atom(10) # raise error for absent atom numbers
except KeyError:
print(format_exc())
# access to bond using adjacent atoms numbers
m1.bond(1, 4)
try:
m1.bond(1, 3) # raise error for absent bond
except KeyError:
print(format_exc())
a = m1.atom(1)
# access to information
a.atomic_symbol # element symbol
a.charge # formal charge
a.is_radical # atom radical state
a.isotope # atom isotope. Default isotope if not set. Default isotopes are the same as used in InChI notation
a.x # coordinates
a.y
#or
a.xy
a.neighbors # Number of neighboring atoms. It is read-only.
a.hybridization # Atoms hybridization. It is read-only.
try:
a.hybridization = 2 # Not assignable. Read-only! Thus error is raised.
except AttributeError:
print(format_exc())
a.charge = 1
m1
a.charge = 0
a.is_radical = True
m1
# bond objects also are data-like classes which store information about bond order
b = m1.bond(3, 4)
b.order
try:
b.order = 1 # order change not possible
except AttributeError:
print(format_exc())
m1.delete_bond(3, 4)
m1
m1.delete_atom(3)
m1
m_copy # copy unchanged!
int(a)
# 61705 == 000001111 0001000 0100 1
# 000001111 == 15 isotope
# 0001000 == 8 Oxygen
# 0100 == 4 (4 - 4 = 0) uncharged
# 1 == 1 is radical
int(b) # bonds are encoded by their order
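# Decoding the packed integer by hand (a sketch based on the bit layout
# described in the comments above; not a public CGRtools API):
packed = int(a)
is_rad = packed & 0b1  # lowest bit: radical flag
charge = ((packed >> 1) & 0b1111) - 4  # next 4 bits: charge shifted by +4
element = (packed >> 5) & 0b1111111  # next 7 bits: atomic number
isotope = packed >> 12  # remaining high bits: isotope
print(isotope, element, charge, is_rad)  # 15 8 0 1 for the oxygen above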
a = m_copy.atom(1)
print(a.implicit_hydrogens) # get number of implicit hydrogens on atom 1
print(a.explicit_hydrogens) # get number of explicit hydrogens on atom 1
print(a.total_hydrogens) # get total number of hydrogens on atom 1
m1
m1.check_valence() # return list of numbers of atoms with invalid valences
m4 # molecule with valence errors
m4.check_valence()
m3
m3.sssr # Method for application of Smallest Set of Smallest Rings algorithm for rings
# identification. Returns tuple of tuples of atoms forming smallest rings
m2 # it's a salt represented as one graph
m2.connected_components # tuple of tuples of atoms belonging to graph components
anion, cation = m2.split() # split molecule to components
anion # graph of only one salt component
cation # graph of only one salt component
salt = anion | cation
# or
salt = anion.union(cation)
salt # this graph has disconnected components, it is considered single compound now
sub = m3.substructure([4,5,6,7,8,9]) # substructure with passed atoms
sub
aug = m3.augmented_substructure([10], deep=2) # atom 10 is Nitrogen
aug
m5.atoms_order
m5
remapped = m5.remap({4:2}, copy=True)
remapped
r1 # depiction supported
r1.meta
print(r1.reactants, r1.products) # Access to lists of reactant and products.
reactant1, reactant2, reactant3 = r1.reactants
product = r1.products[0]
cgr1 = m7 ^ m8 # CGR from molecules
# or
cgr1 = m7.compose(m8)
print(cgr1)
cgr1
r1
cgr2 = ~r1 # CGR from reactions
# or
cgr2 = r1.compose()
print(cgr2) # signature is printed out.
cgr2.clean2d()
cgr2
a = cgr2.atom(2) # atom access is the same as for MoleculeContainer
a.atomic_symbol # element attribute
a.isotope # isotope attribute
a.charge # charge of atom in reactant
a.p_charge # charge of atom in product
a.p_is_radical # radical state of atom in product.
a.neighbors # number of neighbors of atom in reactant
a.p_neighbors # number of neighbors of atom in product
a.hybridization # hybridization of atom in reactant. 1 means only single bonds are incident to atom
a.p_hybridization # hybridization of atom in product. 1 means only single bonds are incident to atom
b = cgr1.bond(4, 10) # take bond
b.order # bond order in reactant
b.p_order is None # bond order in product in None
reactant_part, product_part = ~cgr1 # CGR of unbalanced reaction is decomposed back into reaction
# or
reactant_part, product_part = cgr1.decompose()
reactant_part # reactants extracted. One can notice it is initial molecule
product_part #extracted products. Originally benzene was the product.
decomposed = ReactionContainer.from_cgr(cgr2)
decomposed.clean2d()
decomposed
r1 # compare with initial reaction
from CGRtools.containers import *
m10 # ether
carb = m10.substructure([5,7,2], as_query=True) # extract of carboxyl fragment
print(carb)
carb
cgr_q = cgr1.substructure(cgr1, as_query=True) # transfrom CGRContainer into QueryCGRContainer
#or
cgr_q = QueryCGRContainer() | cgr1 # Union of Query container with CGR or Molecule gives QueryCGRContainer
print(cgr_q) # print out signature of query
cgr_q
from CGRtools.containers import MoleculeContainer
from CGRtools.containers.bonds import Bond
from CGRtools.periodictable import Na
m = MoleculeContainer() # new empty molecule
m.add_atom('C') # add Carbon atom using element symbol
m.add_atom(6) # add Carbon atom using its atomic number; an element symbol string like 'O' is equally acceptable
m.add_atom('O', charge=-1) # add negatively charged Oxygen atom. Similarly other atomic properties can be set
# add_atom has second argument for setting atom number.
# If not set, the next integer after the biggest among already created will be used.
m.add_atom(Na(23), 4, charge=1) # For isotopes required element object construction.
m.add_bond(1, 2, 1) # add bond with order = 1 between atoms 1 and 2
m.add_bond(3, 2, Bond(1)) # the other possibility to set bond order
m.clean2d() #experimental function to calculate atom coordinates. Has number of flaws yet
m
r = ReactionContainer(reactants=[m1], products=[m11]) # one-step way to construct reaction
# or
r = ReactionContainer([m1], [m11]) # first list of MoleculeContainers is interpreted as reactants, second one - as products
r
r.fix_positions() # this method fixes coordinates of molecules in reaction without calculation of atoms coordinates.
r
q = QueryContainer() # creation of empty container
q.add_atom('N') # add N atom, any isotope, not radical, neutral charge,
# number of neighbors and hybridization are irrelevant
q.add_atom('C', neighbors=[2, 3], hybridization=2) # add carbon atom, any isotope, not radical, neutral charge,
# has 2 or 3 explicit neighbors and sp2 hybridization
q.add_atom('O', neighbors=1)
q.add_bond(1, 2, 1) # add single bond between atom 1 and 2
q.add_bond(2, 3, 2) # add double bond between atom 1 and 2
# any amide group will fit this query
print(q) # print out signature (SMILES-like)
q.clean2d()
q
from CGRtools.periodictable import Core, C, O
class Marked(Core):
__slots__ = '__mark' # all new attributes should be slotted!
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__mark = None # set default value for added attribute
@property
def mark(self): # created new property
return self.__mark
@mark.setter
def mark(self, mark):
# do some checks and calculations
self.__mark = mark
    def __repr__(self):
        if self.isotope:  # use the public property; self.__isotope would be name-mangled to _Marked__isotope
            return f'{self.__class__.__name__[6:]}({self.isotope})'
return f'{self.__class__.__name__[6:]}()'
@property
def atomic_symbol(self) -> str:
return self.__class__.__name__[6:]
class MarkedC(Marked, C):
pass
class MarkedO(Marked, O):
pass
m = MoleculeContainer() # a standard container; it will hold the custom Marked atoms defined above
m.add_atom(MarkedC()) # add custom atom C
m.add_atom(MarkedO()) # add custom atom O
m.add_bond(1, 2, 1)
m.atom(2).mark = 1 # set mark on atom.
print(m)
m.clean2d()
m
m.atom(2).mark # one can return mark
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the Data
Step2: A Random Forest Model
Step3: Output
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
act_train = pd.read_csv('act_train.csv')
act_test = pd.read_csv('act_test.csv')
people = pd.read_csv('people.csv')
def prepare_acts(data, train_set=True):
data = data.drop(['date', 'activity_id'], axis=1)
if train_set:
data = data.drop(['outcome'], axis=1)
data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
columns = list(data.columns)
for col in columns[1:]:
data[col] = data[col].fillna('type 0')
data[col] = data[col].apply(lambda x: x.split(' ')[1])
data[col] = pd.to_numeric(data[col]).astype(int)
return data
def prepare_people(data):
data = data.drop(['date'], axis=1)
data['people_id'] = data['people_id'].apply(lambda x: x.split('_')[1])
data['people_id'] = pd.to_numeric(data['people_id']).astype(int)
columns = list(data.columns)
bools = columns[11:]
strings = columns[1:11]
for col in bools:
data[col] = pd.to_numeric(data[col]).astype(int)
for col in strings:
data[col] = data[col].fillna('type 0')
data[col] = data[col].apply(lambda x: x.split(' ')[1])
data[col] = pd.to_numeric(data[col]).astype(int)
return data
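# A tiny illustration of the 'type N' -> integer conversion used above
# (a sketch on toy data; the column names here are made up for the demo):
toy = pd.DataFrame({'people_id': ['ppl_100', 'ppl_101'],
                    'char_1': ['type 2', None]})
toy['people_id'] = toy['people_id'].apply(lambda x: x.split('_')[1]).astype(int)
toy['char_1'] = toy['char_1'].fillna('type 0').apply(lambda x: x.split(' ')[1]).astype(int)
print(toy)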
people_prepared = prepare_people(people)
actions_train = prepare_acts(act_train)
actions_test = prepare_acts(act_test, train_set=False)
features = actions_train.merge(people_prepared, how='left', on='people_id')
labels = act_train['outcome']
test = actions_test.merge(people_prepared, how='left', on='people_id')
from sklearn.ensemble import RandomForestClassifier
rfclassifier = RandomForestClassifier()
rfclassifier.fit(features, labels)
test_proba = rfclassifier.predict_proba(test)
test_preds = test_proba[:,1]
test_ids = act_test['activity_id']
output = pd.DataFrame({'activity_id': test_ids, 'outcome': test_preds})
output.to_csv('redhat.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: View Timezones
Step2: Create Timestamp With Time Zone
Step3: Create Timestamp Without Time Zone
Step4: Add Time Zone
Step5: Convert Time Zone
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
import pandas as pd
from pytz import all_timezones
# Show ten time zones
all_timezones[0:10]
# Create datetime
pd.Timestamp('2017-05-01 06:00:00', tz='Europe/London')
# Create datetime
date = pd.Timestamp('2017-05-01 06:00:00')
# Set time zone
date_in_london = date.tz_localize('Europe/London')
# Change time zone
date_in_london.tz_convert('Africa/Abidjan')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using pyplot's imshow to display the image
Step2: Displaying the image inside a bqplot Figure
Step3: Mixing with other marks
Step4: Its traits (attributes) will also respond dynamically to a change from the backend
|
<ASSISTANT_TASK:>
Python Code:
import os
import ipywidgets as widgets
import bqplot.pyplot as plt
from bqplot import LinearScale
image_path = os.path.abspath("../../data_files/trees.jpg")
with open(image_path, "rb") as f:
raw_image = f.read()
ipyimage = widgets.Image(value=raw_image, format="jpg")
ipyimage
plt.figure(padding_y=0)
axes_options = {"x": {"visible": False}, "y": {"visible": False}}
plt.imshow(image_path, "filename")
plt.show()
fig = plt.figure(title="Trees", padding_x=0, padding_y=0)
image = plt.imshow(ipyimage, "widget")
fig
fig = plt.figure(padding_x=0, padding_y=0)
plt.scales(scales={"x": LinearScale(min=-1, max=2), "y": LinearScale(min=-0.5, max=2)})
image = plt.imshow(ipyimage, format="widget")
plt.plot([0, 1, 1, 0, 0], [0, 0, 1, 1, 0], "r")
fig
# Full screen
image.x = [-1, 2]
image.y = [-0.5, 2]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notes from Klein et al 2011 (doi
|
<ASSISTANT_TASK:>
Python Code:
from numpy import pi
from scipy.constants import hbar
# Find the power for a 3 mm diameter gaussian beam with stated intensity:
r = 0.3/2 # units of cm
A = pi*r**2
P = 2.5e-3 * A
P*1e6 # microWatts
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This FASTA file shown above has just one sequence in it. As we saw in the first example above, it's also possible for one FASTA file to contain multiple sequences. These are sometimes called multi-FASTA files. When you write code to interpret FASTA files, it's a good idea to always allow for the possibility that the FASTA file might contain multiple sequences.
Step2: The first part accumulates a list of strings (one per line) for each sequence. The second part joins those lines together so that we end up with one long string per sequence. Why divide it up this way? Mainly to avoid the poor performance of repeatedly concatenating (immutable) Python strings.
Step3: Note that only the short names survive. This is usually fine, but it's not hard to modify the function so that information relating short names to long names is also retained.
Step4: Accessing a substring in this way is very fast and simple. The downside is that you've stored all of the sequences in memory. If the FASTA files are really big, this takes lots of valuable memory. This may or may not be a good trade.
Step5: Here we use it to index a small multi-FASTA file. We print out the index at the end.
Step8: What do the fields in those two records mean? Take the first record
Step9: Here's an example of how to use the class defined above.
|
<ASSISTANT_TASK:>
Python Code:
import gzip
import urllib.request
url = 'ftp://ftp.ncbi.nlm.nih.gov/genomes/archive/old_genbank/Eukaryotes/vertebrates_mammals/Homo_sapiens/GRCh38/non-nuclear/assembled_chromosomes/FASTA/chrMT.fa.gz'
response = urllib.request.urlopen(url)
print(gzip.decompress(response.read()).decode('UTF8'))
def parse_fasta(fh):
fa = {}
current_short_name = None
# Part 1: compile list of lines per sequence
for ln in fh:
if ln[0] == '>':
# new name line; remember current sequence's short name
long_name = ln[1:].rstrip()
current_short_name = long_name.split()[0]
fa[current_short_name] = []
else:
# append nucleotides to current sequence
fa[current_short_name].append(ln.rstrip())
# Part 2: join lists into strings
for short_name, nuc_list in fa.items():
# join this sequence's lines into one long string
fa[short_name] = ''.join(nuc_list)
return fa
from io import StringIO
fasta_example = StringIO(
'''>sequence1_short_name with optional additional info after whitespace
ACATCACCCCATAAACAAATAGGTTTGGTCCTAGCCTTTCTATTAGCTCTTAGTAAGATTACACATGCAA
GCATCCCCGTTCCAGTGAGTTCACCCTCTAAATCACCACGATCAAAAGGAACAAGCATCAAGCACGCAGC
AATGCAGCTCAAAACGCTTAGCCTAGCCACACCCCCACGGGAAACAGCAGTGAT
>sequence2_short_name with optional additional info after whitespace
GCCCCAAACCCACTCCACCTTACTACCAGACAACCTTAGCCAAACCATTTACCCAAATAAAGTATAGGCG
ATAGAAATTGAAACCTGGCGCAATAGATATAGTACCGCAAGGGAAAGATGAAAAATTATAACCAAGCATA
ATATAG''')
parsed_fa = parse_fasta(fasta_example)
parsed_fa
parsed_fa['sequence2_short_name'][100:130]
def index_fasta(fh):
index = []
current_short_name = None
current_byte_offset, running_seq_length, running_byte_offset = 0, 0, 0
line_length_including_ws, line_length_excluding_ws = 0, 0
for ln in fh:
ln_stripped = ln.rstrip()
running_byte_offset += len(ln)
if ln[0] == '>':
if current_short_name is not None:
index.append((current_short_name, running_seq_length,
current_byte_offset, line_length_excluding_ws,
line_length_including_ws))
long_name = ln_stripped[1:]
current_short_name = long_name.split()[0]
current_byte_offset = running_byte_offset
running_seq_length = 0
else:
line_length_including_ws = max(line_length_including_ws, len(ln))
line_length_excluding_ws = max(line_length_excluding_ws, len(ln_stripped))
running_seq_length += len(ln_stripped)
if current_short_name is not None:
index.append((current_short_name, running_seq_length,
current_byte_offset, line_length_excluding_ws,
line_length_including_ws))
return index
fasta_example = StringIO(
'''>sequence1_short_name with optional additional info after whitespace
ACATCACCCCATAAACAAATAGGTTTGGTCCTAGCCTTTCTATTAGCTCTTAGTAAGATTACACATGCAA
GCATCCCCGTTCCAGTGAGTTCACCCTCTAAATCACCACGATCAAAAGGAACAAGCATCAAGCACGCAGC
AATGCAGCTCAAAACGCTTAGCCTAGCCACACCCCCACGGGAAACAGCAGTGAT
>sequence2_short_name with optional additional info after whitespace
GCCCCAAACCCACTCCACCTTACTACCAGACAACCTTAGCCAAACCATTTACCCAAATAAAGTATAGGCG
ATAGAAATTGAAACCTGGCGCAATAGATATAGTACCGCAAGGGAAAGATGAAAAATTATAACCAAGCATA
ATATAG''')
idx = index_fasta(fasta_example)
idx
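# How the index fields are used (a sketch of the offset arithmetic; the
# same logic is implemented by the FastaIndexed class defined below):
name, seq_len, byte_off, chars_per_line, bytes_per_line = idx[1]
start = 100  # 0-based position within sequence2_short_name
file_off = (byte_off
            + (start // chars_per_line) * bytes_per_line  # full lines to skip
            + start % chars_per_line)  # remainder within the line
print(name, file_off)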
import re
class FastaOOB(Exception):
    '''Out-of-bounds exception for FASTA sequences.'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class FastaIndexed(object):
    ''' Encapsulates a set of indexed FASTA files.  Does not load the FASTA
        files into memory but still allows the user to extract arbitrary
        substrings, with the help of the index. '''
__removeWs = re.compile(r'\s+')
def __init__(self, fafns):
self.fafhs = {}
self.faidxs = {}
self.chr2fh = {}
self.offset = {}
self.lens = {}
self.charsPerLine = {}
self.bytesPerLine = {}
for fafn in fafns:
# Open FASTA file
self.fafhs[fafn] = fh = open(fafn, 'r')
# Parse corresponding .fai file
with open(fafn + '.fai') as idxfh:
for ln in idxfh:
toks = ln.rstrip().split()
if len(toks) == 0:
continue
assert len(toks) == 5
# Parse and save the index line
chr, ln, offset, charsPerLine, bytesPerLine = toks
self.chr2fh[chr] = fh
self.offset[chr] = int(offset) # 0-based
self.lens[chr] = int(ln)
self.charsPerLine[chr] = int(charsPerLine)
self.bytesPerLine[chr] = int(bytesPerLine)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close all the open FASTA files
for fafh in self.fafhs.values():
fafh.close()
def has_name(self, refid):
return refid in self.offset
def name_iter(self):
        return iter(self.offset.keys())  # dict.iterkeys() was Python 2 only
def length_of_ref(self, refid):
return self.lens[refid]
def get(self, refid, start, ln):
''' Return the specified substring of the reference. '''
assert refid in self.offset
if start + ln > self.lens[refid]:
            raise FastaOOB('"%s" has length %d; tried to get [%d, %d)' % (refid, self.lens[refid], start, start + ln))
fh, offset, charsPerLine, bytesPerLine = \
self.chr2fh[refid], self.offset[refid], \
self.charsPerLine[refid], self.bytesPerLine[refid]
byteOff = offset
byteOff += (start // charsPerLine) * bytesPerLine
into = start % charsPerLine
byteOff += into
fh.seek(byteOff)
left = charsPerLine - into
# Count the number of line breaks interrupting the rest of the
# string we're trying to read
if ln < left:
return fh.read(ln)
else:
nbreaks = 1 + (ln - left) // charsPerLine
res = fh.read(ln + nbreaks * (bytesPerLine - charsPerLine))
res = re.sub(self.__removeWs, '', res)
return res
# first we'll write a new FASTA file
with open('tmp.fa', 'w') as fh:
fh.write('''>sequence1_short_name with optional additional info after whitespace
ACATCACCCCATAAACAAATAGGTTTGGTCCTAGCCTTTCTATTAGCTCTTAGTAAGATTACACATGCAA
GCATCCCCGTTCCAGTGAGTTCACCCTCTAAATCACCACGATCAAAAGGAACAAGCATCAAGCACGCAGC
AATGCAGCTCAAAACGCTTAGCCTAGCCACACCCCCACGGGAAACAGCAGTGAT
>sequence2_short_name with optional additional info after whitespace
GCCCCAAACCCACTCCACCTTACTACCAGACAACCTTAGCCAAACCATTTACCCAAATAAAGTATAGGCG
ATAGAAATTGAAACCTGGCGCAATAGATATAGTACCGCAAGGGAAAGATGAAAAATTATAACCAAGCATA
ATATAG''')
with open('tmp.fa') as fh:
idx = index_fasta(fh)
with open('tmp.fa.fai', 'w') as fh:
fh.write('\n'.join(['\t'.join(map(str, x)) for x in idx]))
with FastaIndexed(['tmp.fa']) as fa_idx:
print(fa_idx.get('sequence2_short_name', 100, 30))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step3: Data simulation
Step4: Let's simulate two timeseries and plot some basic information about them.
Step5: Now we put the signals at two locations on the cortex. We construct a
Step6: Before we simulate the sensor-level data, let's define a signal-to-noise
Step7: Now we run the signal through the forward model to obtain simulated sensor
Step8: We create an
Step9: Power mapping
Step10: We will now compute the cortical power map at 10 Hz. using a DICS beamformer.
Step12: Plot the DICS power maps for both approaches, starting with the first
Step13: Now the second
|
<ASSISTANT_TASK:>
Python Code:
# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
from scipy.signal import welch, coherence, unit_impulse
from matplotlib import pyplot as plt
import mne
from mne.simulation import simulate_raw, add_noise
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.time_frequency import csd_morlet
from mne.beamformer import make_dics, apply_dics_csd
# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
# Filenames for various files we'll be using
meg_path = op.join(data_path, 'MEG', 'sample')
raw_fname = op.join(meg_path, 'sample_audvis_raw.fif')
fwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')
cov_fname = op.join(meg_path, 'sample_audvis-cov.fif')
fwd = mne.read_forward_solution(fwd_fname)
# Seed for the random number generator
rand = np.random.RandomState(42)
sfreq = 50. # Sampling frequency of the generated signal
n_samp = int(round(10. * sfreq))
times = np.arange(n_samp) / sfreq # 10 seconds of signal
n_times = len(times)
def coh_signal_gen():
    '''Generate an oscillating signal.

    Returns
    -------
    signal : ndarray
        The generated signal.
    '''
t_rand = 0.001 # Variation in the instantaneous frequency of the signal
std = 0.1 # Std-dev of the random fluctuations added to the signal
base_freq = 10. # Base frequency of the oscillators in Hertz
n_times = len(times)
# Generate an oscillator with varying frequency and phase lag.
signal = np.sin(2.0 * np.pi *
(base_freq * np.arange(n_times) / sfreq +
np.cumsum(t_rand * rand.randn(n_times))))
# Add some random fluctuations to the signal.
signal += std * rand.randn(n_times)
# Scale the signal to be in the right order of magnitude (~100 nAm)
# for MEG data.
signal *= 100e-9
return signal
signal1 = coh_signal_gen()
signal2 = coh_signal_gen()
fig, axes = plt.subplots(2, 2, figsize=(8, 4))
# Plot the timeseries
ax = axes[0][0]
ax.plot(times, 1e9 * signal1, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',
title='Signal 1')
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')
# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],
ylabel='Power (dB)', title='Power spectrum of signal 1')
# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.)
ax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',
title='Coherence between the timeseries')
fig.tight_layout()
# The locations on the cortex where the signal will originate from. These
# locations are indicated as vertex numbers.
vertices = [[146374], [33830]]
# Construct SourceEstimates that describe the signals at the cortical level.
data = np.vstack((signal1, signal2))
stc_signal = mne.SourceEstimate(
data, vertices, tmin=0, tstep=1. / sfreq, subject='sample')
stc_noise = stc_signal * 0.
snr = 1. # Signal-to-noise ratio. Decrease to add more noise.
# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_raw(raw_fname).crop(0, 1).resample(50).info
# Only use gradiometers
picks = mne.pick_types(info, meg='grad', stim=True, exclude=())
mne.pick_info(info, picks, copy=False)
# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov['data'] *= (20. / snr) ** 2 # Scale the noise to achieve the desired SNR
# Simulate the raw data, with a lowpass filter on the noise
stcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),
(stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)
events = mne.find_events(raw, initial_event=True)
tmax = (len(stc_signal.times) - 1) / sfreq
epochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2),
tmin=0, tmax=tmax, baseline=None, preload=True)
assert len(epochs) == 2 # ensure that we got the two expected events
# Plot some of the channels of the simulated data that are situated above one
# of our simulated sources.
picks = mne.pick_channels(epochs.ch_names,
mne.read_vectorview_selection('Left-frontal'))
epochs.plot(picks=picks)
# Compute the inverse operator
fwd = mne.read_forward_solution(fwd_fname)
inv = make_inverse_operator(epochs.info, fwd, cov)
# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs['signal'].average(), inv)
# Take the root-mean square along the time dimension and plot the result.
s_rms = np.sqrt((s ** 2).mean())
title = 'MNE-dSPM inverse (RMS)'
brain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,
size=600, time_label=title, title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')
# Rotate the view and add a title.
brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))
# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs['signal'], frequencies=[10])
# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=1.,
inversion='single', weight_norm=None, real_filter=True)
print(filters_approach1)
filters_approach2 = make_dics(
info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=None,
inversion='matrix', weight_norm='unit-noise-gain', real_filter=True)
print(filters_approach2)
# You can save these to disk with:
# filters_approach1.save('filters_1-dics.h5')
# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)
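# Sanity check (our own addition): the vertex with maximal DICS power
# should lie near one of the simulated sources. SourceEstimate rows are
# ordered left hemisphere first, matching the concatenated vertex arrays.
peak_idx = int(np.argmax(power_approach1.data[:, 0]))
peak_vertex = np.concatenate(power_approach1.vertices)[peak_idx]
print('peak vertex:', peak_vertex, '- true vertices:', vertices)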
def plot_approach(power, n):
    '''Plot the results on a brain.'''
title = 'DICS power map, approach %d' % n
    brain = power.plot(  # plot the argument, not the global power_approach1
'sample', subjects_dir=subjects_dir, hemi='both',
size=600, time_label=title, title=title)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh', color='b')
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh', color='b')
# Rotate the view and add a title.
brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))
return brain
brain1 = plot_approach(power_approach1, 1)
brain2 = plot_approach(power_approach2, 2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create $n \times n$ regular grid of coordinates to interpolate onto.
Step2: The plot_surface() function constructs MBA class with the given initial grid size, interpolates the input data over regular surface, and plots the results
Step3: The smaller the initial grid size, the smoother the interpolated surface.
Step4: Report some timings and statistics about the constructed hierarchy
Step5: Specifing the initial approximation
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed by this example (our assumption: the original notebook
# used %pylab inline together with the mba Python bindings):
from pylab import *
from mba import mba1, mba2

cmin = [0.0, 0.0]
cmax = [1.0, 1.0]
coo = uniform(0, 1, (7,2))
val = uniform(0, 1, coo.shape[0])
n = 100
s = linspace(0,1,n)
x = array(meshgrid(s,s)).transpose([1,2,0]).copy()
def plot_surface(m0):
interp = mba2(cmin, cmax, [m0,m0], coo, val)
error = amax(abs(val - interp(coo))) / amax(abs(val))
v = interp(x)
pcolormesh(s, s, v, cmap='RdBu')
scatter(x=coo[:,0], y=coo[:,1], c=val, cmap='RdBu')
xlim([0,1])
ylim([0,1])
title("$m_0 = {0:}$, error = {1:.3e}".format(m0, error))
colorbar();
figure(figsize=(11,5))
subplot(121); plot_surface(2)
subplot(122); plot_surface(10)
tight_layout()
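# Approximation error as a function of the initial grid size
# (a sketch reusing the scattered data defined above):
for m0 in (2, 4, 8, 16):
    interp = mba2(cmin, cmax, [m0, m0], coo, val)
    err = amax(abs(val - interp(coo))) / amax(abs(val))
    print('m0 = {:2d}, relative error = {:.3e}'.format(m0, err))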
%%timeit
interp = mba2(cmin, cmax, [3,3], coo, val)
%%timeit interp = mba2(cmin, cmax, [3,3], coo, val)
v = interp(x)
interp = mba2(cmin, cmax, [3,3], coo, val)
print(interp)
def test_initial(x0, y0, init, desc):
interp = mba1([0], [1], [8], x0, y0, init)
x = linspace(0, 1, 100).reshape(100,1)
y = interp(x)
plot(x, y, 'k-')
plot(x, [init(x) for x in x], 'k:')
plot(x0, y0, 'ro')
ylim([0,1])
title(desc)
x = [[0.3], [0.5], [0.7]]
v = [0.45, 0.55, 0.5, ]
figure(figsize=(12, 3))
subplot(131); test_initial(x, v, lambda x: 0.5, 'y = 0.5')
subplot(132); test_initial(x, v, lambda x: x[0], 'y = x')
subplot(133); test_initial(x, v, lambda x: 1-x[0], 'y = 1-x')
tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def myXOR(x, y):
    res = 0  # result accumulator (missing in the original snippet)
    for i in range(31, -1, -1):
        b1 = min(x & (1 << i), 1)  # i-th bit of x, reduced to 0/1
        b2 = min(y & (1 << i), 1)  # i-th bit of y, reduced to 0/1
        if b1 & b2:
            xoredBit = 0
        else:
            xoredBit = b1 | b2  # XOR without ^: the both-set case is handled above
        res <<= 1
        res |= xoredBit
    return res

x = 3
y = 5
print("XOR is", myXOR(x, y))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: M-layer experiments
Step2: Generate a spiral and show extrapolation
Step3: Train an M-layer on multivariate polynomials such as the determinant
Step4: Permanents
Step5: Determinants
Step6: Train an M-layer on periodic data
Step7: Train an M-layer on CIFAR-10
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
if os.path.isfile('m_layer.py'):
from m_layer import MLayer
else:
!if ! type "svn" > /dev/null; then sudo apt-get install subversion; fi
!svn export https://github.com/google-research/google-research/trunk/m_layer
from m_layer.m_layer import MLayer
GLOBAL_SEED = 1
import numpy as np
np.random.seed(GLOBAL_SEED)
import itertools
import functools
import operator
import logging
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
from matplotlib import pylab
import pandas as pd  # needed by periodic_run below
print(tf.__version__)
print(tf.config.experimental.list_physical_devices('GPU'))
SPIRAL_DIM_REP = 10
SPIRAL_DIM_MATRIX = 10
SPIRAL_LAYER_SIZE = 20
SPIRAL_LR = 0.01
SPIRAL_EPOCHS = 1000
SPIRAL_BATCH_SIZE = 16
def spiral_m_layer_model():
return tf.keras.models.Sequential(
[tf.keras.layers.Dense(SPIRAL_DIM_REP,
input_shape=(2,)),
MLayer(dim_m=SPIRAL_DIM_MATRIX,
with_bias=True,
matrix_squarings_exp=None,
matrix_init='normal'),
tf.keras.layers.ActivityRegularization(l2=1e-3),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1, activation='sigmoid')]
)
def spiral_dnn_model(activation_type):
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(2,)),
tf.keras.layers.Dense(SPIRAL_LAYER_SIZE,
activation=activation_type),
tf.keras.layers.Dense(SPIRAL_LAYER_SIZE,
activation=activation_type),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
def spiral_generate(n_points, noise=0.5, rng=None, extra_rotation=False):
if rng is None:
rng = np.random.RandomState()
if not extra_rotation:
n = np.sqrt(0.001 + (.25)*rng.rand(n_points, 1)) * 6 * (2 * np.pi)
else:
n = np.sqrt((7.0/36)*rng.rand(n_points, 1)+.25) * 6 * (2 * np.pi)
x = 0.5 * (np.sin(n) * n + (2 * rng.rand(n_points, 1) - 1) * noise)
y = 0.5 * (np.cos(n) * n + (2 * rng.rand(n_points, 1) - 1) * noise)
return (np.vstack((np.hstack((x, y)), np.hstack((-x, -y)))),
np.hstack((np.zeros(n_points), np.ones(n_points))))
def spiral_run(model_type, fig=None, activation_type=None):
if fig is None:
fig = pylab.figure(figsize=(8,8), dpi=144)
model = spiral_dnn_model(activation_type) if model_type=="dnn" else\
spiral_m_layer_model()
x_train, y_train = spiral_generate(1000)
x_test, y_test = spiral_generate(333, extra_rotation=True)
model.summary()
opt = tf.keras.optimizers.RMSprop(lr=SPIRAL_LR)
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
monitor='loss', factor=0.2, patience=5, min_lr=1e-5)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss',
patience=30,
min_delta=0.0001,
restore_best_weights=True)
result = model.fit(x_train, y_train, epochs=SPIRAL_EPOCHS,
batch_size=SPIRAL_BATCH_SIZE, verbose=2,
callbacks=[reduce_lr, early_stopping])
n_epochs = len(result.history['loss'])
delta = 0.5 ** 3
xs = np.arange(-14, 14.01, delta)
ys = np.arange(-14, 14.01, delta)
num_samples = len(xs)
a = []
for x in xs:
for y in ys:
a.append([x, y])
t_nn_gen = model.predict(np.array(a))
axes = fig.gca()
XX, YY = np.meshgrid(xs, ys)
axes.contourf(XX, YY, np.arcsinh(t_nn_gen.reshape(XX.shape)),
levels=[0.0, 0.5, 1.0],
colors=[(0.41, 0.67, 0.81, 0.2), (0.89, 0.51, 0.41, 0.2)])
axes.contour(XX, YY, np.arcsinh(t_nn_gen.reshape(XX.shape)),
levels=[0.5])
axes.set_aspect(1)
axes.grid()
axes.plot(x_train[y_train==0, 1], x_train[y_train==0, 0], '.', ms = 2,
label='Class 1')
axes.plot(x_train[y_train==1, 1], x_train[y_train==1, 0], '.', ms = 2,
label='Class 2')
plt.plot(x_test[y_test==1, 1], x_test[y_test==1, 0], '.', ms = .5,
label='Class 2')
plt.plot(x_test[y_test==0, 1], x_test[y_test==0, 0], '.', ms = .5,
label='Class 1')
return fig, n_epochs, result.history['loss'][-1]
fig, n_epochs, loss = spiral_run('m_layer')
POLY_BATCH_SIZE = 32
POLY_DIM_MATRIX = 8
POLY_DIM_INPUT_MATRIX = 3
POLY_EPOCHS = 150
POLY_SEED = 123
POLY_LOW = -1
POLY_HIGH = 1
POLY_NUM_SAMPLES = 8192
POLY_LR = 1e-3
POLY_DECAY = 1e-6
def poly_get_model():
return tf.keras.models.Sequential(
[tf.keras.layers.Flatten(input_shape=(POLY_DIM_INPUT_MATRIX,
POLY_DIM_INPUT_MATRIX)),
MLayer(dim_m=POLY_DIM_MATRIX, matrix_init='normal'),
tf.keras.layers.ActivityRegularization(l2=1e-4),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1)]
)
def poly_fun(x, permanent=False):
if permanent:
return sum(
functools.reduce(
operator.mul,
(x[i, pi] for i, pi in enumerate(perm)),
1)
for perm in itertools.permutations(range(x.shape[0])))
return np.linalg.det(x)
def poly_run(permanent=False):
rng = np.random.RandomState(seed=POLY_SEED)
num_train = POLY_NUM_SAMPLES * 5 // 4
x_train = rng.uniform(size=(num_train, POLY_DIM_INPUT_MATRIX,
POLY_DIM_INPUT_MATRIX), low=POLY_LOW,
high=POLY_HIGH)
x_test = rng.uniform(size=(100000, POLY_DIM_INPUT_MATRIX,
POLY_DIM_INPUT_MATRIX), low=POLY_LOW,
high=POLY_HIGH)
y_train = np.array([poly_fun(x, permanent=permanent) for x in x_train])
y_test = np.array([poly_fun(x, permanent=permanent) for x in x_test])
model = poly_get_model()
model.summary()
opt = tf.keras.optimizers.RMSprop(lr=POLY_LR, decay=POLY_DECAY)
model.compile(loss='mse', optimizer=opt)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=0.2, patience=5, min_lr=1e-5)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=30,
restore_best_weights=True)
model.fit(x_train, y_train, batch_size=POLY_BATCH_SIZE,
epochs=POLY_EPOCHS,
validation_split=0.2,
shuffle=True,
verbose=2,
callbacks=[reduce_lr, early_stopping])
score_train = model.evaluate(x=x_train, y=y_train)
score_test = model.evaluate(x=x_test, y=y_test)
print('Train, range %s - %s: %s' % (POLY_LOW, POLY_HIGH, score_train))
print('Test, range %s - %s: %s' % (POLY_LOW, POLY_HIGH, score_test))
poly_run(permanent=True)
poly_run(permanent=False)
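# For context (our own addition): always predicting the mean gives an MSE
# equal to the variance of the targets, a trivial baseline to beat.
rng_check = np.random.RandomState(0)
x_base = rng_check.uniform(size=(10000, POLY_DIM_INPUT_MATRIX,
                                 POLY_DIM_INPUT_MATRIX),
                           low=POLY_LOW, high=POLY_HIGH)
y_base = np.linalg.det(x_base)  # det broadcasts over stacked matrices
print('baseline MSE (variance of det targets):', y_base.var())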
PERIODIC_EPOCHS = 1000
PERIODIC_BATCH_SIZE = 128
PERIODIC_LR = 0.00001
PERIODIC_DIM_MATRIX = 10
PERIODIC_INIT_SCALE = 0.01
PERIODIC_DIAG_INIT = 10
PERIODIC_SEED = 123
def periodic_matrix_init(shape, rng=None, **kwargs):
if rng is None:
rng = np.random.RandomState()
data = np.float32(rng.normal(loc=0, scale=PERIODIC_INIT_SCALE, size=shape))
for i in range(shape[1]):
data[:, i, i] -= PERIODIC_DIAG_INIT
return data
def periodic_get_model(rng=None):
if rng is None:
rng = np.random.RandomState()
return tf.keras.models.Sequential([
tf.keras.layers.Dense(
2, input_shape=(1,),
kernel_initializer=tf.keras.initializers.RandomNormal()),
MLayer(PERIODIC_DIM_MATRIX, with_bias=True, matrix_squarings_exp=None,
matrix_init=lambda shape, **kwargs:
periodic_matrix_init(shape, rng=rng, **kwargs)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1)
])
def periodic_dist2(y_true, y_pred):
return tf.nn.l2_loss(y_true - y_pred)
def periodic_run(get_model):
rng = np.random.RandomState(seed=PERIODIC_SEED)
# See README file for information about this dataset.
    with open('daily-min-temperatures.csv', 'r') as f:  # builtin open; the original snippet used the unimported gfile.Open
data = pd.read_csv(f)
dates = data['Date']
y = data['Temp']
temperatures = data['Temp']
y = list(np.convolve(temperatures - np.mean(temperatures), np.full(7, 1 / 7),
mode='valid'))
num_train = 9 * len(y) // 10
num_test = len(y) - num_train
x_all = np.arange(len(y)).tolist()
x_train = x_all[:num_train]
y_train = y[:num_train]
x_test = x_all[num_train:]
y_targets = y[num_train:]
model_to_train = get_model(rng=rng)
input = tf.keras.layers.Input(shape=(1,))
output = model_to_train(input)
model = tf.keras.models.Model(inputs=input, outputs=output)
opt = tf.keras.optimizers.RMSprop(lr=PERIODIC_LR, decay=0)
early_stopping = tf.keras.callbacks.EarlyStopping(restore_best_weights=True)
model.compile(
loss='mean_squared_error', optimizer=opt,
metrics=[periodic_dist2])
history = model.fit(x_train, y_train,
batch_size=PERIODIC_BATCH_SIZE, epochs=PERIODIC_EPOCHS,
shuffle=True, verbose=1, callbacks=[early_stopping])
y_predictions = model.predict(x_all)
plt.plot(x_train, y_train, linewidth=1, alpha=0.7)
plt.plot(x_test, y_targets, linewidth=1, alpha=0.7)
plt.plot(x_all, y_predictions, color='magenta')
plt.legend(['y_train', 'y_targets', 'y_predictions'])
plt.xlim([0, 3650])
plt.ylabel('Temperature (Celsius)')
plt.grid(True, which='major', axis='both')
plt.grid(True, which='minor', axis='both')
xtick_index = [i for i, date in enumerate(dates) if date.endswith('-01-01')]
plt.xticks(ticks=xtick_index,
labels=[x[:4] for x in dates[xtick_index].to_list()],
rotation=30)
plt.show()
periodic_run(periodic_get_model)
CIFAR_DIM_REP = 35
CIFAR_DIM_MAT = 30
CIFAR_LR = 1e-3
CIFAR_DECAY = 1e-6
CIFAR_MOMENTUM = 0.9
CIFAR_BATCH_SIZE = 32
CIFAR_EPOCHS = 150
CIFAR_NAME = 'cifar10'
CIFAR_NUM_CLASSES = 10
def cifar_load_dataset():
train = tfds.load(CIFAR_NAME, split='train', with_info=False, batch_size=-1)
test = tfds.load(CIFAR_NAME, split='test', with_info=False, batch_size=-1)
train_np = tfds.as_numpy(train)
test_np = tfds.as_numpy(test)
x_train, y_train = train_np['image'], train_np['label']
x_test, y_test = test_np['image'], test_np['label']
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = tf.keras.utils.to_categorical(y_train, CIFAR_NUM_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, CIFAR_NUM_CLASSES)
x_train_range01 = x_train.astype('float32') / 255
x_test_range01 = x_test.astype('float32') / 255
return (x_train_range01, y_train), (x_test_range01, y_test)
def cifar_get_model():
return tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(32, 32, 3)),
tf.keras.layers.Dense(CIFAR_DIM_REP),
MLayer(dim_m=CIFAR_DIM_MAT, with_bias=True, matrix_squarings_exp=3),
tf.keras.layers.ActivityRegularization(1e-3),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(CIFAR_NUM_CLASSES, activation='softmax')
])
def cifar_run():
(x_train, y_train), (x_test, y_test) = cifar_load_dataset()
model = cifar_get_model()
model.summary()
opt = tf.keras.optimizers.SGD(lr=CIFAR_LR, momentum=CIFAR_MOMENTUM,
decay=CIFAR_DECAY)
model.compile(loss='categorical_crossentropy', optimizer=opt,
metrics=['accuracy'])
    # TF2 reports the metric as 'val_accuracy'; the old 'val_acc' name
    # would silently never trigger these callbacks
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_accuracy', factor=0.2, patience=5, min_lr=1e-5)
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy',
                                                      patience=15,
                                                      restore_best_weights=True)
history = model.fit(
x_train,
y_train,
batch_size=CIFAR_BATCH_SIZE,
epochs=CIFAR_EPOCHS,
validation_split=0.1,
shuffle=True,
verbose=2,
callbacks=[reduce_lr, early_stopping])
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
cifar_run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Queries
Step2: Step through every interaction.
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
import json
DATABASE = "data.sqlite"
conn = sqlite3.connect(DATABASE)
cursor = conn.cursor()
# For getting the maximum row id
QUERY_MAX_ID = "SELECT id FROM interactions ORDER BY id DESC LIMIT 1"
# Get interaction data
QUERY_INTERACTION = "SELECT geneids1, geneids2, probability FROM interactions WHERE id = {}"
max_id = cursor.execute(QUERY_MAX_ID).fetchone()[0]
matrix = {}
row_id = 0
while row_id <= max_id:
row_id+= 1
row = cursor.execute(QUERY_INTERACTION.format(row_id))
row = row.fetchone()
if row == None:
continue
id1 = row[0]
id2 = row[1]
prob = int(round(row[2],2) * 1000)
# Forward
if id1 not in matrix:
matrix[id1] = {}
if id2 not in matrix[id1]:
matrix[id1][id2] = []
if prob not in matrix[id1][id2]:
matrix[id1][id2].append(prob)
# Backwards
if id2 not in matrix:
matrix[id2] = {}
if id1 not in matrix[id2]:
matrix[id2][id1] = []
if prob not in matrix[id2][id1]:
matrix[id2][id1].append(prob)
with open("matrix.json", "w+") as file:
file.write(json.dumps( matrix ))
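# Sanity check (our own addition): the matrix should be symmetric, since
# every interaction was inserted in both directions above.
for g1 in matrix:
    for g2 in matrix[g1]:
        assert sorted(matrix[g1][g2]) == sorted(matrix[g2][g1])
print("matrix is symmetric")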
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a function for the Galactic foreground
Step2: We're going to specify that $\phi_{\rm fg}= 2\,{\rm rad\,m^{-2}}$. We can then compute the Galactic contribution at each value of $\lambda^2$
Step3: Now make a function for the radio galaxy lobe
Step4: The total polarized signal will be the sum of the radio galaxy contribution and the Galactic contribution
Step5: Now let's re-create Fig. 1 from Brentjens & de Bruyn (2005; https
Step6: Now let's plot on the magnitude of the total polarization as well
Step7: Now let's calculate the polarization angle
Step8: Now plot it all together
|
<ASSISTANT_TASK:>
Python Code:
# this embeds plots in the notebook
%matplotlib inline
import numpy as np # for arrays
import pylab as pl # for plotting
lam_sq = np.arange(0.01,1,0.01)
phi_fg = 2.
P_gal = np.sin(2*phi_fg*lam_sq)/(2*phi_fg*lam_sq) + 0*1j
phi_1 = 10.
P_rg = 0.25*np.cos(2*phi_1*lam_sq) + 1j*0.25*np.sin(2*phi_1*lam_sq)
P_tot = P_gal + P_rg
pl.subplot(111)
pl.plot(lam_sq,P_gal.real,ls='--')
pl.xlabel(r"$\lambda^2$ [m$^2$]")
pl.ylabel("Flux [Jy]")
pl.axis([0,1,-0.2,1.4])
pl.show()
pl.subplot(111)
pl.plot(lam_sq,P_gal.real,ls='--')
pl.plot(lam_sq,np.absolute(P_tot),ls=':')
pl.xlabel(r"$\lambda^2$ [m$^2$]")
pl.ylabel("Flux [Jy]")
pl.axis([0,1,-0.2,1.4])
pl.show()
chi = 0.5*np.arctan2(P_tot.imag,P_tot.real)
chi*= (180./np.pi) # convert radians to degrees
# hack to unwrap the arctangent [-pi/2,pi/2] wrap:
for i in range(1,len(chi)):
delta_chi = np.abs(chi[i]-chi[i-1])
if (delta_chi>45.):
chi[i:]+=180.
pl.subplot(111)
pl.plot(lam_sq,chi)
pl.xlabel(r"$\lambda^2$ [m$^2$]")
pl.ylabel(r"$\chi$ [deg]")
pl.axis([0,1,-50,350])
pl.show()
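# The manual unwrapping loop above can also be written with numpy's unwrap
# (a sketch; it removes the 2-pi jumps in the full angle 2*chi, so this
# should reproduce the curve plotted above when sampling is dense enough):
chi_alt = 0.5 * np.unwrap(np.arctan2(P_tot.imag, P_tot.real))
chi_alt *= (180. / np.pi)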
fig, ax1 = pl.subplots()
ln1 = ax1.plot(lam_sq, chi, 'b-',label=r"$\chi$")
ax1.set_xlabel(r"$\lambda^2$ [m$^2$]")
ax1.set_ylabel(r"$\chi$ [deg]")
ax1.set_ylim(-50, 350)
ax2 = ax1.twinx()
ln2 = ax2.plot(lam_sq,np.absolute(P_tot),ls=':',label=r"$|P|$")
ln3 = ax2.plot(lam_sq,P_gal.real,ls='--',label=r"$Q_{\rm fg}$")
ax2.set_ylabel("Flux [Jy]")
ax2.set_ylim(-0.2, 1.4)
# figure legend:
lns = ln1+ln2+ln3
labs = [l.get_label() for l in lns]
ax2.legend(lns, labs, loc=1)
fig.tight_layout()
pl.savefig("Fig1.png")
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read RF Image
Step2: The RF image represents transducer results over a certain time period with regards to the axial direction (along the beam) and the lateral direction (normal to the beam). Data along the axial direction describes how the ultrasound wave echo propagated through the image medium. Here we use matplotlib to visualize the waveform at a fixed lateral position at T=0.
Step3: Perform Envelope Detection
Step4: Apply Logarithmic Transform
Step5: Use BModeImageFilter for RF-to-BMode image generation
Step6: Beamline Over Time
|
<ASSISTANT_TASK:>
Python Code:
# Install notebook dependencies
import sys
#!{sys.executable} -m pip install itk itk-ultrasound numpy matplotlib itkwidgets
import itk
from matplotlib import pyplot as plt
from itkwidgets import view, compare
SAMPLING_FREQUENCY = 40e6 # Hz
SAMPLING_PERIOD = SAMPLING_FREQUENCY ** -1
# Image dimensions: [axial, lateral, frame]
rf_image = itk.imread('Input/MouseLiverRF.mha', itk.F)
print(itk.size(rf_image))
view(rf_image)
def visualize_beam_waveform(image, lateral_idx=0, time_idx=0):
arr = itk.array_view_from_image(image)
    x_labels = [idx * SAMPLING_PERIOD * 1e6 for idx in range(arr.shape[2])]  # seconds -> microseconds, matching the axis label
plt.plot(x_labels,arr[time_idx,lateral_idx,:])
plt.xlabel('Sampling Time ($\mu$s)')
plt.ylabel('Response')
plt.show()
# Visualize one RF waveform
visualize_beam_waveform(rf_image)
# Note that the dimension in the direction of the analytic signal filter must be
# a multiple of 2, 3, or 5 for the FFT to be valid
padded_image = itk.fft_pad_image_filter(rf_image)
# Use the padded image here; passing the unpadded rf_image would make the padding step a no-op
complex_image = itk.analytic_signal_image_filter(padded_image, direction=0)
modulus_image = itk.complex_to_modulus_image_filter(complex_image)
visualize_beam_waveform(modulus_image)
view(modulus_image)
log_image = itk.log10_image_filter(modulus_image)
visualize_beam_waveform(log_image)
view(log_image)
bmode_image = itk.imread('Input/MouseLiverB.mha', itk.F)
filtered_image = itk.b_mode_image_filter(rf_image,direction=1)
compare(filtered_image, bmode_image)
# BMode Image (calculated)
visualize_beam_waveform(filtered_image)
# BMode Image (expected)
arr = itk.array_view_from_image(bmode_image)
plt.plot(arr[0,:,0])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll just look at a small number of potential hosts for now. I'll have to do batches to scale this up and I just want to check it works for now.
Step2: So we get ~84% accuracy on just predicting labels. Let's compare to a random classifier.
Step3: A stratified random classifier gets 88% accuracy, which doesn't look good for our logistic regression!
Step4: This is quite interesting! Lots of points (blue) are not really considered, and sometimes there are a few candidates (red). These usually look pretty reasonable, but it also seems a lot like the predictor is just looking for bright things.
Step5: Against a random classifier...
Step6: It would also be useful to know what the classifier considers "hard" to classify. I think an entropy approach might work (though there are problems with this...). Let's find the highest-entropy subject.
|
<ASSISTANT_TASK:>
Python Code:
import itertools
import sys
import bson
import h5py
import keras.layers
import keras.models
import matplotlib.pyplot
import numpy
import pandas
import sklearn.cross_validation
import sklearn.dummy
import sklearn.linear_model
import sklearn.metrics
sys.path.insert(1, '..')
import crowdastro.data
import crowdastro.show
with pandas.HDFStore('../crowdastro-data/training.h5') as store:
data = store['data']
data.head()
n = 5000
# I'm gathering up the radio patches first so I can run them through the CNN at the same time
# as one big matrix operation. In principle this would run on the GPU.
radio_patches = numpy.zeros((n, 80, 80))
labels = numpy.zeros((n,))
radius = 40
padding = 150
for idx, row in data.head(n).iterrows():
sid = bson.objectid.ObjectId(row['subject_id'][0].decode('ascii'))
x = row['x'][0]
y = row['y'][0]
label = row['is_host'][0]
labels[idx] = label
subject = crowdastro.data.db.radio_subjects.find_one({'_id': sid})
radio = crowdastro.data.get_radio(subject, size='5x5')
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[idx, :] = patch
# Load the CNN.
with open('../crowdastro-data/cnn_model_2.json', 'r') as f:
cnn = keras.models.model_from_json(f.read())
cnn.load_weights('../crowdastro-data/cnn_weights_2.h5')
cnn.layers = cnn.layers[:5] # Pop the layers after the second convolution's activation.
cnn.add(keras.layers.Flatten())
cnn.compile(optimizer='sgd', loss='mse') # I don't actually care about the optimiser or loss.
# Load the PCA.
with h5py.File('../crowdastro-data/pca.h5') as f:
pca = f['conv_2'][:]
# Find the radio features.
radio_features = cnn.predict(radio_patches.reshape(n, 1, 80, 80)) @ pca.T
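# The "@ pca.T" projects the flattened conv-layer activations onto the stored
# principal components, compressing each patch into a short feature vector.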
# Add on the astronomy features.
features = numpy.hstack([radio_features, data.ix[:n-1, 'flux_ap2_24':'flux_ap2_80'].as_matrix()])
features = numpy.nan_to_num(features)
# Split into training and testing data.
xs_train, xs_test, ts_train, ts_test = sklearn.cross_validation.train_test_split(features, labels, test_size=0.2)
# Classify!
lr = sklearn.linear_model.LogisticRegression(class_weight='balanced')
lr.fit(xs_train, ts_train)
lr.score(xs_test, ts_test)
sklearn.metrics.confusion_matrix(ts_test, lr.predict(xs_test), [0, 1])
dc = sklearn.dummy.DummyClassifier(strategy='stratified')
dc.fit(xs_train, ts_train)
dc.score(xs_test, ts_test)
def softmax(x):
    # Subtracting the max before exponentiating improves numerical stability;
    # softmax is shift-invariant, so the result is unchanged.
    exp = numpy.exp(x - numpy.max(x))
    return exp / numpy.sum(exp, axis=0)
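# Quick check: softmax of [0, 1] should give roughly [0.27, 0.73]
print(softmax(numpy.array([0.0, 1.0])))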
subject_ids = set()
for idx, row in data.ix[n:n * 2].iterrows():
sid = row['subject_id'][0]
subject_ids.add(sid)
for subject_id in itertools.islice(subject_ids, 0, 10):
# Pandas *really* doesn't like fancy indexing against string comparisons.
indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
scores = lr.predict_proba(features)[:, 1].T
probs = softmax(scores)
crowdastro.show.subject(subject)
matplotlib.pyplot.scatter(potential_hosts[:, 0], potential_hosts[:, 1], c=probs)
matplotlib.pyplot.show()
hits = 0
attempts = 0
for subject_id in subject_ids:
indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
scores = lr.predict_proba(features)[:, 1].reshape(-1)
predicted_host = scores.argmax()
if labels[predicted_host]:
hits += 1
attempts += 1
print('Accuracy: {:.02%}'.format(hits / attempts))
hits = 0
attempts = 0
for subject_id in subject_ids:
indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
scores = dc.predict_proba(features)[:, 1].reshape(-1)
predicted_host = scores.argmax()
if labels[predicted_host]:
hits += 1
attempts += 1
print('Accuracy: {:.02%}'.format(hits / attempts))
max_entropy = float('-inf')
max_subject = None
for subject_id in subject_ids:
indices = (data['subject_id'] == subject_id).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
labels = numpy.nan_to_num(data.as_matrix()[indices][:, -1].astype(bool))
subject = crowdastro.data.db.radio_subjects.find_one({'_id': bson.objectid.ObjectId(subject_id.decode('ascii'))})
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
probabilities = softmax(lr.predict_proba(features)[:, 1].reshape(-1))
entropy = -(probabilities * numpy.log(probabilities)).sum()
if entropy > max_entropy:
max_entropy = entropy
max_subject = subject
crowdastro.show.subject(max_subject)
indices = (data['subject_id'] == str(max_subject['_id']).encode('ascii')).as_matrix().reshape(-1)
potential_hosts = numpy.nan_to_num(data.as_matrix()[indices][:, 1:-1].astype(float))
subject = max_subject
radio = crowdastro.data.get_radio(subject, size='5x5')
radio_patches = numpy.zeros((len(potential_hosts), 1, radius * 2, radius * 2))
for index, (x, y, *astro) in enumerate(potential_hosts):
patch = radio[x - radius + padding : x + radius + padding, y - radius + padding : y + radius + padding]
radio_patches[index, 0, :] = patch
radio_features = cnn.predict(radio_patches) @ pca.T
astro_features = potential_hosts[:, 2:]
features = numpy.hstack([radio_features, astro_features])
scores = lr.predict_proba(features)[:, 1].T
probs = softmax(scores)
crowdastro.show.subject(subject)
matplotlib.pyplot.scatter(potential_hosts[:, 0], potential_hosts[:, 1], c=probs)
matplotlib.pyplot.show()
matplotlib.pyplot.plot(sorted(probs), marker='x')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Written Questions Data for a Session
Step2: We should now have all the data in a single JSON file (writtenQuestions.json).
Step3: Apache Drill can query over multiple files in the same directory, so let's try that...
Step4: Now we need to remap those URLs onto URLs of the form used in the Written Questions data.
Step5: Querying Over JOINed JSON and CSV Files
Step6: JOINing Across A Monolithic JSON file and a Directory of Files with Regularly Mismatched Keys
Step7: Now let's do some counting... in the session for which we downloaded the data, how many written questions were tabled by gender, in total?
Step8: How many per person, by gender?
Step9: Can we do the average too?
Step10: How about by party?
Step11: Party and gender?
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from pydrill.client import PyDrill
%matplotlib inline
#Get a connection to the Apache Drill server
drill = PyDrill(host='localhost', port=8047)
#Get Written questions data - may take some time!
stub='http://lda.data.parliament.uk'.strip('/')
#We're going to have to call the API somehow
import requests
##To make things more efficient if we do this again, cache requests
#!pip3 install requests_cache
#import requests_cache
#requests_cache.install_cache('parlidata_cache', backend='sqlite')
#Get data from URL
def getURL(url):
print(url)
r=requests.get(url)
print(r.status_code)
return r
#Download data - if there is more, get it
def loader(url):
items=[]
done=False
r=getURL(url)
while not done:
items=items+r.json()['result']['items']
if 'next' in r.json()['result']:
r=getURL(r.json()['result']['next']+'&_pageSize=500')
else: done=True
return items
url='{}/{}.json?session={}'.format(stub,'commonswrittenquestions','2015/16')
items=loader(url)
#Save the data
import json
with open('writtenQuestions.json', 'w') as outfile:
json.dump(items, outfile)
#What does the whole table look like?
q=''' SELECT * from dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` LIMIT 3'''
drill.query(q).to_dataframe()
#Try to select a column
q='''
SELECT j.tablingMember._about AS memberURL
FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j LIMIT 3
'''
drill.query(q).to_dataframe()
#Try to select an item from a list in a column
q='''
SELECT tablingMemberPrinted[0]._value AS Name
FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` LIMIT 3
'''
drill.query(q).to_dataframe()
#Get a dataframe of all the member URLs - so we can get the data fro each from the Parliament data API
q='''
SELECT DISTINCT j.tablingMember._about AS memberURL
FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j
'''
memberIds = drill.query(q).to_dataframe()
memberIds.head()
#The URLs in the written question data don't actually resolve - we need to tweak them
#Generate a set of members who have tabled questions that have been answered
#Note that the identifier Linked Data URL doesn't link... so patch it...
members= ['{}.json'.format(i.replace('http://','http://lda.')) for i in memberIds['memberURL']]
#Preview the links
members[:3]
#Download the data files into a data directory
!mkdir -p data/members
for member in members:
!wget -quiet -P data/members {member}
!ls data/members
#Preview one of the files
!head data/members/1474.json
q=''' SELECT j.`result`.primaryTopic.gender._value AS gender,
j.`result`._about AS url
FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/data/members` j'''
membersdf=drill.query(q).to_dataframe()
membersdf.head()
#Let's reverse the URL to the same form as in the written questions - then we can use this for a JOIN
membersdf['fixedurl']=membersdf['url'].str.replace('http://lda.','http://').str.replace('.json','')
#Save the data as a CSV file
membersdf.to_csv('data/members.csv',index=False)
!head data/members.csv
#Now find the gender of a question asker - join a query over the monolithic JSON file with the CSV file
q=''' SELECT DISTINCT j.tablingMember._about AS memberURL, m.gender
FROM dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/writtenQuestions.json` j
JOIN dfs.`/Users/ajh59/Dropbox/parlidata/notebooks/data/members.csv` m
ON j.tablingMember._about = m.fixedurl
LIMIT 3'''
drill.query(q).to_dataframe()
#Let's see if we can modify the URL in the separate JSON files so we can join with the monolithic file
q=''' SELECT DISTINCT j.tablingMember._about AS memberURL,
m.`result`.primaryTopic.gender._value AS gender,
m.`result`._about AS url
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
LIMIT 3'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
drill.query(q).to_dataframe()
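# Note: the REGEXP_REPLACE pair rebuilds the written-questions URL form on the
# fly (strip the "lda." host prefix, drop the ".json" suffix), so this variant
# of the join needs no pre-computed "fixedurl" CSV column.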
q=''' SELECT COUNT(*) AS Number,
m.`result`.primaryTopic.gender._value AS gender
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
GROUP BY m.`result`.primaryTopic.gender._value'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
drill.query(q).to_dataframe()
q=''' SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name,
m.`result`.primaryTopic.gender._value AS gender
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
GROUP BY m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value
'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
drill.query(q).to_dataframe().head()
q='''
SELECT AVG(Number) AS average, gender
FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name,
m.`result`.primaryTopic.gender._value AS gender
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
GROUP BY m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value )
GROUP BY gender
'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
drill.query(q).to_dataframe()
q='''
SELECT AVG(Number) AS average, party
FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name,
m.`result`.primaryTopic.party._value AS party
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
GROUP BY m.`result`.primaryTopic.party._value, j.tablingMemberPrinted[0]._value )
GROUP BY party
'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
dq=drill.query(q).to_dataframe()
dq['average']=dq['average'].astype(float)
dq
dq.set_index('party').sort_values(by='average').plot(kind="barh");
q='''
SELECT AVG(Number) AS average, party, gender
FROM (SELECT COUNT(*) AS Number, j.tablingMemberPrinted[0]._value AS Name,
m.`result`.primaryTopic.party._value AS party,
m.`result`.primaryTopic.gender._value AS gender
FROM dfs.`{path}/writtenQuestions.json` j
JOIN dfs.`{path}/data/members` m
ON j.tablingMember._about = REGEXP_REPLACE(REGEXP_REPLACE(m.`result`._about,'http://lda.','http://'),'\.json','')
GROUP BY m.`result`.primaryTopic.party._value, m.`result`.primaryTopic.gender._value, j.tablingMemberPrinted[0]._value )
GROUP BY party, gender
'''.format(path='/Users/ajh59/Dropbox/parlidata/notebooks')
dq=drill.query(q).to_dataframe()
dq['average']=dq['average'].astype(float)
dq
dq.set_index(['party','gender']).sort_values(by='average').plot(kind="barh");
dq.sort_values(by=['gender','average']).set_index(['party','gender']).plot(kind="barh");
dqp=dq.pivot(index='party',columns='gender')
dqp.columns = dqp.columns.get_level_values(1)
dqp
dqp.plot(kind='barh');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: B. Function construction
Step2: B.2 Node Sampling
Step3: B.3 Gibbs sampling -- $z_{m,n}$
Step4: B.4 Gibbs sampling -- ${\bf c}_{m}$, CRP prior
Step5: B.5 Gibbs sampling -- ${\bf c}_{m}$, likelihood
Step6: B.6 Gibbs sampling -- ${\bf c}_{m}$, posterior
Step7: B.7 Gibbs sampling -- $w_{n}$
Step8: C. Gibbs sampling
Step9: C.2 Gibbs sampling
Step10: V. Topic Model with hLDA
Step11: B. hLDA plot
Step12: VI. Empirical Example
Step13: B. Real data
Step14: VII. Download and Install from Github
Step15: VIII. Optimization
Step16: IX. Code Comparison
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.special import gammaln
import random
from collections import Counter
import string
import graphviz
import pygraphviz
import pydot
def CRP(topic, phi):
'''
CRP gives the probability of topic assignment for specific vocabulary
Return a 1 * j array, where j is the number of topic
Parameter
---------
topic: a list of lists, contains assigned words in each sublist (topic)
phi: double, parameter for CRP
Return
------
p_crp: the probability of topic assignments for new word
'''
p_crp = np.empty(len(topic)+1)
m = sum([len(x) for x in topic])
p_crp[0] = phi / (phi + m)
for i, word in enumerate(topic):
p_crp[i+1] = len(word) / (phi + m)
return p_crp
def node_sampling(corpus_s, phi):
'''
Node sampling samples the number of topics, L
return a j-layer list of lists, where j is the number of topics
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
phi: double, parameter for CRP
Return
------
topic: a list of lists, contains assigned words in each sublist (topic)
'''
topic = []
for corpus in corpus_s:
for word in corpus:
cm = CRP(topic, phi)
theta = np.random.multinomial(1, (cm/sum(cm))).argmax()
if theta == 0:
topic.append([word])
else:
topic[theta-1].append(word)
return topic
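# node_sampling is stochastic: e.g. node_sampling([['a', 'b'], ['a', 'c']], 1.0)
# might return [['a', 'b', 'a'], ['c']] on one run and a different split on the next.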
def Z(corpus_s, topic, alpha, beta):
'''
Z samples from LDA model
    Return two j-layer lists of lists, where j is the number of topics
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
topic: a L-dimensional list of lists, sample from node_sampling
alpha: double, parameter
beta: double, parameter
Return
------
    z_topic: a j-dimensional list of lists, drawn from the L-dimensional topic list, j<L
    z_doc: a j-dimensional list of lists, reporting from which document each word was assigned to each topic
'''
n_vocab = sum([len(x) for x in corpus_s])
t_zm = np.zeros(n_vocab).astype('int')
z_topic = [[] for _ in topic]
z_doc = [[] for _ in topic]
z_tmp = np.zeros((n_vocab, len(topic)))
assigned = np.zeros((len(corpus_s), len(topic)))
n = 0
for i in range(len(corpus_s)):
for d in range(len(corpus_s[i])):
wi = corpus_s[i][d]
for j in range(len(topic)):
lik = (z_topic[j].count(wi) + beta) / (assigned[i, j] + n_vocab * beta)
pri = (len(z_topic[j]) + alpha) / ((len(corpus_s[i]) - 1) + len(topic) * alpha)
z_tmp[n, j] = lik * pri
t_zm[n] = np.random.multinomial(1, (z_tmp[n,:]/sum(z_tmp[n,:]))).argmax()
z_topic[t_zm[n]].append(wi)
z_doc[t_zm[n]].append(i)
assigned[i, t_zm[n]] += 1
n += 1
z_topic = [x for x in z_topic if x != []]
z_doc = [x for x in z_doc if x != []]
return z_topic, z_doc
def CRP_prior(corpus_s, doc, phi):
'''
    CRP_prior gives the prior over topic assignments implied by the nCRP
    Return a m*j array, where m is the number of documents and j is the number of topics
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
    doc: a j-dimensional list of lists, drawn from Z function (z_doc)
phi: double, parameter for CRP
Return
------
c_p: a m*j array, for each document the probability of the topics
'''
c_p = np.empty((len(corpus_s), len(doc)))
for i, corpus in enumerate(corpus_s):
p_topic = [[x for x in doc[j] if x != i] for j in range(len(doc))]
tmp = CRP(p_topic, phi)
c_p[i,:] = tmp[1:]
return c_p
def likelihood(corpus_s, topic, eta):
'''
    likelihood gives the probability of the data given a particular choice of c
    Return a m*j array, where m is the number of documents and j is the number of topics
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
topic: a j-dimensional list of lists, drawn from Z function (z_assigned)
eta: double, parameter
Return
------
w_m: a m*j array
'''
w_m = np.empty((len(corpus_s), len(topic)))
allword_topic = [word for t in topic for word in t]
n_vocab = sum([len(x) for x in corpus_s])
for i, corpus in enumerate(corpus_s):
prob_result = []
for j in range(len(topic)):
current_topic = topic[j]
n_word_topic = len(current_topic)
prev_dominator = 1
later_numerator = 1
prob_word = 1
overlap = [val for val in set(corpus) if val in current_topic]
prev_numerator = gammaln(len(current_topic) - len(overlap) + n_vocab * eta)
later_dominator = gammaln(len(current_topic) + n_vocab * eta)
for word in corpus:
corpus_list = corpus
if current_topic.count(word) - corpus_list.count(word) < 0 :
a = 0
else:
a = current_topic.count(word) - corpus_list.count(word)
prev_dominator += gammaln(a + eta)
later_numerator += gammaln(current_topic.count(word) + eta)
prev = prev_numerator - prev_dominator
later = later_numerator - later_dominator
like = prev + later
w_m[i, j] = like
        w_m[i, :] = w_m[i, :] + abs(min(w_m[i, :])) + 0.1  # shift the row strictly positive before normalising
w_m = w_m/w_m.sum(axis = 1)[:, np.newaxis]
return w_m
def post(w_m, c_p):
'''
Parameter
---------
w_m: likelihood, drawn from likelihood function
c_p: prior, drawn from CRP_prior function
Return
------
c_m, a m*j list of lists
'''
c_m = (w_m * c_p) / (w_m * c_p).sum(axis = 1)[:, np.newaxis]
return np.array(c_m)
def wn(c_m, corpus_s):
'''
    wn returns the assignment of words to topics, drawn from a multinomial distribution
Return a n*1 array, where n is the total number of word
Parameter
---------
c_m: a m*j list of lists, drawn from post function
corpus_s: a list of lists, contains words in each sublist (document)
Return
------
wn_ass: a n*1 array, report the topic assignment for each word
'''
wn_ass = []
for i, corpus in enumerate(corpus_s):
for word in corpus:
theta = np.random.multinomial(1, c_m[i]).argmax()
wn_ass.append(theta)
return np.array(wn_ass)
most_common = lambda x: Counter(x).most_common(1)[0][0]
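# e.g. most_common([1, 1, 2]) -> 1, i.e. the modal topic assignment across Gibbs sweeps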
def gibbs(corpus_s, topic, alpha, beta, phi, eta, ite):
'''
gibbs will return the distribution of words for topics
Return a j-dimensional list of lists, where j is the number of topics
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
topic: a j-dimensional list of lists, drawn from Z function (z_assigned)
alpha: double, parameter for Z function
beta: double, parameter for Z function
    phi: double, parameter for CRP_prior function
    eta: double, parameter for w_n function
    ite: int, number of iterations
Return
------
wn_topic: a j-dimensional list of lists, the distribution of words for topics
'''
n_vocab = sum([len(x) for x in corpus_s])
gibbs = np.empty((n_vocab, ite)).astype('int')
for i in range(ite):
z_topic, z_doc = Z(corpus_s, topic, alpha, beta)
c_p = CRP_prior(corpus_s, z_doc, phi)
w_m = likelihood(corpus_s, z_topic, eta)
c_m = post(w_m, c_p)
gibbs[:, i] = wn(c_m, corpus_s)
# drop first 1/10 data
gibbs = gibbs[:, int(ite/10):]
theta = [most_common(gibbs[x]) for x in range(n_vocab)]
n_topic = max(theta)+1
wn_topic = [[] for _ in range(n_topic)]
wn_doc_topic = [[] for _ in range(n_topic)]
doc = 0
n = 0
    for i, corpus in enumerate(corpus_s):  # renamed loop variable to avoid shadowing the corpus_s argument
        if doc == i:
            for word in corpus:
wn_doc_topic[theta[n]].append(word)
n += 1
for j in range(n_topic):
if wn_doc_topic[j] != []:
wn_topic[j].append(wn_doc_topic[j])
wn_doc_topic = [[] for _ in range(n_topic)]
doc += 1
wn_topic = [x for x in wn_topic if x != []]
return wn_topic
def hLDA(corpus_s, alpha, beta, phi, eta, ite, level):
'''
    hLDA generates an n*1 list of lists, where n is the number of levels
Parameter
---------
corpus_s: a list of lists, contains words in each sublist (document)
alpha: double, parameter for Z function
beta: double, parameter for Z function
    phi: double, parameter for CRP_prior function
    eta: double, parameter for w_n function
    ite: int, number of iterations
    level: int, number of levels
Return
hLDA_tree: an n*1 list of lists, each sublist represents a level, the sublist in each level represents a topic
node: an n*1 list of lists, returns how many nodes there are in each level
'''
topic = node_sampling(corpus_s, phi)
print(len(topic))
hLDA_tree = [[] for _ in range(level)]
tmp_tree = []
node = [[] for _ in range(level+1)]
node[0].append(1)
for i in range(level):
if i == 0:
wn_topic = gibbs(corpus_s, topic, alpha, beta, phi, eta, ite)
node_topic = [x for word in wn_topic[0] for x in word]
hLDA_tree[0].append(node_topic)
tmp_tree.append(wn_topic[1:])
tmp_tree = tmp_tree[0]
node[1].append(len(wn_topic[1:]))
else:
for j in range(sum(node[i])):
if tmp_tree == []:
break
wn_topic = gibbs(tmp_tree[0], topic, alpha, beta, phi, eta, ite)
node_topic = [x for word in wn_topic[0] for x in word]
hLDA_tree[i].append(node_topic)
tmp_tree.remove(tmp_tree[0])
if wn_topic[1:] != []:
tmp_tree.extend(wn_topic[1:])
node[i+1].append(len(wn_topic[1:]))
return hLDA_tree, node[:level]
def HLDA_plot(hLDA_object, Len = 8, save = False):
from IPython.display import Image, display
def viewPydot(pdot):
plt = Image(pdot.create_png())
display(plt)
words = hLDA_object[0]
struc = hLDA_object[1]
graph = pydot.Dot(graph_type='graph')
end_index = [np.insert(np.cumsum(i),0,0) for i in struc]
for level in range(len(struc)-1):
leaf_level = level + 1
leaf_word = words[leaf_level]
leaf_struc = struc[leaf_level]
word = words[level]
end_leaf_index = end_index[leaf_level]
for len_root in range(len(word)):
root_word = '\n'.join([x[0] for x in Counter(word[len_root]).most_common(Len)])
leaf_index = leaf_struc[len_root]
start = end_leaf_index[len_root]
end = end_leaf_index[len_root+1]
lf = leaf_word[start:end]
for l in lf:
leaf_w = '\n'.join([x[0] for x in Counter(list(l)).most_common(Len)])
edge = pydot.Edge(root_word, leaf_w)
graph.add_edge(edge)
if save == True:
graph.write_png('graph.png')
viewPydot(graph)
def sim_corpus(n):
n_rows = n
corpus = [[] for _ in range(n_rows)]
for i in range(n_rows):
        n_cols = np.random.randint(10, 200, 1, dtype='int')[0]
        num = np.random.normal(0, 1, n_cols)  # draw once per document instead of re-drawing per word
        for j in range(n_cols):
            word = 'w%s' % int(round(num[j], 1)*10)
            corpus[i].append(word)
return corpus
corpus_0 = sim_corpus(35)
tree_0 = hLDA(corpus_0, 0.1, 0.01, 2, 0.01, 100, 3)
HLDA_plot(tree_0, 5, False)
def read_corpus(corpus_path):
punc = ['`', ',', "'", '.', '!', '?']
corpus = []
with open(corpus_path, 'r') as f:
for line in f:
for x in punc:
line = line.replace(x, '')
line = line.strip('\n')
word = line.split(' ')
corpus.append(word)
return(corpus)
corpus_1 = read_corpus('sample.txt')
tree_1 = hLDA(corpus_1, 0.1, 0.01, 1, 0.01, 100, 3)
HLDA_plot(tree_1, 5, False)
import hLDA
sim = hLDA.sim_corpus(5)
print(sim[0])
corpus = hLDA.read_corpus('sample.txt')
print(corpus[0])
tree = hLDA.hLDA(corpus, 0.1, 0.01, 1, 0.01, 10, 3)
hLDA.HLDA_plot(tree)
%load_ext Cython
%%cython -a
cimport cython
cimport numpy as np
import numpy as np
@cython.cdivision(True)
@cython.boundscheck(False)
@cython.wraparound(False)
def CRP_c(list topic, double phi):
cdef double[:] cm = np.empty(len(topic)+1)
cdef int m = sum([len(x) for x in topic])
cm[0] = phi / (phi + m)
cdef int i
cdef list word
for i, word in enumerate(topic):
cm[i+1] = len(word) / (phi + m)
return np.array(cm)
def node_sampling_c(list corpus_s, double phi):
cdef list topic = []
cdef int theta
cdef list corpus
cdef str word
for corpus in corpus_s:
for word in corpus:
cm = CRP_c(topic, phi)
theta = np.random.multinomial(1, (cm/sum(cm))).argmax()
if theta == 0:
topic.append([word])
else:
topic[theta-1].append(word)
return topic
%timeit node_sampling_c(corpus_1, 1)
%timeit node_sampling(corpus_1, 1)
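# Sanity sketch: both samplers are stochastic, so compare only an invariant --
# every word gets assigned to some topic, so the total assigned counts must match.
topic_py = node_sampling(corpus_1, 1)
topic_cy = node_sampling_c(corpus_1, 1)
print(sum(len(t) for t in topic_py), sum(len(t) for t in topic_cy))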
import matplotlib.pyplot as plt
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import gensim
def lda_topic(corpus_s, dic, n_topics, ite):
lda = gensim.models.ldamodel.LdaModel(corpus = corpus_s,
id2word = dic,
num_topics = n_topics,
update_every = 1,
chunksize = 1,
passes = 1,
iterations = ite)
return lda.print_topics()
corpus = read_corpus('sample.txt')
def lda_corpus(corpus_s):
texts = []
tokenizer = RegexpTokenizer(r'\w+')
for doc in corpus_s:
for word in doc:
raw = word.lower()
tokens = tokenizer.tokenize(raw)
texts.append(tokens)
dictionary = corpora.Dictionary(texts)
n_corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('sample.mm', n_corpus)
sample = gensim.corpora.MmCorpus('sample.mm')
return sample, dictionary
sample, dic = lda_corpus(corpus)
lda_topic(sample, dic, 3, 5000)
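# Unlike the hLDA tree above, gensim's flat LDA has no hierarchy: print_topics()
# simply lists each of the 3 topics as a weighted token mix, e.g. '0.012*"word" + ...'.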
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All preinstalled packages
Step2: %require examples
Step3: Best practices
|
<ASSISTANT_TASK:>
Python Code:
import pineapple
%pip freeze
import pineapple # required for all subsequent cells
# Use %pip line magic to list all installed packages
%pip list
# Use %pip line magic to download and install a specific package
%pip install unittest2
# New package is not available for import
import unittest2
# You can also uninstall packages
%pip uninstall -y unittest2
%require unittest2
import unittest2
%require unittest2==1.1.0
%require unittest2==1.1.0 pip>=7
# On the old version to get installed packages
%require
# On the fresh new copy of Pineapple
# Paste everything in between the single quotes
%require Cython==0.23.1 decorator==4.0.2 ipykernel==4.0.3 ipython==4.0.0 ipython-genutils==0.1.0 IVisual-alt==0.2.3 Jinja2==2.8 jsonschema==2.5.1 jupyter-client==4.0.0 jupyter-core==4.0.4 linecache2==1.0.0 MarkupSafe==0.23 matplotlib==1.4.3 mistune==0.7.1 nbconvert==4.0.0 nbformat==4.0.0 nose==1.3.7 notebook==4.0.4 path.py==8.1 pexpect==3.3 pickleshare==0.5 pineapple==0.4 pip==7.1.2 ptyprocess==0.5 py==1.4.30 pycurl==7.19.5 Pygments==2.0.2 pyparsing==2.0.3 pytest==2.7.2 python-dateutil==2.4.2 pytz==2015.4 pyzmq==14.7.0 setuptools==18.2 simplegeneric==0.8.1 six==1.9.0 terminado==0.5 tornado==4.2.1 traceback2==1.4.0 traitlets==4.0.0 unittest2==1.1.0 wheel==0.24.0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-1', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The <a href="http
Step2: We will take some time off now to assign each document to a category, to ease our work later on. Since the two last documents refer to classification we will label them as "Relevant" and the first document as "Not relevant". Since we only have two categories, this makes it a binary problem and we will represent "Relevant" as 1 and "Not relevant" as -1.
Step3: We now create our document collection
Step4: Now we will create the object responsible for the hashed BoW representation. We are going to specify that we want a hash size of 8 bits, which will be translated to a dimension of size 2^8 = 256 (powers of 2 are considered to speed up computations) and a tokenizer that creates 5-grams. We will also specify that we want to
Step5: And that was it!<br>
Step6: We have now created our SVM. The parameter C specifies the regularization constant. The best choice for this parameter will usually be selected after a model selection process.<br>
Step7: When the execution finishes, we will have learned our desired linear model! Mind that for large collections the above call can take hours.<br>
Step8: We can see that it misclassified the first document. This has to do with the nature of our overly-simplified toy dataset which doesn't provide enough information. However, another option of the HashedDocDotFeatures class will allow us to extract some more information from the same dataset!<br>
Step9: If we do not specify these numbers, as we did not do before, then, (you maybe have guessed it!) they are set by default to the following values, n=1, k=0!
Step10: Better!<br>
|
<ASSISTANT_TASK:>
Python Code:
import os
import shogun as sg
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
doc_1 = "this is the first document"
doc_2 = "document classification introduction"
doc_3 = "a third document about classification"
document_collection = [doc_1, doc_2, doc_3]
labels = sg.BinaryLabels(np.array([-1, 1, 1], dtype=np.int64))
string_features = sg.StringCharFeatures(document_collection, sg.RAWBYTE)
hash_size = 8
tokenizer = sg.NGramTokenizer(5)
normalize = True
hashed_feats = sg.HashedDocDotFeatures(hash_size, string_features, tokenizer, normalize)
C = 0.1
epsilon = 0.01
svm = sg.create_machine("SVMOcas", C1=C, C2=C, labels=labels, epsilon=epsilon)
_=svm.train(hashed_feats)
predicted_labels = svm.apply()
print(predicted_labels.get("labels"))
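# Quick check of training accuracy -- a minimal sketch, assuming the hand-built
# BinaryLabels also exposes its values via get("labels"), like the predictions above.
train_acc = np.mean(predicted_labels.get("labels") == labels.get("labels"))
print("training accuracy: {:.2f}".format(train_acc))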
k = 3 # number of tokens, up to which we allow it to skip
n = 3 # number of tokens, up to which we allow it to combine
hashed_feats_quad = sg.HashedDocDotFeatures(hash_size, string_features, tokenizer, normalize, n, k)
svm.train(hashed_feats_quad)
predicted_labels = svm.apply()
print(predicted_labels.get("labels"))
import matplotlib.pyplot as plt
# HashedDocDotFeatures results
hashed_training_examples = [5000, 10000, 15000, 20000, 25000, 30000, 50000, 100000]
# For C=1
hashed_C_1_sec = [2682.750000,5202.690000,8120.460000,10846.410000,13944.200000,17016.840000,30496.720000,66302.950000]
hashed_C_1_roc = [0.980730,0.986382,0.988894,0.990666,0.991602,0.991957,0.993680,0.995184]
# For C=0.1
hashed_C_01_sec = [1074.130000,2142.390000,3434.710000,4641.380000,5984.530000,7206.040000,12864.270000,28393.540000]
hashed_C_01_roc = [0.976560,0.982660,0.985251,0.987380,0.988368,0.989022,0.990950,0.993197]
# Spectrum kernel results
kernel_training_examples = [5000, 10000, 15000, 20000, 25000]
# For C=1
kernel_C_1_sec = [2912.410000,6543.220000,10840.550000,16108.360000,19899.610000]
kernel_C_1_roc = [0.971284,0.976628,0.979715,0.982084,0.984355]
# For C=0.1
kernel_C_01_sec = [1441.380000,3261.870000,5071.040000,7568.130000,10436.430000]
kernel_C_01_roc = [0.946308,0.955245,0.961576,0.965204,0.968264]
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.plot(hashed_training_examples, hashed_C_1_sec, 'b')
plt.plot(kernel_training_examples, kernel_C_1_sec, 'r')
plt.title("Time comparison for C=1")
plt.xlabel("Number of examples")
plt.ylabel("Time in seconds")
plt.legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=2)
plt.subplot(1,2,2)
plt.plot(hashed_training_examples, hashed_C_1_roc, 'b')
plt.plot(kernel_training_examples, kernel_C_1_roc, 'r')
plt.title("Area under ROC comparison for C=1")
plt.xlabel("Number of examples")
plt.ylabel("auROC")
_=plt.legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=4)
plt.clf()
plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.plot(hashed_training_examples, hashed_C_01_sec, 'b')
plt.plot(kernel_training_examples, kernel_C_01_sec, 'r')
plt.title("Time comparison for C=0.1")
plt.xlabel("Number of examples")
plt.ylabel("Time in seconds")
plt.ylim((0,70000))
plt.legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=2)
plt.subplot(1,2,2)
plt.plot(hashed_training_examples, hashed_C_01_roc, 'b')
plt.plot(kernel_training_examples, kernel_C_01_roc, 'r')
plt.title("Area under ROC comparison for C=0.1")
plt.xlabel("Number of examples")
plt.ylabel("auROC")
_=plt.legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Starting logging once every 100 milliseconds
Step2: 3. Try to change the input signal during the logging.
Step3: 4. Plot values over time
Step4: 2. Using Arduino Shield
Step5: 2. Starting logging once every 100 milliseconds
Step6: 3. Try to change the input signal during the logging.
Step7: 4. Plot values over time
|
<ASSISTANT_TASK:>
Python Code:
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
from pynq.lib.pmod import Grove_ADC
from pynq.lib.pmod import PMOD_GROVE_G4
grove_adc = Grove_ADC(base.PMODA,PMOD_GROVE_G4)
print("{} V".format(round(grove_adc.read(),4)))
grove_adc.set_log_interval_ms(100)
grove_adc.start_log()
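# Give the logger time to collect samples -- this is the window in which to vary
# the input signal; the five-second pause is an arbitrary choice.
from time import sleep
sleep(5)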
log = grove_adc.get_log()
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(log)), log, 'ro')
plt.title('Grove ADC Voltage Log')
plt.axis([0, len(log), min(log), max(log)])
plt.show()
from pynq.lib.arduino import Grove_ADC
from pynq.lib.arduino import ARDUINO_GROVE_I2C
grove_adc = Grove_ADC(base.ARDUINO,ARDUINO_GROVE_I2C)
print("{} V".format(round(grove_adc.read(),4)))
grove_adc.set_log_interval_ms(100)
grove_adc.start_log()
log = grove_adc.get_log()
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(log)), log, 'ro')
plt.title('Grove ADC Voltage Log')
plt.axis([0, len(log), min(log), max(log)])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Step 1
Step5: Input is encoded as one-hot, 7 digits times 12 possibilities
Step6: Same for output, but at most 4 digits
Step7: Step 2
Step8: Step 3
Step9: Before training lets look at sample input and output
Step10: Step 4
|
<ASSISTANT_TASK:>
Python Code:
# Adapted from
# https://github.com/keras-team/keras/blob/master/examples/addition_rnn.py
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
# let's see what compute devices we have available, hopefully a GPU
sess = tf.Session()
devices = sess.list_devices()
for d in devices:
print(d.name)
# a small sanity check, does tf seem to work ok?
hello = tf.constant('Hello TF!')
print(sess.run(hello))
from tensorflow import keras
print(keras.__version__)
class CharacterTable(object):
    """Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
    """
def __init__(self, chars):
        """Initialize character table.

        # Arguments
            chars: Characters that can appear in the input.
        """
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
def encode(self, C, num_rows):
        """One hot encode given string C.

        # Arguments
            num_rows: Number of rows in the returned one hot encoding. This is
                used to keep the # of rows for each data the same.
        """
x = np.zeros((num_rows, len(self.chars)))
for i, c in enumerate(C):
x[i, self.char_indices[c]] = 1
return x
def decode(self, x, calc_argmax=True):
if calc_argmax:
x = x.argmax(axis=-1)
        return ''.join(self.indices_char[i] for i in x)
class colors:
ok = '\033[92m'
fail = '\033[91m'
close = '\033[0m'
# Parameters for the model and dataset.
TRAINING_SIZE = 50000
DIGITS = 3
# REVERSE = True
REVERSE = False
# Maximum length of input is 'int + int' (e.g., '345+678'). Maximum length of
# int is DIGITS.
MAXLEN = DIGITS + 1 + DIGITS
# All the numbers, plus sign and space for padding.
chars = '0123456789+ '
ctable = CharacterTable(chars)
questions = []
expected = []
seen = set()
print('Generating data...')
while len(questions) < TRAINING_SIZE:
f = lambda: int(''.join(np.random.choice(list('0123456789'))
for i in range(np.random.randint(1, DIGITS + 1))))
a, b = f(), f()
# Skip any addition questions we've already seen
# Also skip any such that x+Y == Y+x (hence the sorting).
key = tuple(sorted((a, b)))
if key in seen:
continue
seen.add(key)
# Pad the data with spaces such that it is always MAXLEN.
q = '{}+{}'.format(a, b)
query = q + ' ' * (MAXLEN - len(q))
ans = str(a + b)
# Answers can be of maximum size DIGITS + 1.
ans += ' ' * (DIGITS + 1 - len(ans))
if REVERSE:
# Reverse the query, e.g., '12+345 ' becomes ' 543+21'. (Note the
# space used for padding.)
query = query[::-1]
questions.append(query)
expected.append(ans)
print('Total addition questions:', len(questions))
questions[0]
print('Vectorization...')
x = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool)
for i, sentence in enumerate(questions):
x[i] = ctable.encode(sentence, MAXLEN)
for i, sentence in enumerate(expected):
y[i] = ctable.encode(sentence, DIGITS + 1)
len(x[0])
len(questions[0])
questions[0]
x[0]
y[0]
expected[0]
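# Round-trip check: decoding the one-hot encoding should reproduce the padded question.
print(ctable.decode(x[0]))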
# Shuffle (x, y) in unison as the later parts of x will almost all be larger
# digits.
indices = np.arange(len(y))
np.random.shuffle(indices)
x = x[indices]
y = y[indices]
# Explicitly set apart 10% for validation data that we never train over.
split_at = len(x) - len(x) // 10
(x_train, x_val) = x[:split_at], x[split_at:]
(y_train, y_val) = y[:split_at], y[split_at:]
print('Training Data:')
print(x_train.shape)
print(y_train.shape)
print('Validation Data:')
print(x_val.shape)
print(y_val.shape)
# input shape: 7 digits, each being 0-9, + or space (12 possibilities)
MAXLEN, len(chars)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, GRU, SimpleRNN, Dense, RepeatVector
# Try replacing LSTM, GRU, or SimpleRNN.
# RNN = LSTM
RNN = SimpleRNN # should be enough since we do not have long sequences and only local dependencies
# RNN = GRU
HIDDEN_SIZE = 128
BATCH_SIZE = 128
model = Sequential()
# encoder
model.add(RNN(units=HIDDEN_SIZE, input_shape=(MAXLEN, len(chars))))
# latent space
encoding_dim = 32
model.add(Dense(units=encoding_dim, activation='relu', name="encoder"))
# decoder: have 4 temporal outputs one for each of the digits of the results
model.add(RepeatVector(DIGITS + 1))
# return_sequences=True tells it to keep all 4 temporal outputs, not only the final one (we need all four digits for the results)
model.add(RNN(units=HIDDEN_SIZE, return_sequences=True))
model.add(Dense(name='classifier', units=len(chars), activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
# input one-hot
x_val[0]
# output "one-hot" scores
model.predict(np.array([x_val[0]]))
# output decoded by only showing highest score for digit
model.predict_classes(np.array([x_val[0]]))
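# Decode those class indices back to characters (the model is untrained at this
# point, so the "answer" is still noise).
print(ctable.decode(model.predict_classes(np.array([x_val[0]]))[0], calc_argmax=False))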
%%time
# Train the model each generation and show predictions against the validation
# dataset.
merged_losses = {
"loss": [],
"val_loss": []
}
for iteration in range(1, 50):
print()
print('-' * 50)
print('Iteration', iteration)
iteration_history = model.fit(x_train, y_train,
batch_size=BATCH_SIZE,
epochs=1,
validation_data=(x_val, y_val))
merged_losses["loss"].append(iteration_history.history["loss"])
merged_losses["val_loss"].append(iteration_history.history["val_loss"])
# Select 10 samples from the validation set at random so we can visualize
# errors.
for i in range(10):
ind = np.random.randint(0, len(x_val))
rowx, rowy = x_val[np.array([ind])], y_val[np.array([ind])]
preds = model.predict_classes(rowx, verbose=0)
q = ctable.decode(rowx[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
print('Q', q[::-1] if REVERSE else q, end=' ')
print('T', correct, end=' ')
if correct == guess:
print(colors.ok + '☑' + colors.close, end=' ')
else:
print(colors.fail + '☒' + colors.close, end=' ')
print(guess)
import matplotlib.pyplot as plt
plt.ylabel('loss')
plt.xlabel('epoch')
plt.yscale('log')
plt.plot(merged_losses['loss'], 'b')
plt.plot(merged_losses['val_loss'], 'r')
plt.legend(['loss', 'validation loss'])
plt.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A better method in Python 3 is -
Step2: These are the words that appear more than 200 times and I have excluded the really common words (greater than 944 times)
|
<ASSISTANT_TASK:>
Python Code:
# 's' and the helpers used below (removeDelimiter, makeSortable, ...) are defined
# in the '%load text_analysis.py' cell near the end; the cells appear in their
# original notebook order, so run that cell first.
s
clean_s = removeDelimiter(s," ",[".",",",";","_","-",":","!","?","\"",")","("])
wordlist = clean_s.split()
dictionary = {}
for word in wordlist:
if word in dictionary:
tmp = dictionary[word]
dictionary[word]=tmp+1
else:
dictionary[word]=1
import operator
sorted_dict = sorted(dictionary.items(), key=operator.itemgetter(1))
sorted(dictionary.items(), key=lambda x: x[1])
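# The ten most frequent words sit at the tail of the ascending sort.
print(sorted_dict[-10:])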
#Much more interesting are the uncommon words
infreq_metric = []
for ordered_words,value_words in sorted_dict:
if value_words == 1:
infreq_metric.append((ordered_words,value_words))
for word in infreq_metric:
print('"{}"'.format(word[0]))
#the very common words
freq_metric = []
for ordered_words,value_words in sorted_dict:
if value_words > 944:
freq_metric.append((ordered_words,value_words))
for pair in freq_metric:
print('The word "{}" appears {} times'.format(pair[0],pair[1]))
freq_metric = []
for ordered_words,value_words in sorted_dict:
    if value_words > 200 and value_words < 944:
freq_metric.append(str(value_words))
print(ordered_words,value_words)
%matplotlib inline
import matplotlib.pyplot as plt

numbs = [int(x) for x in freq_metric]
plt.plot(numbs)
l = [makeSortable(str(dictionary[k])) + " # " + k for k in dictionary.keys()]
for w in sorted(l):
print(w)
count = {}
for k in dictionary.keys():
if dictionary[k] in count:
tmp = count[dictionary[k]]
count[dictionary[k]] = tmp + 1
else:
count[dictionary[k]] = 1
for k in sorted(count.keys()):
print(str(count[k]) + " words appear " + str(k) + " times")
# %load text_analysis.py
# this code is licenced under creative commons licence as long as you
# cite the author: Rene Pickhardt / www.rene-pickhardt.de
# adds leading zeros to a string so all result strings can be ordered
def makeSortable(w):
l = len(w)
tmp = ""
for i in range(5-l):
tmp = tmp + "0"
tmp = tmp + w
return tmp
# Replaces every delimiter from the list l found in text s with the string new
def removeDelimiter(s, new, l):
    for c in l:
        s = s.replace(c, new)
    return s
def analyzeWords(s):
s = removeDelimiter(s," ",[".",",",";","_","-",":","!","?","\"",")","("])
wordlist = s.split()
dictionary = {}
for word in wordlist:
if word in dictionary:
tmp = dictionary[word]
dictionary[word]=tmp+1
else:
dictionary[word]=1
l = [makeSortable(str(dictionary[k])) + " # " + k for k in dictionary.keys()]
for w in sorted(l):
print(w)
count = {}
for k in dictionary.keys():
if dictionary[k] in count:
tmp = count[dictionary[k]]
count[dictionary[k]] = tmp + 1
else:
count[dictionary[k]] = 1
for k in sorted(count.keys()):
print(str(count[k]) + " words appear " + str(k) + " times")
def differentWords(s):
s = removeDelimiter(s," ",[".",",",";","_","-",":","!","?","\"",")","("])
wordlist = s.split()
count = 0
dictionary = {}
for word in wordlist:
if word in dictionary:
tmp = dictionary[word]
dictionary[word]=tmp+1
else:
dictionary[word]=1
count = count + 1
print(str(count) + " different words")
print("every word was used " + str(float(len(wordlist))/float(count)) + " times on average")
return count
def analyzeSentences(s):
s = removeDelimiter(s,".",[".",";",":","!","?"])
sentenceList = s.split(".")
wordList = s.split()
wordCount = len(wordList)
sentenceCount = len(sentenceList)
print(str(wordCount) + " words in " + str(sentenceCount) + " sentences ==> " + str(float(wordCount)/float(sentenceCount)) + " words per sentence")
max = 0
satz = ""
for w in sentenceList:
if len(w) > max:
            max = len(w)
            satz = w
    print(satz + " (length: " + str(len(satz)) + ")")
texts = ["ulysses.txt"]
for text in texts:
print(text)
datei = open(text,'r')
s = datei.read().lower()
analyzeSentences(s)
differentWords(s)
analyzeWords(s)
datei.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Since finding natural neighbors already calculates circumcenters and circumradii, return
Step2: We can then use the information in tri_info later.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.gridding.triangles import find_natural_neighbors
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
neighbors, tri_info = find_natural_neighbors(tri, test_points)
print(neighbors)
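# Inspect the mapping (assumption: find_natural_neighbors returns a dict of
# test-point index -> triangles whose circumcircle contains that point, so an
# empty list means the point lies outside every circumcircle).
for point_idx, tri_indices in neighbors.items():
    print('test point {} -> triangles {}'.format(point_idx, tri_indices))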
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenter and radius information from tri_info, plot circumcircles and
# circumcenters for each triangle.
for _idx, item in tri_info.items():
ax.plot(item['cc'][0], item['cc'][1], 'k.', markersize=5)
circ = plt.Circle(item['cc'], item['r'], edgecolor='k', facecolor='none',
transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Composing Learning Algorithms
Step3: NOTE
Step5: There are a few important points about the code above. First, it keeps track of the number of examples seen, as this will constitute the weight of the client update (when computing an average across clients).
Step6: Composing a Learning Algorithm
Step8: In order to feed the dataset into our model, the data is flattened and converted into tuples of the form (flattened_image_vector, label).
Step9: Preparing the model
Step10: Preparing the optimizers
Step11: Defining the building blocks
Step12: Composing the building blocks
Step13: Running the algorithm
Step14: As expected, the client work has an empty state (remember the client work code above!). However, other building blocks may have non-empty state. For example, the finalizer keeps track of how many iterations have occurred. Since next has not been run yet, it has a state of 0.
Step15: Now run a training round.
Step16: The output of this (tff.learning.templates.LearningProcessOutput) has both a .state and .metrics output. Let's look at both.
Step17: Clearly, the finalizer state has incremented by one, as one round of .next has been run.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
from typing import Callable
import tensorflow as tf
import tensorflow_federated as tff
@tf.function
def client_update(model: tff.learning.Model,
dataset: tf.data.Dataset,
server_weights: tff.learning.ModelWeights,
client_optimizer: tf.keras.optimizers.Optimizer):
  """Performs training (using the server model weights) on the client's dataset."""
# Initialize the client model with the current server weights.
client_weights = tff.learning.ModelWeights.from_model(model)
tf.nest.map_structure(lambda x, y: x.assign(y),
client_weights, server_weights)
# Use the client_optimizer to update the local model.
# Keep track of the number of examples as well.
num_examples = 0.0
for batch in dataset:
with tf.GradientTape() as tape:
# Compute a forward pass on the batch of data
outputs = model.forward_pass(batch)
num_examples += tf.cast(outputs.num_examples, tf.float32)
# Compute the corresponding gradient
grads = tape.gradient(outputs.loss, client_weights.trainable)
# Compute the gradient norm and clip
gradient_norm = tf.linalg.global_norm(grads)
if gradient_norm > 1:
grads = tf.nest.map_structure(lambda x: x/gradient_norm, grads)
grads_and_vars = zip(grads, client_weights.trainable)
# Apply the gradient using a client optimizer.
client_optimizer.apply_gradients(grads_and_vars)
# Compute the difference between the server weights and the client weights
client_update = tf.nest.map_structure(tf.subtract,
client_weights.trainable,
server_weights.trainable)
return tff.learning.templates.ClientResult(
update=client_update, update_weight=num_examples)
def build_gradient_clipping_client_work(
model_fn: Callable[[], tff.learning.Model],
optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer],
) -> tff.learning.templates.ClientWorkProcess:
  """Creates a client work process that uses gradient clipping."""
with tf.Graph().as_default():
# Wrap model construction in a graph to avoid polluting the global context
# with variables created for this model.
model = model_fn()
data_type = tff.SequenceType(model.input_spec)
model_weights_type = tff.learning.framework.weights_type_from_model(model)
@tff.federated_computation
def initialize_fn():
return tff.federated_value((), tff.SERVER)
@tff.tf_computation(model_weights_type, data_type)
def client_update_computation(model_weights, dataset):
model = model_fn()
optimizer = optimizer_fn()
return client_update(model, dataset, model_weights, optimizer)
@tff.federated_computation(
initialize_fn.type_signature.result,
tff.type_at_clients(model_weights_type),
tff.type_at_clients(data_type)
)
def next_fn(state, model_weights, client_dataset):
client_result = tff.federated_map(
client_update_computation, (model_weights, client_dataset))
# Return empty measurements, though a more complete algorithm might
# measure something here.
measurements = tff.federated_value((), tff.SERVER)
return tff.templates.MeasuredProcessOutput(state, client_result,
measurements)
return tff.learning.templates.ClientWorkProcess(
initialize_fn, next_fn)
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data()
NUM_CLIENTS = 10
BATCH_SIZE = 20
def preprocess(dataset):
def batch_format_fn(element):
    """Flatten a batch of EMNIST data and return a (features, label) tuple."""
return (tf.reshape(element['pixels'], [-1, 784]),
tf.reshape(element['label'], [-1, 1]))
return dataset.batch(BATCH_SIZE).map(batch_format_fn)
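# Peek at one preprocessed batch to confirm the (flattened_image_vector, label) shapes.
example_dataset = preprocess(
    emnist_train.create_tf_dataset_for_client(emnist_train.client_ids[0]))
example_batch = next(iter(example_dataset))
print(example_batch[0].shape, example_batch[1].shape)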
client_ids = sorted(emnist_train.client_ids)[:NUM_CLIENTS]
federated_train_data = [preprocess(emnist_train.create_tf_dataset_for_client(x))
for x in client_ids
]
def create_keras_model():
initializer = tf.keras.initializers.GlorotNormal(seed=0)
return tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer=initializer),
tf.keras.layers.Softmax(),
])
def model_fn():
keras_model = create_keras_model()
return tff.learning.from_keras_model(
keras_model,
input_spec=federated_train_data[0].element_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
client_optimizer_fn = lambda: tf.keras.optimizers.SGD(learning_rate=0.01)
server_optimizer_fn = lambda: tf.keras.optimizers.SGD(learning_rate=1.0)
@tff.tf_computation()
def initial_model_weights_fn():
return tff.learning.ModelWeights.from_model(model_fn())
model_weights_type = initial_model_weights_fn.type_signature.result
distributor = tff.learning.templates.build_broadcast_process(model_weights_type)
client_work = build_gradient_clipping_client_work(model_fn, client_optimizer_fn)
# TFF aggregators use a factory pattern, which create an aggregator
# based on the output type of the client work. This also uses a float (the number
# of examples) to govern the weight in the average being computed.)
aggregator_factory = tff.aggregators.MeanFactory()
aggregator = aggregator_factory.create(model_weights_type.trainable,
tff.TensorType(tf.float32))
finalizer = tff.learning.templates.build_apply_optimizer_finalizer(
server_optimizer_fn, model_weights_type)
fed_avg_with_clipping = tff.learning.templates.compose_learning_process(
initial_model_weights_fn,
distributor,
client_work,
aggregator,
finalizer
)
state = fed_avg_with_clipping.initialize()
state.client_work
state.finalizer
learning_process_output = fed_avg_with_clipping.next(state, federated_train_data)
learning_process_output.state.finalizer
learning_process_output.metrics
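# Further rounds thread the state through the process -- a minimal sketch with
# an arbitrary number of extra rounds.
state = learning_process_output.state
for round_num in range(2, 5):
    output = fed_avg_with_clipping.next(state, federated_train_data)
    state = output.state
    print('round {:2d}: {}'.format(round_num, output.metrics))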
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at gene's median transcript count
Step2: Clean data matrix to be compatible with the cluster labels and identities
|
<ASSISTANT_TASK:>
Python Code:
# 'n_transcripts_per_gene', 'table1_t', 'sns' and 'plt' are assumed to come from
# earlier cells of the original notebook; this record picks up mid-analysis.
(n_transcripts_per_gene > 1e3).sum()
n_transcripts_per_gene[n_transcripts_per_gene > 1e4]
median_transcripts_per_gene = table1_t.median()
median_transcripts_per_gene.head()
sns.distplot(median_transcripts_per_gene)
fig = plt.gcf()
fig.savefig('median_transcripts_per_gene.png')
data = median_transcripts_per_gene
mask = data > 0
sns.distplot(data[mask])
fig = plt.gcf()
fig.savefig('median_transcripts_per_gene_greater0.png')
gene_symbols = table1_t.columns.map(lambda x: x.split(':')[-1].upper())
gene_symbols.name = 'symbol'
table1_t.columns = gene_symbols
table1_t.head()
barcodes = 'r1_' + table1_t.index
barcodes.name = 'barcode'
table1_t.index = barcodes
table1_t.head()
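# Sanity check: the new barcode index should be a unique label per cell.
assert table1_t.index.is_unique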
table1_t.to_csv('expression_table1.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll use all columns except Gender for this tutorial. We could use gender by converting the gender to a numeric value (e.g., 0 for Male, 1 for Female) but for the purproses of this post, we'll just skip this column.
Step2: The 'Dataset' column is the value we are trying to predict...whether the user has liver disease or not so we'll that as our "Y" and the other columns for our "X" array.
Step3: Before we run our machine learning models, we need to set a random number to use to seed them. This can be any random number that you'd like it to be. Some people like to use a random number generator but for the purposes of this, I'll just set it to 12 (it could just as easily be 1 or 3 or 1023 or any other number).
Step4: Now we need to set up our models that we'll be testing out. We'll set up a list of the models and give them each a name. Additionally, I'm going to set up the blank arrays/lists for the outcomes and the names of the models to use for comparison.
Step5: We are going to use a k-fold validation to evaluate each algorithm and will run through each model with a for loop, running the analysis and then storing the outcomes into the lists we created above. We'll use a 10-fold cross validation.
Step6: From the above, it looks like the Logistic Regression, Support Vector Machine and Linear Discrimation Analysis methods are providing the best results. If we take a look at a box plot to see what the accuracy is for each cross validation fold, we can see just how good each does relative to each other and their means.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,10)
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
#read in the data
data = pd.read_csv('indian_liver_patient.csv')
data.head()
data_to_use = data.copy()  # copy so the original frame is not mutated below
del data_to_use['Gender']
data_to_use.dropna(inplace=True)
data_to_use.head()
values = data_to_use.values
Y = values[:,9]
X = values[:,0:9]
random_seed = 12
outcome = []
model_names = []
models = [('LogReg', LogisticRegression()),
('SVM', SVC()),
('DecTree', DecisionTreeClassifier()),
('KNN', KNeighborsClassifier()),
('LinDisc', LinearDiscriminantAnalysis()),
('GaussianNB', GaussianNB())]
for model_name, model in models:
k_fold_validation = model_selection.KFold(n_splits=10, random_state=random_seed)
results = model_selection.cross_val_score(model, X, Y, cv=k_fold_validation, scoring='accuracy')
outcome.append(results)
model_names.append(model_name)
output_message = "%s| Mean=%f STD=%f" % (model_name, results.mean(), results.std())
print(output_message)
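# Rank the models by mean cross-validated accuracy.
for name, result in sorted(zip(model_names, outcome),
                           key=lambda pair: pair[1].mean(), reverse=True):
    print("{}: {:.3f}".format(name, result.mean()))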
fig = plt.figure()
fig.suptitle('Machine Learning Model Comparison')
ax = fig.add_subplot(111)
plt.boxplot(outcome)
ax.set_xticklabels(model_names)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Variables
Step7: 3. Key Properties --> Seawater Properties
Step8: 3.2. Ocean Freezing Point Value
Step9: 4. Key Properties --> Resolution
Step10: 4.2. Canonical Horizontal Resolution
Step11: 4.3. Number Of Horizontal Gridpoints
Step12: 5. Key Properties --> Tuning Applied
Step13: 5.2. Target
Step14: 5.3. Simulations
Step15: 5.4. Metrics Used
Step16: 5.5. Variables
Step17: 6. Key Properties --> Key Parameter Values
Step18: 6.2. Additional Parameters
Step19: 7. Key Properties --> Assumptions
Step20: 7.2. On Diagnostic Variables
Step21: 7.3. Missing Processes
Step22: 8. Key Properties --> Conservation
Step23: 8.2. Properties
Step24: 8.3. Budget
Step25: 8.4. Was Flux Correction Used
Step26: 8.5. Corrected Conserved Prognostic Variables
Step27: 9. Grid --> Discretisation --> Horizontal
Step28: 9.2. Grid Type
Step29: 9.3. Scheme
Step30: 9.4. Thermodynamics Time Step
Step31: 9.5. Dynamics Time Step
Step32: 9.6. Additional Details
Step33: 10. Grid --> Discretisation --> Vertical
Step34: 10.2. Number Of Layers
Step35: 10.3. Additional Details
Step36: 11. Grid --> Seaice Categories
Step37: 11.2. Number Of Categories
Step38: 11.3. Category Limits
Step39: 11.4. Ice Thickness Distribution Scheme
Step40: 11.5. Other
Step41: 12. Grid --> Snow On Seaice
Step42: 12.2. Number Of Snow Levels
Step43: 12.3. Snow Fraction
Step44: 12.4. Additional Details
Step45: 13. Dynamics
Step46: 13.2. Transport In Thickness Space
Step47: 13.3. Ice Strength Formulation
Step48: 13.4. Redistribution
Step49: 13.5. Rheology
Step50: 14. Thermodynamics --> Energy
Step51: 14.2. Thermal Conductivity
Step52: 14.3. Heat Diffusion
Step53: 14.4. Basal Heat Flux
Step54: 14.5. Fixed Salinity Value
Step55: 14.6. Heat Content Of Precipitation
Step56: 14.7. Precipitation Effects On Salinity
Step57: 15. Thermodynamics --> Mass
Step58: 15.2. Ice Vertical Growth And Melt
Step59: 15.3. Ice Lateral Melting
Step60: 15.4. Ice Surface Sublimation
Step61: 15.5. Frazil Ice
Step62: 16. Thermodynamics --> Salt
Step63: 16.2. Sea Ice Salinity Thermal Impacts
Step64: 17. Thermodynamics --> Salt --> Mass Transport
Step65: 17.2. Constant Salinity Value
Step66: 17.3. Additional Details
Step67: 18. Thermodynamics --> Salt --> Thermodynamics
Step68: 18.2. Constant Salinity Value
Step69: 18.3. Additional Details
Step70: 19. Thermodynamics --> Ice Thickness Distribution
Step71: 20. Thermodynamics --> Ice Floe Size Distribution
Step72: 20.2. Additional Details
Step73: 21. Thermodynamics --> Melt Ponds
Step74: 21.2. Formulation
Step75: 21.3. Impacts
Step76: 22. Thermodynamics --> Snow Processes
Step77: 22.2. Snow Aging Scheme
Step78: 22.3. Has Snow Ice Formation
Step79: 22.4. Snow Ice Formation Scheme
Step80: 22.5. Redistribution
Step81: 22.6. Heat Diffusion
Step82: 23. Radiative Processes
Step83: 23.2. Ice Radiation Transmission
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-3', 'seaice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.model.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.variables.prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea ice temperature"
# "Sea ice concentration"
# "Sea ice thickness"
# "Sea ice volume per grid cell area"
# "Sea ice u-velocity"
# "Sea ice v-velocity"
# "Sea ice enthalpy"
# "Internal ice stress"
# "Salinity"
# "Snow temperature"
# "Snow depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS-10"
# "Constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.seawater_properties.ocean_freezing_point_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.target')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.simulations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.metrics_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.tuning_applied.variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.typical_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ice strength (P*) in units of N m{-2}"
# "Snow conductivity (ks) in units of W m{-1} K{-1} "
# "Minimum thickness of ice created in leads (h0) in units of m"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.key_parameter_values.additional_parameters')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.description')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.on_diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.assumptions.missing_processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.properties')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Mass"
# "Salt"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ocean grid"
# "Atmosphere Grid"
# "Own Grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Structured grid"
# "Unstructured grid"
# "Adaptive grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite differences"
# "Finite elements"
# "Finite volumes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.thermodynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.dynamics_time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.horizontal.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.layering')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Zero-layer"
# "Two-layers"
# "Multi-layers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.number_of_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.discretisation.vertical.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.has_mulitple_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.number_of_categories')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.category_limits')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.ice_thickness_distribution_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.seaice_categories.other')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.has_snow_on_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.number_of_snow_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.snow_fraction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.grid.snow_on_seaice.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.horizontal_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.transport_in_thickness_space')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Incremental Re-mapping"
# "Prather"
# "Eulerian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.ice_strength_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Hibler 1979"
# "Rothrock 1975"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.redistribution')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rafting"
# "Ridging"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.dynamics.rheology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Free-drift"
# "Mohr-Coloumb"
# "Visco-plastic"
# "Elastic-visco-plastic"
# "Elastic-anisotropic-plastic"
# "Granular"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.enthalpy_formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice latent heat (Semtner 0-layer)"
# "Pure ice latent and sensible heat"
# "Pure ice latent and sensible heat + brine heat reservoir (Semtner 3-layer)"
# "Pure ice latent and sensible heat + explicit brine inclusions (Bitz and Lipscomb)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.thermal_conductivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pure ice"
# "Saline ice"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Conduction fluxes"
# "Conduction and radiation heat fluxes"
# "Conduction, radiation and latent heat transport"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.basal_heat_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heat Reservoir"
# "Thermal Fixed Salinity"
# "Thermal Varying Salinity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.fixed_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.heat_content_of_precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.energy.precipitation_effects_on_salinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.new_ice_formation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_vertical_growth_and_melt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_lateral_melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Floe-size dependent (Bitz et al 2001)"
# "Virtual thin ice melting (for single-category)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.ice_surface_sublimation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.mass.frazil_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.has_multiple_sea_ice_salinities')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.sea_ice_salinity_thermal_impacts')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.mass_transport.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.salinity_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Prescribed salinity profile"
# "Prognostic salinity profile"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.constant_salinity_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.salt.thermodynamics.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_thickness_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Virtual (enhancement of thermal conductivity, thin ice melting)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Parameterised"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.ice_floe_size_distribution.additional_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.are_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.formulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flocco and Feltham (2010)"
# "Level-ice melt ponds"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.melt_ponds.impacts')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Albedo"
# "Freshwater"
# "Heat"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_aging')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_aging_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.has_snow_ice_formation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.snow_ice_formation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.redistribution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.thermodynamics.snow_processes.heat_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Single-layered heat diffusion"
# "Multi-layered heat diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.surface_albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Parameterized"
# "Multi-band albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.seaice.radiative_processes.ice_radiation_transmission')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Delta-Eddington"
# "Exponential attenuation"
# "Ice radiation transmission per category"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But, this is a painful way to construct responses. Hence, other high level abstractions are available.
Step2: Notice how okResponse will always add a Content-Length header for you.
Step3: Let's add some content to our response packet
Step4: Note, how okResponse automatically added a Content-Length header for us.
Step5: You can pass a custom value for min_compression_length kwarg to okResponse.
Step6: Internally, okResponse uses build_http_response and hence you can also pass any argument also accepted by build_http_response. Example, it supports a conn_close argument which will add a Connection
Step7: Chunked Encoding
Step8: If we omit the min_compression_length flag
|
<ASSISTANT_TASK:>
Python Code:
from proxy.http.parser import HttpParser, httpParserTypes
from proxy.common.constants import HTTP_1_1
response = HttpParser(httpParserTypes.RESPONSE_PARSER)
response.code = b'200'
response.reason = b'OK'
response.version = HTTP_1_1
print(response.build_response())
from proxy.http.responses import okResponse
print(okResponse().tobytes())
response = okResponse(
headers={
b'X-Custom-Header': b'my value',
},
)
print(response.tobytes())
response = okResponse(
content=b'Hello World',
headers={
b'X-Custom-Header': b'my value',
},
)
print(response.tobytes())
response = okResponse(
content=b'H' * 21,
headers={
b'X-Custom-Header': b'my value',
},
)
print(response.tobytes())
response = okResponse(
content=b'H' * 21,
headers={
b'Host': b'jaxl.com',
},
min_compression_length=21,
)
print(response.tobytes())
response = okResponse(
content=b'Hello World',
headers={
b'Host': b'jaxl.com',
},
conn_close=True,
)
print(response.tobytes())
from proxy.http.parser import ChunkParser
chunks = ChunkParser.to_chunks(b'Hello World', chunk_size=5)
response = okResponse(
content=chunks,
headers={
b'Transfer-Encoding': b'chunked',
},
# Avoid compressing chunks for demo purposes here
# Ideally you should omit this flag and send
# compressed chunks.
min_compression_length=len(chunks),
)
print(response.tobytes())
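# The raw chunks show the wire format: hex length prefix, CRLF, data, CRLF,
# terminated by a zero-length chunk (a sketch -- e.g. b'5\r\nHello\r\n...0\r\n\r\n').
print(chunks)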
response = okResponse(
content=chunks,
headers={
b'Transfer-Encoding': b'chunked',
},
)
print(response.tobytes())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As a reminder, the equations describing a system in State Space Form are the measurement equation
Step2: First, we will look at the maximum a posteriori point (MAP)
Step3: The result seems a bit off with respect to the known true values. Let's sample the posterior to obtain a distribution of possible values for these parameters
Step4: As a final remark, note that a simple scatter plot of the samples shows that the chosen parametrization is probably not optimal, with a high correlation
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
%matplotlib inline
# True values
T = 500 # Time steps
sigma2_eps0 = 3 # Variance of the observation noise
sigma2_eta0 = 10 # Variance in the update of the mean
# Simulate data
np.random.seed(12345)
eps = np.random.normal(scale=sigma2_eps0**0.5, size=T)
eta = np.random.normal(scale=sigma2_eta0**0.5, size=T)
mu = np.cumsum(eta)
y = mu + eps
# Plot the time series
fig, ax = plt.subplots(figsize=(13,2))
ax.fill_between(np.arange(T), 0, y, facecolor=(0.7,0.7,1), edgecolor=(0,0,1))
ax.set(xlabel='$T$', title='Simulated series');
import kalman  # assumed to be a local helper exposing a pymc3-compatible KalmanFilter distribution
import pymc3
from pymc3 import Model, HalfCauchy
with Model() as model:
ɛ_σ2 = HalfCauchy(name='ɛ_σ2', beta=1e6)
η_σ2 = HalfCauchy(name='η_σ2', beta=1e6)
Z = np.array(1.)
d = np.array(0.)
H = ɛ_σ2
T = np.array(1.)
c = np.array(0.)
R = np.array(1.)
Q = η_σ2
a0 = np.array(0.)
P0 = np.array(1e6)
ts = kalman.KalmanFilter('ts', Z, d, H, T, c, R, Q, a0, P0, observed=y)
import re
with model:
MAP = pymc3.find_MAP()
# We need to undo the log transform
{re.sub('_log__', '', k): np.exp(v) for k,v in MAP.items()}
with model:
trace = pymc3.sample()
pymc3.traceplot(trace);
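# Numerical posterior summary for both variances.
print(pymc3.summary(trace))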
plt.scatter(trace['η_σ2'], trace['ɛ_σ2'])
plt.setp(plt.gca(), 'xlabel', 'Update local level variance',
'ylabel', 'Observation noise variance');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: URL builder
Step2: Fahrenheit to Celsius
Step3: Fetch JSON data from URL
|
<ASSISTANT_TASK:>
Python Code:
# Imports for the cells below (the original notebook omitted them).
import configparser as ConfigParser
import datetime as dt
import json
import os
import time

import requests
from requests.exceptions import ReadTimeout
from geopy.geocoders import Nominatim
from tqdm import tqdm

config = ConfigParser.RawConfigParser()
config.read('synchronization.cfg')
api_key = config.get('Darksky', 'api_key')
geolocator = Nominatim()
location = geolocator.geocode('Muntstraat 10 Leuven')
latitude = location.latitude
longitude = location.longitude
base_url = config.get('Darksky', 'base_url') + api_key \
+ '/' + str(latitude) + ',' + str(longitude) + ','
def url_builder(start_date, end_date=dt.datetime.now()):
url_list = []
delta = end_date - start_date
for counter in range(delta.days):
timestamp = str(time.mktime((start_date + dt.timedelta(days=counter)).timetuple()))[:-2]
if os.path.isfile('local_data/full_data_' + timestamp + '.json'):
continue
full_url = base_url + timestamp
url_list.append(full_url)
return url_list
url_list = url_builder(dt.datetime(2017,6,12))
len(url_list)
def f_t_c(fahrenheit):
return (((fahrenheit - 32) * 5.0) / 9.0)
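# Quick check of the converter: 212 F should map to 100 C.
print(f_t_c(212.0))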
def fetch_and_store_json(url):
try:
request = requests.get(url=url, timeout=10)
except ReadTimeout as t:
        print("Read timeout")
request = None
    # Keep retrying until a response arrives; swallow further timeouts so the
    # retry loop does not crash on a slow endpoint.
    while request is None:
        try:
            request = requests.get(url=url, timeout=1)
        except ReadTimeout:
            pass
content = json.loads(request.content)
storage = open('local_data/full_data_' + url.split(',')[2] + '.json', 'w')
#storage.write(json.dumps(content)) #for BigQuery ready json file
storage.write(json.dumps(content, separators=(',', ': '), indent=5)) #For clean indentation
storage.close()
for url in tqdm(url_list):
fetch_and_store_json(url)
<END_TASK>
|