text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Missão
Step2: Informações Sobre os Consumidores
Step3: Análise Geral de Compras
Step4: Análise Demográfica
Step5: Informações Demográficas Por Gênero
Step6: Análise de Compras Por Gênero
Step7: Consumidores Mais Populares (Top 5)
Step8: Itens Mais Populares
Step9: Itens Mais Lucrativos
|
<ASSISTANT_TASK:>
Python Code:
# Python language version used in this notebook
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# Imports
import pandas as pd
import numpy as np
# Load the purchases dataset (JSON, one record per purchase)
load_file = "dados_compras.json"
purchase_file = pd.read_json(load_file, orient = "records")
purchase_file.head()
# Implement your solution here
# Implement your solution here
# Implement your solution here
# Implement your solution here
# Implement your solution here
# Implement your solution here
# Implement your solution here
# Implement your solution here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, import the compare module
Step2: Then load and visulalize the table
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import compare
from compare import load_sourmash_csv
# File name
load_sourmash_csv('SRR606249.pe.trim2and30_comparison.k51.csv')
from compare import create_cluster_map
#Input file name, output image name, title
create_cluster_map("SRR606249.pe.trim2and30_comparison.k51.csv", "Yep.png", 'Shakya Complete and Subsampled with Variable Quality Trimming and K = 51')
from compare import sort_by_similarity
# Input file name, output file name
sort_by_similarity("SRR606249.pe.trim2and30_comparison.k51.csv", "sorted.SRR606249.pe.trim2and30_comparison.k51.csv")
from compare import create_tsne
# Input file name, output image name
# Fix: the output filename contained a stray ']' ("yes.png]").
create_tsne("SRR606249.pe.trim2and30_comparison.k51.csv", "yes.png")
from compare import create_mds_plot
#Input file name, output image name
create_mds_plot("SRR606249.pe.trim2and30_comparison.k51.csv", "yep.png")
import pandas as pd
import seaborn as sns
import numpy
from matplotlib import pyplot
from sklearn import manifold
import os.path
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import plotly
import plotly.plotly as py
df_csv = pd.read_csv("SRR606249.pe.trim2and30_comparison.k51.csv")
#def create_mds_plot(filename, save_fig):
m = numpy.loadtxt(open("SRR606249.pe.trim2and30_comparison.k51.csv"), delimiter="," , skiprows=1)
from sklearn.manifold import mds
from sklearn.preprocessing import StandardScaler
data_std = StandardScaler().fit_transform(m)
from sklearn.decomposition import PCA
pca = PCA(n_components=8, svd_solver='full')
data_pca = pca.fit_transform(data_std)
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9,
dissimilarity="euclidean", n_jobs=1).fit_transform(m)
df = pd.DataFrame(mds)
df.columns=['t1','t2']
# Rename index with column names - path
x = dict([(i,os.path.basename(i)) for i in df_csv.columns])
dfnew = df_csv.rename(index=str, columns=x)
dfnew ['']= dfnew.columns
output = dfnew.set_index('')
#output
#df['labels'] = output.columns
#df
#Convert to df to dic
new_output = output.to_dict()
new_output
#df_new
#pyplot.scatter(df.t1, df.t2, label=df['labels'])
py.plot(data=new_output)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want to plot the distribution of the mutations along the chromosomes, so, we first read the positions of the mutations (read from a random sample of 100,000 mutations)
Step2: We want to add information of the positions of the centromeric regions and the chromosome boundaries. We read this from the table data/chromosome-data.tsv
Step3: To ensure the chromosomes are plotted in the correct order, we provide a list that defines that order
Step4: Finally, we can plot the mutations
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from collections import defaultdict
from ICGC_data_parser import SSM_Reader
distribution = defaultdict(list)
for record in SSM_Reader(filename='data/ssm_sample.vcf'):
# Associate CHROMOSOME -> [MUTATION POSITIONS]
distribution[record.CHROM].append(record.POS)
from collections import namedtuple
# Create a custom class whose objects
# hold information of a chromosome
Chromosome = namedtuple('Chromosome', ['length',
'centromere_start',
'centromere_end'])
import pandas as pd
# Open the file with the information of the centromeric regions
all_data = pd.read_table('data/chromosome-data.tsv',
delimiter='\t')
# Filter for human data
human_data = all_data[ all_data['species'] == 'Homo sapiens' ]
chromosomes = {}
for _, record in human_data.iterrows():
chrom = record['chromosome']
length = record['chromosome length (bp)']
c_start = record['centromeric region start']
c_end = record['centromeric region end']
chromosomes[chrom] = Chromosome(length, c_start, c_end)
chrom_names = [str(i+1) for i in range(22)] + ['X', 'Y', 'MT']
for chrom in chrom_names:
fig, ax = plt.subplots(figsize=(8, 2))
# Main plot
ax.hist(distribution[chrom], bins=300)
ax.set(title=f'Chromosome {chrom}')
if chrom in chromosomes:
# Fetch data on chromosome
# length and centromere positions
chrom_data = chromosomes[chrom]
# Chromosome boundaries
ax.axvline(chrom_data.length, ls='--', color='purple')
ax.axvline(0, ls='--', color='purple')
# Chromosome centromeres
ax.axvline(chrom_data.centromere_end, ls=':', color='purple')
ax.axvline(chrom_data.centromere_start, ls=':', color='purple')
plt.show()
chrom = '10'
fig, ax = plt.subplots(figsize=(13, 3))
# Main plot
ax.hist(distribution[chrom], bins=300)
ax.set(title=f'Chromosome {chrom}')
if chrom in chromosomes:
# Fetch data on chromosome
# length and centromere positions
chrom_data = chromosomes[chrom]
# Chromosome boundaries
ax.axvline(chrom_data.length, ls='--', color='purple')
ax.axvline(0, ls='--', color='purple')
# Chromosome centromeres
ax.axvline(chrom_data.centromere_end, ls=':', color='purple')
ax.axvline(chrom_data.centromere_start, ls=':', color='purple')
plt.savefig('chromosome-mutations.png')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Chicago taxi fare training experience
Step2: Query dataset
Step3: Column info
Step4: Raw descriptive statistics
Step5: Feature engineering
Step6: Remaining null values per column after feature engineering
Step7: Data profiling
Step8: Visual dropoff locations
Step9: Location histograms
Step10: Time based explorations
Step11: Trip loginess
Step12: Fare by trip start hour
Step13: Split dataframe to examples and output
Step14: Training pipeline
Step15: Option 1
Step16: Option 2
Step17: Prediction test
Step18: Cross validation score to test set
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from pandas_profiling import ProfileReport
from scipy import stats
from sklearn.ensemble import RandomForestRegressor
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
# MLflow
import mlflow
import mlflow.sklearn
# plotting libraries:
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
# Google clients
import google.auth
from google.cloud import bigquery
from google.cloud import bigquery_storage
# Set default appearance
# - overide maplot libs ugly colours.
# - default figure size
sns.set(color_codes=True)
mpl.rcParams['figure.figsize'] = [13, 8]
%matplotlib inline
BQ_DATASET = 'chicago_taxi_trips'
BQ_TABLE = 'taxi_trips'
# SQL template for sampling Chicago taxi trips. The single {} placeholder
# receives a predicate on MOD(ABS(FARM_FINGERPRINT(unique_key)), 100)
# (e.g. 'between 0 and 99') so deterministic train/test slices can be drawn.
# Fix: the original assignment lost its triple quotes, leaving bare SQL
# text after '=' (a SyntaxError).
BQ_QUERY = """
with tmp_table as (
SELECT trip_seconds, trip_miles, fare, tolls,
  company, pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude,
  DATETIME(trip_start_timestamp, 'America/Chicago') trip_start_timestamp,
  DATETIME(trip_end_timestamp, 'America/Chicago') trip_end_timestamp,
  CASE WHEN (pickup_community_area IN (56, 64, 76)) OR (dropoff_community_area IN (56, 64, 76)) THEN 1 else 0 END is_airport,
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE
  dropoff_latitude IS NOT NULL and
  dropoff_longitude IS NOT NULL and
  pickup_latitude IS NOT NULL and
  pickup_longitude IS NOT NULL and
  fare > 0 and
  trip_miles > 0 and
  MOD(ABS(FARM_FINGERPRINT(unique_key)), 100) {}
ORDER BY RAND()
LIMIT 20000)
SELECT *,
    EXTRACT(YEAR FROM trip_start_timestamp) trip_start_year,
    EXTRACT(MONTH FROM trip_start_timestamp) trip_start_month,
    EXTRACT(DAY FROM trip_start_timestamp) trip_start_day,
    EXTRACT(HOUR FROM trip_start_timestamp) trip_start_hour,
    FORMAT_DATE('%a', DATE(trip_start_timestamp)) trip_start_day_of_week
FROM tmp_table
"""
# Create BigQuery client
credentials, your_project_id = google.auth.default(
scopes=['https://www.googleapis.com/auth/cloud-platform']
)
bqclient = bigquery.Client(credentials=credentials, project=your_project_id,)
bqstorageclient = bigquery_storage.BigQueryReadClient(credentials=credentials)
df = (
bqclient.query(BQ_QUERY.format('between 0 and 99'))
.result()
.to_dataframe(bqstorage_client=bqstorageclient)
)
display(df.info())
display(df.describe())
def feature_engineering(data):
    """Clean and enrich the raw taxi-trip dataframe.

    Steps:
      1. Fill missing 'company' with 'N/A' and missing 'tolls' with 0.
      2. Drop any remaining rows containing nulls.
      3. Add 'abs_distance': straight-line pickup->dropoff distance
         (hypot of the lat/lon deltas, scaled by 100).
      4. Remove outliers: rows where any of trip_seconds, trip_miles,
         fare or abs_distance has |z-score| >= 3.
      5. Round pickup/dropoff coordinates to 3 decimals to reduce
         location accuracy.

    Parameters
    ----------
    data : pd.DataFrame
        Raw trip data containing the columns referenced above.

    Returns
    -------
    pd.DataFrame
        Cleaned dataframe with the extra 'abs_distance' column.
    """
    # Fix: operate on a copy -- the original mutated the caller's frame
    # in place via fillna/dropna(inplace=True) on the argument.
    data = data.copy()
    data.fillna(value={'company': 'N/A', 'tolls': 0}, inplace=True)
    # Drop rows that still contain null data.
    data.dropna(how='any', axis='rows', inplace=True)
    # Straight-line pickup->dropoff distance (scaled by 100).
    data['abs_distance'] = np.hypot(
        data['dropoff_latitude'] - data['pickup_latitude'],
        data['dropoff_longitude'] - data['pickup_longitude']) * 100
    # Remove extremes/outliers (>= 3 standard deviations in any key metric).
    possible_outliers_cols = ['trip_seconds', 'trip_miles', 'fare', 'abs_distance']
    data = data[(np.abs(stats.zscore(data[possible_outliers_cols])) < 3).all(axis=1)].copy()
    # Reduce location accuracy by rounding coordinates to 3 decimals.
    data = data.round({'pickup_latitude': 3, 'pickup_longitude': 3,
                       'dropoff_latitude': 3, 'dropoff_longitude': 3})
    return data
df=feature_engineering(df)
display(df.describe())
print(df.isnull().sum().sort_values(ascending=False))
ProfileReport(df, title='Chicago taxi dataset profiling Report').to_notebook_iframe()
sc = plt.scatter(df.dropoff_longitude, df.dropoff_latitude, c = df['fare'], cmap = 'summer')
plt.colorbar(sc)
fig, axs = plt.subplots(2)
fig.suptitle('Pickup location histograms')
df.hist('pickup_longitude', bins=100, ax=axs[0])
df.hist('pickup_latitude', bins=100, ax=axs[1])
plt.show()
fig, axs = plt.subplots(2)
fig.suptitle('Dropoff location histograms')
df.hist('dropoff_longitude', bins=100, ax=axs[0])
df.hist('dropoff_latitude', bins=100, ax=axs[1])
plt.show()
fig, axs = plt.subplots(4)
fig.suptitle('Trip start histograms')
fig.set_size_inches(18, 12, forward=True)
df.hist('trip_start_year', bins=8, ax=axs[0], )
df.hist('trip_start_month', bins=12, ax=axs[1])
df.hist('trip_start_day', bins=31, ax=axs[2])
df.hist('trip_start_hour', bins=24, ax=axs[3])
plt.show()
fig, axs = plt.subplots(2)
fig.set_size_inches(18, 8, forward=True)
df.hist('trip_miles', bins=50, ax=axs[0])
df.hist('trip_seconds', bins=50, ax=axs[1])
plt.show()
display(df.groupby('trip_start_hour')['fare'].mean().plot())
# Drop complex fields and split dataframe to examples and output
mlflow.log_param('training_shape', f'{df.shape}')
X=df.drop(['trip_start_timestamp'],axis=1)
y=df['fare']
# global variables
experiment_name = 'chicago-taxi-1'
ct_pipe = ColumnTransformer(transformers=[
('hourly_cat', OneHotEncoder(categories=[range(0,24)], sparse = False), ['trip_start_hour']),
('dow', OneHotEncoder(categories=[['Mon', 'Tue', 'Sun', 'Wed', 'Sat', 'Fri', 'Thu']], sparse = False), ['trip_start_day_of_week']),
('std_scaler', StandardScaler(), [
'trip_start_year',
'abs_distance',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'trip_miles',
'trip_seconds'])
])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123)
X_train=X_train.drop('fare', axis=1)
# for more details: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
rfr_pipe = Pipeline([
('ct', ct_pipe),
('forest_reg', RandomForestRegressor(
n_estimators = 20,
max_features = 'auto',
n_jobs = -1,
random_state = 3,
max_depth=None,
max_leaf_nodes=None,
))
])
rfr_score = cross_val_score(rfr_pipe, X_train, y_train, scoring = 'neg_mean_squared_error', cv = 5)
rfr_rmse = np.sqrt(-rfr_score)
rfr_rmse.mean()
mlflow.log_metric('train_cross_valid_score_rmse_mean', np.sqrt(-rfr_score).mean())
mlflow.log_param('number_of_estimators', 20)
# To see all RandomForestRegressor hyper parameters:
# estimator=RandomForestRegressor()
# display(estimator.get_params())
# Train model
mlflow.set_experiment('chicago-taxi-0')
# mlflow.sklearn.autolog()
with mlflow.start_run(nested=True) as mlflow_run:
final_model=rfr_pipe.fit(X_train, y_train)
mlflow.sklearn.log_model(final_model, 'chicago_rnd_forest')
# Option 2: hyper-parameter grid search over forest size and feature sampling.
param_grid = {'forest_reg__n_estimators': [5, 250], 'forest_reg__max_features': [6, 16, 'auto']}
forest_gs = GridSearchCV(rfr_pipe, param_grid, cv = 5, scoring = 'neg_mean_squared_error', n_jobs = -1)
forest_gs.fit(X_train, y_train)
print(f'Best parameters: {forest_gs.best_params_}')
print(f'Best score: {np.sqrt(-forest_gs.best_score_)}')
# Fix: the original nested single quotes inside a single-quoted f-string,
# which is a SyntaxError on Python < 3.12.
all_scores = np.sqrt(-forest_gs.cv_results_["mean_test_score"])
print(f'(All scores: {all_scores})')
final_model = forest_gs.best_estimator_
X_pred = pd.DataFrame(X_test, columns=X_test.columns)
X_pred['fare_pred'] = final_model.predict(X_test.drop('fare',axis=1))
X_pred.head(5)
rfr_score = cross_val_score(final_model, X_test, y_test, scoring='neg_mean_squared_error', cv = 5)
rfr_rmse = np.sqrt(-rfr_score)
rfr_rmse.mean()
mlflow.log_metric('eval_cross_valid_score_rmse_mean', np.sqrt(-rfr_score).mean())
# Comparer test
def model_comparer(job_name, **kwargs):
    """Search MLflow for all runs whose job_name tag matches `job_name`
    and return the run row selected as "best" by eval RMSE.

    NOTE(review): idxmax() selects the run with the HIGHEST RMSE. Since a
    lower RMSE is better this looks like it should be idxmin() -- but the
    later registration logic also treats a higher metric as better, so
    confirm the intended direction before changing either place.
    """
    print(f'Model blessing: "{job_name}"')
    experiment = mlflow.get_experiment_by_name(experiment_name)
    # Match runs tagged e.g. 'training_job_<timestamp>_...'.
    filter_string = f"tags.job_name ILIKE '{job_name}_%'"
    df = mlflow.search_runs([experiment.experiment_id], filter_string=filter_string)
    display(df)
    # Compare
    # Available columns:
    # run_id experiment_id status artifact_uri start_time end_time metrics.train_cross_valid_score_rmse_mean params.number_of_estimators tags.job_name tags.mlflow.source.name tags.mlflow.user tags.mlflow.source.type tags.version
    eval_max = df.loc[df['metrics.eval_cross_valid_score_rmse_mean'].idxmax()]
    # NOTE(review): train_max is computed but never used or returned.
    train_max= df.loc[df['metrics.train_cross_valid_score_rmse_mean'].idxmax()]
    display(eval_max)
    return eval_max
# You need to set a previous training job name manually. Which is following this naming pattern: training_job_...time stamp...
best_run = model_comparer('training_job_20210119T220534')
client = mlflow.tracking.MlflowClient()
def register_model(run_id, model_name):
    """Register the model artifact logged under `run_id` in the MLflow
    model registry as `model_name`, printing the resulting version info."""
    uri = 'runs:/' + run_id + '/' + model_name
    print(mlflow.register_model(uri, model_name))
registered_models=client.search_registered_models(filter_string=f"name='{experiment_name}'", max_results=1, order_by=['timestamp DESC'])
if len(registered_models) ==0:
register_model(best_run.run_id, experiment_name)
else:
last_version = registered_models[0].latest_versions[0]
run = client.get_run(last_version.run_id)
print(run)
if not run:
print(f'Registered version run missing!')
last_eval_metric=run.data.metrics['eval_cross_valid_score_rmse_mean']
best_run_metric=best_run['metrics.eval_cross_valid_score_rmse_mean']
if last_eval_metric<best_run_metric:
print(f'Register better version with metric: {best_run_metric}')
register_model(best_run.run_id, experiment_name)
else:
print(f'Registered version still better. Metric: {last_eval_metric}')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The original gross volume function checks that top height is greater than 0
Step2: MWEs
Step3: Timings
Step4: The array method is 20x faster. This is worth implementing. We should also add tests to help be explicity about the behaviour of these volume functions.
Step5: Run timings
Step6: It yielded a 13% reduction in the time.
Step7: Compare performance visualizations
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from gypsy.GYPSYNonSpatial import GrossTotalVolume_Pl
GrossTotalVolume_Pl(np.random.random(10) * 100, np.random.random(10) * 100)
def GrossTotalVolume_Pl_arr(BA_Pl, topHeight_Pl):
    """Gross total volume for lodgepole pine, array-friendly form.

    Vectorised equivalent of GrossTotalVolume_Pl: evaluates elementwise on
    NumPy arrays (or scalars) without the top-height > 0 guard of the
    original scalar function.
    """
    # Fitted model coefficients.
    coeff_a1, coeff_a2, coeff_a3, coeff_a4 = 0.194086, 0.988276, 0.949346, -3.39036
    basal_term = BA_Pl ** coeff_a2
    height_term = topHeight_Pl ** coeff_a3
    damping = np.exp(1 + (coeff_a4 / ((topHeight_Pl ** 2) + 1)))
    return coeff_a1 * basal_term * height_term * damping
print(GrossTotalVolume_Pl_arr(10, 10))
print(GrossTotalVolume_Pl_arr(0, 10))
print(GrossTotalVolume_Pl_arr(10, 0))
print(GrossTotalVolume_Pl_arr(np.random.random(10) * 100, np.random.random(10) * 100))
print(GrossTotalVolume_Pl_arr(np.zeros(10) * 100, np.random.random(10) * 100))
ba = np.random.random(1000) * 100
top_height = np.random.random(1000) * 100
d = pd.DataFrame({'ba': ba, 'th': top_height})
%%timeit
d.apply(
lambda x: GrossTotalVolume_Pl(
x.at['ba'],
x.at['th']
),
axis=1
)
%%timeit
GrossTotalVolume_Pl_arr(ba, top_height)
%%bash
git log --since "2016-11-14 19:30" --oneline # 19:30 GMT/UTC
! git diff "HEAD~$(git log --since "2016-11-14 19:30" --oneline | wc -l)" ../gypsy
%%bash
# git checkout 36941343aca2df763f93192abef461093918fff4 -b vectorize-volume-functions
# time gypsy simulate ../private-data/prepped_random_sample_300.csv --output-dir tmp
# rm -rfd tmp
# real 4m51.287s
# user 4m41.770s
# sys 0m1.070s
45/336.
from gypsy.forward_simulation import simulate_forwards_df
data = pd.read_csv('../private-data/prepped_random_sample_300.csv', index_col=0, nrows=10)
%%prun -D forward-sim-3.prof -T forward-sim-3.txt -q
result = simulate_forwards_df(data)
!head forward-sim-3.txt
! rm -rfd gypsy-output
output_dir = 'gypsy-output'
%%prun -D forward-sim-2.prof -T forward-sim-2.txt -q
# restart the kernel first
data = pd.read_csv('../private-data/prepped_random_sample_300.csv', index_col=0, nrows=10)
result = simulate_forwards_df(data)
os.makedirs(output_dir)
for plot_id, df in result.items():
filename = '%s.csv' % plot_id
output_path = os.path.join(output_dir, filename)
df.to_csv(output_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This code demonstrates that the sum of the weights approach one over time.
Step2: Next, here is a 3D surface chart that you can move using the sliders.
Step3: And here is a contour plot. I plotted the contour of the original loss function so you could see the valley that the optimization routine would chase as well as the clear convergence of the algorithm.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from random import choice, seed
from mpl_toolkits.mplot3d import axes3d
import numpy as np
from ipywidgets import interact
seed(333)
data = [((-2, 3), 1),
((-1, -1), -1),
(( 2, -3), 1)]
weights = [1, 1]
n_iterations = 101
w1_vals = []
w2_vals = []
data_loss = []
def predict(X, weights, y=None):
    """Return the dot product of `weights` and `X`.

    On an exact-zero activation (a tie) return -y instead, i.e. deliberately
    predict the wrong class so the caller registers a loss.
    """
    activation = 0
    for w, feature in zip(weights, X):
        activation += w * feature
    return activation if activation else -y
def error_rate(prediction, y):
    """Hinge-style loss: zero when `prediction` agrees in sign with `y`,
    otherwise the magnitude of the misalignment."""
    margin = y * prediction
    return -margin if margin < 0 else 0
@np.vectorize
def all_points_error(w1, w2):
    """Mean per-sample loss of the weight pair (w1, w2) over the fixed
    three-point toy dataset; vectorised so it can be evaluated on meshes."""
    samples = [((-2, 3), 1),
               ((-1, -1), -1),
               (( 2, -3), 1)]
    losses = [error_rate(predict(X, (w1, w2), y), y) for X, y in samples]
    return sum(losses) / len(losses)
# Stochastic training loop: pick a random sample each step and nudge the
# weights only when the current prediction is wrong.
for t in range(n_iterations):
    X, y = choice(data)
    # Predict with current weights. If tie, predict wrong.
    prediction = predict(X, weights, y)
    loss = error_rate(prediction, y)
    if loss:
        for index, x in enumerate(X):
            # Guard against a zero weight sum (fall back to 1).
            total_weight = sum(weights) or 1
            # Bias term pulls the weight sum toward one over time.
            bias = (1-total_weight)*(weights[index]/total_weight)
            # Perceptron-style update with a 1/(t+1) decaying step size.
            delta = y*x/(t+1) + bias
            weights[index] += delta
    # Record a snapshot of the weights/loss ~50 times over the run.
    if t%(n_iterations//50) == 0:
        w1, w2 = weights[0], weights[1]
        w1_vals.append(w1)
        w2_vals.append(w2)
        data_loss.append(all_points_error(w1, w2))
plt.figure()
for index, (w1, w2) in enumerate(zip(w1_vals, w2_vals)):
x1 = list(range(-3,4))
x2 = [-w1/w2*x for x in x1]
plt.plot(x1, x2, color="%f" % (1-index/len(w1_vals)))
plt.plot(-2, 3, "o", color="LawnGreen")
plt.plot(-1, -1, "o", color="OrangeRed")
plt.plot(2, -3, "o", color="LawnGreen")
plt.title("The decision boundary from each \niteration of SGD")
plt.xlabel("x1")
plt.ylabel("x2")
plt.ylim(-5,5)
plt.show()
[sum(total_weight) for total_weight in zip(w1_vals, w2_vals)]
def convex_plot(azim, elev):
    """Render the loss surface with the recorded SGD trajectory overlaid.

    Parameters
    ----------
    azim, elev : float
        Azimuth and elevation (degrees) for the 3D camera.
    """
    fig = plt.figure()
    # Fix: fig.gca(projection="3d") was removed in matplotlib 3.6;
    # add_subplot is the supported way to get a 3D axes.
    ax = fig.add_subplot(projection="3d")
    x1mesh, x2mesh = np.mgrid[-2:2:200j, -2:2:200j]
    fmesh = all_points_error(x1mesh, x2mesh)
    ax.plot_surface(x1mesh, x2mesh, fmesh, alpha=.5, linewidth=0.35)
    ax.view_init(azim=azim, elev=elev)
    # Overlay the SGD path stored in the module-level w1_vals/w2_vals/data_loss.
    ax.plot3D(w1_vals, w2_vals, data_loss, color="r")
    plt.show()
interact(convex_plot, azim=(0,90,10), elev=(0,90,10))
fig = plt.figure(figsize=(5,5))
plt.axis("equal")
x1mesh, x2mesh = np.mgrid[-2:2:200j,-2:2:200j]
fmesh = all_points_error(x1mesh, x2mesh)
CS = plt.contour(x1mesh, x2mesh, fmesh, 30)
plt.plot(w1_vals, w2_vals, "o-", markersize=5, linewidth=4, alpha=.5)
plt.ylim(-2,2)
plt.xlim(-2,2)
plt.clabel(CS, inline=1, inline_spacing=5, fontsize=8)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the Fermi distribution
Step3: In this equation
Step4: Write a function plot_fermidist(mu, kT) that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters mu and kT.
Step5: Use interact with plot_fermidist to explore the distribution
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Image
from IPython.html.widgets import interact, interactive, fixed
Image('fermidist.png')
def fermidist(energy, mu, kT):
    """Compute the Fermi-Dirac distribution F(energy) = 1/(exp((energy-mu)/kT) + 1).

    Parameters
    ----------
    energy : float or array
        Single-particle energy (epsilon).
    mu : float
        Chemical potential.
    kT : float
        Temperature in energy units.

    Returns
    -------
    float or np.ndarray
        Occupation probability in (0, 1); vectorises over array `energy`.
    """
    # Fix: the original docstring had lost its quotes (a bare statement is
    # a SyntaxError) and Euler's number was hand-typed as a literal;
    # np.exp is exact and vectorised.
    return 1 / (np.exp((energy - mu) / kT) + 1)
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,
0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))
def plot_fermidist(mu, kT):
    """Plot the Fermi distribution F(energy) for energy in [0, 10].

    Parameters
    ----------
    mu : float
        Chemical potential.
    kT : float
        Temperature in energy units.

    Returns
    -------
    list
        The Line2D artists returned by plt.plot.
    """
    E = np.linspace(0, 10., 100)
    y = plt.plot(E, fermidist(E, mu, kT))
    # Fix: the original labels ('t', 'X(t)') were copied from an unrelated
    # plot; this chart is F(energy) versus energy.
    plt.xlabel('energy')
    plt.ylabel('F(energy)')
    return y
plot_fermidist(4.0, 1.0)
assert True # leave this for grading the plot_fermidist function
interact(plot_fermidist, mu = (0.0,5.0), kT=(.1,10.0));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Packages
Step2: Environments
Step3: Neat stuff
|
<ASSISTANT_TASK:>
Python Code:
# Create pkg_cache and environments
pkg_cache = cache.packages(root_pkgs)
envs = environment.environments(root_envs)
print(pkg_cache[:5])
print()
print(envs[:5])
pi = pkg_cache[0]
pi.index # info/index.json
# We can access fields of index.json directly from the object.
pi.name, pi.version, pi.build
# Access to info/files
pi.files
# The full spec of the package. This is always "name-version-build"
pi.full_spec
# We can queries against the information we have on packages
# For example, I want to find all MIT licensed packages in the cache
{pi.full_spec: pi.license for pi in pkg_cache if pi.license == 'MIT'}
e = envs[2]
e
# We can discover the currently activated environment
{e.path: e.activated() for e in envs}
# We can see all the packages that claim to be linked into the environment, keyed by name
e.linked_packages
# linked packages are either hard-linked, symlinked, or copied into environments.
set(chain(e.hard_linked, e.soft_linked, e.copy_linked)) ^ set(e.linked_packages.values())
# The origin channel of each package
e.package_channels
# We also have access to the history of the environment.
# The history object is an adaptation of conda's history parser.
# (note: The interface to this may change in the future)
e.history.object_log
# Calculate potential collisions in environments by packages claiming the same file paths
# Very quick and naive way of detecting file path collisions.
for i, p1 in enumerate(pkg_cache):
for p2 in pkg_cache[i+1:]:
if p1.name == p2.name:
continue
x = p1.files.intersection(p2.files)
if x:
print("{} collides with {}".format(p1, p2))
print("\tCollisions: ", x)
# Cache Utils has some higher order, convenience functions
# See what environments a package is linked into
# Note that this is a O(n) operation where n is the sum of the installed packages in each environment you're checking.
# If you're running this for the first time, it has to read all the metadata for each environment.
# Also note, that this creates new package info objects and environment objects each run, so each run
# prompts a full scan of both the package cache and all environments.
cu.linked_environments((pkg_cache[0],), envs)
# Find which environments the latest packages are linked to.
# This example uses Versio to parse and compare PEP440 compliant version numbers
# This will exclude packages like packages like jpeg and openssl
# This loop simple creates Version objects so we can compare them later.
Versions = {}
for x in pkg_cache:
try:
if x.name in Versions:
Versions[x.name].append(Version(x.version))
else:
Versions[x.name] = [Version(x.version)]
except:
print("Skipping ", x.name, x.version)
# sort the value lists and pick the latest versions
#pversions = {k: str(list(sorted(v))[-1]) for k, v in Versions.items()}
# sort the value lists and pick the older versions
pversions = {k: list(map(str, list(sorted(v))[:-1])) for k, v in Versions.items()}
# The most up-to-date packages are linked to which environments?
#latest_pkgs = [x for x in pkg_cache if x.name in pversions and x.version == pversions[x.name]]
# Find the environments that older packages are linked to
latest_pkgs = [x for x in pkg_cache if x.name in pversions and x.version in set(pversions[x.name])]
# Simply print the results nicely
{str(k): list(map(str, v)) for k, v in cu.linked_environments(latest_pkgs, envs).items()}
# All packages that are not linked to any environment
cu.unlinked_packages(pkg_cache, envs)
# Environment representation of root environment
e = environment.Environment(join(root_envs, 'env2'))
# Long running. Disk intensive.
filter_pyc = lambda f: filter(lambda x: not x.endswith('.pyc'), f)
# List all files in an environment that are not hardlinked (and should be).
# Note that *.pyc files are filtered out.
not_linked = {x: tuple(filter_pyc(y)) for x, y in eu.check_hardlinked_env(envs[0]).items()}
# If you wish to see all the non-existant hardlinks, including *.pyc files, remove the filter_pyc function call
# not_linked = {x: y for x, y in eu.check_hardlinked_env(envs[0]).items()}
not_linked
# We can leverage the information in the environment's history to get packages
# that were explicitly installed by the user.
eu.explicitly_installed(e)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convert A List To A Tuple
Step2: Note that the second element that is passed to the zip() function makes use of the step value to make sure that only the world and 2 elements are selected. Likewise, the first element uses the step value to select hello and 1.
Step3: Clone Or Copy A List in Python
|
<ASSISTANT_TASK:>
Python Code:
# List of Strings to a String
listOfStrings = ['One', 'Two', 'Three']
strOfStrings = ''.join(listOfStrings)
print(strOfStrings)
# List Of Integers to a String
listOfNumbers = [1, 2, 3]
strOfNumbers = ''.join(str(n) for n in listOfNumbers)
print(strOfNumbers)
helloWorld = ['hello','world','1','2']
# print(list(zip(helloWorld)))
helloWorldDictionary = dict(zip(helloWorld[0::2], helloWorld[1::2]))
# Print out the result
print(helloWorldDictionary)
a = [1, 2, 3, 4, 5]
# Create a list iterator object
i = iter(a)
for k in i:
print(k)
# Zip and create a dictionary
print(dict(zip(i, i)))
## Difference Between The Python append() and extend() Methods?
# Append [4,5] to `shortList`
# This is your list
shortList = [1, 2, 3]
longerList = [1, 2, 3]
# Check whether it's iterable
list.__iter__
shortList.append([4, 5])
# Use the `print()` method to show `shortList`
print(shortList)
# Extend `longerList` with [4,5]
longerList.extend([4, 5])
# Use the `print()` method to see `longerList`
print(longerList)
# Copy the grocery list by slicing and store it in the `newGroceries` variable
groceries = [1, 2, 3, 4, 5, 6]
newGroceries = groceries[:]
# Copy the grocery list with the `list()` function and store it in a `groceriesForFriends` variable
groceriesForFriends = list(groceries)
# Import the copy library
import copy as c
# Create a `groceriesForFamily` variable and assign the copied grocery list to it
groceriesForFamily = c.copy(groceries)
# Use `deepcopy()` and assign the copied list to a `groceriesForKids` variable
groceriesForKids = c.deepcopy(groceries)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 写程序,可由键盘读入两个整数m与n(n不等于0),询问用户意图,如果要求和则计算从m到n的和输出,如果要乘积则计算从m到n的积并输出,如果要求余数则计算m除以n的余数的值并输出,否则则计算m整除n的值并输出。
Step2: 写程序,能够根据北京雾霾PM2.5数值给出对应的防护建议。如当PM2.5数值大于500,则应该打开空气净化器,戴防雾霾口罩等。
Step3: 英文单词单数转复数,要求输入一个英文动词(单数形式),能够得到其复数形式,或给出单数转复数形式的建议(提示,some_string.endswith(some_letter)函数可以判断某字符串结尾字符)。
|
<ASSISTANT_TASK:>
Python Code:
# Determine the user's western zodiac sign from birth month and day.
# Fix: the original if-chain mislabelled every sign from Gemini onward
# (e.g. late May / most of June printed Capricorn) and omitted Scorpio
# entirely. This table-driven version uses the standard date boundaries.
name = str(input('your name:'))
month = int(input('your birth month:'))
day = int(input('your birth day'))
# signs[i] is the sign whose period ends in month i+1; cutoffs[i] is the
# last day of month i+1 that still belongs to that sign.
signs = ['摩羯座', '水瓶座', '双鱼座', '白羊座', '金牛座', '双子座',
         '巨蟹座', '狮子座', '处女座', '天秤座', '天蝎座', '射手座']
cutoffs = [19, 18, 20, 19, 20, 21, 22, 22, 22, 23, 22, 21]
if day <= cutoffs[month - 1]:
    index = month - 1
else:
    # After the cutoff the next sign begins (December wraps to Capricorn).
    index = month % 12
print(name, '你是' + signs[index] + '!')
m=int(input())
n=int(input())
aim=int(input('if you want to get m+n,enter 1.if you want to get m*n,enter 2.if you want to get m%n,enter 3.if you want to get m/n,enter 4'))
if aim==1:
print(m+n)
elif aim==2:
print(m*n)
elif aim==3:
print(m%n)
else:
print(m/n)
pm=int(input())
if pm<=50:
print('空气质量优,各类人群可正常活动')
elif pm<=100:
print('空气质量良,极少数异常敏感人群应减少户外活动')
elif pm<=150:
print('轻度污染,体质弱者应减少长时间、高强度的户外锻炼。')
elif pm<=200:
print('中度污染,一般人群适量减少户外运动。')
elif pm<=300:
print('重度污染,体质弱者停止户外运动,一般人群适量减少户外运动。')
else:
print('一般人群因避免户外运动。')
word = str(input())
# Singular -> plural guidance: endings in -s/-sh/-ch/-x/-o take 'es';
# consonant + y changes y to i and adds 'es'; everything else adds 's'.
es_endings = ('s', 'sh', 'ch', 'x', 'o')
consonant_y_endings = tuple(letter + 'y' for letter in 'bcdfghjklmnpqrstvwxz')
if word.endswith(es_endings):
    print(word + 'es')
elif word.endswith(consonant_y_endings):
    print('把y改i再加es')
else:
    print(word + 's')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: epochs
Step11: Check out your predictions
|
<ASSISTANT_TASK:>
Python Code:
# Notebook magics: render matplotlib figures inline, at retina resolution.
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Path to the hourly bike-sharing dataset (CSV).
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
# Plot ride counts for the first 10 days (24 hourly rows per day).
rides[:24*10].plot(x='dteday', y='cnt')
# One-hot encode the categorical columns; each category becomes its own
# 0/1 column prefixed with the original field name.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
# Drop the raw categorical columns (now one-hot encoded) and other fields
# excluded from the feature set.
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# Standardize the continuous variables to zero mean / unit variance.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days (24 hourly rows per day)
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set;
# earlier history is used for training (time-series split, no shuffling).
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    """A minimal two-layer (one hidden layer) feed-forward network.

    The hidden layer uses a sigmoid activation; the output layer is linear
    (identity activation), which suits this regression task.  Training is
    plain batch gradient descent implemented with numpy only.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Store layer sizes and learning rate, randomly initialize weights.

        Weights are drawn from N(0, 1/sqrt(fan_in)) so early activations
        stay in the sigmoid's responsive range.
        """
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes**-0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes**-0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Sigmoid activation for the hidden layer.
        self.activation_function = lambda x: 1 / (1 + np.exp(-x))

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.

        Arguments
        ---------
        features: 2D array, each row is one data record, each column is a feature
        targets: 1D array of target values
        '''
        n_records = features.shape[0]
        # Accumulate weight deltas over the whole batch, then apply one
        # averaged gradient-descent step (batch gradient descent).
        delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
        delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            ### Forward pass ###
            hidden_inputs = np.dot(X, self.weights_input_to_hidden)
            hidden_outputs = self.activation_function(hidden_inputs)
            final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
            final_outputs = final_inputs  # identity activation on the output

            ### Backward pass ###
            error = y - final_outputs
            # The output activation is the identity, so its derivative is 1
            # and the error term equals the error itself.
            output_error_term = error

            # Hidden layer's share of the error, scaled by the sigmoid
            # derivative h * (1 - h).
            hidden_error = np.dot(output_error_term, self.weights_hidden_to_output.T)
            hidden_error_term = hidden_error * (hidden_outputs * (1 - hidden_outputs))

            # Weight step (input to hidden)
            delta_weights_i_h += hidden_error_term * X[:, None]
            # Weight step (hidden to output)
            delta_weights_h_o += output_error_term * hidden_outputs[:, None]

        # Apply the batch-averaged updates.
        self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records
        self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records

    def run(self, features):
        ''' Run a forward pass through the network with input features

        Arguments
        ---------
        features: 1D array of feature values
        '''
        hidden_inputs = np.dot(features, self.weights_input_to_hidden)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
        return final_inputs  # identity activation on the output layer
def MSE(y, Y):
    """Return the mean squared error between predictions y and targets Y."""
    squared_errors = (y - Y) ** 2
    return np.mean(squared_errors)
import unittest

# Fixed fixtures: one training example and hand-set weights so the expected
# post-training weight values in test_train can be pinned exactly.
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
                       [0.4, 0.5],
                       [-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
                       [-0.1]])

class TestMethods(unittest.TestCase):

    ##########
    # Unit tests for data loading
    ##########

    def test_data_path(self):
        # Test that file path to dataset has been unaltered
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # Test that data frame loaded
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Unit tests for network functionality
    ##########

    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))

    def test_train(self):
        # Test that weights are updated correctly on training
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Overwrite the random initial weights with the known fixtures.
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()

        network.train(inputs, targets)
        self.assertTrue(np.allclose(network.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        self.assertTrue(np.allclose(network.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Test correctness of run method
        network = NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))

# Collect and run the tests above directly inside the notebook.
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
from time import time
# Fix: ParameterGrid lives in sklearn.model_selection; the old
# sklearn.grid_search module was removed in scikit-learn 0.20.
from sklearn.model_selection import ParameterGrid

### Set the hyperparameters here ###
iterations = [10000]
learning_rate = [0.03, 0.3]
hidden_nodes = [10, 15, 20, 25]
output_nodes = 1

# Exhaustively train one network per hyperparameter combination.
param_grid = {'ii': iterations, 'lrate': learning_rate, 'hnodes': hidden_nodes}
grid = ParameterGrid(param_grid)

for params in grid:
    # Time each configuration so they can also be compared on cost.
    start_time = time()
    N_i = train_features.shape[1]
    network = NeuralNetwork(N_i, params['hnodes'], output_nodes, params['lrate'])

    losses = {'train': [], 'validation': []}
    for ii in range(params['ii']):
        # Go through a random batch of 128 records from the training data set
        batch = np.random.choice(train_features.index, size=128)
        # Fix: .loc replaces the DataFrame.ix indexer removed from pandas.
        X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

        network.train(X, y)

        # Printing out the training progress
        train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
        val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
        # Fix: removed the stray '%' before "Validation loss" so the progress
        # line matches the single-run training cell below.
        sys.stdout.write("\r#epochs: {}, #hidden_nodes: {}, "
                         "learning_rate: {}, Progress: {:2.1f}".format(
                             params['ii'], params['hnodes'], params['lrate'],
                             100 * ii / float(params['ii']))
                         + "% ... Training loss: " + str(train_loss)[:5]
                         + " ... Validation loss: " + str(val_loss)[:5])
        sys.stdout.flush()

        losses['train'].append(train_loss)
        losses['validation'].append(val_loss)
    print('')
    print('Network ran in {0:.3f}s'.format(time() - start_time))
import sys
from time import time

### Set the hyperparameters here ###
iterations = 10000
learning_rate = 0.3
hidden_nodes = 20
output_nodes = 1

# Time the full training run.
start_time = time()
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    # Fix: .loc replaces the DataFrame.ix indexer removed from pandas.
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii / float(iterations))
                     + "% ... Training loss: " + str(train_loss)[:5]
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
print('')
print('Network ran in {0:.3f}s'.format(time() - start_time))
# Loss curves for the final training run.
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()

fig, ax = plt.subplots(figsize=(8, 4))

# Undo the standardization so predictions are in original ride counts.
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T * std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt'] * std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()

# Label the x axis with one tick per day (noon of each day).
# Fix: .loc replaces the DataFrame.ix indexer removed from pandas.
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# Initialise the CMIP6 ocean-biogeochemistry (ocnbgchem) documentation
# output for the hammoz-consortium "sandbox-2" source id.
DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-2', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Avec la librairie pytz
Step2: Les valeurs possibles pour les timezone sont fournies par une constante.
Step3: Nous créons d'abord une date naive à laquelle nous renseignons le timezone.
Step4: Nous utilisons cette même date pour créer une date aware à New York.
Step5: Nous changeons le timezone de cette date aware des US en France.
Step6: Cette nouvelle date n'affiche pas les même heures, mais n'a pas non plus le même timezone. Nous pouvons vérifier qu'il s'agit de la même date.
Step7: Les deux dates localisées contiennent la même information d'heure. Nous vérifions qu'il ne s'agit pas du même instant et qu'il y a un décalage d'une heure.
Step8: Prise en compte du changement d'heure
Step9: Voir donc les bonnes pratiques pour manipuler les dates comme
|
<ASSISTANT_TASK:>
Python Code:
import datetime as dt

# A "naive" datetime carries no timezone information.
print("Maintenant naif :", dt.datetime.now())
# An "aware" datetime carries a fixed UTC offset (here UTC+2).
instant = dt.datetime.now(dt.timezone(dt.timedelta(hours=2)))
print("Maintenant aware :", instant)
print("Info timezone :", instant.tzinfo)
# A dt.timezone can also be given a display name.
instant = dt.datetime.now(dt.timezone(dt.timedelta(hours=2), name="France"))
print("Maintenant aware :", instant)
print("Info timezone :", instant.tzinfo)
# Two aware "now" datetimes with different offsets describe (almost) the
# same instant, so their difference is close to zero.
instant = dt.datetime.now(dt.timezone(dt.timedelta(hours=2), name="plus_2"))
autre_instant = dt.datetime.now(dt.timezone(dt.timedelta(hours=1), name="plus_1"))
print("Maintenant aware 2 :", instant)
print("Maintenant aware 1 :", autre_instant)
autre_instant - instant
import pytz

# pytz provides real, named timezones (with DST rules), unlike the fixed
# offsets of dt.timezone above.
paris_tz = pytz.timezone('Europe/Paris')
print(paris_tz)
paris_tz
# All available timezone names.
pytz.all_timezones
now_naive = dt.datetime.now()
print("Instant actuel naif:", now_naive)
# localize() attaches a timezone to a naive datetime without shifting it.
h_paris_aware = paris_tz.localize(now_naive)
print("France, aware :", h_paris_aware)
new_york_tz = pytz.timezone('America/New_York')
print("Instant actuel naif:", now_naive)
# Same wall-clock value localized in New York: a different instant.
h_new_york_aware = new_york_tz.localize(now_naive)
print("New York, aware :", h_new_york_aware)
# astimezone() converts to another zone while keeping the same instant.
h_new_york_in_paris = h_new_york_aware.astimezone(paris_tz)
print("France from US :", h_new_york_in_paris)
# Same instant, different representation: equality holds.
h_new_york_aware == h_new_york_in_paris
print("Heure US :", h_new_york_aware.hour)
print("Heure Fr :", h_paris_aware.hour)
print("Différence :", h_new_york_aware - h_paris_aware)
# DST pitfall: timedelta arithmetic does NOT re-evaluate the UTC offset,
# so crossing a DST change keeps the old offset until re-localized.
winter_day = paris_tz.localize(dt.datetime(2019, 3, 30))
summer_day = paris_tz.localize(dt.datetime(2019, 4, 2))
print("Hiver :", winter_day)
print("Été. :", summer_day)
print("Différence :", summer_day - winter_day)
after_3_days = winter_day + dt.timedelta(days=3)
print("3 jours plus tard :", after_3_days)
# astimezone(paris_tz) repairs the offset after the arithmetic.
print("3 jours plus tard relocalisé :", after_3_days.astimezone(paris_tz))
# Best practice: store and compare in UTC; convert to local zones only
# for display.
utc_tz = pytz.timezone('UTC')
meeting = dt.datetime(2020, 10, 15, 15, 30)
paris_tz = pytz.timezone('Europe/Paris')
meeting = meeting.astimezone(paris_tz).astimezone(utc_tz)
print(meeting)
new_york_tz = pytz.timezone('America/New_York')
print(f"for New Yoerkers : {meeting.astimezone(new_york_tz)}")
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: First, lets define some helper functions to simplify the rest of the examples.
Step6: Now lets create some data to use in the examples.
Step7: ℹ️ After running this, it looks like the timestamps disappeared!
Step8: Fixed time windows
Step9: Sliding time windows
Step10: A thing to note with SlidingWindows is that one element might be processed multiple times because it might overlap in more than one window.
Step11: Note that by doing the windowing after the processing, we only process / print the elements once, but the windowing afterwards is the same.
|
<ASSISTANT_TASK:>
Python Code:
# Install apache-beam with pip.
!pip install --quiet apache-beam
import apache_beam as beam
def human_readable_window(window) -> str:
    """Formats a window object into a human readable string.

    GlobalWindow has no meaningful bounds, so it is printed via str();
    interval windows are printed as "start - end" in UTC.
    """
    if isinstance(window, beam.window.GlobalWindow):
        return str(window)
    return f'{window.start.to_utc_datetime()} - {window.end.to_utc_datetime()}'
class PrintElementInfo(beam.DoFn):
    """Prints an element with its Window information."""

    def process(self, element, timestamp=beam.DoFn.TimestampParam, window=beam.DoFn.WindowParam):
        # Print, then re-emit the element unchanged so this DoFn can sit mid-pipeline.
        print(f'[{human_readable_window(window)}] {timestamp.to_utc_datetime()} -- {element}')
        yield element
@beam.ptransform_fn
def PrintWindowInfo(pcollection):
    """Prints the Window information with how many elements landed in that window."""
    class PrintCountsInfo(beam.DoFn):
        def process(self, num_elements, window=beam.DoFn.WindowParam):
            print(f'>> Window [{human_readable_window(window)}] has {num_elements} elements')
            yield num_elements

    return (
        pcollection
        # without_defaults() is needed for non-global windowing: windows with
        # no elements produce no count instead of a default value.
        | 'Count elements per window' >> beam.combiners.Count.Globally().without_defaults()
        | 'Print counts info' >> beam.ParDo(PrintCountsInfo())
    )
import time
from apache_beam.options.pipeline_options import PipelineOptions
def to_unix_time(time_str: str, time_format='%Y-%m-%d %H:%M:%S') -> int:
    """Converts a time string into Unix time.

    NOTE(review): time.mktime interprets the parsed struct in the machine's
    local timezone, so the result is timezone-dependent — confirm this is
    intended for the event timestamps below.
    """
    time_tuple = time.strptime(time_str, time_format)
    return int(time.mktime(time_tuple))
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.pvalue.PBegin)
@beam.typehints.with_output_types(beam.window.TimestampedValue)
def AstronomicalEvents(pipeline):
    """Creates a PCollection of 2021 astronomical events, each stamped with
    its event time (seconds since the Unix epoch) for use with windowing."""
    return (
        pipeline
        | 'Create data' >> beam.Create([
            ('2021-03-20 03:37:00', 'March Equinox 2021'),
            ('2021-04-26 22:31:00', 'Super full moon'),
            ('2021-05-11 13:59:00', 'Micro new moon'),
            ('2021-05-26 06:13:00', 'Super full moon, total lunar eclipse'),
            ('2021-06-20 22:32:00', 'June Solstice 2021'),
            ('2021-08-22 07:01:00', 'Blue moon'),
            ('2021-09-22 14:21:00', 'September Equinox 2021'),
            ('2021-11-04 15:14:00', 'Super new moon'),
            ('2021-11-19 02:57:00', 'Micro full moon, partial lunar eclipse'),
            ('2021-12-04 01:43:00', 'Super new moon'),
            ('2021-12-18 10:35:00', 'Micro full moon'),
            ('2021-12-21 09:59:00', 'December Solstice 2021'),
        ])
        # Attach the parsed timestamp as the element's event time.
        | 'With timestamps' >> beam.MapTuple(
            lambda timestamp, element:
                beam.window.TimestampedValue(element, to_unix_time(timestamp))
        )
    )
# Lets see how the data looks like.
beam_options = PipelineOptions(flags=[], type_check_additional='all')
with beam.Pipeline(options=beam_options) as pipeline:
    (
        pipeline
        | 'Astronomical events' >> AstronomicalEvents()
        | 'Print element' >> beam.Map(print)
    )

import apache_beam as beam

# All elements fall into the GlobalWindow by default.
# NOTE(review): the transform label misspells 'Astronomical'; it is only a
# label, so behavior is unaffected.
with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'Astrolonomical events' >> AstronomicalEvents()
        | 'Print element info' >> beam.ParDo(PrintElementInfo())
        | 'Print window info' >> PrintWindowInfo()
    )

import apache_beam as beam
from datetime import timedelta

# Fixed-sized windows of approximately 3 months.
window_size = timedelta(days=3*30).total_seconds() # in seconds
print(f'window_size: {window_size} seconds')
with beam.Pipeline() as pipeline:
    elements = (
        pipeline
        | 'Astronomical events' >> AstronomicalEvents()
        | 'Fixed windows' >> beam.WindowInto(beam.window.FixedWindows(window_size))
        | 'Print element info' >> beam.ParDo(PrintElementInfo())
        | 'Print window info' >> PrintWindowInfo()
    )

import apache_beam as beam
from datetime import timedelta

# Sliding windows of approximately 3 months every month.
window_size = timedelta(days=3*30).total_seconds() # in seconds
window_period = timedelta(days=30).total_seconds() # in seconds
print(f'window_size: {window_size} seconds')
print(f'window_period: {window_period} seconds')
with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'Astronomical events' >> AstronomicalEvents()
        | 'Sliding windows' >> beam.WindowInto(
            beam.window.SlidingWindows(window_size, window_period)
        )
        | 'Print element info' >> beam.ParDo(PrintElementInfo())
        | 'Print window info' >> PrintWindowInfo()
    )

import apache_beam as beam
from datetime import timedelta

# Sliding windows of approximately 3 months every month.
window_size = timedelta(days=3*30).total_seconds() # in seconds
window_period = timedelta(days=30).total_seconds() # in seconds
print(f'window_size: {window_size} seconds')
print(f'window_period: {window_period} seconds')
with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'Astronomical events' >> AstronomicalEvents()
        #------
        # ℹ️ Here we're processing / printing the data before windowing.
        | 'Print element info' >> beam.ParDo(PrintElementInfo())
        | 'Sliding windows' >> beam.WindowInto(
            beam.window.SlidingWindows(window_size, window_period)
        )
        #------
        | 'Print window info' >> PrintWindowInfo()
    )

import apache_beam as beam
from datetime import timedelta

# Sessions divided by approximately 1 month gaps.
gap_size = timedelta(days=30).total_seconds() # in seconds
print(f'gap_size: {gap_size} seconds')
with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'Astronomical events' >> AstronomicalEvents()
        | 'Session windows' >> beam.WindowInto(beam.window.Sessions(gap_size))
        | 'Print element info' >> beam.ParDo(PrintElementInfo())
        | 'Print window info' >> PrintWindowInfo()
    )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Creating iterable object
Step3: Generator
|
<ASSISTANT_TASK:>
Python Code:
# An iterable object returns an iterator from the iter() function.
s = 'abc'
itr = iter(s)
print(next(itr))
print(next(itr))
print(next(itr))

## __iter__() function and __next__() function can be called directly as well
x = s.__iter__()
print(x.__next__())
print(next(x))

# A StopIteration signal is raised after all items are retrieved.
print(next(itr))  # NOTE: deliberately raises StopIteration — itr is exhausted

# A STRING is an iterable, but not an iterator: it does not support __next__.
next(s)  # NOTE: deliberately raises TypeError
class Reverse:
    """Iterator for looping over a sequence backwards."""

    def __init__(self, data):
        self.data = data
        self.index = len(data)  # start one past the last element

    def __iter__(self):
        # An iterator returns itself from __iter__.
        return self

    def __next__(self):
        if self.index == 0:
            raise StopIteration
        self.index = self.index - 1
        return self.data[self.index]
# Drive the custom iterator with a for-loop: prints Y S C U, then 300 200 100.
rev = Reverse('UCSY')
for c in rev:
    print(c)
print('---')
rev = Reverse([100, 200, 300])
for c in rev:
    print(c)
def rev_gen(data):
    """Generator that yields the elements of *data* in reverse order."""
    yield from data[::-1]
# Consume the generator; prints y s c u.
rev = rev_gen('ucsy')
for c in rev:
    print(c)
type(rev)  # notebook cell output: <class 'generator'>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic interact
Step2: When you pass this function as the first argument to interact along with an integer keyword argument (x=10), a slider is generated and bound to the function parameter.
Step3: When you move the slider, the function is called, and its return value is printed.
Step4: If you pass a string, interact will generate a text box.
Step5: interact can also be used as a decorator. This allows you to define a function and interact with it in a single shot. As this example shows, interact also works with functions that have multiple arguments.
Step6: Fixing arguments using fixed
Step7: When we call interact, we pass fixed(20) for q to hold it fixed at a value of 20.
Step8: Notice that a slider is only produced for p as the value of q is fixed.
Step9: The following table gives an overview of different argument types, and how they map to interactive controls
Step10: If a 3-tuple of integers is passed (min,max,step), the step size can also be set.
Step11: A float-valued slider is produced if any of the elements of the tuples are floats. Here the minimum is 0.0, the maximum is 10.0 and step size is 0.1 (the default).
Step12: The step size can be changed by passing a third element in the tuple.
Step13: For both integer and float-valued sliders, you can pick the initial value of the widget by passing a default keyword argument to the underlying Python function. Here we set the initial value of a float slider to 5.5.
Step14: Dropdown menus are constructed by passing a list of strings. In this case, the strings are both used as the names in the dropdown menu UI and passed to the underlying Python function.
Step15: If you want a dropdown menu that passes non-string values to the Python function, you can pass a list of ('label', value) pairs. The first items are the names in the dropdown menu UI and the second items are values that are the arguments passed to the underlying Python function.
Step16: Finally, if you need more granular control than that afforded by the abbreviation, you can pass a ValueWidget instance as the argument. A ValueWidget is a widget that aims to control a single value. Most of the widgets bundled with ipywidgets inherit from ValueWidget. For more information, see this section on widget types.
Step17: interactive
Step18: Unlike interact, interactive returns a Widget instance rather than immediately displaying the widget.
Step19: The widget is an interactive, a subclass of VBox, which is a container for other widgets.
Step20: The children of the interactive are two integer-valued sliders and an output widget, produced by the widget abbreviations above.
Step21: To actually display the widgets, you can use IPython's display function.
Step22: At this point, the UI controls work just like they would if interact had been used. You can manipulate them interactively and the function will be called. However, the widget instance returned by interactive also gives you access to the current keyword arguments and return value of the underlying Python function.
Step23: Here is the current return value of the function.
Step24: Disabling continuous updates
Step25: Notice that the output is updated even while dragging the mouse on the slider. This is not useful for long running functions due to lagging
Step26: There are two ways to mitigate this. You can either only execute on demand, or restrict execution to mouse release events.
Step27: You can do the same thing with interactive by using a dict as the second argument, as shown below.
Step28: continuous_update
Step29: More control over the user interface
Step30: Arguments that are dependent on each other
Step31: Flickering and jumping output
Step32: Interact with multiple functions
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
def f(x):
    return x

# interact infers the widget from the abbreviation: int -> slider,
# bool -> checkbox, str -> text box.
interact(f, x=10);
interact(f, x=True);
interact(f, x='Hi there!');

# interact as a decorator, with multiple arguments.
@interact(x=True, y=1.0)
def g(x, y):
    return (x, y)

def h(p, q):
    return (p, q)

# fixed(20) pins q; only p gets a control.
interact(h, p=5, q=fixed(20));

# Explicit widget instances and tuple abbreviations:
# (min,max), (min,max,step); floats produce float sliders.
interact(f, x=widgets.IntSlider(min=-10, max=30, step=1, value=10));
interact(f, x=(0,4));
interact(f, x=(0,8,2));
interact(f, x=(0.0,10.0));
interact(f, x=(0.0,10.0,0.01));

@interact(x=(0.0,20.0,0.5))
def h(x=5.5):
    return x

# Lists produce dropdowns; ('label', value) pairs pass the value through.
interact(f, x=['apples','oranges']);
interact(f, x=[('one', 10), ('two', 20)]);
interact(f, x=widgets.Combobox(options=["Chicago", "New York", "Washington"], value="Chicago"));

from IPython.display import display

def f(a, b):
    display(a + b)
    return a+b

# interactive returns the widget instead of displaying it immediately.
w = interactive(f, a=10, b=20)
type(w)
w.children
display(w)
w.kwargs
w.result
def slow_function(i):
    """Print all x below int(i) such that both x and x**2 are palindromes."""
    limit = int(i)
    palindromic = [
        x for x in range(limit)
        if str(x) == str(x)[::-1] and str(x ** 2) == str(x ** 2)[::-1]
    ]
    print(limit, palindromic)
    return
%%time
slow_function(1e6)

from ipywidgets import FloatSlider
# Continuous updates lag for slow functions; the next cells mitigate that.
interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));
interact_manual(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5));
# Same as interact_manual, via a dict as the second argument.
slow = interactive(slow_function, {'manual': True}, i=widgets.FloatSlider(min=1e4, max=1e6, step=1e4))
slow
# continuous_update=False: only run on mouse release.
interact(slow_function,i=FloatSlider(min=1e5, max=1e7, step=1e5, continuous_update=False));

# interactive_output: lay out the controls yourself, capture output separately.
a = widgets.IntSlider()
b = widgets.IntSlider()
c = widgets.IntSlider()
ui = widgets.HBox([a, b, c])
def f(a, b, c):
    print((a, b, c))
out = widgets.interactive_output(f, {'a': a, 'b': b, 'c': c})
display(ui, out)

# Dependent arguments: x's maximum tracks y's current value.
x_widget = FloatSlider(min=0.0, max=10.0, step=0.05)
y_widget = FloatSlider(min=0.5, max=10.0, step=0.05, value=5.0)
def update_x_range(*args):
    x_widget.max = 2.0 * y_widget.value
y_widget.observe(update_x_range, 'value')
def printer(x, y):
    print(x, y)
interact(printer,x=x_widget, y=y_widget);

%matplotlib inline
from ipywidgets import interactive
import matplotlib.pyplot as plt
import numpy as np
def f(m, b):
    plt.figure(2)
    x = np.linspace(-10, 10, num=1000)
    plt.plot(x, m * x + b)
    plt.ylim(-5, 5)
    plt.show()
interactive_plot = interactive(f, m=(-2.0, 2.0), b=(-3, 3, 0.5))
# Fix the output area height to prevent flicker/jumping while redrawing.
output = interactive_plot.children[-1]
output.layout.height = '350px'
interactive_plot

# One slider driving two functions via two interactive_output areas.
import ipywidgets as widgets
from IPython.display import display
a = widgets.IntSlider(value=5, min=0, max=10)
def f1(a):
    display(a)
def f2(a):
    display(a * 2)
out1 = widgets.interactive_output(f1, {'a': a})
out2 = widgets.interactive_output(f2, {'a': a})
display(a)
display(out1)
display(out2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Homework 5
Step2: Part 3
Step3: Part 4
Step4: Part 5
Step5: Problem 2
Step7: Problem 3
Step9: Problem 4
Step11: Problem 5
Step13: Problem 6
|
<ASSISTANT_TASK:>
Python Code:
%%file roots.py
def quad_roots(a=1.0, b=2.0, c=0.0):
    """Returns the roots of a quadratic equation: ax^2 + bx + c = 0.

    INPUTS
    =======
    a: float, optional, default value is 1
       Coefficient of quadratic term
    b: float, optional, default value is 2
       Coefficient of linear term
    c: float, optional, default value is 0
       Constant term

    RETURNS
    ========
    roots: 2-tuple of complex floats
       Has the form (root1, root2) unless a = 0
       in which case a ValueError exception is raised

    EXAMPLES
    =========
    >>> quad_roots(1.0, 1.0, -12.0)
    ((3+0j), (-4+0j))
    """
    import cmath  # Can return complex numbers from square roots
    if a == 0:
        raise ValueError("The quadratic coefficient is zero. This is not a quadratic equation.")
    else:
        sqrtdisc = cmath.sqrt(b * b - 4.0 * a * c)
        r1 = -b + sqrtdisc
        r2 = -b - sqrtdisc
        return (r1 / 2.0 / a, r2 / 2.0 / a)
%%file test_roots.py
import roots


def test_quadroots_result():
    assert roots.quad_roots(1.0, 1.0, -12.0) == ((3+0j), (-4+0j))


def test_quadroots_types():
    # Must fail if no exception is raised at all (the original version
    # passed silently in that case).
    try:
        roots.quad_roots("", "green", "hi")
        assert False, "expected TypeError"
    except TypeError:
        pass


def test_quadroots_zerocoeff():
    try:
        roots.quad_roots(a=0.0)
        assert False, "expected ValueError"
    except ValueError:
        pass
!pytest --cov
%%file .travis.yml
language: python
python:
- "3.5"
before_install:
- pip install pytest pytest-cov
script:
- pytest
%%file setup.cfg
[tool:pytest]
addopts = --doctest-modules --cov-report term-missing --cov roots
%%file .travis.yml
language: python
python:
- "3.5"
before_install:
- pip install pytest pytest-cov
- pip install coveralls
script:
- py.test
after_success:
- coveralls
%%file README.md
[](https://travis-ci.org/crystalzhaizhai/cs207testing.svg?branch=master)
[](https://coveralls.io/github/crystalzhaizhai/cs207testing?branch=master)
%%file reaction_coeffs.py
from math import exp
def k_constant(k):
    """Constant reaction rate coefficient: returns *k* unchanged."""
    return k
def k_arr(a, e, t):
    """Arrhenius reaction rate coefficient: a * exp(-e / (R * t))."""
    gas_constant = 8.314  # ideal gas constant used by this module
    for value, message in ((a, "a<0!"), (t, "t<0!")):
        if value <= 0:
            raise ValueError(message)
    return a * exp(-e / (gas_constant * t))
def k_mod_arr(a, e, t, b):
    """Modified Arrhenius coefficient: a * t**b * exp(-e / (R * t))."""
    gas_constant = 8.314  # ideal gas constant used by this module
    for value, message in ((a, "a<0!"), (t, "t<0!")):
        if value <= 0:
            raise ValueError(message)
    return a * t ** b * exp(-e / (gas_constant * t))
# Exercise the three rate-coefficient functions with sample parameters.
import reaction_coeffs as rc
a = 10**7
b = 0.5
e = 10**3
t = 10**2
print(rc.k_arr(a, e, t))
print(rc.k_mod_arr(a, e, t, b))
print(rc.k_constant(rc.k_arr(a, e, t)))
%%file progress_rate.py
import numpy as np
import copy
def progress_rate(x, v, vv, kk):
    """Returns the progress rate of a single chemical reaction.

    INPUTS
    =======
    x: column vector (list of 1-element lists) of floats
       concentration of molecule species
    v: column vector of numbers
       coefficients of molecule species on the left side of the equation
    vv: column vector of numbers
       coefficients of molecule species on the right side of the equation
    kk: number
       reaction rate coefficient

    RETURNS
    ========
    w: float, the progress rate kk * prod(x_i ** v_i)

    EXAMPLES
    =========
    >>> progress_rate([[1.0],[2.0],[3.0]],[[2.0],[1.0],[0.0]],[[0],[0],[1]],10)
    20.0
    """
    if kk < 0:
        raise ValueError("k<0")
    if all(row[0] <= 0 for row in v):
        raise ValueError("no reactants")
    if all(row[0] <= 0 for row in vv):
        raise ValueError("no products")
    # w = kk * product over species of concentration ** reactant coefficient.
    w = kk
    for conc, coeff in zip(x, v):
        w *= conc[0] ** coeff[0]
    return w
%%file progress_test.py
import progress_rate as pr


def test_normal():
    x = [[1.0], [2.0], [3.0]]
    v_i_prime = [[2.0], [1.0], [0.0]]
    v_i_prime_prime = [[0], [0], [1]]
    k = 10
    assert pr.progress_rate(x, v_i_prime, v_i_prime_prime, k) == 20.0


def test_k_positive():
    # Must fail if no exception is raised (the original passed silently).
    x = [[1.0], [2.0], [3.0]]
    v_i_prime = [[2.0], [1.0], [0.0]]
    v_i_prime_prime = [[0], [0], [1]]
    try:
        pr.progress_rate(x, v_i_prime, v_i_prime_prime, -10)
        assert False, "expected ValueError for negative k"
    except ValueError:
        pass


def test_reactants():
    x = [[1.0], [2.0], [3.0]]
    v_i_prime = [[0], [0], [0]]
    v_i_prime_prime = [[0], [0], [1]]
    try:
        pr.progress_rate(x, v_i_prime, v_i_prime_prime, 10)
        assert False, "expected ValueError for no reactants"
    except ValueError:
        pass


def test_products():
    x = [[1.0], [2.0], [3.0]]
    v_i_prime = [[2], [1], [0.0]]
    v_i_prime_prime = [[0], [0], [0]]
    try:
        pr.progress_rate(x, v_i_prime, v_i_prime_prime, 10)
        assert False, "expected ValueError for no products"
    except ValueError:
        pass
import doctest
doctest.testmod(verbose=True)
!pytest
!pytest --cov
%%file progress1_rate.py
import numpy as np
import copy
def progress1_rate(x, v, vv, k):
    """Returns the progress rates of a system of chemical reactions.

    INPUTS
    =======
    x: numpy column vector of floats — concentration of molecule species
    v: numpy matrix (species x reactions) — left-hand-side coefficients
    vv: numpy matrix (species x reactions) — right-hand-side coefficients
    k: list of floats — reaction rate coefficient per reaction

    RETURNS
    ========
    w: list of floats, one progress rate per reaction (per column of v)

    EXAMPLES
    =========
    >>> progress1_rate(np.array([[1.0],[2.0],[1]]),np.array([[1,2],[2,0],[0,2]]),np.array([[0,0],[0,1],[2,1]]),[10,10])
    [40.0, 10.0]
    """
    if (any([kk < 0 for kk in k])):
        raise ValueError("k<0")
    # Every reaction (column of v) must consume at least one species.
    flag = True
    for j in range(v.shape[1]):
        if all([v[i][j] <= 0 for i in range(v.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no reactants")
    # Every reaction (column of vv) must produce at least one species.
    flag = True
    for j in range(vv.shape[1]):
        if all([vv[i][j] <= 0 for i in range(vv.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no products")
    if x.shape[0] != v.shape[0] or v.shape != vv.shape or v.shape[1] != len(k):
        raise ValueError("dimensions not match")
    # tmp[i][j] = x_i ** v_ij; each w[j] = k[j] * prod_i tmp[i][j].
    tmp = np.array([x[i][0] ** v[i][j] for i in range(v.shape[0]) for j in range(v.shape[1])]).reshape(v.shape)
    w = copy.copy(k)
    for i in range(len(x)):
        for j in range(v.shape[1]):
            a = tmp[i][j]
            w[j] *= a
    return (w)
# Sanity-check progress1_rate on the two-reaction example; prints [40.0, 10.0].
import progress1_rate as pr1
import numpy as np
x = np.array([[1.0], [2.0], [1]])
v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
k = [10, 10]
print(pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, k))
%%file progress1_test.py
import progress1_rate as pr1
import numpy as np


def test_normal1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
    k = [10, 10]
    assert pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, k) == [40.0, 10.0]


def test_nagative_k1():
    # Must fail if no exception is raised (the original passed silently).
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
    try:
        pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, [-10, 10])
        assert False, "expected ValueError for negative k"
    except ValueError:
        pass


def test_reactants1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[0, 2], [0, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
    try:
        pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for no reactants"
    except ValueError:
        pass


def test_products1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 0], [2, 0]])
    try:
        pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for no products"
    except ValueError:
        pass


def test_dimensions1():
    x = np.array([[1.0], [2.0]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[1, 0], [0, 1], [2, 0]])
    try:
        pr1.progress1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for mismatched dimensions"
    except ValueError:
        pass
%%file reaction1_rate.py
import numpy as np
import copy
def reaction1_rate(x, v, vv, k):
    """Returns reaction rates of a system of chemical reactions.

    INPUTS
    =======
    x: numpy column vector of floats — concentration of molecule species
    v: numpy matrix (species x reactions) — left-hand-side coefficients
    vv: numpy matrix (species x reactions) — right-hand-side coefficients
    k: list of floats — reaction rate coefficient per reaction

    RETURNS
    ========
    numpy column vector of floats.
    NOTE(review): the result has one entry per (species, reaction) pair
    (species-major), not one summed rate per species — confirm this is the
    intended definition of the reaction rate.

    EXAMPLES
    =========
    >>> reaction1_rate(np.array([[1.0],[2.0],[1]]),np.array([[1,0],[2,0],[0,2]]),np.array([[0,1],[0,2],[1,0]]),[10,10])
    array([[-40.],
           [ 10.],
           [-80.],
           [ 20.],
           [ 40.],
           [-20.]])
    """
    if (any([kk < 0 for kk in k])):
        raise ValueError("k<0")
    # Every reaction (column of v) must consume at least one species.
    flag = True
    for j in range(v.shape[1]):
        if all([v[i][j] <= 0 for i in range(v.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no reactants")
    # Every reaction (column of vv) must produce at least one species.
    flag = True
    for j in range(vv.shape[1]):
        if all([vv[i][j] <= 0 for i in range(vv.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no products")
    if x.shape[0] != v.shape[0] or v.shape != vv.shape or v.shape[1] != len(k):
        raise ValueError("dimensions not match")
    # Progress rate per reaction: w[j] = k[j] * prod_i x_i ** v_ij.
    tmp = np.array([x[i][0] ** v[i][j] for i in range(v.shape[0]) for j in range(v.shape[1])]).reshape(v.shape)
    w = copy.copy(k)
    for i in range(len(x)):
        for j in range(v.shape[1]):
            a = tmp[i][j]
            w[j] *= a
    return (np.transpose([np.array([w[j]*(vv[i][j]-v[i][j]) for i in range(len(x)) for j in range(v.shape[1])])]))
%%file reaction1_test.py
import reaction1_rate as rr1
import numpy as np


def test_normal1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 0], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 1], [0, 2], [1, 0]])
    k = [10, 10]
    assert all(rr1.reaction1_rate(x, v_i_prime, v_i_prime_prime, k) == [[-40], [10], [-80], [20], [40], [-20]])


def test_nagative_k1():
    # Must fail if no exception is raised (the original passed silently).
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
    try:
        rr1.reaction1_rate(x, v_i_prime, v_i_prime_prime, [-10, 10])
        assert False, "expected ValueError for negative k"
    except ValueError:
        pass


def test_reactants1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[0, 2], [0, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 1], [2, 1]])
    try:
        rr1.reaction1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for no reactants"
    except ValueError:
        pass


def test_products1():
    x = np.array([[1.0], [2.0], [1]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[0, 0], [0, 0], [2, 0]])
    try:
        rr1.reaction1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for no products"
    except ValueError:
        pass


def test_dimensions1():
    x = np.array([[1.0], [2.0]])
    v_i_prime = np.array([[1, 2], [2, 0], [0, 2]])
    v_i_prime_prime = np.array([[1, 0], [0, 1], [2, 0]])
    try:
        rr1.reaction1_rate(x, v_i_prime, v_i_prime_prime, [10, 10])
        assert False, "expected ValueError for mismatched dimensions"
    except ValueError:
        pass
import doctest
doctest.testmod(verbose=True)
!pytest --cov
%%file chemkin.py
import numpy as np
import copy
def reaction1_rate(x, v, vv, k):
    """Returns reaction rates of a system of chemical reactions.

    NOTE(review): this is a verbatim duplicate of reaction1_rate.py —
    consider keeping a single copy.

    INPUTS
    =======
    x: numpy column vector of floats — concentration of molecule species
    v: numpy matrix (species x reactions) — left-hand-side coefficients
    vv: numpy matrix (species x reactions) — right-hand-side coefficients
    k: list of floats — reaction rate coefficient per reaction

    RETURNS
    ========
    numpy column vector with one entry per (species, reaction) pair.

    EXAMPLES
    =========
    >>> reaction1_rate(np.array([[1.0],[2.0],[1]]),np.array([[1,0],[2,0],[0,2]]),np.array([[0,1],[0,2],[1,0]]),[10,10])
    array([[-40.],
           [ 10.],
           [-80.],
           [ 20.],
           [ 40.],
           [-20.]])
    """
    if (any([kk < 0 for kk in k])):
        raise ValueError("k<0")
    flag = True
    for j in range(v.shape[1]):
        if all([v[i][j] <= 0 for i in range(v.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no reactants")
    flag = True
    for j in range(vv.shape[1]):
        if all([vv[i][j] <= 0 for i in range(vv.shape[0])]):
            flag = False
            break
    if (flag == False):
        raise ValueError("no products")
    if x.shape[0] != v.shape[0] or v.shape != vv.shape or v.shape[1] != len(k):
        raise ValueError("dimensions not match")
    # Progress rate per reaction: w[j] = k[j] * prod_i x_i ** v_ij.
    tmp = np.array([x[i][0] ** v[i][j] for i in range(v.shape[0]) for j in range(v.shape[1])]).reshape(v.shape)
    w = copy.copy(k)
    for i in range(len(x)):
        for j in range(v.shape[1]):
            a = tmp[i][j]
            w[j] *= a
    return (np.transpose([np.array([w[j]*(vv[i][j]-v[i][j]) for i in range(len(x)) for j in range(v.shape[1])])]))
# Evaluate the three-reaction system at several temperatures, computing each
# k with a different coefficient model (modified Arrhenius, constant, Arrhenius).
import numpy as np
import chemkin as ck
x = np.array([[2], [1], [0.5], [1], [1]])
v = np.array([[2, 0, 0], [1, 0, 1], [0, 1, 0], [0, 1, 0], [0, 0, 1]])
vv = np.array([[1, 0, 0], [0, 1, 0], [2, 0, 1], [0, 0, 1], [0, 1, 0]])
import chemkin as ck  # NOTE(review): duplicate import, harmless
import reaction_coeffs as rc
t = [750, 1500, 2500]
a1 = 10**8
b1 = 0.5
e1 = 5*10**4
k2 = 10**4
a3 = 10**7
e3 = 10**4
for i in range(len(t)):
    k = [rc.k_mod_arr(a1, e1, t[i], b1), rc.k_constant(k2), rc.k_arr(a3, e3, t[i])]
    print(ck.reaction1_rate(x, v, vv, k))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authenticate your GCP account
Step2: Create a Cloud Storage bucket
Step3: Only if your bucket doesn't already exist
Step4: Import libraries
Step5: Reading a sample TabNet prediction on syn2 data
Step6: Download and preprocess the predictions.
Step7: Parse the mask values in prediction. Then, concatenate the mask values.
Step8: Visualize the mask value matrix.
|
<ASSISTANT_TASK:>
Python Code:
PROJECT_ID = "[<your-project-id>]"
import os
import sys
import warnings
warnings.filterwarnings('ignore')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# If you are running this notebook in Colab, follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
def install_dlvm_packages():
    !pip install tabulate

if 'google.colab' in sys.modules:
    from google.colab import auth as google_auth
    google_auth.authenticate_user()
    !pip install witwidget --quiet
    !pip install tensorflow==1.15.0 --quiet
    !gcloud config set project $PROJECT_ID
elif "DL_PATH" in os.environ:
    install_dlvm_packages()

BUCKET_NAME = "[<your-bucket-name>]"
REGION = "us-central1"
!gsutil mb -l $REGION gs://$BUCKET_NAME

import numpy as np
import json
from google.cloud import storage
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline

# Copy the sample TabNet predictions into the working bucket.
!gsutil cp gs://cloud-samples-data/ai-platform/synthetic/tab_net_output/syn2 gs://$BUCKET_NAME

# Replace your the BUCKET_NAME and PREDICTION_FILE
# BUCKET_NAME = "[<your-bucket-name>]"
# PREDICTION_FILE = "[<your-prediction-file>]"
PREDICTION_FILE = "syn2"
MASK_KEY = "aggregated_mask_values"
HEADER = [("feat_" + str(i)) for i in range(1, 12)]
HEADER

# Download the newline-delimited JSON predictions from Cloud Storage.
storage_client = storage.Client()
bucket = storage_client.get_bucket(BUCKET_NAME)
blob = bucket.blob(PREDICTION_FILE)
f = blob.download_as_string(client=None).decode("utf-8").strip()
predictions = f.split("\n")
predictions[:1]

# Parse the mask values from each prediction and stack them into a matrix.
masks = []
for prediction in predictions:
    prediction = json.loads(prediction)
    masks.append(prediction[MASK_KEY])
masks = np.matrix(masks)
masks.shape

# Visualize the first 50 rows of the mask matrix as a grayscale image.
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(121)
ax.imshow(masks[:50, :], interpolation='bilinear', cmap=cm.Greys_r)
ax.set_xlabel('Features')
ax.set_ylabel('Sample index')
ax.xaxis.set_ticks(np.arange(len(HEADER)))
ax.set_xticklabels(HEADER, rotation='vertical')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the next piece of code we will cycle through our directory again
Step2: Here we recreate our list from the last exercise, counting the instances of the word privacy in each file.
Step3: Next we use the len function to count the total number of words in each file.
Step4: Now we can calculate the ratio of the word privacy to the total number of words in the file. To accomplish this we simply divide the two numbers.
Step5: Now our descriptive statistics concerning word frequencies have added value. We can see that there has indeed been a steady increase in the frequency of the use of the word privacy in our corpus. When we investigate the yearly usage, we can see that the frequency almost doubled between 2008 and 2009, as well as dramatic increase between 2012 and 2014. This is also apparent in the difference between the 39th and the 40th sittings of Parliament.
Step6: Using the tabulate module, we will display our tuple as a table.
Step7: And finally, we will write the values to a CSV file called privacyFreqTable.
Step8: Part 2
Step9: Now, we can count the number of files in each dataset. This is also an important activity for error-checking. While it is easy to trust the numerical output of the code when it works sucessfully, we must always be sure to check that the code is actually performing in exactly the way we want it to. In this case, these numbers can be cross-referenced with the original XML data, where each transcript exists as its own file. A quick check of the directory shows that the numbers are correct.
Step10: Here is a screenshot of some of the raw data. We can see that there are <u>97</u> files in 2006, <u>117</u> in 2007 and <u>93</u> in 2008. The rest of the data is also correct.
Step11: These numbers confirm our earlier results. There is a clear indication that the usage of the term privacy is increasing, with major changes occurring between the years 2008 and 2009, as well as between 2012 and 2014. This trend is also clearly observable between the 39th and 40th sittings of Parliament.
Step12: Now we will combine the three lists into one large list and assign it to the variable large.
Step13: We can use the same calculations to determine the total number of occurences of privacy, as well as the total number of words in the corpus. We can also calculate the total ratio of privacy to the total number of words.
Step14: Another type of word frequency statistic we can generate is a type/token ratio. The types are the total number of unique words in the corpus, while the tokens are the total number of words. The type/token ratio is used to determine the variability of the language used in the text. The higher the ratio, the more complex the text will be. First we'll determine the total number of types, using <i>Python's</i> set function.
Step15: Now we can divide the types by the tokens to determine the ratio.
Step16: Finally, we will use the NLTK module to create a graph that shows the top 50 most frequent words in the Hansard corpus. Although privacy will not appear in the graph, it's always interesting to see what types of words are most common, and what their distribution is. NLTK will be introduced with more detail in the next section featuring concordance outputs, but here all we need to know is that we assign our variable large to the NLTK function Text in order to work with the corpus data. From there we can determine the frequency distribution for the whole text.
Step17: Here we will assign the frequency distribution to the plot function to produce a graph. While it's a little hard to read, the most commonly used word in the Hansard corpus is the, with a frequency just over 400,000 occurences. The next most frequent word is to, which only has a frequency of about 225,000 occurences, almost half of the first most common word. The first 10 most frequent words appear with a much greater frequency than any of the other words in the corpus.
Step18: Another feature of the NLTK frequency distribution function is the generation of a list of hapaxes. These are words that appear only once in the entire corpus. While not meaningful for this study, it's an interesting way to explore the data.
|
<ASSISTANT_TASK:>
Python Code:
# This is where the modules are imported
import nltk
from os import listdir
from os.path import splitext
from os.path import basename
from tabulate import tabulate
# These functions iterate through the directory and create a list of filenames
def list_textfiles(directory):
    "Return a list of filenames ending in '.txt'"
    return [directory + "/" + name
            for name in listdir(directory)
            if name.endswith(".txt")]
def remove_ext(filename):
    """Strip the file extension (such as '.txt') from *filename*."""
    return splitext(filename)[0]
def remove_dir(filepath):
    """Strip the directory portion of *filepath*, leaving only the file name."""
    return basename(filepath)
def get_filename(filepath):
    """Return the bare name of *filepath* with both its path and extension removed."""
    # Inline of remove_ext followed by remove_dir: drop the extension first,
    # then the directory components.
    stem, _ = splitext(filepath)
    return basename(stem)
# These functions work on the content of the files
def read_file(filename):
    """Read the contents of FILENAME and return them as a single string.

    Uses a ``with`` block so the file handle is closed even if the read
    raises; the original open()/close() pair leaked the handle on error.
    """
    with open(filename) as infile:
        return infile.read()
def count_in_list(item_to_count, list_to_search):
    """Count occurrences of *item_to_count* in *list_to_search*.

    Delegates to ``list.count()``, the idiomatic (and C-speed) replacement
    for the original hand-rolled counting loop; behavior is identical.
    """
    return list_to_search.count(item_to_count)
# Build the list of Hansard file names with path and extension stripped.
filenames = []
for files in list_textfiles('../Counting Word Frequencies/data'):
files = get_filename(files)
filenames.append(files)
# Tokenize each file: whitespace split, lowercase, alphabetic tokens only.
corpus = []
for filename in list_textfiles('../Counting Word Frequencies/data'):
text = read_file(filename)
words = text.split()
clean = [w.lower() for w in words if w.isalpha()]
corpus.append(clean)
# Raw counts of 'privacy' per file.
for words, names in zip(corpus, filenames):
print("Instances of the word \'privacy\' in", names, ":", count_in_list("privacy", words))
# Token totals per file.
for files, names in zip(corpus, filenames):
print("There are", len(files), "words in", names)
# Relative frequency of 'privacy' per file.
print("Ratio of instances of privacy to total number of words in the corpus:")
for words, names in zip(corpus, filenames):
print('{:.6f}'.format(float(count_in_list("privacy", words))/(float(len(words)))),":",names)
# Re-compute raw counts and percentage ratios as parallel lists for the table.
raw = []
for i in range(len(corpus)):
raw.append(count_in_list("privacy", corpus[i]))
ratio = []
for i in range(len(corpus)):
ratio.append('{:.3f}'.format((float(count_in_list("privacy", corpus[i]))/(float(len(corpus[i])))) * 100))
# NOTE(review): on Python 3, zip() returns a one-shot iterator; tabulate()
# below will exhaust it, leaving `table` empty for later consumers -- confirm
# whether this notebook targets Python 2 or wrap in list(...).
table = zip(filenames, raw, ratio)
print(tabulate(table, headers = ["Filename", "Raw", "Ratio %"], floatfmt=".3f", numalign="left"))
import csv
# Persist the frequency table to CSV. The file must be opened in text mode
# with newline='' for csv.writer on Python 3 (the original 'wb' binary mode
# raises TypeError there, since csv writes str, not bytes).
# NOTE(review): `table` is a zip object built earlier; on Python 3 it may
# already have been exhausted by tabulate() -- verify it still holds rows.
with open('privacyFreqTable.csv', 'w', newline='') as f:
    w = csv.writer(f)
    w.writerows(table)
# Split each file on the Hansard session header to count individual debates.
corpus_1 = []
for filename in list_textfiles('../Counting Word Frequencies/data'):
text = read_file(filename)
words = text.split(" OFFICIAL REPORT (HANSARD)")
corpus_1.append(words)
for files, names in zip(corpus_1, filenames):
print("There are", len(files), "files in", names)
# Report debates per file alongside the 'privacy' count from the token corpus.
for names, files, words in zip(filenames, corpus_1, corpus):
print("In", names, "there were", len(files), "debates. The word privacy was said", \
count_in_list('privacy', words), "times.")
# Rebuild the tokenized corpus from the 'data2' directory for the combined analysis.
corpus_3 = []
for filename in list_textfiles('../Counting Word Frequencies/data2'):
text = read_file(filename)
words = text.split()
clean = [w.lower() for w in words if w.isalpha()]
corpus_3.append(clean)
# Flatten the per-file token lists into one corpus-wide token list.
large = list(sum(corpus_3, []))
print("There are", count_in_list('privacy', large), "occurences of the word 'privacy' and a total of", \
len(large), "words.")
print("The ratio of instances of privacy to total number of words in the corpus is:", \
'{:.6f}'.format(float(count_in_list("privacy", large))/(float(len(large)))), "or", \
'{:.3f}'.format((float(count_in_list("privacy", large))/(float(len(large)))) * 100),"%")
# Type/token ratio: unique word forms over total tokens.
print("There are", (len(set(large))), "unique words in the Hansard corpus.")
print("The type/token ratio is:", ('{:.6f}'.format(len(set(large))/(float(len(large))))), "or",\
'{:.3f}'.format(len(set(large))/(float(len(large)))*100),"%")
# NLTK frequency distribution: plot the top 50 words and list hapaxes
# (words occurring exactly once).
text = nltk.Text(large)
fd = nltk.FreqDist(text)
%matplotlib inline
fd.plot(50,cumulative=False)
fd.hapaxes()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Neural Network Settings
Step2: Get the training data
Step3: Setup the model
Step4: Fit the model
Step5: Visualize the inputs and predictions
Step6: Save fitted model
|
<ASSISTANT_TASK:>
Python Code:
# Import some stuff
from __future__ import print_function, absolute_import, division
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
# Set some constants
# Training hyperparameters: minibatch size, number of output classes,
# and number of passes over the training set.
batch_size = 128
nb_classes = 10
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# Dropout parameters
conv_dropout = 0.25
dens_dropout = 0.5
# Set hidden layer size
nb_hidden = 128
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Reshape to channels-first (N, 1, 28, 28) as expected by the conv layers below.
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
# Scale pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# Build the CNN: two 3x3 conv layers, max-pool, dropout, then a dense
# hidden layer and a 10-way softmax classifier.
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
border_mode='valid',
input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(conv_dropout))
model.add(Flatten())
model.add(Dense(nb_hidden))
model.add(Activation('relu'))
model.add(Dropout(dens_dropout))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
# Time the training run; note the test set doubles as validation data here.
import time
t1 = time.time()
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
t2 = time.time()
print('Training Finished in {} minutes.'.format((t2-t1)/60))
# Evaluate
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
%matplotlib inline
def show_predictions(i):
    """Display test image *i* alongside the model's predicted digit and the true label.

    Reads the module-level ``X_test``, ``Y_test`` and the fitted ``model``.
    """
    sample = X_test[i:i+1, :1]
    probs = model.predict_proba(sample)[0]
    predicted = np.argmax(probs)
    actual = np.argmax(Y_test[i])
    # Plot the image with prediction vs. ground truth in the title.
    fig, ax = plt.subplots(1, 1)
    ax.imshow(sample[0, 0])
    ax.set_title('Predicted Digit = {}\nActual digit = {}'.format(predicted, actual))
# Run this lots of times to visualize the testing set
show_predictions(np.random.randint(X_test.shape[0]))
# Persist the fitted model via the project-local output_model helper.
import output_model
output_model.save_model(model, 'models/MNIST_cnn_model')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examining the TensorFlow Graph
Step2: Define a Keras model
Step3: Download and prepare the training data.
Step4: Train the model and log data
Step5: Op-level graph
Step6: You can also optionally use TensorBoard.dev to create a hosted, shareable experiment.
Step7: By default, TensorBoard displays the op-level graph. (On the left, you can see the “Default” tag selected.) Note that the graph is inverted; data flows from bottom to top, so it’s upside down compared to the code. However, you can see that the graph closely matches the Keras model definition, with extra edges to other computation nodes.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load the TensorBoard notebook extension.
%load_ext tensorboard
from datetime import datetime
from packaging import version
import tensorflow as tf
from tensorflow import keras
print("TensorFlow version: ", tf.__version__)
# Guard against running on TF 1.x, which lacks the APIs used below.
assert version.parse(tf.__version__).release[0] >= 2, \
    "This notebook requires TensorFlow 2.0 or above."
import tensorboard
tensorboard.__version__
# Clear any logs from previous runs
!rm -rf ./logs/
# Define the model.
# Small MLP over flattened 28x28 Fashion-MNIST images.
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(32, activation='relu'),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Scale pixels to [0, 1]; test split is discarded here.
(train_images, train_labels), _ = keras.datasets.fashion_mnist.load_data()
train_images = train_images / 255.0
# Define the Keras TensorBoard callback.
# Timestamped log directory so successive runs don't overwrite each other.
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# Train the model.
model.fit(
train_images,
train_labels,
batch_size=64,
epochs=5,
callbacks=[tensorboard_callback])
# Launch TensorBoard inline, then optionally upload to TensorBoard.dev.
%tensorboard --logdir logs
!tensorboard dev upload \
--logdir logs \
--name "Sample op-level graph" \
--one_shot
# The function to be traced.
# @tf.function compiles the Python function into a TF graph so its ops
# can be captured by the summary tracer below.
@tf.function
def my_func(x, y):
# A simple hand-rolled layer.
return tf.nn.relu(tf.matmul(x, y))
# Set up logging.
# Timestamped writer directory for the graph trace.
stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = 'logs/func/%s' % stamp
writer = tf.summary.create_file_writer(logdir)
# Sample data for your function.
x = tf.random.uniform((3, 3))
y = tf.random.uniform((3, 3))
# Bracket the function call with
# tf.summary.trace_on() and tf.summary.trace_export().
tf.summary.trace_on(graph=True, profiler=True)
# Call only one tf.function when tracing.
z = my_func(x, y)
with writer.as_default():
tf.summary.trace_export(
name="my_func_trace",
step=0,
profiler_outdir=logdir)
%tensorboard --logdir logs/func
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What does this look like? Let's look at the top of the file with head.
Step2: We'll use the pandas data analysis library to read the data. But we'll need a few caveats.
Step3: Interesting side note
Step4: Now, let's also replace all our NAs with 0s. This makes sense because if a gene is not detected, then its expression value will be 0. From the header file of the original data, it said
Step5: Cool, pretty close to 0.5. Now let's replace all NAs with 0, with fillna(0).
Step6: Finally, we will add 1 and log-transform the data, so it's closer to normally distributed. Gene expression data is known to be log-normal.
Step7: Create metadata about the expression features
Step8: So there's fewer unique gene names, meaning we should use the ENSEMBL ids for the unique IDs. We'll do this by resetting the columns of expression, and creating metadata about the expression features, stored as expression_feature_data.
Step9: Now let's create the expression_feature_data DataFrame, and add a column of 'gene_name' for the renamed feature.
Step10: Preparing the metadata
Step11: The first category we'll use is pretty straightforward, it'll just be the name of the tissue. We can just use the index as the 'phenotype'.
Step12: Next, let's add some categories on the data, grouping different tissue types together that have the same structure or function. My awesome MD/PhD friend Cynthia Hsu (you have to search for her name on the webpage) came up with these categories.
Step13: Assign colors to the phenotypes
Step14: Let's create an iterator so we can easily loop over this list of colors without having to reference indices.
Step15: Finally, let's make the phenotype to color mapping as a dictionary.
Step16: Make a flotilla study!
|
<ASSISTANT_TASK:>
Python Code:
# Download the Illumina Body Map expression matrix and its sample metadata.
! curl http://www.ebi.ac.uk/gxa/experiments/E-MTAB-513.tsv > E-MTAB-513.tsv
! curl http://www.ebi.ac.uk/arrayexpress/files/E-MTAB-513/E-MTAB-513.sdrf.txt> E-MTAB-513.sdrf.txt
! head E-MTAB-513.tsv
! head E-MTAB-513.sdrf.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import flotilla
# Skip the 3 comment rows; the first two columns (gene id, gene name)
# become a MultiIndex on the rows.
expression = pd.read_table('E-MTAB-513.tsv', skiprows=3, index_col=[0, 1])
expression.head()
# Transpose so rows = tissue samples, columns = genes.
expression = expression.T
expression.head()
expression.min().min()
# Undetected genes are NA in the source file; treat them as zero expression.
expression = expression.fillna(0)
# log2(x + 1) transform -- gene expression is approximately log-normal.
expression = np.log2(expression + 1)
# Split the (ENSEMBL id, gene name) column MultiIndex into two flat indexes.
ensembl_ids = pd.Index([a for a, b in expression.columns.tolist()])
gene_names = pd.Index([b for a, b in expression.columns.tolist()])
len(ensembl_ids.unique())
len(gene_names.unique())
# ENSEMBL ids are the unique identifiers; gene names become feature metadata.
expression.columns = ensembl_ids
expression_feature_data = pd.DataFrame(index=ensembl_ids)
expression_feature_data['gene_name'] = gene_names
expression_feature_data.head()
# Sample-level metadata, indexed by tissue name (the expression row index).
metadata = pd.DataFrame(index=expression.index)
metadata.head()
# The tissue name itself serves as the phenotype label.
metadata['phenotype'] = metadata.index
metadata.head()
# Boolean category flags grouping tissues by structure/function.
# All of these tissue types are part of the reproductive system
metadata['reproductive'] = metadata.phenotype.isin(['animal ovary', 'testis'])
# ALl of these tissue types generate hormones
metadata['hormonal'] = metadata.phenotype.isin(['animal ovary', 'testis', 'adrenal gland', 'thyroid'])
# These tissues are part of in the immune system
metadata['immune'] = metadata.phenotype.isin(['leukocyte', 'thyroid', 'lymph node'])
# These tissues are fatty
metadata['fatty'] = metadata.phenotype.isin(['adipose tissue', 'brain', 'breast'])
# These tissues contain either smooth (involuntary) or skeletal (voluntary) muscle
metadata['muscle'] = metadata.phenotype.isin(['colon', 'heart', 'prostate', 'skeletal muscle'])
# These tissues' main function is to filter blood in some way
metadata['filtration'] = metadata.phenotype.isin(['colon', 'kidney', 'liver'])
# These tissues have high blood flow to them, compared to other tissues
metadata['high_blood_flow'] = metadata.phenotype.isin(['brain', 'colon', 'kidney', 'liver', 'lung'])
# One distinct husl color per sample; map each phenotype to its color.
colors = sns.color_palette('husl', len(expression.index))
sns.palplot(colors)
colors_iter = iter(colors)
phenotype_to_color = {phenotype: colors_iter.next() for phenotype in metadata.phenotype}
phenotype_to_color
# Assemble the flotilla Study from the metadata and expression matrix;
# features are displayed by their human-readable 'gene_name'.
study = flotilla.Study(metadata, expression_data=expression,
metadata_phenotype_to_color=phenotype_to_color,
expression_feature_data=expression_feature_data,
expression_feature_rename_col='gene_name',
species='hg19')
study.expression.feature_data.head()
# Interactive exploration: PCA and per-gene expression plots.
study.interactive_pca()
study.plot_gene('RBFOX1')
study.plot_gene('ADIPOQ')
study.plot_gene('OCA2')
study.plot_gene('MAPT')
# Inspect the ArrayExpress sample metadata (SDRF) columns.
era_metadata = pd.read_table("E-MTAB-513.sdrf.txt")
from pprint import pprint
pprint(sorted(era_metadata.columns.tolist()))
era_metadata['Comment[biosource provider]']
era_metadata['Scan Name']
era_metadata['Material Type']
era_metadata['Material Type.1']
era_metadata.Description
study.interactive_clustermap()
# Save the study package to disk under the name 'bodymap2'.
study.save('bodymap2')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: instantiate a pyemu object and drop prior info. Then reorder the jacobian and save as binary. This is needed because the pest utilities require strict order between the control file and jacobian
Step2: Draw some vectors from the prior and write the vectors to par files
Step3: Run pnulpar
Step4: Now for pyemu
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pyemu
# Load the Jacobian into a MonteCarlo object and drop prior information rows.
mc = pyemu.MonteCarlo(jco="freyberg.jcb",verbose=False,forecasts=[])
mc.drop_prior_information()
# Reorder the Jacobian to match the control-file obs/par ordering, since the
# PEST utilities require strict agreement, then save both to disk.
jco_ord = mc.jco.get(mc.pst.obs_names,mc.pst.par_names)
ord_base = "freyberg_ord"
jco_ord.to_binary(ord_base + ".jco")
mc.pst.control_data.parsaverun = ' '
mc.pst.write(ord_base+".pst")
# Number of singular values to retain in the null-space projection.
nsing = 5
# setup the dirs to hold all this stuff
par_dir = "prior_par_draws"
proj_dir = "proj_par_draws"
parfile_base = os.path.join(par_dir,"draw_")
projparfile_base = os.path.join(proj_dir,"draw_")
# Recreate both output directories from scratch.
if os.path.exists(par_dir):
shutil.rmtree(par_dir)
os.mkdir(par_dir)
if os.path.exists(proj_dir):
shutil.rmtree(proj_dir)
os.mkdir(proj_dir)
mc = pyemu.MonteCarlo(jco=ord_base+".jco")
# make some draws
mc.draw(10)
#for i in range(10):
# mc.parensemble.iloc[i,:] = i+1
#write them to files
# 1-based string index so par file names line up with pnulpar's numbering.
mc.parensemble.index = [str(i+1) for i in range(mc.parensemble.shape[0])]
mc.parensemble.to_parfiles(parfile_base)
mc.parensemble.shape
# Run the PEST pnulpar utility: feed its interactive prompts from a file
# and project the prior draws onto the null space.
exe = os.path.join("pnulpar.exe")
args = [ord_base+".pst","y",str(nsing),"y","pnulpar_qhalfx.mat",parfile_base,projparfile_base]
in_file = os.path.join("misc","pnulpar.in")
with open(in_file,'w') as f:
f.write('\n'.join(args)+'\n')
os.system(exe + ' <'+in_file)
# Read pnulpar's projected par files back into an ensemble.
pnul_en = pyemu.ParameterEnsemble(mc.pst)
parfiles =[os.path.join(proj_dir,f) for f in os.listdir(proj_dir) if f.endswith(".par")]
pnul_en.read_parfiles(parfiles)
# Recover the numeric draw index from each file name (e.g. 'draw_3.par' -> '3')
# so rows align with the pyemu ensemble below.
pnul_en.loc[:,"fname"] = pnul_en.index
pnul_en.index = pnul_en.fname.apply(lambda x:str(int(x.split('.')[0].split('_')[-1])))
f = pnul_en.pop("fname")
pnul_en.sort_index(axis=1,inplace=True)
pnul_en.sort_index(axis=0,inplace=True)
pnul_en
# Now the pyemu equivalent: log-transform the ensemble and project it onto
# the null space with the same number of singular values.
print(mc.parensemble.istransformed)
mc.parensemble._transform()
en = mc.project_parensemble(nsing=nsing,inplace=False)
print(mc.parensemble.istransformed)
#en._back_transform()
en.sort_index(axis=1,inplace=True)
en.sort_index(axis=0,inplace=True)
en
#pnul_en.sort(inplace=True)
#en.sort(inplace=True)
# Percent difference between the pnulpar and pyemu projections; plot the
# per-parameter maximum to verify the two implementations agree.
diff = 100.0 * np.abs(pnul_en - en) / en
#diff[diff<1.0] = np.NaN
dmax = diff.max(axis=0)
dmax.sort_index(ascending=False,inplace=True)
dmax.plot(figsize=(10,10))
diff
# Spot-check one parameter column in both ensembles.
en.loc[:,"wf6_2"]
pnul_en.loc[:,"wf6_2"]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What's this TensorFlow business?
Step2: Example Model
Step3: TensorFlow supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful).
Step4: Training a specific model
Step5: To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 64 x 10, since our batches have size 64 and the output of the final affine layer should be 10, corresponding to our 10 classes)
Step6: You should see the following from the run above
Step7: You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use GPU devices. However, with TensorFlow, the default device is a GPU if one is available, and a CPU otherwise, so we can skip the device specification from now on.
Step8: Train the model
Step9: Check the accuracy of the model.
Step10: Train a great model on CIFAR-10!
Step11: Describe what you did here
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import math
import timeit
import matplotlib.pyplot as plt
%matplotlib inline
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=10000):
    """Load CIFAR-10 from disk and preprocess it for the classifier.

    Splits the raw training data into train/validation sets, truncates the
    test set, and zero-centers all three splits with the training mean image.
    Returns (X_train, y_train, X_val, y_val, X_test, y_test).
    """
    # Load the raw CIFAR-10 data from the standard assignment location.
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Carve the validation split out of the tail of the training data,
    # then truncate train and test to the requested sizes.
    val_mask = range(num_training, num_training + num_validation)
    X_val, y_val = X_train[val_mask], y_train[val_mask]
    train_mask = range(num_training)
    X_train, y_train = X_train[train_mask], y_train[train_mask]
    test_mask = range(num_test)
    X_test, y_test = X_test[test_mask], y_test[test_mask]

    # Normalize: subtract the training-set mean image from every split.
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# clear old variables
tf.reset_default_graph()
# setup input (e.g. the data that changes every batch)
# The first dim is None, and gets sets automatically based on batch size fed in
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
def simple_model(X, y):
    """Two-layer graph: 7x7 conv (stride 2, VALID) -> ReLU -> affine to 10 class scores."""
    # Graph parameters; the variable-name strings must stay stable for
    # checkpointing / variable reuse.
    conv_w = tf.get_variable("Wconv1", shape=[7, 7, 3, 32])
    conv_b = tf.get_variable("bconv1", shape=[32])
    fc_w = tf.get_variable("W1", shape=[5408, 10])
    fc_b = tf.get_variable("b1", shape=[10])

    # Wire the graph: conv + bias, nonlinearity, flatten, affine scores.
    # 5408 = 13 * 13 * 32 (output of the VALID stride-2 conv on 32x32x3 input).
    conv_out = tf.nn.conv2d(X, conv_w, strides=[1, 2, 2, 1], padding='VALID') + conv_b
    activated = tf.nn.relu(conv_out)
    flat = tf.reshape(activated, [-1, 5408])
    return tf.matmul(flat, fc_w) + fc_b
# Build the graph, the multiclass hinge loss on one-hot labels, and an
# Adam training op.
y_out = simple_model(X,y)
# define our loss
total_loss = tf.losses.hinge_loss(tf.one_hot(y,10),logits=y_out)
mean_loss = tf.reduce_mean(total_loss)
# define our optimizer
optimizer = tf.train.AdamOptimizer(5e-4) # select optimizer and set learning rate
train_step = optimizer.minimize(mean_loss)
# Generic train/eval loop. Reads the module-level graph tensors X, y,
# is_training, and mean_loss in addition to its arguments. When `training`
# (a train op) is given, it runs a training epoch; otherwise it only
# evaluates. Returns (mean loss, accuracy) over the dataset.
def run_model(session, predict, loss_val, Xd, yd,
epochs=1, batch_size=64, print_every=100,
training=None, plot_losses=False):
# have tensorflow compute accuracy
correct_prediction = tf.equal(tf.argmax(predict,1), y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# shuffle indicies
train_indicies = np.arange(Xd.shape[0])
np.random.shuffle(train_indicies)
training_now = training is not None
# setting up variables we want to compute (and optimizing)
# if we have a training function, add that to things we compute
# When training, the train op replaces the accuracy tensor in the fetch list.
variables = [mean_loss,correct_prediction,accuracy]
if training_now:
variables[-1] = training
# counter
iter_cnt = 0
for e in range(epochs):
# keep track of losses and accuracy
correct = 0
losses = []
# make sure we iterate over the dataset once
for i in range(int(math.ceil(Xd.shape[0]/batch_size))):
# generate indicies for the batch
start_idx = (i*batch_size)%Xd.shape[0]
idx = train_indicies[start_idx:start_idx+batch_size]
# create a feed dictionary for this batch
feed_dict = {X: Xd[idx,:],
y: yd[idx],
is_training: training_now }
# get batch size
# The final batch may be smaller than batch_size.
actual_batch_size = yd[idx].shape[0]
# have tensorflow compute loss and correct predictions
# and (if given) perform a training step
loss, corr, _ = session.run(variables,feed_dict=feed_dict)
# aggregate performance stats
# Weight the loss by batch size so the epoch mean is exact.
losses.append(loss*actual_batch_size)
correct += np.sum(corr)
# print every now and then
if training_now and (iter_cnt % print_every) == 0:
print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}"\
.format(iter_cnt,loss,np.sum(corr)/actual_batch_size))
iter_cnt += 1
total_correct = correct/Xd.shape[0]
total_loss = np.sum(losses)/Xd.shape[0]
print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}"\
.format(total_loss,total_correct,e+1))
if plot_losses:
plt.plot(losses)
plt.grid(True)
plt.title('Epoch {} Loss'.format(e+1))
plt.xlabel('minibatch number')
plt.ylabel('minibatch loss')
plt.show()
return total_loss,total_correct
# Train the simple model for one epoch on the GPU, then evaluate on the
# validation split.
with tf.Session() as sess:
with tf.device("/gpu:0"): #"/cpu:0" or "/gpu:0"
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step,True)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# clear old variables
tf.reset_default_graph()
# define our input (e.g. the data that changes every batch)
# The first dim is None, and gets sets automatically based on batch size fed in
shape_1 = 32
X = tf.placeholder(tf.float32, [None, shape_1, shape_1, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
# define model
def complex_model(X, y, is_training):
    """Conv(7x7, 32, ReLU) -> batchnorm -> 2x2 maxpool -> FC-1024 (ReLU) -> FC-10 scores."""
    net = tf.layers.conv2d(
        X,
        filters=32,
        kernel_size=[7, 7],
        padding='SAME',
        activation=tf.nn.relu)
    # NOTE(review): axis=1 normalizes over the height axis of NHWC input here,
    # not the channel axis -- confirm this matches the intended batchnorm.
    net = tf.layers.batch_normalization(net, axis=1, training=is_training)
    net = tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=2)
    net = tf.layers.dense(tf.layers.flatten(net), 1024, activation=tf.nn.relu)
    return tf.layers.dense(net, 10)
y_out = complex_model(X,y,is_training)
# Now we're going to feed a random batch into the model
# and make sure the output is the right size
x = np.random.randn(64, shape_1, shape_1,3)
with tf.Session() as sess:
with tf.device("/cpu:0"): #"/cpu:0" or "/gpu:0"
tf.global_variables_initializer().run()
ans = sess.run(y_out,feed_dict={X:x,is_training:True})
%timeit sess.run(y_out,feed_dict={X:x,is_training:True})
# Output should be (64, 10): one score per class per batch element.
print(ans.shape)
print(np.array_equal(ans.shape, np.array([64, 10])))
# Repeat the forward pass on the GPU to compare timing; fall back
# gracefully if no GPU device is available.
try:
with tf.Session() as sess:
with tf.device("/gpu:0") as dev: #"/cpu:0" or "/gpu:0"
tf.global_variables_initializer().run()
ans = sess.run(y_out,feed_dict={X:x,is_training:True})
%timeit sess.run(y_out,feed_dict={X:x,is_training:True})
except tf.errors.InvalidArgumentError:
print("no gpu found, please use Google Cloud if you want GPU acceleration")
# rebuild the graph
# trying to start a GPU throws an exception
# and also trashes the original graph
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
y_out = complex_model(X,y,is_training)
# Inputs
# y_out: is what your model computes
# y: is your TensorFlow variable with label information
# Outputs
# mean_loss: a TensorFlow variable (scalar) with numerical loss
# optimizer: a TensorFlow optimizer
# This should be ~3 lines of code!
# define our loss
total_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,logits=y_out)
mean_loss = tf.reduce_mean(total_loss)
# define our optimizer
optimizer = tf.train.RMSPropOptimizer(1e-3) # select optimizer and set learning rate
# batch normalization in tensorflow requires this extra dependency
# The batchnorm moving-average update ops must run before each train step.
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = optimizer.minimize(mean_loss)
# Train for one epoch, then evaluate on the validation split.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64,100,train_step)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# Feel free to play with this cell
import tensorflow.contrib.slim as slim
def vgg_like_model(X, y, is_training):
    """VGG-style network: three conv blocks (2x64, 2x128, 3x256 filters of 3x3),
    each followed by batchnorm and 2x2 maxpool, then FC-512 -> batchnorm ->
    FC-10 raw class scores.
    """
    net = X
    # Each stage: repeated 3x3 convs, batchnorm, then 2x2 maxpool.
    for repeats, width, conv_scope, pool_scope in (
            (2, 64, 'conv1', 'pool1'),
            (2, 128, 'conv2', 'pool2'),
            (3, 256, 'conv3', 'pool3')):
        net = slim.repeat(net, repeats, slim.conv2d, width, [3, 3], scope=conv_scope)
        net = tf.layers.batch_normalization(net, axis=1, training=is_training)
        net = slim.max_pool2d(net, [2, 2], scope=pool_scope)
    # Classifier head: flatten, hidden FC layer, batchnorm, linear scores.
    net = slim.fully_connected(tf.layers.flatten(net), 512, scope='fc7')
    net = tf.layers.batch_normalization(net, training=is_training)
    return slim.fully_connected(net, 10, activation_fn=None, scope='fc8')
# Rebuild the graph around the VGG-style model and wire up the loss/optimizer.
tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder(tf.bool)
y_out = vgg_like_model(X,y,is_training)
total_loss = tf.losses.sparse_softmax_cross_entropy(labels=y,logits=y_out)
mean_loss = tf.reduce_mean(total_loss)
# define our optimizer
optimizer = tf.train.AdamOptimizer(1e-3) # select optimizer and set learning rate
# NOTE(review): minimize() is called twice below; this first train_step is
# overwritten by the one inside the control-dependency block, leaving
# duplicate ops in the graph -- this line looks removable.
train_step = optimizer.minimize(mean_loss)
# batch normalization in tensorflow requires this extra dependency
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = optimizer.minimize(mean_loss)
# Feel free to play with this cell
# This default code creates a session
# and trains your model for 10 epochs
# then prints the validation set accuracy
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,4,64,100,train_step,True)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
# Test your model here, and make sure
# the output of this cell is the accuracy
# of your best model on the training and val sets
# We're looking for >= 70% accuracy on Validation
print('Training')
run_model(sess,y_out,mean_loss,X_train,y_train,1,64)
print('Validation')
run_model(sess,y_out,mean_loss,X_val,y_val,1,64)
print('Test')
run_model(sess,y_out,mean_loss,X_test,y_test,1,64)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we will create a dummy set of texts and convert it into a corpus
Step2: Then to run the LdaModel on it
Step3: Integration with Sklearn
Step4: Next, we use countvectorizer to convert the collection of text documents to a matrix of token counts.
Step5: Next, we just need to fit X and id2word to our Lda wrapper.
Step6: Example for Using Grid Search
Step7: Example of Using Pipeline
Step8: LSI Model
Step9: Example of Using Pipeline
Step10: Random Projections Model
Step11: Example of Using Pipeline
Step12: LDASeq Model
Step13: Example of Using Pipeline
|
<ASSISTANT_TASK:>
Python Code:
from gensim.sklearn_integration import SklLdaModel
from gensim.corpora import Dictionary
# Toy corpus of tokenized documents for demonstrating the sklearn wrapper.
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer']
]
# Map tokens to ids and convert each document to a bag-of-words vector.
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
# Fit a 2-topic LDA via the sklearn-style wrapper and get topic distributions.
model = SklLdaModel(num_topics=2, id2word=dictionary, iterations=20, random_state=1)
model.fit(corpus)
model.transform(corpus)
import numpy as np
from gensim import matutils
from gensim.models.ldamodel import LdaModel
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from gensim.sklearn_integration.sklearn_wrapper_gensim_ldamodel import SklLdaModel
rand = np.random.mtrand.RandomState(1) # set seed for getting same result
# Two-category subset of 20 newsgroups, vectorized into token counts.
cats = ['rec.sport.baseball', 'sci.crypt']
data = fetch_20newsgroups(subset='train', categories=cats, shuffle=True)
vec = CountVectorizer(min_df=10, stop_words='english')
X = vec.fit_transform(data.data)
vocab = vec.get_feature_names() # vocab to be converted to id2word
# LDA needs an id -> word mapping matching the count-matrix columns.
id2word = dict([(i, s) for i, s in enumerate(vocab)])
obj = SklLdaModel(id2word=id2word, num_topics=5, iterations=20)
lda = obj.fit(X)
from sklearn.model_selection import GridSearchCV
from gensim.models.coherencemodel import CoherenceModel
def scorer(estimator, X, y=None):
    """Score a fitted wrapper by the c_v topic coherence of its gensim model.

    Uses the module-level ``texts`` corpus; X and y are accepted only to
    satisfy the sklearn scorer signature.
    """
    lda = estimator.gensim_model
    coherence_model = CoherenceModel(model=lda, texts=texts,
                                     dictionary=lda.id2word, coherence='c_v')
    return coherence_model.get_coherence()
# Grid-search over topic count and iteration count, scored by topic coherence.
obj = SklLdaModel(id2word=dictionary, num_topics=5, iterations=20)
parameters = {'num_topics': (2, 3, 5, 10), 'iterations': (1, 20, 50)}
model = GridSearchCV(obj, parameters, scoring=scorer, cv=5)
model.fit(corpus)
model.best_params_
from sklearn.pipeline import Pipeline
from sklearn import linear_model
def print_features_pipe(clf, vocab, n=10):
    '''Print the raw classifier weights, then the top-n positive and top-n
    negative features with their vocabulary terms.

    clf   -- a fitted sklearn Pipeline whose 'classifier' step exposes coef_
    vocab -- indexable sequence of feature names (e.g. a list, not dict_values)
    n     -- how many features to show per sign
    '''
    coef = clf.named_steps['classifier'].coef_[0]
    # Python 3 print() calls (the original used Python 2 print statements,
    # inconsistent with the rest of this file).
    print(coef)
    print('Positive features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[::-1][:n] if coef[j] > 0])))
    print('Negative features: %s' % (' '.join(['%s:%.2f' % (vocab[j], coef[j]) for j in np.argsort(coef)[:n] if coef[j] < 0])))
id2word = Dictionary([_.split() for _ in data.data])
corpus = [id2word.doc2bow(i.split()) for i in data.data]
model = SklLdaModel(num_topics=15, id2word=id2word, iterations=10, random_state=37)
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, data.target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, data.target))
from gensim.sklearn_integration import SklLsiModel
model = SklLsiModel(num_topics=15, id2word=id2word)
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, data.target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, data.target))
from gensim.sklearn_integration import SklRpModel
model = SklRpModel(num_topics=2)
np.random.mtrand.RandomState(1) # set seed for getting same result
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, data.target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, data.target))
from gensim.sklearn_integration import SklLdaSeqModel
test_data = data.data[0:2]
test_target = data.target[0:2]
id2word = Dictionary(map(lambda x: x.split(), test_data))
corpus = [id2word.doc2bow(i.split()) for i in test_data]
model = SklLdaSeqModel(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
clf = linear_model.LogisticRegression(penalty='l2', C=0.1) # l2 penalty used
pipe = Pipeline((('features', model,), ('classifier', clf)))
pipe.fit(corpus, test_target)
print_features_pipe(pipe, id2word.values())
print(pipe.score(corpus, test_target))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step8: Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are
Step9: Test on Images
Step10: run your solution on all test_images and make copies into the test_images directory).
Step11: Test on Videos
Step12: Let's try the one with the solid white lane on the right first ...
Step14: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step16: At this point, if you were successful you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. Modify your draw_lines function accordingly and try re-running your pipeline.
Step18: Reflections
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimesions:', image.shape)
plt.imshow(image) #call as plt.imshow(gray, cmap='gray') to show a grayscaled image
import math
def grayscale(img):
    """Apply the grayscale transform.

    This will return an image with only one color channel; to see the
    returned image as grayscale call plt.imshow(gray, cmap='gray').
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Or use BGR2GRAY if you read an image with cv2.imread()
    # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Apply the Canny edge-detection transform."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Apply a Gaussian noise kernel (blur) of the given odd kernel size."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Apply an image mask.

    Only keeps the region of the image defined by the polygon formed from
    `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    #filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw `lines` on `img` in place with the given color and thickness.

    NOTE: this is the function you might want to use as a starting point once
    you want to average/extrapolate the detected line segments to map out the
    full extent of the lane. Think about separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which belong to the left vs. right
    line, then average each group and extrapolate to the top and bottom of
    the lane. To make the lines semi-transparent, combine this function with
    weighted_img() below.
    """
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run a probabilistic Hough transform and return a blank image with the
    detected lines drawn on it.

    `img` should be the output of a Canny transform.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
    """Blend `img` (the hough_lines() output: lines drawn on a blank image)
    onto `initial_img` (the image before any processing).

    The result image is computed as: initial_img * α + img * β + λ
    NOTE: initial_img and img must be the same shape!
    """
    return cv2.addWeighted(initial_img, α, img, β, λ)
import os
images = os.listdir("test_images/")
# Create a directory to save processed images
processed_directory_name = "processed_images"
if not os.path.exists(processed_directory_name):
os.mkdir(processed_directory_name)
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images directory.
# kernel_size for gaussian blur
kernel_size = 5
# thresholds for canny edge
low_threshold = 60
high_threshold = 140
# constants for Hough transformation
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 20 # minimum number of votes (intersections in Hough grid cell)
min_line_len = 30 #minimum number of pixels making up a line
max_line_gap = 150 # maximum gap in pixels between connectable line segments
# vertices for polygon with area of interest
left_bottom = [50, 539]
right_bottom = [900, 539]
apex = [470, 320]
vertices = [left_bottom, right_bottom, apex]
def image_process_pipeline(image):
    """Run the full lane-finding pipeline on one RGB image and return it
    with the detected lane lines drawn on top.

    Uses the module-level tuning constants (kernel_size, Canny thresholds,
    Hough parameters) defined in the cells above.
    """
    # Grayscale -> Gaussian blur -> Canny edge detection
    edges = canny(gaussian_blur(grayscale(image), kernel_size),
                  low_threshold, high_threshold)
    # Keep only the trapezoidal region of the frame where lane lines appear
    h = image.shape[0]
    w = image.shape[1]
    roi = np.array([[(0, h), (450, 320), (490, 320), (w, h)]], dtype=np.int32)
    masked_edges = region_of_interest(edges, roi)
    # Hough transform on the masked edges, then overlay on the original frame
    lines_img = hough_lines(masked_edges, rho, theta, threshold,
                            min_line_len, max_line_gap)
    return weighted_img(lines_img, image)
for raw_image in images:
# Read Image as Matrix
#image = mpimg.imread("test_images/"+images[0])
image = mpimg.imread("test_images/"+raw_image)
result = image_process_pipeline(image)
# Show processed image
plt.imshow(result)
# Save the image
mpimg.imsave(os.path.join(processed_directory_name, "processed"+raw_image), result)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """moviepy frame callback: annotate one color (3-channel) frame with
    lane lines and return the annotated color image."""
    # Delegate to the single-image pipeline defined above.
    return image_process_pipeline(image)
# Cell: run the pipeline on the solid-white-lane clip and save the result.
white_output = 'white.mp4'
clip1 = VideoFileClip("solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
# NOTE(review): the HTML(...) calls below lost their triple-quoted string
# delimiters during notebook export; each originally wrapped an inline
# <video> snippet whose src is filled in by .format(<output file>).
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(white_output))
# Cell: same pipeline on the solid-yellow-lane clip.
yellow_output = 'yellow.mp4'
clip2 = VideoFileClip('solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(yellow_output))
# Cell: optional challenge clip (curves, shadows, different road surface).
challenge_output = 'extra.mp4'
clip2 = VideoFileClip('challenge.mp4')
challenge_clip = clip2.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML(
<video width="960" height="540" controls>
<source src="{0}">
</video>
.format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Less safe zipcodes
Step4: Crime monthly
Step5: Weekly trends
Step6: Month Days trend
Step8: Check which zipcodes have more crime, and relate this to local businesses. For example, does the location of bars have any correlation with car theft or rape?
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
try:
# module exists
import seaborn as sns
seaborn_exists = True
except ImportError:
# module doesn't exist
seaborn_exists = True
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
%matplotlib inline
# custom features of plots
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Helvetica Neue'
plt.rcParams['font.monospace'] = 'Helvetica Neue'
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 12
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['figure.titlesize'] = 12
df = pd.read_csv('./data/cleaneddataset.csv')
print (df['crime'].unique())
df.head(5)
# replace long names with short names
crimemap = {
'MOTOR VEHICLE THEFT': 'VEHICLE THEFT',
'LARCENY-THEFT': 'LARCENY THEFT',
'MURDER AND NON-NEGLIGENT MANSLAUGHTER' : 'MURDER',
'AGGRAVATED ASSAULT': 'ASSAULT'
}
df['crime'].replace(crimemap, inplace=True)
cutoff = 50
plt.figure(figsize=(15,8))
sd = df['zip'].value_counts(sort=True,ascending=True)
sd.index = sd.index.astype(int)
sd = sd[~(sd<cutoff)]
ax = sd.plot.bar()
ax.set_ylabel('Number of Incidents')
ax.set_xlabel('Zipcodes with more than '+str(cutoff)+' crimes')
plt.show()
crime_year = pd.crosstab([df['year'],df['month']],df['crime'])
fig, ax = plt.subplots(nrows=1, ncols=1,figsize=(12,6))
crime_year.plot(kind='bar', stacked=False, grid=False,ax=ax)
ax.set_ylabel("number of incidents")
ax.set_xlabel("year")
ax.legend(loc = (1,0.1))
ax.set_ylim(0,3000)
plt.tight_layout()
plt.show()
ax = crime_year.plot()
ax.set_ylabel("number of incidents")
ax.set_xlabel("year")
ax.legend(loc = (1,0.1))
ax.set_ylim(0,3000)
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
plt.tight_layout()
plt.show()
#sns.heatmap(crime_year.T)
#plt.show()
# a set of colors to plot the bars
color_sequence = ['#1f77b4', '#ff7f0e', '#2ca02c','#d62728','#8c564b',
'#377eb8','#4daf4a','#984ea3','#f781bf']
# create the figure
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(12,12), sharex=True)
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
# selec kth columns
crime_year_col = crime_year.ix[:,k]
#plot the data with a selected color
crime_year_col.plot(kind='bar', ax=ax, color=color_sequence[k])
ax.legend(loc = (0,1))
# rotate the x-axis ticks
ax.set_xticklabels(ax.get_xticklabels(),rotation=90)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
#df.time = pd.to_datetime(df['datetime'], format='%m/%d/%Y %H:%M')
#df.head(5)
df.groupby(['year','month'])['crime'].count().plot(kind='bar')
plt.show()
crime_weekday = pd.crosstab(df['weekday'],df['crime'])
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12,8), sharex=True)
if seaborn_exists:
daysOfWeekList = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
#daysOfWeekList = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
crime_weekday=crime_weekday.reindex(daysOfWeekList)
ax=sns.heatmap(crime_weekday.T,annot=True, fmt="d",linewidths=0.5,cmap='RdBu_r')
ax.set_xticklabels(ax.get_xticklabels(),rotation=30)
plt.tight_layout()
plt.savefig('heatmap.png')
plt.show()
fig, axes = plt.subplots(nrows=3, ncols=3,figsize=(12,12),sharex=True)
print ('| Crime | Highest | Lowest |')
print ('| --- | --- | --- |')
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
# selec kth columns
crime_weakday_col = crime_weekday.ix[:,k]
crime_name = crime_weakday_col.name
crime_max_label,crime_max_val = crime_weakday_col.idxmax(), crime_weakday_col.max()
crime_min_label,crime_min_val = crime_weakday_col.idxmin(), crime_weakday_col.min()
print ('| {} | {} ({}) | {} ({}) |'.format(crime_name,crime_max_label,crime_max_val,crime_min_label,crime_min_val))
crime_weakday_col.plot(kind='line',ax=ax,color='r',marker='o')
#crime_weakday_col.plot(kind='bar',ax=ax,color='r')
ax.legend(loc = (0,1))
ax.set_xticklabels(ax.get_xticklabels(),rotation=60)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
crime_monthday = pd.crosstab(df['day'],df['crime'])
fig, axes = plt.subplots(nrows=3, ncols=3,figsize=(12,12),sharex=True)
#print ('| Crime | Highest | Lowest |')
#print ('| --- | --- | --- |')
k=0
for i in range(0,3):
for j in range(0,3):
ax = axes[i,j]
# selec kth columns
crime_monthday_col = crime_monthday.ix[:,k]
'''crime_name = crime_weakday_col.name
crime_max_label,crime_max_val = crime_weakday_col.idxmax(), crime_weakday_col.max()
crime_min_label,crime_min_val = crime_weakday_col.idxmin(), crime_weakday_col.min()
print ('| {} | {} ({}) | {} ({}) |'.format(crime_name,crime_max_label,crime_max_val,crime_min_label,crime_min_val))'''
crime_monthday_col.plot(kind='line',ax=ax,color='r',marker='o')
ax.legend(loc = (0,1))
ax.set_xticklabels(ax.get_xticklabels(),rotation=0)
ax.set_xlabel('')
k+=1
plt.tight_layout()
plt.show(fig)
dg = pd.crosstab(df['date'],df['crime'])
for col in dg.columns:
print (col)
print (dg.sort_values(by=col,ascending=False).index[0:3])
daysOfWeekList = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
wdf = pd.crosstab(df['crime'],df['weekday'])[daysOfWeekList]
wdf.to_json('crime_weekly.json')
wdf.to_csv('crime_weekly.csv')
def save_crime(names):
    """Write one weekday-count CSV per crime type into ./crime_weekly/.

    Each output file is named after the crime with spaces replaced by
    underscores; the weekday index is reordered Monday..Sunday before saving.
    Relies on the module-level `df` and `daysOfWeekList`.
    """
    #make sure there is no white space in the filename
    for name in names:
        wdf = pd.crosstab(df['weekday'], df['crime'])[name]
        # reindex with the flat weekday list (the original passed a nested
        # list, [daysOfWeekList], which does not yield the intended order)
        wdf = pd.DataFrame(wdf).reindex(daysOfWeekList)
        wdf.columns = ['count']
        wdf.to_csv('./crime_weekly/' + name.replace(" ", "_") + '.csv', sep=',')
save_crime(sorted(df.crime.unique())) # for all types of crimes, rem
sorted(df.crime.unique())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import libraries
Step7: User-defined functions
Step8: Load dataset
Step9: Pre-processing data
Step10: Fix missing value
Step11: Standardize country code
Step12: Extract serving_size into gram value
Step13: Parse additives
Step14: Organic or Not
Step15: Visualize food features
Step16: Top countries
Step17: Nutrition grade
Step18: Nutrition score
Step19: Serving size
Step20: Energy, fat, ...
Step21: Carbohydrates, protein, fiber
Step22: Sugar, Vitamins
Step23: Minerals
Step24: Explore food nutrition
Step25: Which food has more sugar?
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
%matplotlib inline
def remove_na_rows(df, cols=None):
    """Return the rows of `df` that have no NaN in any of the given columns.

    Parameters
    ----------
    df : pandas.DataFrame
    cols : sequence of column labels, optional
        Columns to check for NaN; defaults to all columns.
    """
    if cols is None:
        cols = df.columns
    return df[np.logical_not(np.any(df[cols].isnull().values, axis=1))]
def trans_country_name(x):
    """Translate a country name to its 2-char code.

    Takes the first item of a comma-separated string and looks it up in the
    module-level dictCountryName2Code mapping. Returns None when the name is
    unknown or `x` is not a string.
    """
    try:
        country_name = x.split(',')[0]
        if country_name in dictCountryName2Code:
            return dictCountryName2Code[country_name]
    except (AttributeError, TypeError):
        # non-string input (e.g. NaN) -> no code
        return None
def parse_additives(x):
    """Parse an `additives` column value into [count, sorted_token_list].

    Tokens look like '[ e330 -> ... ]'; duplicates are counted once.
    Returns None when `x` is not a string (e.g. NaN).
    """
    try:
        tokens = {}  # renamed from `dict` to avoid shadowing the builtin
        for item in x.split(']'):
            token = item.split('->')[0].replace("[", "").strip()
            if token:
                tokens[token] = 1
        return [len(tokens), sorted(tokens)]
    except (AttributeError, TypeError):
        return None
def trans_serving_size(x):
    """Extract the gram value from a `serving_size` string like '25 g (0.9 oz)'.

    Returns 0.0 when no leading gram quantity can be parsed (non-string input,
    non-numeric text, or non-gram units).
    """
    try:
        serving_g = float((x.split('(')[0]).replace("g", "").strip())
        return serving_g
    except (AttributeError, TypeError, ValueError):
        return 0.0
def distplot2x2(cols):
    """Draw distribution plots on a 2x2 grid for up to 4 features of the
    module-level `food` DataFrame."""
    sb.set(style="white", palette="muted")
    f, axes = plt.subplots(2, 2, figsize=(7, 7), sharex=False)
    b, g, r, p = sb.color_palette("muted", 4)
    colors = [b, g, r, p]
    axis = [axes[0,0],axes[0,1],axes[1,0],axes[1,1]]
    for n,col in enumerate(cols):
        sb.distplot(food[col].dropna(), hist=True, rug=False, color=colors[n], ax=axis[n])
food = pd.read_excel("../../0.0-Datasets/openfoodfacts_100k.xlsx")
food.shape
# (99999, 162)
food.columns
food.head()
# columns_to_keep = ['code','product_name','created_datetime','brands','categories','origins','manufacturing_places','energy_100g','fat_100g','saturated-fat_100g','trans-fat_100g','cholesterol_100g','carbohydrates_100g','sugars_100g','omega-3-fat_100g','omega-6-fat_100g','fiber_100g','proteins_100g','salt_100g','sodium_100g','alcohol_100g','vitamin-a_100g','vitamin-c_100g','potassium_100g','chloride_100g','calcium_100g','phosphorus_100g','iron_100g','magnesium_100g','zinc_100g','copper_100g','manganese_100g','fluoride_100g','ingredients_text','countries','countries_en','serving_size','additives','nutrition_grade_fr','nutrition_grade_uk','nutrition-score-fr_100g','nutrition-score-uk_100g','url','image_url','image_small_url']
columns_to_keep = ['code','product_name','created_datetime','brands','energy_100g','fat_100g','saturated-fat_100g','trans-fat_100g','cholesterol_100g','carbohydrates_100g','sugars_100g','fiber_100g','proteins_100g','salt_100g','sodium_100g','vitamin-a_100g','vitamin-c_100g','calcium_100g','iron_100g','ingredients_text','countries','countries_en','serving_size','additives','nutrition_grade_fr','nutrition-score-fr_100g','url']
food = food[columns_to_keep]
columns_numeric_all = ['energy_100g','fat_100g','saturated-fat_100g','trans-fat_100g','cholesterol_100g','carbohydrates_100g','sugars_100g','omega-3-fat_100g','omega-6-fat_100g','fiber_100g','proteins_100g','salt_100g','sodium_100g','alcohol_100g','vitamin-a_100g','vitamin-c_100g','potassium_100g','chloride_100g','calcium_100g','phosphorus_100g','iron_100g','magnesium_100g','zinc_100g','copper_100g','manganese_100g','fluoride_100g','nutrition-score-fr_100g','nutrition-score-uk_100g']
columns_numeric = set(columns_numeric_all) & set(columns_to_keep)
columns_categoric = set(columns_to_keep) - set(columns_numeric)
# turn off
if False:
for col in columns_numeric:
if not col in ['nutrition-score-fr_100g', 'nutrition-score-uk_100g']:
food[col] = food[col].fillna(0)
for col in columns_categoric:
if col in ['nutrition_grade_fr', 'nutrition_grade_uk']:
food[col] = food[col].fillna('-')
else:
food[col] = food[col].fillna('')
# list column names: categoric vs numeric
columns_categoric, columns_numeric
food.head(3)
# standardize country
country_lov = pd.read_excel("../../0.0-Datasets/country_cd.xlsx")
# country_lov.shape
# country_lov.head()
# country_lov[country_lov['GEOGRAPHY_NAME'].str.startswith('United')].head()
# country_lov['GEOGRAPHY_CODE'].tolist()
# country_lov.ix[0,'GEOGRAPHY_CODE'], country_lov.ix[0,'GEOGRAPHY_NAME']
# create 2 dictionaries
dictCountryCode2Name = {}
dictCountryName2Code = {}
for i in country_lov.index:
dictCountryCode2Name[country_lov.ix[i,'GEOGRAPHY_CODE']] = country_lov.ix[i,'GEOGRAPHY_NAME']
dictCountryName2Code[country_lov.ix[i,'GEOGRAPHY_NAME']] = country_lov.ix[i,'GEOGRAPHY_CODE']
# add Country_Code column - pick 1st country from list
food['countries_en'] = food['countries_en'].fillna('')
food['country_code'] = food['countries_en'].apply(str).apply(lambda x: trans_country_name(x))
# add country_code to columns_categoric set
columns_categoric.add('country_code')
# verify bad country
food[food['country_code'] != food['countries']][['country_code', 'countries']].head(20)
food['ingredients_text'].head() # leave as is
# add serving_size in gram column
food['serving_size'].head(10)
food['serving_size'] = food['serving_size'].fillna('')
food['serving_size_gram'] = food['serving_size'].apply(lambda x: trans_serving_size(x))
# add serving_size_gram
columns_numeric.add('serving_size_gram')
food[['serving_size_gram', 'serving_size']].head()
food['additives'].head(10)
food['additives'] = food['additives'].fillna('')
food['additive_list'] = food['additives'].apply(lambda x: parse_additives(x))
# add additive_list
columns_categoric.add('additive_list')
food[['additive_list', 'additives']].head()
food["creation_date"] = food["created_datetime"].apply(str).apply(lambda x: x[:x.find("T")])
def extract_year(x):
    """Return the year (int) from an ISO-ish 'YYYY-MM-DD...' string, or None
    when `x` is missing or the leading segment is not numeric."""
    try:
        return int(x[:x.find("-")])
    except (AttributeError, TypeError, ValueError):
        # narrowed from a bare `except:` so real bugs are not swallowed
        return None
food["year_added"] = food["created_datetime"].dropna().apply(str).apply(extract_year)
# add creation_date
columns_categoric.add('creation_date')
columns_numeric.add('year_added')
food[['created_datetime', 'creation_date', 'year_added']].head()
# food['product_name']
food.head(3)
columns_numeric
year_added = food['year_added'].value_counts().sort_index()
#year_added
year_i = [int(x) for x in year_added.index]
x_pos = np.arange(len(year_i))
year_added.plot.bar()
plt.xticks(x_pos, year_i)
plt.title("Food labels added per year")
TOP_N = 10
dist_country = food['country_code'].value_counts()
top_country = dist_country[:TOP_N][::-1]
country_s = [dictCountryCode2Name[x] for x in top_country.index]
y_pos = np.arange(len(country_s))
top_country.plot.barh()
plt.yticks(y_pos, country_s)
plt.title("Top {} Country Distribution".format(TOP_N))
# dist_nutri_grade = food['nutrition_grade_uk'].value_counts()
# no value
dist_nutri_grade = food[food["nutrition_grade_fr"].isin(['a','b','c','d','e'])]
dist_nutri_grade = dist_nutri_grade['nutrition_grade_fr'].value_counts()
dist_nutri_grade.sort_index(ascending=False).plot.barh()
plt.title("Nutrition Grade Dist")
food['nutrition-score-fr_100g'].dropna().plot.hist()
plt.title("{} Dist.".format("Nutri-Score"))
food['serving_size_gram'].dropna().plot.hist()
plt.title("{} Dist.".format("Serving Size (g)"))
distplot2x2([ 'energy_100g','fat_100g','saturated-fat_100g','trans-fat_100g'])
distplot2x2(['carbohydrates_100g', 'cholesterol_100g', 'proteins_100g', 'fiber_100g'])
distplot2x2([ 'sugars_100g', 'salt_100g', 'vitamin-a_100g', 'vitamin-c_100g'])
distplot2x2(['calcium_100g', 'iron_100g', 'sodium_100g'])
df = food[food["country_code"].isin(['US','FR'])][['energy_100g', 'sugars_100g','country_code']]
df = remove_na_rows(df)
df.head(3)
sb.pairplot(df, hue="country_code", size=2.5)
# prepare a small dataframe for ['US', 'FR']
df2 = food[food["country_code"].isin(['US','FR'])][['energy_100g', 'sugars_100g','country_code','nutrition_grade_fr']]
df2 = df2[df2["nutrition_grade_fr"].isin(['a','d'])]
df2 = df2.sort_values(by="nutrition_grade_fr")
# df2.head()
# create a grid of scatter plot
g = sb.FacetGrid(df2, row="nutrition_grade_fr", col="country_code", margin_titles=True)
g.map(plt.scatter, "sugars_100g", "energy_100g", color="steelblue")
g.set(xlim=(0, 100), ylim=(0, 8000))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using fastai's training loop
Step2: To use it in fastai, we first pull the DataLoaders from the module into a DataLoaders object
Step3: We can now create a Learner and fit
Step4: As you can see, migrating from Lightning allowed us to reduce the amount of code, and doesn't require you to change any of your existing data pipelines, optimizers, loss functions, models, etc. Once you've made this change, you can then benefit from fastai's rich set of callbacks, transforms, visualizations, and so forth.
Step5: Here, we're telling DataBlock that we have a B&W image input, and a category output, our input items are file names of images, the images are labeled based on the name of the parent folder, and they are split by training vs validation based on the grandparent folder name. It's important to actually look at your data, so fastai also makes it easy to visualize your inputs and outputs, for instance
|
<ASSISTANT_TASK:>
Python Code:
from migrating_lightning import *
from fastai.vision.all import *
model = LitModel()
data = DataLoaders(model.train_dataloader(), model.val_dataloader()).cuda()
learn = Learner(data, model, loss_func=F.cross_entropy, opt_func=Adam, metrics=accuracy)
learn.fit_one_cycle(1, 0.001)
mnist = DataBlock(blocks=(ImageBlock(cls=PILImageBW), CategoryBlock),
get_items=get_image_files,
splitter=GrandparentSplitter(),
get_y=parent_label)
dls = mnist.dataloaders(untar_data(URLs.MNIST_TINY))
dls.show_batch(max_n=9, figsize=(4,4))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now that you have imported the library, we will walk you through its different applications. You will start with an example, where we compute for you the loss of one training example.
Step2: Writing and running programs in TensorFlow has the following steps
Step3: As expected, you will not see 20! You got a tensor saying that the result is a tensor that does not have the shape attribute, and is of type "int32". All you did was put in the 'computation graph', but you have not run this computation yet. In order to actually multiply the two numbers, you will have to create a session and run it.
Step4: Great! To summarize, remember to initialize your variables, create a session and run the operations inside the session.
Step6: When you first defined x you did not have to specify a value for it. A placeholder is simply a variable that you will assign data to only later, when running the session. We say that you feed data to these placeholders when running the session.
Step8: Expected Output
Step10: Expected Output
Step12: Expected Output
Step14: Expected Output
Step15: Expected Output
Step16: Change the index below and run the cell to visualize some examples in the dataset.
Step17: As usual you flatten the image dataset, then normalize it by dividing by 255. On top of that, you will convert each label to a one-hot vector as shown in Figure 1. Run the cell below to do so.
Step19: Note that 12288 comes from $64 \times 64 \times 3$. Each image is square, 64 by 64 pixels, and 3 is for the RGB colors. Please make sure all these shapes make sense to you before continuing.
Step21: Expected Output
Step23: Expected Output
Step25: Expected Output
Step27: Expected Output
Step28: Run the following cell to train your model! On our machine it takes about 5 minutes. Your "Cost after epoch 100" should be 1.016458. If it's not, don't waste time; interrupt the training by clicking on the square (⬛) in the upper bar of the notebook, and try to correct your code. If it is the correct cost, take a break and come back in 5 minutes!
Step29: Expected Output
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
%matplotlib inline
np.random.seed(1)
y_hat = tf.constant(36, name='y_hat') # Define y_hat constant. Set to 36.
y = tf.constant(39, name='y') # Define y. Set to 39
loss = tf.Variable((y - y_hat)**2, name='loss') # Create a variable for the loss
init = tf.global_variables_initializer() # When init is run later (session.run(init)),
# the loss variable will be initialized and ready to be computed
with tf.Session() as session: # Create a session and print the output
session.run(init) # Initializes the variables
print(session.run(loss)) # Prints the loss
a = tf.constant(2)
b = tf.constant(10)
c = tf.multiply(a,b)
print(c)
sess = tf.Session()
print(sess.run(c))
# Change the value of x in the feed_dict
x = tf.placeholder(tf.int64, name = 'x')
print(sess.run(2 * x, feed_dict = {x: 3}))
sess.close()
# GRADED FUNCTION: linear_function
def linear_function():
    """Implement a linear function:
        Initializes W to be a random tensor of shape (4,3)
        Initializes X to be a random tensor of shape (3,1)
        Initializes b to be a random tensor of shape (4,1)

    Returns:
    result -- the evaluated value of Y = WX + b
    """
    np.random.seed(1)
    X = tf.constant(np.random.randn(3,1), name = "X")
    W = tf.constant(np.random.randn(4,3), name = 'W')
    b = tf.constant(np.random.randn(4,1), name = 'b')
    Y = tf.add(tf.matmul(W, X), b)
    # Create the session and run it on the variable we want to calculate
    sess = tf.Session()
    result = sess.run(Y)
    # close the session
    sess.close()
    return result
print( "result = " + str(linear_function()))
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """Compute the sigmoid of z.

    Arguments:
    z -- input value, scalar or vector

    Returns:
    result -- the sigmoid of z
    """
    # Create a placeholder for x. Name it 'x'.
    x = tf.placeholder(tf.float32, name = 'x')
    # compute sigmoid(x)
    sigmoid = tf.sigmoid(x)
    # Create a session and feed z's value to x via feed_dict.
    with tf.Session() as sess:
        result = sess.run(sigmoid, feed_dict = {x:z})
    return result
print ("sigmoid(0) = " + str(sigmoid(0)))
print ("sigmoid(12) = " + str(sigmoid(12)))
# GRADED FUNCTION: cost
def cost(logits, labels):
    """Computes the cost using the sigmoid cross entropy.

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Note: What we've been calling "z" and "y" in this class are respectively called
    "logits" and "labels" in the TensorFlow documentation. So logits will feed into
    z, and labels into y.

    Returns:
    cost -- runs the session of the cost (formula (2))
    """
    ### START CODE HERE ###
    # Create the placeholders for "logits" (z) and "labels" (y) (approx. 2 lines)
    z = tf.placeholder(tf.float32, name='z')
    y = tf.placeholder(tf.float32, name='y')

    # sigmoid_cross_entropy_with_logits applies the sigmoid internally, so it is
    # fed the raw logits.
    cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)

    # Create a session, run it on the placeholders' fed values, then close it.
    sess = tf.Session()
    cost = sess.run(cost, feed_dict={z: logits, y: labels})
    sess.close()
    ### END CODE HERE ###

    return cost
logits = sigmoid(np.array([0.2,0.4,0.7,0.9]))
cost = cost(logits, np.array([0,0,1,1]))
print ("cost = " + str(cost))
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """Creates a matrix where the i-th row corresponds to the ith class number and
    the jth column corresponds to the jth training example. So if example j had a
    label i, then entry (i, j) will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix
    """
    ### START CODE HERE ###
    # Create a tf.constant equal to C (depth), name it 'C'. (approx. 1 line)
    C = tf.constant(C, name='C')

    # axis=0 puts classes along rows and examples along columns.
    one_hot_matrix = tf.one_hot(depth=C, indices=labels, axis=0)

    # Create the session, evaluate the op, then close the session.
    sess = tf.Session()
    one_hot = sess.run(one_hot_matrix)
    sess.close()
    ### END CODE HERE ###

    return one_hot
labels = np.array([1,2,3,0,2,1])
one_hot = one_hot_matrix(labels, C = 4)
print ("one_hot = " + str(one_hot))
# GRADED FUNCTION: ones
def ones(shape):
    """Creates an array of ones of dimension shape.

    Arguments:
    shape -- shape of the array you want to create

    Returns:
    ones -- array containing only ones
    """
    ### START CODE HERE ###
    # Create "ones" tensor using tf.ones(...). (approx. 1 line)
    ones = tf.ones(shape=shape)

    # Create the session, evaluate the tensor, then close the session.
    sess = tf.Session()
    ones = sess.run(ones)
    sess.close()
    ### END CODE HERE ###

    return ones
print ("ones = " + str(ones([3])))
# Loading the dataset
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Example of a picture
index = 0
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# Flatten the training and test images
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T
# Normalize image vectors
X_train = X_train_flatten/255.
X_test = X_test_flatten/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
print ("number of training examples = " + str(X_train.shape[1]))
print ("number of test examples = " + str(X_test.shape[1]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_x, n_y):
    """Creates the placeholders for the tensorflow session.

    Arguments:
    n_x -- scalar, size of an image vector (num_px * num_px = 64 * 64 * 3 = 12288)
    n_y -- scalar, number of classes (from 0 to 5, so -> 6)

    Returns:
    X -- placeholder for the data input, of shape [n_x, None] and dtype "float"
    Y -- placeholder for the input labels, of shape [n_y, None] and dtype "float"

    Tips:
    - You will use None because it lets us be flexible on the number of examples
      for the placeholders. In fact, the number of examples during test/train is
      different.
    """
    ### START CODE HERE ### (approx. 2 lines)
    X = tf.placeholder(tf.float32, shape=[n_x, None])
    Y = tf.placeholder(tf.float32, shape=[n_y, None])
    ### END CODE HERE ###

    return X, Y
X, Y = create_placeholders(12288, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
    """Initializes parameters to build a neural network with tensorflow. The shapes are:
        W1 : [25, 12288]
        b1 : [25, 1]
        W2 : [12, 25]
        b2 : [12, 1]
        W3 : [6, 12]
        b3 : [6, 1]

    Returns:
    parameters -- a dictionary of tensors containing W1, b1, W2, b2, W3, b3
    """
    tf.set_random_seed(1)  # so that your "random" numbers match ours

    ### START CODE HERE ### (approx. 6 lines of code)
    # Xavier initialization for the weights, zeros for the biases.
    W1 = tf.get_variable("W1", [25, 12288], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [25, 1], initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [12, 25], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [12, 1], initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [6, 12], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b3 = tf.get_variable("b3", [6, 1], initializer=tf.zeros_initializer())
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2,
                  "W3": W3,
                  "b3": b3}

    return parameters
tf.reset_default_graph()
with tf.Session() as sess:
parameters = initialize_parameters()
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """Implements the forward propagation for the model:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit (softmax is applied later, inside the cost)
    """
    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']

    ### START CODE HERE ### (approx. 5 lines)      # Numpy Equivalents:
    Z1 = tf.add(tf.matmul(W1, X), b1)              # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                            # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2, A1), b2)             # Z2 = np.dot(W2, A1) + b2
    A2 = tf.nn.relu(Z2)                            # A2 = relu(Z2)
    Z3 = tf.add(tf.matmul(W3, A2), b3)             # Z3 = np.dot(W3, A2) + b3
    ### END CODE HERE ###

    return Z3
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
print("Z3 = " + str(Z3))
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
    """Computes the cost.

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """
    # tf.nn.softmax_cross_entropy_with_logits expects shape (num examples, num
    # classes), so transpose both tensors first.
    logits = tf.transpose(Z3)
    labels = tf.transpose(Y)

    ### START CODE HERE ### (1 line of code)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    ### END CODE HERE ###

    return cost
tf.reset_default_graph()
with tf.Session() as sess:
X, Y = create_placeholders(12288, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
print("cost = " + str(cost))
def model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,
          num_epochs=1500, minibatch_size=32, print_cost=True):
    """Implements a three-layer tensorflow neural network:
    LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.

    Arguments:
    X_train -- training set, of shape (input size = 12288, number of training examples = 1080)
    Y_train -- test set, of shape (output size = 6, number of training examples = 1080)
    X_test -- training set, of shape (input size = 12288, number of training examples = 120)
    Y_test -- test set, of shape (output size = 6, number of test examples = 120)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 100 epochs

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)      # to keep consistent results
    seed = 3                   # to keep consistent results
    (n_x, m) = X_train.shape   # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]     # n_y : output size
    costs = []                 # To keep track of the cost

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters()

    # Forward propagation: Build the forward propagation in the tensorflow graph
    Z3 = forward_propagation(X, parameters)

    # Cost function: Add cost function to tensorflow graph
    cost = compute_cost(Z3, Y)

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initialize all the variables
    init = tf.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            epoch_cost = 0.  # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size)  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1  # reshuffle differently each epoch, but deterministically
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost";
                # the feed_dict should contain a minibatch for (X, Y).
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={X: minibatch_X, Y: minibatch_Y})

                epoch_cost += minibatch_cost / num_minibatches

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

    return parameters
parameters = model(X_train, Y_train, X_test, Y_test)
import scipy
from PIL import Image
from scipy import ndimage
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "hh.jpg"
## END CODE HERE ##
# We preprocess your image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64)).reshape((1, 64*64*3)).T
my_image_prediction = predict(my_image, parameters)
plt.imshow(image)
print("Your algorithm predicts: y = " + str(np.squeeze(my_image_prediction)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exoplanet properties
Step2: Use np.genfromtxt with a delimiter of ',' to read the data into a NumPy array called data
Step3: Make a histogram of the distribution of planetary masses. This will reproduce Figure 2 in the original paper.
Step4: Make a scatter plot of the orbital eccentricity (y) versus the semimajor axis. This will reproduce Figure 4 of the original paper. Use a log scale on the x axis.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
!head -n 30 open_exoplanet_catalogue.txt
f=np.genfromtxt('open_exoplanet_catalogue.txt',delimiter=',')
data=np.array(f)
assert data.shape==(1993,24)
plt.hist(data)
assert True # leave for grading
assert True # leave for grading
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Для выполнения этого задания нам понадобятся данные о среднемесячных уровнях заработной платы в России
Step2: Проверка стационарности и STL-декомпозиция ряда
Step3: Стабилизация дисперсии
Step4: Стационарность
Step5: Критерий Дики-Фуллера отвергает гипотезу нестационарности, НО полностью избавиться от тренда не удалось. Попробуем добавить ещё обычное дифференцирование
Step6: Гипотеза нестационарности отвергается с ещё большим уровнем значимости, и визуально ряд выглядит лучше — тренда больше нет.
Step7: Начальные приближения
Step8: Лучшая модель
Step9: Её остатки
Step10: Остатки несмещены (подтверждается критерием Стьюдента), стационарны (подтверждается критерием Дики-Фуллера и визуально), неавтокоррелированы (подтверждается критерием Льюнга-Бокса и коррелограммой). Посмотрим, насколько хорошо модель описывает данные
Step11: Прогноз
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from itertools import product
from datetime import *
from dateutil.relativedelta import *
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
#Reading data
wage = pd.read_csv('WAG_C_M.csv', sep=';', index_col='month', parse_dates=True, dayfirst=True)
wage.info()
wage.head()
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M.plot()
_ = plt.title('Average nominal wage')
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M)[1])
wage['WAG_C_M_box'], lmbda = stats.boxcox(wage.WAG_C_M)
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M_box.plot()
_ = plt.title(u'Transformed average nominal wage')
print('Optimal parameter of the Box-Cox power transformation: %f' % lmbda)
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box)[1])
wage['WAG_C_M_box_diff'] = wage.WAG_C_M_box - wage.WAG_C_M_box.shift(12)
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M_box_diff.dropna()).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box_diff.dropna())[1])
wage['WAG_C_M_box_diff2'] = wage.WAG_C_M_box_diff - wage.WAG_C_M_box_diff.shift(1)
_ = sm.tsa.seasonal_decompose(wage.WAG_C_M_box_diff2.dropna()).plot()
print('Augmented Dickey-Fuller unit root test p=%f' % sm.tsa.stattools.adfuller(wage.WAG_C_M_box_diff2.dropna())[1])
plt.figure(figsize=(15,10))
ax = plt.subplot(211)
sm.graphics.tsa.plot_acf(wage.WAG_C_M_box_diff2.dropna()[12:].squeeze(), lags=50, ax=ax);
ax = plt.subplot(212)
sm.graphics.tsa.plot_pacf(wage.WAG_C_M_box_diff2.dropna()[12:].squeeze(), lags=50, ax=ax);
ps = range(0, 2)
d=1
qs = range(0, 2)
Ps = range(0, 2)
D=1
Qs = range(0, 1)
parameters = product(ps, qs, Ps, Qs)
parameters_list = list(parameters)
parameters_list
len(parameters_list)
%%time
results = []
best_aic = float("inf")
warnings.filterwarnings('ignore')
for param in parameters_list:
#try except нужен, потому что на некоторых наборах параметров модель не обучается
try:
model=sm.tsa.statespace.SARIMAX(wage.WAG_C_M_box, order=(param[0], d, param[1]),
seasonal_order=(param[2], D, param[3], 12)).fit(disp=-1)
#выводим параметры, на которых модель не обучается и переходим к следующему набору
except ValueError:
print('wrong parameters:', param)
continue
aic = model.aic
#сохраняем лучшую модель, aic, параметры
if aic < best_aic:
best_model = model
best_aic = aic
best_param = param
results.append([param, model.aic])
warnings.filterwarnings('default')
result_table = pd.DataFrame(results)
result_table.columns = ['parameters', 'aic']
print(result_table.sort_values(by = 'aic', ascending=True).head())
print(best_model.summary())
_ = plt.figure(figsize=(15,12))
_ = plt.subplot(211)
_ = best_model.resid[13:].plot()
_ = plt.ylabel(u'Residuals')
_ = ax = plt.subplot(212)
_ = sm.graphics.tsa.plot_acf(best_model.resid.values.squeeze(), lags=50, ax=ax)
print("Критерий Стьюдента: p=%f" % stats.ttest_1samp(best_model.resid[13:], 0)[1])
print("Критерий Дики-Фуллера: p=%f" % sm.tsa.stattools.adfuller(best_model.resid[13:])[1])
def invboxcox(y, lmbda):
    """Invert the Box-Cox power transform.

    Arguments:
    y -- transformed value(s), scalar or numpy array
    lmbda -- the Box-Cox lambda parameter used for the forward transform

    Returns:
    The value(s) on the original scale: exp(y) when lmbda == 0, otherwise
    (lmbda*y + 1) ** (1/lmbda), computed via exp(log(...)/lmbda).
    """
    if lmbda == 0:
        return np.exp(y)
    else:
        return np.exp(np.log(lmbda * y + 1) / lmbda)
wage['model'] = invboxcox(best_model.fittedvalues, lmbda)
_ = plt.figure(figsize=(15,7))
_ = wage.WAG_C_M.plot()
_ = wage.model[13:].plot(color='r')
_ = plt.title('Average nominal wage')
wage2 = wage[['WAG_C_M']]
date_list = [datetime.strptime("2017-07-01", "%Y-%m-%d") + relativedelta(months=x) for x in range(0,36)]
future = pd.DataFrame(index=date_list, columns=wage2.columns)
wage2 = pd.concat([wage2, future])
wage2['forecast'] = invboxcox(best_model.predict(start=294, end=329), lmbda)
_ = plt.figure(figsize=(15,7))
_ = wage2.WAG_C_M.plot()
_ = wage2.forecast.plot(color='r')
_ = plt.title('Average nominal wage')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: OLS Analysis Using Full PSU dataset
Step3: Partitioning a dataset in training and test sets
Step4: Determine Feature Importances
|
<ASSISTANT_TASK:>
Python Code:
#Import required packages
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
def format_date(df_date):
    """Splits Meeting Times and Dates into datetime objects where applicable using regex.

    Expects columns 'Meeting_Times' (e.g. "MWF 1000-1150"), 'Meeting_Dates'
    (e.g. "01/09-03/20 ") and 'Term' (e.g. 201604). Adds Days, Start_Date,
    Year, Quarter, Term_Date, End_Date, Start_Time, End_Time and Duration_Hr
    columns in place and returns the same DataFrame.
    """
    # First whitespace-delimited token of each field.
    df_date['Days'] = df_date['Meeting_Times'].str.extract(r'([^\s]+)', expand=True)
    df_date['Start_Date'] = df_date['Meeting_Dates'].str.extract(r'([^\s]+)', expand=True)
    # Term is YYYYMM; split it and re-parse as a timestamp.
    df_date['Year'] = df_date['Term'].astype(str).str.slice(0, 4)
    df_date['Quarter'] = df_date['Term'].astype(str).str.slice(4, 6)
    df_date['Term_Date'] = pd.to_datetime(df_date['Year'] + df_date['Quarter'], format='%Y%m')
    # End date is the text between '-' and the following space.
    df_date['End_Date'] = df_date['Meeting_Dates'].str.extract(r'(?<=-)(.*)(?= )', expand=True)
    # Times are HHMM strings between the space and the '-' / after the '-'.
    df_date['Start_Time'] = df_date['Meeting_Times'].str.extract(r'(?<= )(.*)(?=-)', expand=True)
    df_date['Start_Time'] = pd.to_datetime(df_date['Start_Time'], format='%H%M')
    df_date['End_Time'] = df_date['Meeting_Times'].str.extract(r'((?<=-).*$)', expand=True)
    df_date['End_Time'] = pd.to_datetime(df_date['End_Time'], format='%H%M')
    df_date['Duration_Hr'] = ((df_date['End_Time'] - df_date['Start_Time']).dt.seconds) / 3600
    return df_date
def format_xlist(df_xl):
    """Revises % capacity calculations by using Max Enrollment instead of room capacity.

    For cross-listed sections (non-empty 'Xlst') the spare-seat count is
    Max_Enrl - Actual_Enrl; otherwise Room_Capacity - Actual_Enrl. Rows whose
    Room_Capacity is 999 (a "No Data Available" sentinel) are dropped.
    """
    df_xl['Cap_Diff'] = np.where(df_xl['Xlst'] != '',
                                 df_xl['Max_Enrl'].astype(int) - df_xl['Actual_Enrl'].astype(int),
                                 df_xl['Room_Capacity'].astype(int) - df_xl['Actual_Enrl'].astype(int))
    df_xl = df_xl.loc[df_xl['Room_Capacity'].astype(int) < 999]
    return df_xl
pd.set_option('display.max_rows', None)
df = pd.read_csv('data/PSU_master_classroom_91-17.csv', dtype={'Schedule': object, 'Schedule Desc': object})
df = df.fillna('')
df = format_date(df)
# Avoid classes that only occur on a single day
df = df.loc[df['Start_Date'] != df['End_Date']]
#terms = [199104, 199204, 199304, 199404, 199504, 199604, 199704, 199804, 199904, 200004, 200104, 200204, 200304, 200404, 200504, 200604, 200704, 200804, 200904, 201004, 201104, 201204, 201304, 201404, 201504, 201604]
terms = [200604, 200704, 200804, 200904, 201004, 201104, 201204, 201304, 201404, 201504, 201604]
df = df.loc[df['Term'].isin(terms)]
df = df.loc[df['Online Instruct Method'] != 'Fully Online']
# Calculate number of days per week and treat Sunday condition
df['Days_Per_Week'] = df['Days'].str.len()
df['Room_Capacity'] = df['Room_Capacity'].apply(lambda x: x if (x != 'No Data Available') else 0)
df['Building'] = df['ROOM'].str.extract('([^\s]+)', expand=True)
df_cl = format_xlist(df)
df_cl['%_Empty'] = df_cl['Cap_Diff'].astype(float) / df_cl['Room_Capacity'].astype(float)
# Normalize the results
df_cl['%_Empty'] = df_cl['Actual_Enrl'].astype(np.float32)/df_cl['Room_Capacity'].astype(np.float32)
df_cl = df_cl.replace([np.inf, -np.inf], np.nan).dropna()
from sklearn.preprocessing import LabelEncoder
df_cl = df_cl.sample(n = 15000)
# Save as a 1D array. Otherwise will throw errors.
y = np.asarray(df_cl['%_Empty'], dtype="|S6")
df_cl = df_cl[['Dept', 'Class', 'Days', 'Start_Time', 'ROOM', 'Term', 'Room_Capacity', 'Building']]
cat_columns = ['Dept', 'Class', 'Days', 'Start_Time', 'ROOM', 'Building']
for column in cat_columns:
room_mapping = {label: idx for idx, label in enumerate(np.unique(df_cl['{0}'.format(column)]))}
df_cl['{0}'.format(column)] = df_cl['{0}'.format(column)].map(room_mapping)
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
if Version(sklearn_version) < '0.18':
from sklearn.cross_validation import train_test_split
else:
from sklearn.model_selection import train_test_split
X = df_cl.iloc[:, 1:].values
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=0)
# Compare Algorithms
import pandas
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# prepare configuration for cross validation test harness
seed = 7
# prepare models
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
for name, model in models:
kfold = model_selection.KFold(n_splits=10, random_state=seed)
cv_results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring, n_jobs=-1)
results.append(cv_results)
names.append(name)
msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: These files belong to data publishd by Quick et al. http
Step2: The result is a Pandas DataFrame object, which is too big to comfortably view in its entirety, but still comparatively "small data". Here is a subset of it
Step3: All of the columns available
Step4: The philosopy of porekit is to gather the metadata once and then store it in a different format. This makes it easier and faster to analyse the metadata or use it in another context, for example with alignment data. In general, Fast5 Files don't change after MinKNOW and Metrichor, and possibly some third party programs are done with it.
Step5: Grouping by Device, ASIC and Run Ids
|
<ASSISTANT_TASK:>
Python Code:
!ls /home/andi/nanopore/GenomeRU2/downloads/pass/ | tail -n 10
import porekit
everything = porekit.gather_metadata("/home/andi/nanopore/", workers=4)
everything[['asic_id', 'channel_number', 'template_length', 'complement_length']].head()
everything.columns
everything.to_hdf("everything.h5", "meta")
g = everything.groupby(['device_id', 'asic_id', 'run_id'])
df = g.template_length.agg([lambda v: len(v), np.mean, np.max])
df.columns = ['Count', 'Mean template length', 'Max template_length']
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot the data and see how it looks.
Step2: The data, based on only GRE and GPA scores, doesn't seem very separable. Maybe if we make a plot for each of the ranks, the boundaries will be more clear.
Step3: These plots look a bit more linearly separable, although not completely. But it seems that using a multi-layer perceptron with the rank, gre, and gpa as inputs, may give us a decent solution.
Step4: 3. Split the data into training and testing sets
Step5: 4. Define the model architecture
Step6: 5. Train the model
Step7: 6. Score the model
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
data = pd.read_csv('student_data.csv')
data.head(5)
import matplotlib.pyplot as plt
import numpy as np
def plot_points(data):
    """Scatter-plot GRE vs GPA, coloring admitted (cyan) vs rejected (red) students."""
    features = np.array(data[["gre", "gpa"]])
    labels = np.array(data["admit"])
    admitted = features[np.argwhere(labels == 1)]
    rejected = features[np.argwhere(labels == 0)]
    plt.scatter([pt[0][0] for pt in rejected], [pt[0][1] for pt in rejected],
                s=25, color='red', edgecolor='k')
    plt.scatter([pt[0][0] for pt in admitted], [pt[0][1] for pt in admitted],
                s=25, color='cyan', edgecolor='k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
plt.show()
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
import keras
from keras.utils import np_utils
# remove NaNs
data = data.fillna(0)
# One-hot encoding the rank
processed_data = pd.get_dummies(data, columns=['rank'])
# Normalizing the gre and the gpa scores to be in the interval (0,1)
processed_data["gre"] = processed_data["gre"]/800
processed_data["gpa"] = processed_data["gpa"]/4
# Splitting the data input into X, and the labels y
X = np.array(processed_data)[:,1:]
X = X.astype('float32')
y = keras.utils.to_categorical(data["admit"],2)
# Checking that the input and output look correct
print("Shape of X:", X.shape)
print("\nShape of y:", y.shape)
print("\nFirst 10 rows of X")
print(X[:10])
print("\nFirst 10 rows of y")
print(y[:10])
# break training set into training and validation sets
(X_train, X_test) = X[50:], X[:50]
(y_train, y_test) = y[50:], y[:50]
# print shape of training set
print('x_train shape:', X_train.shape)
# print number of training, validation, and test images
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# Imports
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
# Building the model
# Note that filling out the empty rank as "0", gave us an extra column, for "Rank 0" students.
# Thus, our input dimension is 7 instead of 6.
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(7,)))
model.add(Dropout(.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(.1))
model.add(Dense(2, activation='softmax'))
# Compiling the model
model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
# Training the model
model.fit(X_train, y_train, epochs=200, batch_size=100, verbose=0)
# Evaluating the model on the training and testing set
score = model.evaluate(X_train, y_train)
print("\n Training Accuracy:", score[1])
score = model.evaluate(X_test, y_test)
print("\n Testing Accuracy:", score[1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There was no simple way to make code in one cell to write output to another cell. Now there is!
Step2: Ok, so far, nothing earth shattering. But what happens if you call display with the same display_id again?
Step3: Fantastic! We have a way of mirroring output in multiple places. But what if you only want update the previously named displays, without creating a new one? Just call display with update=True, like this
Step4: Though we have been working with text so far, this also works for the all other output types. Let's make an HTML-based progress bar!
Step5: The progress bar is drawn and it starts off at 0. Fill it up half way and call its update method to get a redraw.
Step6: Now go half-way again
Step7: Our original bar is kind of far away now, let's get another view of it below.
Step8: This is good, but it would be awesome to have a progress bar that would automatically update whenever its progress was modified - that would be truly progressive. We subclass ProgressBar and now we make progress into a Python property, which will allow us to set it and get it like an attribute, but do that using methods. In particular, whenever we assign a new value to progress, we also call update.
Step9: Much better. No more pesky update calls. Let's make a little animation that Zeno would be proud of
Step10: You might have noticed that each ProgressBar autogenerates a random display_id which is handy if you want to have several of them.
|
<ASSISTANT_TASK:>
Python Code:
print("typical output")
h1 = display('initial display', display_id='some_destination')
h2 = display('spoiler alert: output updated in both', display_id='some_destination')
h3 = display('no output here, update above', display_id='some_destination', update=True)
import os
from binascii import hexlify
class ProgressBar(object):
    """An HTML <progress> bar rendered through the notebook display machinery."""

    def __init__(self, capacity):
        # Random id so several bars can coexist and be redrawn independently.
        self._display_id = hexlify(os.urandom(8)).decode('ascii')
        self.capacity = capacity
        self.progress = 0

    def _repr_html_(self):
        template = "<progress style='width:100%' max='{}' value='{}'></progress>"
        return template.format(self.capacity, self.progress)

    def display(self):
        """Draw (or mirror) the bar at the current output position."""
        display(self, display_id=self._display_id)

    def update(self):
        """Redraw every existing view of this bar in place."""
        display(self, display_id=self._display_id, update=True)
bar = ProgressBar(100)
bar.display()
bar.progress = 50
bar.update()
bar.progress = 75
bar.update()
bar.display()
class AutoupdatingProgressBar(ProgressBar):
    """ProgressBar whose views redraw automatically whenever ``progress`` is assigned."""

    @property
    def progress(self):
        return self._progress

    @progress.setter
    def progress(self, new_value):
        self._progress = new_value
        # Refresh every displayed copy as soon as the value changes.
        self.update()
better_bar = AutoupdatingProgressBar(100)
better_bar.display()
# Assignment alone now triggers the redraw.
better_bar.progress = 40
import time
# Zeno-style animation: repeatedly halve the remaining distance to capacity.
better_bar.progress = 0
for _ in range(10):
    time.sleep(.5)
    better_bar.progress += (better_bar.capacity - better_bar.progress) / 2
# Several independent bars, each bound to its own random display_id.
num_bars = 5
bars = [AutoupdatingProgressBar(100) for _ in range(num_bars)]
for b in bars:
    b.display()
import random
# Randomly nudge one bar at a time; only that bar's output area is redrawn.
for x in range(40):
    time.sleep(.1)
    idx = random.randrange(num_bars)
    bars[idx].progress += random.randint(-2, 10)
for b in bars:
    b.display()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 오디오 데이터 준비 및 증강
Step2: 사용법
Step3: 위의 예에서 Flac 파일 brooklyn.flac는 Google Cloud에서 공개적으로 액세스할 수 있는 오디오 클립에서 가져온 것입니다.
Step4: 오디오는 다음을 통해 재생할 수 있습니다.
Step5: 텐서를 부동 소수점 숫자로 변환하고 오디오 클립을 그래프로 표시하는 것이 더 편리합니다.
Step6: 노이즈 제거
Step7: 페이드 인 및 페이드 아웃
Step8: 스펙트럼 사진
Step9: 다른 스케일로의 추가 변환도 가능합니다.
Step10: SpecAugment
Step11: 시간 마스킹
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tensorflow-io
import tensorflow as tf
import tensorflow_io as tfio
# AudioIOTensor backed by a publicly readable Flac file on Google Cloud Storage.
audio = tfio.audio.AudioIOTensor('gs://cloud-samples-tests/speech/brooklyn.flac')
print(audio)
# Slice off the first 100 samples, then squeeze the trailing channel axis.
audio_slice = audio[100:]
# remove last dimension
audio_tensor = tf.squeeze(audio_slice, axis=[-1])
print(audio_tensor)
from IPython.display import Audio
Audio(audio_tensor.numpy(), rate=audio.rate.numpy())
import matplotlib.pyplot as plt
# Scale int16 PCM samples to floats in [-1, 1) before plotting.
tensor = tf.cast(audio_tensor, tf.float32) / 32768.0
plt.figure()
plt.plot(tensor.numpy())
# Trim leading/trailing samples whose amplitude stays below epsilon.
position = tfio.audio.trim(tensor, axis=0, epsilon=0.1)
print(position)
start = position[0]
stop = position[1]
print(start, stop)
processed = tensor[start:stop]
plt.figure()
plt.plot(processed.numpy())
# Logarithmic fade-in over 1000 samples and fade-out over 2000 samples.
fade = tfio.audio.fade(
    processed, fade_in=1000, fade_out=2000, mode="logarithmic")
plt.figure()
plt.plot(fade.numpy())
# Convert to spectrogram
spectrogram = tfio.audio.spectrogram(
    fade, nfft=512, window=512, stride=256)
plt.figure()
plt.imshow(tf.math.log(spectrogram).numpy())
# Convert to mel-spectrogram
mel_spectrogram = tfio.audio.melscale(
    spectrogram, rate=16000, mels=128, fmin=0, fmax=8000)
plt.figure()
plt.imshow(tf.math.log(mel_spectrogram).numpy())
# Convert to db scale mel-spectrogram
dbscale_mel_spectrogram = tfio.audio.dbscale(
    mel_spectrogram, top_db=80)
plt.figure()
plt.imshow(dbscale_mel_spectrogram.numpy())
# Freq masking
# SpecAugment-style augmentation: mask a band of frequency bins.
freq_mask = tfio.audio.freq_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(freq_mask.numpy())
# Time masking
# SpecAugment-style augmentation: mask a span of time steps.
time_mask = tfio.audio.time_mask(dbscale_mel_spectrogram, param=10)
plt.figure()
plt.imshow(time_mask.numpy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 2 - Outline of the Assignment
Step4: Expected output
Step6: Expected output
Step8: Expected output
Step10: Expected output
Step12: <table style="width
Step14: Expected Output
Step16: Expected Output
Step18: Expected output with sigmoid
Step20: Expected Output
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v3 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
# Fix the RNG so the notebook's expected outputs are reproducible.
np.random.seed(1)
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """Initialize the parameters of a 2-layer network (LINEAR->RELU->LINEAR->SIGMOID).

    Arguments:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    parameters -- python dictionary containing:
                  W1 -- weight matrix of shape (n_h, n_x)
                  b1 -- bias vector of shape (n_h, 1)
                  W2 -- weight matrix of shape (n_y, n_h)
                  b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(1)  # fixed seed so the graded outputs are reproducible

    # Small random weights break symmetry; biases can safely start at zero.
    W1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))

    assert(W1.shape == (n_h, n_x))
    assert(b1.shape == (n_h, 1))
    assert(W2.shape == (n_y, n_h))
    assert(b2.shape == (n_y, 1))

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters
# Expected (seeded) output: small random W1/W2, zero b1/b2.
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
    """Initialize the parameters of an L-layer network.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer

    Returns:
    parameters -- dictionary with "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layer_dims[l], layer_dims[l-1]) and
                  bl has shape (layer_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so the graded outputs are reproducible
    parameters = {}
    L = len(layer_dims)  # number of layers, input layer included

    for l in range(1, L):
        # Small random weights break symmetry; biases start at zero.
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))

        assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
        assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))

    return parameters
# Two-hidden-layer example: W1 is (4, 5), W2 is (3, 4).
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
    """Implement the linear part of a layer's forward propagation: Z = W.A + b.

    Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function (pre-activation parameter)
    cache -- tuple (A, W, b) stored for computing the backward pass efficiently
    """
    Z = np.dot(W, A) + b  # b broadcasts across the examples axis

    assert(Z.shape == (W.shape[0], A.shape[1]))
    cache = (A, W, b)

    return Z, cache
# Sanity-check linear_forward on the course-provided test case.
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
    """Implement the forward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)
    activation -- the activation to be used in this layer: "sigmoid" or "relu"

    Returns:
    A -- the output of the activation function (post-activation value)
    cache -- tuple (linear_cache, activation_cache) stored for the backward pass
    """
    # The linear step is identical for both activations, so compute it once.
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
# Exercise both activation branches on the course-provided test case.
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
    """Implement forward propagation for [LINEAR->RELU]*(L-1) -> LINEAR -> SIGMOID.

    Arguments:
    X -- data, numpy array of shape (input size, number of examples)
    parameters -- output of initialize_parameters_deep()

    Returns:
    AL -- last post-activation value (output of the final sigmoid layer)
    caches -- list of L caches, one per linear_activation_forward() call:
              indices 0..L-2 for the relu layers, L-1 for the sigmoid layer
    """
    caches = []
    A = X
    L = len(parameters) // 2  # two entries (Wl, bl) per layer

    # Hidden layers: LINEAR -> RELU, collecting each cache for backprop.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters["W"+str(l)], parameters["b"+str(l)], activation = "relu")
        caches.append(cache)

    # Output layer: LINEAR -> SIGMOID.
    AL, cache = linear_activation_forward(A, parameters["W"+str(L)], parameters["b"+str(L)], activation = "sigmoid")
    caches.append(cache)

    assert(AL.shape == (1,X.shape[1]))

    return AL, caches
# Full forward pass through a 2-hidden-layer test network.
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
    """Implement the cross-entropy cost function defined by equation (7).

    Arguments:
    AL -- probability vector of label predictions, shape (1, number of examples)
    Y -- true "label" vector (0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost (a scalar)
    """
    m = Y.shape[1]

    # Average of -[y*log(a) + (1-y)*log(1-a)] over the m examples.
    cost = -1/m * np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1-AL), 1-Y))

    cost = np.squeeze(cost)  # to make sure the shape is () (e.g. turns [[17]] into 17)
    assert(cost.shape == ())

    return cost
# Cross-entropy cost on the course-provided test case.
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
    """Implement the linear portion of backward propagation for a single layer l.

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output of layer l
    cache -- tuple (A_prev, W, b) from the forward propagation of this layer

    Returns:
    dA_prev -- Gradient of the cost w.r.t. the activation of layer l-1, same shape as A_prev
    dW -- Gradient of the cost w.r.t. W of layer l, same shape as W
    db -- Gradient of the cost w.r.t. b of layer l, same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    dW = 1/m * np.dot(dZ, A_prev.T)
    db = 1/m * np.sum(dZ, axis=1, keepdims=True)  # keepdims preserves the (n, 1) column shape
    dA_prev = np.dot(W.T, dZ)

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)

    return dA_prev, dW, db
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
    """Implement the backward propagation for the LINEAR->ACTIVATION layer.

    Arguments:
    dA -- post-activation gradient for the current layer l
    cache -- tuple (linear_cache, activation_cache) stored during forward propagation
    activation -- the activation used in this layer: "sigmoid" or "relu"

    Returns:
    dA_prev -- Gradient of the cost w.r.t. the activation of layer l-1, same shape as A_prev
    dW -- Gradient of the cost w.r.t. W of layer l, same shape as W
    db -- Gradient of the cost w.r.t. b of layer l, same shape as b
    """
    linear_cache, activation_cache = cache

    # First undo the activation (dA -> dZ), then the linear step (dZ -> gradients).
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    dA_prev, dW, db = linear_backward(dZ, linear_cache)

    return dA_prev, dW, db
# Exercise both activation branches of the backward pass.
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
    """Implement backward propagation for [LINEAR->RELU]*(L-1) -> LINEAR -> SIGMOID.

    Arguments:
    AL -- probability vector, output of the forward propagation (L_model_forward())
    Y -- true "label" vector (0 if non-cat, 1 if cat)
    caches -- list of caches: caches[l] for the relu layers (l = 0..L-2) and
              caches[L-1] for the sigmoid layer

    Returns:
    grads -- dictionary with entries grads["dA" + str(l)], grads["dW" + str(l)],
             grads["db" + str(l)]
    """
    grads = {}
    L = len(caches)  # the number of layers
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)  # after this line, Y is the same shape as AL

    # Initialize backprop: derivative of the cross-entropy cost w.r.t. AL.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # Lth layer (SIGMOID -> LINEAR) gradients.
    current_cache = caches[L-1]
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, activation = "sigmoid")

    # Remaining layers (RELU -> LINEAR), walking backwards through the caches.
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l+2)], current_cache, activation = "relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads
# Full backward pass on the course-provided test case.
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
    """Update parameters using one step of gradient descent.

    Arguments:
    parameters -- python dictionary containing the parameters W1, b1, ..., WL, bL
    grads -- python dictionary containing the gradients, output of L_model_backward
    learning_rate -- step size of the gradient descent update

    Returns:
    parameters -- python dictionary containing the updated parameters
    """
    L = len(parameters) // 2  # two entries (Wl, bl) per layer

    # theta = theta - learning_rate * d(theta) for every weight and bias.
    for l in range(1, L+1):
        parameters['W' + str(l)] = parameters['W' + str(l)] - learning_rate*grads["dW"+str(l)]
        parameters['b' + str(l)] = parameters['b' + str(l)] - learning_rate*grads["db"+str(l)]

    return parameters
# One gradient-descent step with learning rate 0.1.
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: On any given day, you may want to know not just the value on that day, but all of the historical values as well. And it would be lovely to be able to get that in one data structure, especially if stored in cloud storage, rather than having to iterate over a channel each time.
|
<ASSISTANT_TASK:>
Python Code:
# Seed for reproducibility, then build a 10-item channel of (index, value) pairs.
random.seed(1)
chan = IterChannel((i, random.randint(100, 200)) for i in range(10))
print_chans(chan.tee())
from flowz.util import incremental_assembly, NO_VALUE
# NO_VALUE is a special value defined for incremental_assembly() that indicates the start of assembly
def prepend_assembler(new, old):
    """A simple assembler that prepends new data at the beginning of the tuple of old data.

    `old` is NO_VALUE on the first call (nothing assembled yet), in which
    case a fresh one-element tuple is started.
    """
    if old is NO_VALUE:
        return (new,)
    else:
        return (new,) + old
# Feed the channel through the assembler, starting from an empty destination.
dest = IterChannel([])
out = incremental_assembly(chan.tee(), dest.tee(), prepend_assembler)
print_chans(out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ipython_sql
Step2: rdbms-subsetter
|
<ASSISTANT_TASK:>
Python Code:
# Preview the source YAML, then generate PostgreSQL DDL from it with ddlgenerator.
!head data/provinces.yaml
!ddlgenerator -i -t postgresql data/provinces.yaml | head -20
# !ddlgenerator -i -t postgresql http://github.com/catherinedevlin/pycon2015_sqla_lightning/data/provinces.yaml
# Recreate the database and load the generated DDL + data via psql.
!dropdb pycon
!createdb pycon
!ddlgenerator -i postgresql data/provinces.yaml | psql pycon | head -20
# Connect ipython-sql to the freshly loaded database.
%load_ext sql
%sql postgresql://:@/pycon
%sql SELECT name, bird FROM provinces ORDER BY name
%%sql
SELECT c.name || ', ' || p.name AS name,
       c.population
  FROM cities c
  JOIN provinces p
    ON (c.provinces_id = p.provinces_id)
ORDER BY c.population DESC
LIMIT 10
cities = _
cities.DataFrame().plot(kind='bar', x='name')
# Copy the schema only, then fill a 10% sample with rdbms-subsetter.
!dropdb pycon_test
!createdb pycon_test
!pg_dump --schema-only pycon | psql pycon_test
!rdbms-subsetter -y postgresql://:@/pycon postgresql://:@/pycon_test 0.1
%%sql postgresql://:@/pycon_test
SELECT c.name || ', ' || p.name AS name,
       c.population
  FROM cities c
  JOIN provinces p
    ON (c.provinces_id = p.provinces_id)
ORDER BY c.population DESC
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Implementation
Step3: Question 1
Step4: Question 2
Step5: Question 3
Step6: Observation
Step7: Implementation
Step8: Question 4
Step9: Question 5
Step10: Implementation
Step11: Observation
Step12: Visualizing a Biplot
Step13: Observation
Step14: Question 7
Step15: Implementation
Step16: Question 8
Step17: Answer
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the wholesale customers dataset
try:
    data = pd.read_csv("customers.csv")
    data.drop(['Region', 'Channel'], axis = 1, inplace = True)
    print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
    print "Dataset could not be loaded. Is the dataset missing?"
# Display a description of the dataset
display(data.describe())
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [0, 100, 400]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
import seaborn as sns
# Heatmap of each sample's z-score per feature, relative to the full dataset.
sns.heatmap((samples-data.mean())/data.std(ddof=0), annot=True, cbar=False, square=True)
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
# Feature-relevance check: try to predict one feature from the other five.
removed = 'Delicatessen'
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
new_data = pd.DataFrame.copy(data)
new_data.drop([removed], axis = 1, inplace = True)
# TODO: Split the data into training and testing sets using the given feature as the target
X_train, X_test, y_train, y_test = train_test_split(new_data, data[removed], test_size=0.33, random_state=42)
# TODO: Create a decision tree regressor and fit it to the training set
regressor = DecisionTreeRegressor(random_state=42)
regressor.fit(X_train, y_train)
# TODO: Report the score of the prediction using the testing set
score = regressor.score(X_test, y_test)
print "Score: ", score
# reviewer code
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeRegressor
def calculate_r_2_for_feature(data,feature):
    """Return the R^2 score obtained when predicting `feature` from the rest."""
    # Split off the target column and fit a tree on the remaining features.
    predictors = data.drop(feature, axis=1)
    target = data[feature]
    X_train, X_test, y_train, y_test = train_test_split(
        predictors, target, test_size=0.25, random_state=42  # random_state to reproduce
    )
    model = DecisionTreeRegressor(random_state=42)  # update to repeat result
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
def r_2_mean(data,feature,runs=200):
    """Mean R^2 over `runs` repeated fits, rounded to 4 decimal places.

    Bug fix: the `runs` parameter was previously ignored in favour of a
    hard-coded 200 repetitions.
    NOTE(review): with a fixed random_state inside calculate_r_2_for_feature
    every run returns the same score, so repeating is effectively redundant.
    """
    return np.array([calculate_r_2_for_feature(data,feature)
                     for _ in range(runs)]).mean().round(4)
# Repeat the relevance check for every feature.
print "{0:17} {1}".format("Fresh: ", r_2_mean(data,'Fresh'))
print "{0:17} {1}".format("Milk: ", r_2_mean(data,'Milk'))
print "{0:17} {1}".format("Grocery: ", r_2_mean(data,'Grocery'))
print "{0:17} {1}".format("Frozen: ", r_2_mean(data,'Frozen'))
print "{0:17} {1}".format("Detergents_Paper: ", r_2_mean(data,'Detergents_Paper'))
print "{0:17} {1}".format("Delicatessen: ", r_2_mean(data,'Delicatessen'))
# Feature importances of the regressor fitted in the earlier cell.
zip(new_data, regressor.feature_importances_)
# reviewer feedback
import matplotlib.pyplot as plt
# Lower-triangle heatmap of pairwise feature correlations.
corr = data.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask, 1)] = True
with sns.axes_style("white"):
    ax = sns.heatmap(corr, mask=mask, square=True, annot=True,
                     cmap='RdBu', fmt='+.3f')
plt.xticks(rotation=45, ha='center')
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# TODO: Scale the data using the natural logarithm
log_data = np.log(pd.DataFrame.copy(data))
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(pd.DataFrame.copy(samples))
# reviewer feedback
import matplotlib.pyplot as plt
# Same correlation heatmap after the log transform.
corr = log_data.corr()
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask, 1)] = True
with sns.axes_style("white"):
    ax = sns.heatmap(corr, mask=mask, square=True, annot=True,
                     cmap='RdBu', fmt='+.3f')
plt.xticks(rotation=45, ha='center')
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# Hint: Change the code to show both sample dataset (better to see)
# Display the log-transformed sample data
print "Original sample dataset:"
display(samples)
print "Log-transformed sample dataset:"
display(log_samples)
# For each feature find the data points with extreme high or low values
from collections import Counter
c = Counter()
for feature in log_data.keys():
    # TODO: Calculate Q1 (25th percentile of the data) for the given feature
    Q1 = np.percentile(log_data[feature], 25)
    # TODO: Calculate Q3 (75th percentile of the data) for the given feature
    Q3 = np.percentile(log_data[feature], 75)
    # TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
    step = 1.5*(Q3-Q1)
    # Display the outliers
    print "Data points considered outliers for the feature '{}':".format(feature)
    # Tukey's rule: anything outside [Q1 - step, Q3 + step] counts as an outlier.
    data_filter = ~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))
    o = log_data[data_filter]
    # get the index of outliers
    i = log_data.index[data_filter] # there is some pythonic way?
    c.update(i)
    display(o)
# OPTIONAL: Select the indices for data points you wish to remove
outliers = c.keys()
print "Outliers for more then one feature:"
print sorted(list(k for k, v in c.items() if v > 1))
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
pca = PCA(n_components=6, random_state=43)
pca.fit(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = vs.pca_results(good_data, pca)
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2, random_state=43)
pca.fit(good_data)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# Create a biplot
vs.biplot(good_data, reduced_data, pca)
from sklearn import mixture
from sklearn.metrics import silhouette_score
# TODO: Apply your clustering algorithm of choice to the reduced data
n_components = 2
clusterer = mixture.GaussianMixture(n_components = n_components, random_state = 44)
clusterer.fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.means_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
score = silhouette_score(reduced_data, preds)
print score
# Display the results of the clustering from implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# TODO: Inverse transform the centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
# exp undoes the earlier np.log, recovering spending in original units.
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
# Compare segment centers against the dataset median spending.
true_centers = true_centers.append(data.describe().ix['50%'])
true_centers.plot(kind = 'bar', figsize = (16, 4))
print "Clusters"
display(true_centers)
print "Sample Data"
display(samples)
# Display the predictions
for i, pred in enumerate(sample_preds):
    print "Sample point", i, "predicted to be in Cluster", pred
    print 'The distance between sample point {} and center of cluster {}:'.format(i, pred)
    print (samples.iloc[i] - true_centers.iloc[pred])
# Display the clustering results based on 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Google Colab & Kaggle interagration
Step2: Use %%ampl_eval to pass the model to AMPL
Step3: Set data
Step4: Use %%ampl_eval to display values
Step5: Use amplpy to retrive values
Step6: Use %%ampl_eval to solve the model
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q amplpy ampltools
MODULES=['ampl', 'gurobi']
from ampltools import cloud_platform_name, ampl_notebook
from amplpy import AMPL, register_magics
if cloud_platform_name() is None:
    ampl = AMPL() # Use local installation of AMPL
else:
    ampl = ampl_notebook(modules=MODULES) # Install AMPL and use it
register_magics(ampl_object=ampl) # Evaluate %%ampl_eval cells with ampl.eval()
# Knapsack model: maximize total value subject to a capacity constraint.
%%ampl_eval
set SIZES;
param capacity >= 0;
param value {SIZES};
var Qty {SIZES} binary;
maximize TotVal: sum {s in SIZES} value[s] * Qty[s];
subject to Cap: sum {s in SIZES} s * Qty[s] <= capacity;
# Populate set and parameter data from Python.
ampl.set['SIZES'] = [5, 4, 6, 3]
ampl.param['value'] = [10, 40, 30, 50]
ampl.param['capacity'] = 10
%%ampl_eval
display SIZES;
display value;
display capacity;
# The same values read back through the amplpy API.
print('SIZES:', ampl.set['SIZES'].getValues().toList())
print('value:', ampl.param['value'].getValues().toDict())
print('capacity:', ampl.param['capacity'].value())
%%ampl_eval
option solver gurobi;
option gurobi_options 'outlev=1';
solve;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Process MEG data
Step2: Compute regularized noise covariance
Step3: Compute the evoked response
Step4: It's also a good idea to look at whitened data
Step5: Inverse modeling
Step6: Next, we make an MEG inverse operator.
Step7: Compute inverse solution
Step8: Visualization
Step9: Examine the original data and the residual after fitting
Step10: Here we use peak getter to move visualization to the time point of the peak
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)  # already has an average reference
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1)  # event trigger and conditions
tmin = -0.2  # start of each epoch (200ms before the trigger)
tmax = 0.5  # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
baseline = (None, 0)  # means from the first instant to t = 0
# Amplitude thresholds for rejecting noisy epochs.
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=('meg', 'eog'), baseline=baseline, reject=reject)
# Regularized noise covariance from the baseline period (tmax=0).
noise_cov = mne.compute_covariance(
    epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True)
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
# Average the epochs into the evoked response (MEG channels only).
evoked = epochs.average().pick('meg')
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag',
                    time_unit='s')
# Whitened data should look like zero-mean unit-variance noise pre-stimulus.
evoked.plot_white(noise_cov, time_unit='s')
del epochs, raw  # to save memory
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
# MEG inverse operator from the forward model and noise covariance.
inverse_operator = make_inverse_operator(
    evoked.info, fwd, noise_cov, loose=0.2, depth=0.8)
del fwd
# You can write it to disk with::
#
#     >>> from mne.minimum_norm import write_inverse_operator
#     >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
#                                inverse_operator)
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2  # regularization from the assumed SNR
stc, residual = apply_inverse(evoked, inverse_operator, lambda2,
                              method=method, pick_ori=None,
                              return_residual=True, verbose=True)
# Plot every 100th source time course.
fig, ax = plt.subplots()
ax.plot(1e3 * stc.times, stc.data[::100, :].T)
ax.set(xlabel='time (ms)', ylabel='%s value' % method)
# Overlay the residual (after fitting) on the original evoked data.
fig, axes = plt.subplots(2, 1)
evoked.plot(axes=axes)
for ax in axes:
    ax.texts = []
    for line in ax.lines:
        line.set_color('#98df81')
residual.plot(axes=axes)
# Move the visualization to the time point of the peak activation.
vertno_max, time_max = stc.get_peak(hemi='rh')
subjects_dir = data_path + '/subjects'
surfer_kwargs = dict(
    hemi='rh', subjects_dir=subjects_dir,
    clim=dict(kind='value', lims=[8, 12, 15]), views='lateral',
    initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=10)
brain = stc.plot(**surfer_kwargs)
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
               scale_factor=0.6, alpha=0.5)
brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title',
               font_size=14)
# The documentation website's movie is generated with:
# brain.save_movie(..., tmin=0.05, tmax=0.15, interpolation='linear',
#                  time_dilation=20, framerate=10, time_viewer=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic plots
Step2: Calculate transmittance and albedo
|
<ASSISTANT_TASK:>
Python Code:
import accuread as ar
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use(['ggplot'])
moddir = '../tests/testdata/'
d = ar.ReadART('demo1', # basename of simulation
basefolder=moddir, # folder where the Output-folder is located
scalar=True, # read scalar irradiance
iops=True, # read iop file
runvarfile='sza.txt' # add contents of this file to d.runvar not very flexible at the moment
)
d.plot(run=2)
d.plot(profile=True)
d.transmittance(layers=(1,2),integrated=True)
d.albedo(layer=1,integrated=True)
fig,ax = plt.subplots(ncols=2,figsize=(8,3))
ax[0].plot(d.wavelengths,d.transmittance(layers=(0,1)))
ax[1].plot(d.wavelengths,d.transmittance(layers=(1,2)))
ax[0].set_title('Atmospheric transmittance')
ax[1].set_title('Transmittance of 1m ocean')
for a in ax:
a.legend([str(x) for x in d.runvar],loc='best',title='SZA')
a.set_xlabel('Wavelength [nm]')
fig,ax = plt.subplots()
ax.plot(d.wavelengths,d.albedo(1))
ax.set_xlabel('Wavelength [nm]')
ax.set_ylabel('Albedo')
ax.legend([str(x) for x in d.runvar],loc='best',title='SZA')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our dataset
Step2: Use this line to confirm the location of your files
Step3: Let's set some immutable variables
Step6: Next we create pytorch datasets from the training and test data (note that you need to change the root_dir, if you placed the data in a different folder)
Step9: Autoencoder
Step10: Having defined the encoder and decoder network, we can move on to define the Autoencoder.
Step11: The next step is to train the Autoencoder. This is what a generic training loop looks like
Step12: In the next cell we set the training parameters, define the loss function and create DataLoaders. Pytorch DataLoaders manage the data loading for us (break the dataset into batches, keep track of epochs, reshuffle the data after each epoch)
Step13: It's finally time for training
Step14: Let's see how the model is doing. Let's look at
Step15: You can see that the model had a really easy time learning the task and that we haven't overfitted yet
Step16: ... and the average reconstruction error as a function of wavelength
Step17: A few things you might have noticed and that can be useful to keep in mind
Step18: STEP 2
Step19: Since we work with probability distributions in the VAE, we need to import the torch distributions package. We will only need the normal distribution for this exercise.
Step20: STEP 3
Step21: STEP 4
Step22: Let's train!
Step23: STEP 5
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision.transforms import Normalize
from google.colab import drive
import os
drive.mount('/content/drive')
! ls drive/MyDrive/ML_lecture_data/
INPUT_SIZE = 1000
LATENT_SIZE = 6
class SDSS_DR16(Dataset):
    """De-redshifted and downsampled spectra from SDSS-BOSS DR16."""

    def __init__(self, root_dir='drive/MyDrive/ML_lecture_data/', transform=True, train=True):
        """
        Args:
            root_dir (string): Directory of data file
            transform (callable, optional): Optional transform to be applied
                on a sample.
            train (bool): load the training split if True, the test split otherwise.
        """
        # NOTE(review): `transform` is accepted but never used below — confirm intent.
        if train:
            self.data = np.load(open(os.path.join(root_dir,'DR16_denoised_inpainted_train.npy'),'rb'),allow_pickle=True)
        else:
            self.data = np.load(open(os.path.join(root_dir,'DR16_denoised_inpainted_test.npy'),'rb'),allow_pickle=True)
        self.data = torch.as_tensor(self.data)
        # Global mean/std of the whole split, used to standardize each spectrum on access.
        self.mean = torch.mean(self.data)
        self.std = torch.std(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Standardize on the fly: subtract the split mean, divide by the split std.
        sample = (self.data[idx]-self.mean)/self.std
        return sample
#initialize datasets
training_data = SDSS_DR16(train=True)
test_data = SDSS_DR16(train=False)
# we inherit from pytorch Module class; https://pytorch.org/docs/stable/generated/torch.nn.Module.html
# we inherit from pytorch Module class; https://pytorch.org/docs/stable/generated/torch.nn.Module.html
class Encoder(nn.Module):
    """Maps a spectrum of shape (INPUT_SIZE,) to a latent code of shape (LATENT_SIZE,)."""

    def __init__(self, seed=853):
        """
        Args:
            seed: int, random seed for reproducibility
        """
        super(Encoder, self).__init__()
        self.seed = torch.manual_seed(seed)
        # here we are initializing the linear layers. This registers the layer parameters (W,b) as parameters of the Module
        self.fc1 = nn.Linear(INPUT_SIZE,50)
        self.fc2 = nn.Linear(50,LATENT_SIZE)

    # this defines a forward pass of the network (="applying" the network to some input data)
    def forward(self, x):
        x = torch.nn.LeakyReLU()(self.fc1(x))
        z = self.fc2(x)
        return z
class Decoder(nn.Module):
    """Maps a latent code of shape (LATENT_SIZE,) back to a spectrum of shape (INPUT_SIZE,)."""

    def __init__(self, seed=620):
        """
        Args:
            seed: int, random seed for reproducibility
        """
        super(Decoder, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(LATENT_SIZE,50)
        self.fc2 = nn.Linear(50,INPUT_SIZE)

    def forward(self, z):
        z = torch.nn.LeakyReLU()(self.fc1(z))
        x = self.fc2(z)
        return x
class Autoencoder(nn.Module):
    """Composes the Encoder and Decoder into one reconstruction model."""

    def __init__(self):
        super(Autoencoder, self).__init__()
        # Instantiating the sub-networks here registers them as child modules.
        self.encoder = Encoder()
        self.decoder = Decoder()

    def forward(self, x):
        # Encode into the latent space, then decode back to input space.
        return self.decoder(self.encoder(x))
# This creates an instance of the Autoencoder class
AE = Autoencoder()
# the training loop takes a function that loads the data batch by batch, a model to train, a loss function to train the model on and an optimizer
# the training loop takes a function that loads the data batch by batch, a model to train, a loss function to train the model on and an optimizer
def train_loop(dataloader, model, loss_fn, optimizer):
    """Optimize `model` for one pass over `dataloader`.

    Returns the list of loss values logged on every 100th batch.
    """
    n_samples = len(dataloader.dataset)
    logged = []
    # iterate over the dataset
    for step, batch_x in enumerate(dataloader):
        # Forward pass: for the autoencoder the prediction is the
        # reconstruction, and the target is the input itself.
        batch_loss = loss_fn(model(batch_x), batch_x)
        # Backpropagation: clear stale gradients, differentiate, update.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        # Periodic progress logging (includes the very first batch).
        if step % 100 == 0:
            value, seen = batch_loss.item(), step * len(batch_x)
            logged.append(value)
            print(f"loss: {value:>7f} [{seen:>5d}/{n_samples:>5d}]")
    return logged
# the test loop is similar to the training loop, only that we don't take any gradients/don't update the network parameters, but only evaluate
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
test_loss = 0
with torch.no_grad():
for X in dataloader:
pred = model(X)
test_loss += loss_fn(pred, X).item()
test_loss /= num_batches
print(f" Avg loss: {test_loss:>8f} \n")
return test_loss
BATCHSIZE = 128
BATCHSIZE_TEST = 256
LEARNING_RATE = 1e-3
# MeanSquaredError (L2) Loss
loss_fn = nn.MSELoss()
# Adam Optimizer
optimizer = torch.optim.Adam(AE.parameters(), lr=LEARNING_RATE)
# Dataloaders
train_dataloader = DataLoader(training_data, batch_size=BATCHSIZE, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=BATCHSIZE_TEST, shuffle=True)
EPOCHS = 30
SEED = 555
train_loss = []
test_loss = []
for t in range(EPOCHS):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
print(f"Epoch {t+1}\n-------------------------------")
train_loss.append(train_loop(train_dataloader, AE, loss_fn, optimizer))
test_loss.append(test_loop(test_dataloader, AE, loss_fn))
print("Done!")
# losses
length = len(np.asarray(train_loss).flatten())
plt.figure()
plt.plot(np.linspace(0,length*100,length), np.asarray(train_loss).flatten(),label='training set')
plt.plot(np.linspace(100,(length)*100,len(test_loss)),test_loss,label='test set')
plt.xlabel('training step')
plt.ylabel('loss')
plt.legend()
plt.show()
### zoom in
length = len(np.asarray(train_loss).flatten())
plt.figure()
plt.plot(np.linspace(0,length*100,length), np.asarray(train_loss).flatten(),label='training set')
plt.plot(np.linspace(100,(length)*100,len(test_loss)),test_loss,label='test set')
plt.xlabel('training step')
plt.ylabel('loss')
plt.legend()
plt.ylim(0,0.1)
plt.show()
test_input = next(iter(test_dataloader))
with torch.no_grad():
recons = AE(test_input)
# This is the mapping from pixel to the de-redshifted (rest) wavelength
wlmin, wlmax = (3388,8318)
fixed_num_bins = 1000
wl_range = (np.log10(wlmin),np.log10(wlmax))
wl = np.logspace(wl_range[0],wl_range[1],fixed_num_bins)
fig, ax = plt.subplots(4,4, figsize=(20,10), sharex=True)
ax = ax.flatten()
for ii in range(16):
ax[ii].plot(wl,test_input[ii], label='input')
ax[ii].plot(wl,recons[ii],alpha=0.7,label='reconstruction')
if ii in np.arange(12,16):
ax[ii].set_xlabel('wavelength [Ångströms]')
if ii in [0,4,8,12]:
ax[ii].set_ylabel('some standardized flux')
if ii==0:
ax[ii].legend()
plt.show()
avg_loss = 0
with torch.no_grad():
for X in test_dataloader:
pred = AE(X)
avg_loss+=np.mean((pred.cpu().numpy()-X.cpu().numpy())**2,axis=0)/(len(test_data)//BATCHSIZE_TEST)
plt.figure()
plt.plot(wl,np.sqrt(avg_loss))
plt.ylabel('average reconstruction error')
plt.xlabel('wavelength [Ångströms]')
plt.show()
#Optional: save the model weights
#torch.save(AE.state_dict(), 'drive/MyDrive/ML_lecture_models/AE_model_weights.pth')
class VAEEncoder(nn.Module):
    # Exercise template: the `??` placeholders are intentionally left for the
    # student to fill in (see the TASK/HINT comments). As written, this block
    # is not valid Python until the placeholders are replaced.
    def __init__(self, seed=853):
        super(VAEEncoder, self).__init__()
        #TASK: change the output size of the encoder network. How many parameters must it return to define q(z|x)?
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(INPUT_SIZE,50)
        self.fc2 = nn.Linear(50,??)
    def forward(self, x):
        #TASK: change the output of the encoder network. Instead of just returning z, it should return z and ...?
        #HINT: Don't forget that the standard deviation/variance must be strictly positive!
        #HINT: You might want to use torch.split(): https://pytorch.org/docs/stable/generated/torch.split.html
        x = torch.nn.LeakyReLU()(self.fc1(x))
        x = self.fc2(x)
        mu,std = ??
        std = ??
        return mu, std
class VAEDecoder(nn.Module):
    """Decoder of the VAE: maps a latent sample back to spectrum space."""

    def __init__(self, seed=620):
        super(VAEDecoder, self).__init__()
        self.seed = torch.manual_seed(seed)
        self.fc1 = nn.Linear(LATENT_SIZE,50)
        self.fc2 = nn.Linear(50,INPUT_SIZE)

    def forward(self, z):
        # One hidden layer with a leaky-ReLU nonlinearity, then a linear readout.
        hidden = torch.nn.LeakyReLU()(self.fc1(z))
        return self.fc2(hidden)
#TASK: Familiarize yourself with torch.distribution.Normal - you can find the documentation here: https://pytorch.org/docs/stable/distributions.html#normal
#HINT: It takes a standard deviation (scale) not a variance as input
from torch.distributions import Normal as Normal
class VariationalAutoencoder(nn.Module):
    # Exercise template: the `??` placeholders (and the quoted attribute names
    # in __init__) are deliberately incomplete and must be filled in by the
    # student per the TASK/HINT comments; the class is not valid Python as-is.
    #TASK: add parameters mentioned in point 1.
    def __init__(self, ??, ??):
        super(VariationalAutoencoder, self).__init__()
        self.encoder = VAEEncoder()
        self.decoder = VAEDecoder()
        self."insert name of param1 here" = ??
        self."insert name of param2 here" = ??
        #TASK: Use the Normal class to define the prior (a standard normal distribution), p(z)
        self.prior = ??
    def get_q(self,x):
        #TASK: write a method that computes q(z,x)
        #HINT: use the Normal class we imported above
        mu, std = ??
        self.q = ??
        return True
    def sample_q(self):
        #TASK: write a method that samples from q
        #HINT: use rsample to apply the reparameterization trick
        #HINT: rsample takes a list or array of shapes as input, e.g. [sample_size]
        z_sample = ??
        return z_sample
    #TASK (Optional): write a method that allows to change how many samples are drawn from q(z|x)
    def change_sample_size(self,??):
        ?? = ??
        return True
    def get_avg_log_likelihood(self,recons,x):
        #TASK: Write a method that returns the first term in the ELBO (this method should define the likelihood and evaluate the average log likelihood of the reconstruction)
        #HINT: Pay attention to shapes. The function should return an average log likelihood (a single number) for every data point in the batch.
        #HINT: The output shape of Normal(mu, sigma).log_prob() is a little unintuitive. If mu or sigma are N-dimensional, it returns N results (applies N independent Gaussians).
        #HINT: You need to average over samples from q to obtain the final result.
        ll = ??
        log_p = ??
        log_p = ??
        return ??
    def stochastic_kl_divergence(self,z_sample):
        #TASK: Write a method that computes the kl-divergence between q(z|x) and p(z)
        #HINT: Pay attention to shape
        return ??
    def forward(self, x):
        #TASK: a forward pass should return the two terms in the ELBO
        #HINT: use all the methods we defined above
        ??
        samples = ??
        recons = ??
        log_likelihood = ??
        kl = ??
        return log_likelihood, kl
#TASK: create an instance of the Variational Autoencoder with sample_size=4 and sigma=1
VAE = ??
optimizer = torch.optim.Adam(VAE.parameters(), lr=LEARNING_RATE)
#from torch.optim.lr_scheduler import StepLR
#scheduler = StepLR(optimizer, step_size=10, gamma=0.75)
#TASK: define the new loss function
def negative_ELBO(avg_log_likelihood,kl):
    # Exercise placeholder: ELBO = E_q[log p(x|z)] - KL(q||p); the training
    # loss is its negative. Fill in the `??` below.
    negative_ELBO = ??
    return negative_ELBO
def train_loop(dataloader, model, loss_fn, optimizer):
    # VAE training loop (exercise template): same structure as the plain-AE
    # loop, but the loss must be assembled from the two ELBO terms returned
    # by the model's forward pass. Fill in the `??` placeholders below.
    size = len(dataloader.dataset)
    losses = []
    for batch, X in enumerate(dataloader):
        #TASK: compute the loss from the output of the VAE forward pass
        log_likelihood, kl = ??
        loss = ??
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            losses.append(loss)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
    #scheduler.step()
    return losses
def test_loop(dataloader, model, loss_fn):
    # VAE evaluation loop (exercise template): besides the total loss (ELBO),
    # the KL divergence and negative log likelihood contributions are tracked
    # separately. Fill in the `??` placeholders below.
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    test_loss, nllh, kl_ = 0, 0, 0
    model.eval()
    with torch.no_grad():
        for X in dataloader:
            #TASK: in the test loop we want to keep track not only of the ELBO, but also of the two terms that contribute to the ELBO (kl diveregence and loglikelihood)
            log_likelihood, kl = ??
            test_loss += ??.item()
            nllh += ??
            kl_ += ??
    test_loss /= num_batches
    kl_ /= num_batches
    nllh /= num_batches
    print(f" Avg test loss : {test_loss:>8f}")
    print(f" Avg KL : {kl_:>8f}")
    print(f" Avg negative log likelihood : {nllh:>8f} \n")
    return test_loss, kl_, nllh
EPOCHS = 8
SEED = 1234
train_loss = []
test_loss = []
for t in range(EPOCHS):
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
print(f"Epoch {t+1}\n-------------------------------")
train_loss.append(train_loop(train_dataloader, VAE, negative_ELBO, optimizer))
test_loss.append(test_loop(test_dataloader, VAE, negative_ELBO))
print("Done!")
test_loss = np.asarray(test_loss)
#TASK: plot the training loss, test loss, and the contributions to the loss from each of the two terms
length = len(np.asarray(train_loss).flatten())
plt.figure()
??
plt.xlabel('training step')
plt.ylabel('loss')
plt.legend()
plt.show()
# TASK: Inspect how the contribution of the kl divergence and log likelihood to the loss change as you change the noise in the likelihood. Some suggested values: sigma=[0.5,1,2]
# TASK: what happens when you change the number of samples?
# What do you observe? Can you interpret it?
#TASK: plot the average reconstruction error of the model as a function of wavelength (similar to above). How does it compare to the Autoencoder?
#HINT: Use the mean of $q(z|x)$ as the latent point for data x
avg_loss = 0
VAE.eval()
with torch.no_grad():
for X in test_dataloader:
pred = ??
avg_loss+=??
plt.figure()
plt.plot(wl,np.sqrt(avg_loss))
plt.ylabel('average reconstruction error')
plt.xlabel('wavelength [Ångströms]')
plt.show()
#TASK: make a corner plot of posterior samples. Does the average posterior match the prior?
import seaborn as sns
import pandas as pd
VAE.eval()
with torch.no_grad():
for ii, X in enumerate(test_dataloader):
VAE.get_q(X)
prior_sample = ??
sample = ??
if ii==0:
samples = sample
prior_samples = prior_sample
else:
samples = np.vstack([samples, sample])
prior_samples = np.vstack([prior_samples, prior_sample])
samples = np.reshape(samples,[-1, LATENT_SIZE])
prior_samples = np.reshape(prior_samples,[-1, LATENT_SIZE])
print(samples.shape)
print(prior_samples.shape)
data1 = pd.DataFrame()
data2 = pd.DataFrame()
for ii in range(LATENT_SIZE):
data1['dim_%d'%ii] = samples[:,ii]
data1['source'] = 'posterior'
for ii in range(LATENT_SIZE):
data2['dim_%d'%ii] = prior_samples[:,ii]
data2['source'] = 'prior'
data = pd.concat([data1,data2]).reset_index(drop=True)
#HINT: to get a density estimate you can set kind='kde', but you'll probably have to reduce the number of samples, KDE optimization scales pretty badly with number of samples
sns.pairplot(data,corner=True,kind='scatter', hue='source', plot_kws={'s':4})
plt.show()
#TASK: Generate artificial data: sample from the prior and foward model the sample thorugh the decoder. Do the samples look realistic? Why?/Why not?
VAE.eval()
with torch.no_grad():
samples = ??
data_samples = ??
# This is the mapping from pixel to the de-redshifted (rest) wavelength
wlmin, wlmax = (3388,8318)
fixed_num_bins = 1000
wl_range = (np.log10(wlmin),np.log10(wlmax))
wl = np.logspace(wl_range[0],wl_range[1],fixed_num_bins)
fig, ax = plt.subplots(4,4, figsize=(20,10), sharex=True)
ax = ax.flatten()
for ii in range(16):
ax[ii].plot(wl,data_samples[ii], label='artificial data')
if ii in np.arange(12,16):
ax[ii].set_xlabel('wavelength [Ångströms]')
if ii in [0,4,8,12]:
ax[ii].set_ylabel('some standardized flux')
if ii==0:
ax[ii].legend()
plt.show()
#Optional: Save model weights
# torch.save(VAE.state_dict(), 'drive/MyDrive/ML_lecture_models/VAE_model_weights.pth')
# torch.save(VAE.encoder.state_dict(), 'drive/MyDrive/ML_lecture_models/Encoder_model_weights.pth')
# torch.save(VAE.decoder.state_dict(), 'drive/MyDrive/ML_lecture_models/Decoder_model_weights.pth')
# VAE.load_state_dict(torch.load('drive/MyDrive/ML_lecture_models/VAE_model_weights.pth'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use this to automate the process. Be careful: it can overwrite current results
Step2: Now we will obtain the data from the calculated empirical variogram.
Step3: restricted w/ all data spatial correlation parameters
Step4: Instantiating the variogram object
Step5: Instantiating theoretical variogram model
|
<ASSISTANT_TASK:>
Python Code:
# Load Biospytial modules and etc.
%matplotlib inline
import sys
sys.path.append('/apps')
sys.path.append('..')
sys.path.append('../spystats')
import django
django.setup()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
## Use the ggplot style
plt.style.use('ggplot')
import tools
from HEC_runs.fit_fia_logbiomass_logspp_GLS import prepareDataFrame,loadVariogramFromData,buildSpatialStructure, calculateGLS, initAnalysis, fitGLSRobust
section = initAnalysis("/RawDataCSV/idiv_share/FIA_Plots_Biomass_11092017.csv",
"/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv",
-130,-60,30,40)
#section = initAnalysis("/RawDataCSV/idiv_share/plotsClimateData_11092017.csv",
# "/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv",
# -85,-80,30,35)
# IN HEC
#section = initAnalysis("/home/hpc/28/escamill/csv_data/idiv/FIA_Plots_Biomass_11092017.csv","/home/hpc/28/escamill/spystats/HEC_runs/results/variogram/data_envelope.csv",-85,-80,30,35)
section.shape
gvg,tt = loadVariogramFromData("/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv",section)
gvg.plot(refresh=False,with_envelope=True)
resum,gvgn,resultspd,results = fitGLSRobust(section,gvg,num_iterations=1,distance_threshold=1000000)
resum.as_text
plt.plot(resultspd.rsq)
plt.title("GLS feedback algorithm")
plt.xlabel("Number of iterations")
plt.ylabel("R-sq fitness estimator")
resultspd.columns
a = map(lambda x : x.to_dict(), resultspd['params'])
paramsd = pd.DataFrame(a)
paramsd
plt.plot(paramsd.Intercept.loc[1:])
plt.get_yaxis().get_major_formatter().set_useOffset(False)
fig = plt.figure(figsize=(10,10))
plt.plot(paramsd.logSppN.iloc[1:])
variogram_data_path = "/apps/external_plugins/spystats/HEC_runs/results/variogram/data_envelope.csv"
thrs_dist = 100000
emp_var_log_log = pd.read_csv(variogram_data_path)
gvg = tools.Variogram(section,'logBiomass',using_distance_threshold=thrs_dist)
gvg.envelope = emp_var_log_log
gvg.empirical = emp_var_log_log.variogram
gvg.lags = emp_var_log_log.lags
#emp_var_log_log = emp_var_log_log.dropna()
#vdata = gvg.envelope.dropna()
matern_model = tools.MaternVariogram(sill=0.34,range_a=100000,nugget=0.33,kappa=4)
whittle_model = tools.WhittleVariogram(sill=0.34,range_a=100000,nugget=0.0,alpha=3)
exp_model = tools.ExponentialVariogram(sill=0.34,range_a=100000,nugget=0.33)
gaussian_model = tools.GaussianVariogram(sill=0.34,range_a=100000,nugget=0.33)
spherical_model = tools.SphericalVariogram(sill=0.34,range_a=100000,nugget=0.33)
gvg.model = whittle_model
#gvg.model = matern_model
#models = map(lambda model : gvg.fitVariogramModel(model),[matern_model,whittle_model,exp_model,gaussian_model,spherical_model])
gvg.fitVariogramModel(whittle_model)
import numpy as np
xx = np.linspace(0,1000000,1000)
gvg.plot(refresh=False,with_envelope=True)
plt.plot(xx,whittle_model.f(xx),lw=2.0,c='k')
plt.title("Empirical Variogram with fitted Whittle Model")
def randomSelection(n,p):
    # Draw `p` distinct row indices from range(n) (without replacement) and
    # return the matching rows.
    # NOTE(review): reads the notebook-global DataFrame `new_data` instead of
    # taking it as an argument — confirm `n == len(new_data)` at call time.
    idxs = np.random.choice(n,p,replace=False)
    random_sample = new_data.iloc[idxs]
    return random_sample
#################
n = len(new_data)
p = 3000 # The amount of samples taken (let's do it without replacement)
random_sample = randomSelection(n,100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, let's implement the rolling validation.
Step2: So, I could use a training period based in an amount of market days, or in an amount of sample base periods. The first approach would be taking into consideration the temporal correlation of the data, the second would consider that the amount of samples should be large enough. Not to lose sight of the real problem at hand, I will use the market days approach, and then check that the amount of samples is big enough.
Step3: A lot of attention should be paid to the effect of filling the missing data. It may change the whole results.
Step5: That last number is an approximation of the number of train/evaluation sets that are being considered.
Step6: Let's test the whole process
Step7: It seems like the weird point, in which the model is predicting terribly may be the 2008 financial crisis. And the big unpredictability is limited to one symbol. I should implement a way to trace the symbols...
|
<ASSISTANT_TASK:>
Python Code:
def run_single_val(x, y, ahead_days, estimator):
    """Fit `estimator` on all but the last date and evaluate on that date.

    The training window ends `ahead_days` market days before the last
    timestamp, so the evaluation target is never seen during training.

    Args:
        x: features indexed by date (optionally a (date, symbol) MultiIndex).
        y: target aligned with `x`; concatenated as the last column.
        ahead_days: prediction horizon in market days.
        estimator: object exposing sklearn-style fit/predict.

    Returns:
        (y_train_true, y_train_pred, y_val_true, y_val_pred)
    """
    multiindex = x.index.nlevels > 1
    x_y_sorted = pd.concat([x, y], axis=1).sort_index()
    # Last available date; for a MultiIndex the date is the first level.
    if multiindex:
        last_date = x_y_sorted.index.levels[0][-1]
    else:
        last_date = x_y_sorted.index[-1]
    # Train on everything up to `ahead_days` before the last date,
    # validate on the last date itself.
    x_y_train = x_y_sorted.loc[:fe.add_market_days(last_date, -ahead_days)]
    x_y_val = x_y_sorted.loc[last_date:]
    x_train = x_y_train.iloc[:,:-1]
    x_val = x_y_val.iloc[:,:-1]
    y_train_true = x_y_train.iloc[:,-1]
    y_val_true = x_y_val.iloc[:,-1]
    estimator.fit(x_train)
    y_train_pred = estimator.predict(x_train)
    y_val_pred = estimator.predict(x_val)
    return y_train_true, \
        y_train_pred, \
        y_val_true, \
        y_val_pred
y_train_true, y_train_pred, y_val_true, y_val_pred = run_single_val(x, y, 1, predictor)
print(y_train_true.shape)
print(y_train_pred.shape)
print(y_val_true.shape)
print(y_val_pred.shape)
print(y_train_true.shape)
y_train_true.head()
y = y_train_true
multiindex = y.index.nlevels > 1
if multiindex:
DATE_LEVEL_NAME = 'level_0'
else:
DATE_LEVEL_NAME = 'index'
DATE_LEVEL_NAME
y.reset_index()
reshape_by_symbol(y_train_true)
train_eval_days = -1 # In market days
base_days = 7 # In market days
step_days = 7 # market days
ahead_days = 1 # market days
today = data_df.index[-1] # Real date
train_days = 252 # market days per training period
step_eval_days = 30 # market days between training periods beginings
filled_data_df = pp.fill_missing(data_df)
tic = time()
x, y = fe.generate_train_intervals(filled_data_df,
train_eval_days,
base_days,
step_days,
ahead_days,
today,
fe.feature_close_one_to_one)
toc = time()
print('Elapsed time: %i seconds.' % (toc-tic))
x_y_sorted = pd.concat([x, y], axis=1).sort_index()
x_y_sorted
start_date = x_y_sorted.index.levels[0][0]
start_date
end_date = fe.add_market_days(start_date, 252)
end_date
end_date = fe.add_index_days(start_date, 252, x_y_sorted)
end_date
end_date = fe.add_market_days(start_date, 252)
x_i = x_y_sorted.loc[start_date:end_date].iloc[:,:-1]
y_i = x_y_sorted.loc[start_date:end_date].iloc[:,-1]
print(x_i.shape)
print(x_i.head())
print(y_i.shape)
print(y_i.head())
ahead_days
predictor = dmp.DummyPredictor()
y_train_true, y_train_pred, y_val_true, y_val_pred = run_single_val(x_i, y_i, ahead_days, predictor)
print(y_train_true.shape)
print(y_train_pred.shape)
print(y_val_true.shape)
print(y_val_pred.shape)
y_train_pred.head()
y_train_pred.dropna(axis=1, how='all').shape
scores = r2_score(pp.fill_missing(y_train_pred), pp.fill_missing(y_train_true), multioutput='raw_values')
print('R^2 score = %f +/- %f' % (np.mean(scores), 2*np.std(scores)))
scores = r2_score(y_train_pred, y_train_true, multioutput='raw_values')
print('R^2 score = %f +/- %f' % (np.mean(scores), np.std(scores)))
len(scores)
y_val_true_df = pd.DataFrame()
y_val_true
y_val_true_df.append(y_val_true)
x.index.min()
x.index.max()
x.index.max() - x.index.min()
(x.index.max() - fe.add_market_days(x.index.min(), train_days)).days // step_days
def roll_evaluate(x, y, train_days, step_eval_days, ahead_days, verbose=False):
    """Rolling (walk-forward) evaluation over consecutive training windows.

    Warning: The final date of the period should be no larger than the final date of the SPY_DF

    NOTE(review): calls the notebook-global `predictor` (and `fe`, `r2_score`,
    `np`, `sys`, `pd`) rather than receiving them as arguments — confirm those
    are defined before calling this function.
    """
    # calculate start and end date
    # sort by date
    x_y_sorted = pd.concat([x, y], axis=1).sort_index()
    start_date = x_y_sorted.index[0]
    end_date = fe.add_market_days(start_date, train_days)
    final_date = x_y_sorted.index[-1]
    # loop: run_single_val(x,y, ahead_days, estimator)
    r2_train_means = []
    r2_train_stds = []
    y_val_true_df = pd.DataFrame()
    y_val_pred_df = pd.DataFrame()
    # 252/365 converts calendar days to an approximate market-day count.
    num_training_sets = (252/365) * (x.index.max() - fe.add_market_days(x.index.min(), train_days)).days // step_eval_days
    set_index = 0
    if verbose:
        print('Evaluating approximately %i training/evaluation pairs' % num_training_sets)
    while end_date < final_date:
        # NOTE: the parameters x and y are rebound to the current window here.
        x = x_y_sorted.loc[start_date:end_date].iloc[:,:-1]
        y = x_y_sorted.loc[start_date:end_date].iloc[:,-1]
        y_train_true, y_train_pred, y_val_true, y_val_pred = run_single_val(x, y, ahead_days, predictor)
        # Calculate R^2 for training and append
        scores = r2_score(y_train_true, y_train_pred, multioutput='raw_values')
        r2_train_means.append(np.mean(scores))
        r2_train_stds.append(np.std(scores))
        # Append validation results
        y_val_true_df = y_val_true_df.append(y_val_true)
        y_val_pred_df = y_val_pred_df.append(y_val_pred)
        # Update the dates
        start_date = fe.add_market_days(start_date, step_eval_days)
        end_date = fe.add_market_days(end_date, step_eval_days)
        set_index += 1
        if verbose:
            sys.stdout.write('\rApproximately %2.1f percent complete. ' % (100.0 * set_index / num_training_sets))
            sys.stdout.flush()
    return r2_train_means, r2_train_stds, y_val_true_df, y_val_pred_df
train_eval_days = -1 # In market days
base_days = 14 # In market days
step_days = 30 # market days
ahead_days = 1 # market days
today = data_df.index[-1] # Real date
filled_data_df = pp.fill_missing(data_df)
tic = time()
x, y = fe.generate_train_intervals(filled_data_df,
train_eval_days,
base_days,
step_days,
ahead_days,
today,
fe.feature_close_one_to_one)
toc = time()
print('Elapsed time: %i seconds.' % (toc-tic))
train_days = 252 # market days per training period
step_eval_days = 10 # market days between training periods beginings
tic = time()
r2_train_means, r2_train_stds, y_val_true_df, y_val_pred_df = roll_evaluate(x, y, train_days, step_eval_days, ahead_days, verbose=True)
toc = time()
print('Elapsed time: %i seconds.' % (toc-tic))
y_val_true_df.head()
pd.DataFrame(r2_train_means).describe()
scores = r2_score(y_val_true_df.T, y_val_pred_df.T, multioutput='raw_values')
print('R^2 score = %f +/- %f' % (np.mean(scores), np.std(scores)))
pd.DataFrame(scores).describe()
plt.plot(y_val_true_df.index, r2_train_means, label='r2_train_means')
plt.plot(y_val_true_df.index, scores, label='r2 validation scores')
plt.legend(loc='lower left')
scores_val = r2_score(y_val_true_df, y_val_pred_df, multioutput='raw_values')
print('R^2 score = %f +/- %f' % (np.mean(scores_val), np.std(scores_val)))
plt.plot(scores_val, label='r2 validation scores')
sorted_means = x.sort_index().mean(axis=1)
sorted_means.head()
sorted_means.plot()
sub_period = sorted_means['2009-03-01':]
plt.scatter(sub_period.index, sub_period)
from sklearn.metrics import mean_absolute_error
scores = mean_absolute_error(y_val_true_df.T, y_val_pred_df.T, multioutput='raw_values')
print('MAE score = %f +/- %f' % (np.mean(scores), np.std(scores)))
plt.plot(y_val_true_df.index, scores, label='MAE validation scores')
plt.legend(loc='lower left')
pd.DataFrame(scores).describe()
scores = mean_absolute_error(y_val_true_df, y_val_pred_df, multioutput='raw_values')
print('MAE score = %f +/- %f' % (np.mean(scores), np.std(scores)))
plt.plot(scores, label='MAE validation scores')
plt.legend(loc='lower left')
pd.DataFrame(scores).describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load skill_score
Step2: Clusters
Step3: Model and observations plots
Step4: Map
|
<ASSISTANT_TASK:>
Python Code:
import os
try:
import cPickle as pickle
except ImportError:
import pickle
run_name = '2015-08-17'
fname = os.path.join(run_name, 'config.pkl')
with open(fname, 'rb') as f:
config = pickle.load(f)
try:
import cPickle as pickle
except ImportError:
import pickle
fname = os.path.join(run_name, 'skill_score.pkl')
with open(fname, 'rb') as f:
skill_score = pickle.load(f)
import numpy as np
from glob import glob
from pandas import Panel
from utilities import nc2df
def load_ncs(run_name):
    # Load every model's netCDF in `run_name/` plus the observations, align
    # them on the observation time index, and return a pandas Panel of
    # station x time x source. Relies on the project helper `nc2df`.
    # NOTE(review): pandas.Panel and DataFrame.ix are long-deprecated —
    # this only runs on old pandas versions.
    fname = '{}-{}.nc'.format
    ALL_OBS_DATA = nc2df(os.path.join(run_name,
                                      fname(run_name, 'OBS_DATA')))
    index = ALL_OBS_DATA.index
    dfs = dict(OBS_DATA=ALL_OBS_DATA)
    # NOTE: the loop variable shadows the `fname` format helper bound above.
    for fname in glob(os.path.join(run_name, "*.nc")):
        if 'OBS_DATA' in fname:
            continue
        else:
            # Model name is the last dash-separated token of the file stem.
            model = fname.split('.')[0].split('-')[-1]
            df = nc2df(fname)
            # FIXME: Horrible work around duplicate times.
            if len(df.index.values) != len(np.unique(df.index.values)):
                kw = dict(subset='index', take_last=True)
                df = df.reset_index().drop_duplicates(**kw).set_index('index')
            # Time-interpolate onto the observation index (gaps up to 30 steps).
            kw = dict(method='time', limit=30)
            df = df.reindex(index).interpolate(**kw).ix[index]
            dfs.update({model: df})
    return Panel.fromDict(dfs).swapaxes(0, 2)
from mpld3 import save_html
import matplotlib.pyplot as plt
from mpld3.plugins import LineLabelTooltip, connect
from utilities import make_map
bbox = config['bbox']
units = config['units']
run_name = config['run_name']
kw = dict(zoom_start=12, line=True, states=False, secoora_stations=False)
mapa = make_map(bbox, **kw)
from glob import glob
from operator import itemgetter
import iris
from pandas import DataFrame, read_csv
fname = '{}-all_obs.csv'.format(run_name)
all_obs = read_csv(os.path.join(run_name, fname), index_col='name')
big_list = []
for fname in glob(os.path.join(run_name, "*.nc")):
if 'OBS_DATA' in fname:
continue
cube = iris.load_cube(fname)
model = fname.split('-')[-1].split('.')[0]
lons = cube.coord(axis='X').points
lats = cube.coord(axis='Y').points
stations = cube.coord('station name').points
models = [model]*lons.size
lista = zip(models, lons.tolist(), lats.tolist(), stations.tolist())
big_list.extend(lista)
big_list.sort(key=itemgetter(3))
df = DataFrame(big_list, columns=['name', 'lon', 'lat', 'station'])
df.set_index('station', drop=True, inplace=True)
groups = df.groupby(df.index)
for station, info in groups:
sta_name = all_obs['station'][all_obs['station'].astype(str) == station].index[0]
for lat, lon, name in zip(info.lat, info.lon, info.name):
location = lat, lon
popup = '<b>{}</b>\n{}'.format(sta_name, name)
mapa.simple_marker(location=location, popup=popup,
clustered_marker=True)
mean_bias = skill_score['mean_bias'].applymap('{:.2f}'.format).replace('nan', '--')
skill = skill_score['rmse'].applymap('{:.2f}'.format).replace('nan', '--')
resolution, width, height = 75, 7, 3
def make_plot():
    """Create an empty time-series figure/axes pair for one station.

    Uses the module-level `width`, `height` and `units` settings; the caller
    is responsible for plotting data and closing the figure.
    """
    figure, axis = plt.subplots(figsize=(width, height))
    axis.grid(True)
    axis.set_ylabel('Sea surface Temperature ({})'.format(units))
    return figure, axis
# Load the model/obs panel for this run and resample everything to a common
# 30-minute grid before comparing series.
dfs = load_ncs(run_name)
dfs = dfs.swapaxes('items', 'major').resample('30min').swapaxes('items', 'major')
# For every station: plot all model series + observations, save an interactive
# HTML figure, and embed it in a map marker popup.
for station in dfs:
    sta_name = all_obs['station'][all_obs['station'].astype(str) == station].index[0]
    df = dfs[station].dropna(axis=1, how='all')
    if df.empty:
        continue  # no data at all for this station
    labels = []
    fig, ax = make_plot()
    for col in df.columns:
        serie = df[col].dropna()
        lines = ax.plot(serie.index, serie, label=col,
                        linewidth=2.5, alpha=0.5)
        if 'OBS_DATA' not in col:
            # Model series: tooltip shows the pre-formatted bias/skill scores.
            text0 = col
            text1 = mean_bias[sta_name][col]
            text2 = skill[sta_name][col]
            tooltip = '{}:\nbias {}\nskill: {}'.format
            labels.append(tooltip(text0, text1, text2))
        else:
            labels.append('OBS_DATA')
    kw = dict(loc='upper center', bbox_to_anchor=(0.5, 1.05), numpoints=1,
              ncol=2, framealpha=0)
    l = ax.legend(**kw)
    l.set_title("")  # Workaround str(None).
    # Attach an mpld3 hover tooltip to every plotted line.
    [connect(fig, LineLabelTooltip(line, name))
     for line, name in zip(ax.lines, labels)]
    html = 'station_{}.html'.format(station)
    save_html(fig, '{}/{}'.format(run_name, html))
    plt.close(fig)
    # Popup embeds the saved HTML figure in an iframe sized from the figure dims.
    popup = "<div align='center'> {} <br><iframe src='{}' alt='image'"
    popup += "width='{}px' height='{}px' frameBorder='0'></div>"
    popup = popup.format('{}'.format(sta_name), html,
                         (width*resolution)+75, (height*resolution)+50)
    kw = dict(popup=popup, width=(width*resolution)+75)
    # Blue marker when only observations exist, green when models matched too.
    if (df.columns == 'OBS_DATA').all():
        kw.update(dict(marker_color="blue", marker_icon="ok"))
    else:
        kw.update(dict(marker_color="green", marker_icon="ok"))
    obs = all_obs[all_obs['station'].astype(str) == station].squeeze()
    mapa.simple_marker(location=[obs['lat'], obs['lon']], **kw)
from utilities import inline_map
# Write the final map and render it inline in the notebook.
mapa.create_map(path=os.path.join(run_name, 'mapa.html'))
inline_map(os.path.join(run_name, 'mapa.html'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.1. Linear Regression with the Advertising database
Step2: What are the features?
Step3: The TV vs. Sales plot shows that, aparently, there is a proportional increasement in Sales as the TV advertising is increassing. Therefore, it is suggested that Sales increass lineraly with the increassment of the TV advertising. The model wich can exlpain the behavior of the data is
Step4: Now, let's see if the data fits a lineal regression model.
Step5: 2.2. Evaluation of the linear regression
Step6: From the summary it is possible to say that the model apparently explains the behavior of the data. Also, if a value for X is chosen (it must lie between the extreme values of the data), the model will predict the answer with 61% probability of being right. This comes from the $R^2$ value. It is important to know that although the linear model has low variability, it has a high bias because of the assumption made about the data's behavior.
Step7: Visualizing iris data
Step8: Preparing data using the split module
Step9: Important steps using scikit-learn to prepare the model
Step10: 3.2. Evaluating The clasification
Step11: 4. KNN algorithm
Step12: KNN with 6 neighbors
Step13: Selection of the best K value
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np  # numeric data library
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import statsmodels.formula.api as smf
import pylab as pl
from sklearn import datasets
%matplotlib inline
from sklearn import datasets
# Advertising dataset from "An Introduction to Statistical Learning";
# columns: TV, Radio, Newspaper spend and the Sales response.
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col= 0) # Data from the book: An introduction to Statistical learning
data.head()
## SEABORN ##
# Scatter each advertising channel against Sales.
sns.pairplot(data, x_vars=['TV','Radio','Newspaper'], y_vars='Sales', size=7, aspect=0.8)
# Seaborn makes it easy to display, manipulate and analyze the data.
### STATSMODELS ###
# Fit an OLS model Sales ~ TV with the formula API.
lm1 = smf.ols(formula='Sales ~ TV', data=data).fit()
# Intercept and slope of the fit.
lm1.params
### SCIKIT-LEARN ###
from sklearn.linear_model import LinearRegression
# Same single-feature regression, scikit-learn style: feature matrix X, target y.
feature_cols = ['TV']
X = data[feature_cols]
y = data.Sales
# instantiate and fit
lm2 = LinearRegression()
lm2.fit(X, y)
# print the coefficients (should match lm1.params)
print lm2.intercept_
print lm2.coef_
# Same pairplot, now with the fitted regression line and confidence band.
sns.pairplot(data, x_vars=['TV','Radio','Newspaper'], y_vars='Sales', size=7, aspect=0.8, kind='reg')
# kind='reg' plots the linear-regression fit plus its confidence intervals.
lm1.summary()
# Full statsmodels regression report (R^2, p-values, confidence intervals...).
from sklearn.datasets import load_iris
iris = load_iris()
type(iris)  # Bunch object exposing the feature matrix (.data) and labels (.target)
X1 = iris.data  # feature matrix (150 x 4): sepal/petal length and width
y1 = iris.target  # class labels 0/1/2, one per flower, mapping to iris.target_names
# Sepal length vs. width, colored by species.
# BUG FIX: the original passed c=y — the Advertising 'Sales' series from the
# previous section (length 200) — which cannot color the 150 iris samples.
# The correct color vector is y1, as used in the petal plot below.
plt.scatter(X1[:, 0], X1[:, 1], c=y1, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
# Petal length vs. width, colored by species.
plt.scatter(X1[:, 2], X1[:, 3], c=y1, cmap=plt.cm.Paired)
plt.xlabel('Petal length')
plt.ylabel('Petal width')
from sklearn.cross_validation import train_test_split
# Hold out 40% of the iris data for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X1, y1, test_size= 0.4, random_state=4)
## 1. IMPORT THE CLASS/MODEL YOU PLAN TO USE ##
from sklearn.linear_model import LogisticRegression
## 2. Instantiate ## (Bring the model into an object or instance)
logreg = LogisticRegression()
## 3. Fit the model with data ##
logreg.fit(X_train,y_train)
## 4. Predict the response for a new observation
X_new = [[3,5,4,2],[5,4,3,2]]
logreg.predict(X_new)
# Predict on the held-out test set and score the model.
y_pred_log = logreg.predict(X_test)
from sklearn import metrics
X_test.shape
print metrics.accuracy_score(y_test,y_pred_log)
from sklearn.neighbors import KNeighborsClassifier
## 2. Instantiate ## (Bring the model into an object or instance)
knn = KNeighborsClassifier() # by default this algorithm compares an observation with the five nearest observations
## 3. Fit the model with data ##
knn.fit(X_train,y_train)
## 4. Predict the response for a new observation
y_pred_knn = knn.predict(X_test)
print metrics.accuracy_score(y_test,y_pred_knn)
knn.predict(X_new)
knn_2 = KNeighborsClassifier(n_neighbors=6)
knn_2.fit(X_train,y_train)
y_pred_knn_2 = knn_2.predict(X_test)
print metrics.accuracy_score(y_test,y_pred_knn)
knn_2.predict(X_new)
# Sweep k from 1 to 25 and record test accuracy for each, to pick the best k.
k_range = range(1,26)
scores = []
for k in k_range:
    knn_k = KNeighborsClassifier(n_neighbors=k)
    knn_k.fit(X_train, y_train)
    y_pred_k = knn_k.predict(X_test)
    scores.append(metrics.accuracy_score(y_test, y_pred_k))
# Accuracy-vs-k curve; the best k sits at the plateau of highest accuracy.
plt.plot(k_range, scores)
plt.xlabel('K values')
plt.ylabel('Testing accuracy')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get epochs
Step2: Run beamformers and look at maximum outputs
Step3: We can also look at the spatial distribution
|
<ASSISTANT_TASK:>
Python Code:
# MNE example: compare LCMV beamformer solutions with three orientation
# constraints (free, surface-normal, max-power) on the sample auditory data.
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import make_lcmv, apply_lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
# Epoch definition: left-auditory events, -200 ms to +500 ms around onset.
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark 2 bad channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       exclude='bads')
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs, rejecting trials with large gradiometer/magnetometer/EOG peaks.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=(None, 0), preload=True, proj=True,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd)
forward = mne.convert_forward_solution(forward, surf_ori=True)
# Compute regularized noise and data covariances
# (noise from the pre-stimulus baseline, data from the post-stimulus window).
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk',
                                   rank=None)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
                                  method='shrunk', rank=None)
evoked.plot(time_unit='s')
# One beamformer run per orientation constraint; plot each max-voxel trace.
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation, voxel: %i', 'Normal orientation, voxel: %i',
                'Max-power orientation, voxel: %i']
colors = ['b', 'k', 'r']
fig, ax = plt.subplots(1)
max_voxs = list()
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
    # compute unit-noise-gain beamformer with whitening of the leadfield and
    # data (enabled by passing a noise covariance matrix)
    filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori=pick_ori,
                        weight_norm='unit-noise-gain', rank=None)
    print(filters)
    # apply this spatial filter to source-reconstruct the evoked data
    stc = apply_lcmv(evoked, filters, max_ori_out='signed')
    # View activation time-series in maximum voxel at 100 ms:
    time_idx = stc.time_as_index(0.1)
    max_idx = np.argmax(np.abs(stc.data[:, time_idx]))
    # we know these are all left hemi, so we can just use vertices[0]
    max_voxs.append(stc.vertices[0][max_idx])
    ax.plot(stc.times, stc.data[max_idx, :], color, label=desc % max_idx)
ax.set(xlabel='Time (ms)', ylabel='LCMV value',
       title='LCMV in maximum voxel')
ax.legend(loc='lower right')
mne.viz.utils.plt_show()
# Plot last stc in the brain in 3D with PySurfer if available,
# marking each beamformer's peak voxel with a colored focus.
brain = stc.plot(hemi='lh', views='lat', subjects_dir=subjects_dir,
                 initial_time=0.1, time_unit='s', smoothing_steps=5)
for color, vertex in zip(colors, max_voxs):
    brain.add_foci([vertex], coords_as_verts=True, scale_factor=0.5,
                   hemi='lh', color=color)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we must import the data from CitiBike's website. The data accessed throught the 'Get the data' link at the bottom left corner of the following page http
Step2: We see that this data is has much more information than we need. For example, it includes total annual membership, which we do not need for this analysis. Thus, we have removed this column to prioritize the data that will most impact daily and monthly usership.
Step3: In order to manipulate and sort the data based on day of the week and month, we must convert the date information from a .csv format to python datetime format.
Step4: Now that Python recognizes the data in the Date column as calendar dates, we can add a column to classify each data point by day of the week and by month. This will ultimately allow us to compare usage on Monday vs. Tuesday, e.g., or January vs. February.
Step5: In order to get a sense for how much data we are working with, we need to pull the size and shape. This is relevant to see how many data points we have.
Step6: We now have all the useful data columns, but the index column needs to be replaced. We want to analyze this data by date, so we need to make the date column the index.
Step7: Next, we will retitle each column so that it's easier to understand what we're looking at.
Step8: To begin our analysis, we will add a column that shows the average mileage per trip each day. This can be done using a formula that divides the total miles for each day by the number of corresponding trips for each day, to derive an average trip length for each day.
Step9: To finalize the daily average comparisons, we need to create individual dataframes for each day of the week.
Step10: Now that we have individual dataframes for each day of the week, we can create larger dataframes for week days and weekends.
Step11: Now that we have these dataframes compiled, we can start to pull some insights. For instance, we can calculate the average number of miles a rider travels on a weekday vs. a weekend.
Step12: From this comparison, we can see that riders typically travel 50% farther on weekend trips than weekdays.
Step13: Based on the averages calculated above, we can plot how far riders travel on weekend rides vs. weekday rides.
Step14: Another interesting comparison is between months. We would like to examine and compare the total number of miles traveled by CitiBike users in January, February, and March. A higher number of miles traveled in a given month would indicate more rentals and/or more miles traveled per use. Either way, there is a trend toward heavier bike usage.
Step15: Based on the analysis, total miles traveled was actually highest in February, disproving our original hypothesis. One theory for why this may be the case is that riders are on vacation in the beginning of February and, therefore, are not commuting to work. Alternatively, blizzards and poor weather may have kept them on the train and in cabs, or working from home. Finally, it could be the case that February had more opportunities for bike rides (perhaps this was popular on Valentine’s Day weekend as couples sought out activities to do together), or CitiBike ran a promotion for part of the month to encourage bike rentals.
Step16: We now use the values above to create a dataframe that we can use to plot daily average miles by the day of the week.
Step17: The analysis shows a steady downward trend throughout the week – riders are much more likely to rent a CitiBike on Monday than on Thursday. This is fairly logical. It’s easy to imagine that riders are energized on Monday after a relaxing weekend, and tired, busy, and distracted later in the week. This means that they have more energy and are more willing to ride a bike to work at the beginning of the week. Only those who are going short distances want to ride a bike.
Step18: We already know that weekend riders travel much farther than weekday riders, but we anticipated that there would be some difference in Saturday vs. Sunday usage. Instead, data on average miles per trip was almost identical between the two days. Sunday is marginally higher, but it is fair to conclude that weekend travel distance is split evenly across Saturday and Sunday.
|
<ASSISTANT_TASK:>
Python Code:
import sys # system module
import pandas as pd # data package
import matplotlib as mpl # graphics package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np # foundation for pandas
import csv # package for converting csv
from collections import defaultdict # will be used to convert dates
import seaborn as sns # advanced graphics
import urllib.request # package to read url
%matplotlib inline
url = "data:application/octet-stream;charset=utf-8,Date%2CTrips%20over%20the%20past%2024-hours%20(midnight%20to%2011%3A59pm)%2CMiles%20traveled%20today%20(midnight%20to%2011%3A59%20pm)%2CTotal%20Annual%20Members%20(All%20Time)%2C24-Hour%20Passes%20Purchased%20(midnight%20to%2011%3A59%20pm)%2C3-Day%20Passes%20Purchased%20(midnight%20to%2011%3A59%20pm)%0A1%2F1%2F17%2C16009%2C50746%2C206623%2C1161%2C16%0A1%2F2%2F17%2C8918%2C21394%2C206672%2C127%2C10%0A1%2F3%2F17%2C14187%2C25058%2C206724%2C40%2C12%0A1%2F4%2F17%2C34006%2C69445%2C206774%2C448%2C23%0A1%2F5%2F17%2C28382%2C52401%2C206840%2C197%2C9%0A1%2F6%2F17%2C24173%2C48939%2C206873%2C152%2C11%0A1%2F7%2F17%2C4425%2C23556%2C206884%2C17%2C4%0A1%2F8%2F17%2C6416%2C22756%2C206897%2C21%2C2%0A1%2F9%2F17%2C15852%2C35069%2C206922%2C28%2C2%0A1%2F10%2F17%2C23218%2C46074%2C206943%2C61%2C10%0A1%2F11%2F17%2C32400%2C62766%2C206967%2C195%2C22%0A1%2F12%2F17%2C39766%2C84313%2C207003%2C435%2C22%0A1%2F13%2F17%2C33602%2C75661%2C207039%2C361%2C22%0A1%2F14%2F17%2C13819%2C43090%2C207067%2C227%2C21%0A1%2F15%2F17%2C17383%2C50738%2C207100%2C454%2C13%0A1%2F16%2F17%2C24106%2C51613%2C207141%2C410%2C23%0A1%2F17%2F17%2C18643%2C40692%2C207176%2C61%2C10%0A1%2F18%2F17%2C30847%2C57814%2C207202%2C134%2C12%0A1%2F19%2F17%2C36543%2C71690%2C207239%2C351%2C17%0A1%2F20%2F17%2C26736%2C55837%2C207263%2C172%2C14%0A1%2F21%2F17%2C26610%2C64662%2C207310%2C901%2C24%0A1%2F22%2F17%2C20523%2C49368%2C207340%2C500%2C12%0A1%2F23%2F17%2C15816%2C37551%2C207370%2C47%2C15%0A1%2F24%2F17%2C13165%2C35067%2C207385%2C24%2C3%0A1%2F25%2F17%2C36918%2C70769%2C207423%2C274%2C20%0A1%2F26%2F17%2C32991%2C63675%2C207470%2C205%2C19%0A1%2F27%2F17%2C32385%2C70104%2C207508%2C211%2C18%0A1%2F28%2F17%2C21300%2C53951%2C207534%2C381%2C14%0A1%2F29%2F17%2C22522%2C62773%2C207588%2C540%2C19%0A1%2F30%2F17%2C29607%2C59796%2C207641%2C157%2C23%0A1%2F31%2F17%2C25163%2C48787%2C207671%2C54%2C13%0A2%2F1%2F17%2C35198%2C77344%2C207712%2C180%2C11%0A2%2F2%2F17%2C34511%2C70595%2C207756%2C193%2C21%0A2%2F3%2F17%2C29622%2C6
4375%2C207784%2C169%2C25%0A2%2F4%2F17%2C19204%2C45120%2C207819%2C366%2C8%0A2%2F5%2F17%2C20059%2C50532%2C207845%2C310%2C15%0A2%2F6%2F17%2C33405%2C72546%2C207884%2C272%2C33%0A2%2F7%2F17%2C23047%2C57685%2C207914%2C48%2C13%0A2%2F8%2F17%2C40416%2C90384%2C207962%2C448%2C26%0A2%2F9%2F17%2C0%2C0%2C207969%2C0%2C0%0A2%2F10%2F17%2C1995%2C8308%2C207981%2C11%2C0%0A2%2F11%2F17%2C8958%2C26272%2C207988%2C159%2C3%0A2%2F12%2F17%2C5800%2C26468%2C208005%2C24%2C3%0A2%2F13%2F17%2C23851%2C58682%2C208028%2C68%2C14%0A2%2F14%2F17%2C29656%2C77900%2C208050%2C152%2C11%0A2%2F15%2F17%2C32046%2C72348%2C208087%2C151%2C23%0A2%2F16%2F17%2C29116%2C63479%2C208112%2C98%2C9%0A2%2F17%2F17%2C28730%2C67472%2C208148%2C222%2C27%0A2%2F18%2F17%2C30569%2C99111%2C208205%2C1896%2C79%0A2%2F19%2F17%2C36962%2C125000%2C208290%2C3450%2C49%0A2%2F20%2F17%2C32286%2C91400%2C208371%2C1355%2C46%0A2%2F21%2F17%2C32637%2C84708%2C208444%2C292%2C37%0A2%2F22%2F17%2C38111%2C90486%2C208519%2C399%2C25%0A2%2F23%2F17%2C42335%2C98392%2C208580%2C752%2C60%0A2%2F24%2F17%2C46946%2C117022%2C208685%2C1400%2C69%0A2%2F25%2F17%2C33342%2C104309%2C208803%2C1870%2C38%0A2%2F26%2F17%2C24713%2C75196%2C208880%2C771%2C21%0A2%2F27%2F17%2C35975%2C79961%2C208946%2C348%2C28%0A2%2F28%2F17%2C41719%2C92669%2C209027%2C443%2C43%0A3%2F1%2F2017%2C40487%2C85690%2C209136%2C363%2C33%0A3%2F2%2F2017%2C35627%2C73635%2C209214%2C287%2C24%0A3%2F3%2F2017%2C31042%2C68544%2C209274%2C261%2C29%0A3%2F4%2F2017%2C15645%2C43108%2C209307%2C230%2C10%0A3%2F5%2F2017%2C15919%2C50924%2C209353%2C275%2C12%0A3%2F6%2F2017%2C32456%2C72517%2C209438%2C301%2C25%0A3%2F7%2F2017%2C29605%2C63538%2C209486%2C166%2C21%0A3%2F8%2F2017%2C43339%2C101349%2C209554%2C621%2C36%0A3%2F9%2F2017%2C45070%2C99193%2C209625%2C688%2C22%0A3%2F10%2F2017%2C18394%2C41929%2C209654%2C79%2C13%0A3%2F11%2F2017%2C15185%2C48379%2C209676%2C154%2C21%0A3%2F12%2F2017%2C13437%2C38128%2C209693%2C149%2C5%0A3%2F13%2F2017%2C27343%2C58708%2C209721%2C180%2C11%0A3%2F14%2F2017%2C0%2C0%2C209738%2C0%2C0%0A3%2F15%2F2017%2C0%2C0%2C209757%2C0%2C0%
0A3%2F16%2F2017%2C0%2C0%2C209769%2C0%2C0%0A3%2F17%2F2017%2C7096%2C25327%2C209791%2C132%2C12%0A3%2F18%2F2017%2C4105%2C17356%2C209813%2C79%2C6%0A3%2F19%2F2017%2C10550%2C33344%2C209845%2C304%2C10%0A3%2F20%2F2017%2C27285%2C60121%2C209898%2C266%2C19%0A3%2F21%2F2017%2C36732%2C77937%2C209966%2C420%2C36%0A3%2F22%2F2017%2C26805%2C55434%2C210014%2C120%2C8%0A3%2F23%2F2017%2C29881%2C74320%2C210053%2C246%2C17%0A3%2F24%2F2017%2C34009%2C82641%2C210094%2C436%2C27%0A3%2F25%2F2017%2C29645%2C86293%2C210161%2C1540%2C44%0A3%2F26%2F2017%2C19893%2C55139%2C210212%2C483%2C17%0A3%2F27%2F2017%2C26288%2C59584%2C210279%2C293%2C29%0A3%2F28%2F2017%2C21463%2C45798%2C210338%2C75%2C9%0A3%2F29%2F2017%2C42398%2C91378%2C210444%2C762%2C86%0A3%2F30%2F2017%2C39732%2C90917%2C210504%2C601%2C47%0A3%2F31%2F2017%2C6943%2C23459%2C210549%2C19%2C4"
# Fetch the CSV (embedded above as a data: URL) and load it into pandas.
data_file = urllib.request.urlopen(url) # this code allows python to access the information directly from the source website
CitiBike = pd.read_csv(data_file)
print ('Variable dtypes:\n', CitiBike.dtypes)
CitiBike.head()
# Drop the annual-membership and pass-purchase columns; keep Date, trips, miles.
CitiBike.drop(CitiBike.columns[[3,4,5]], axis = 1, inplace = True)
CitiBike.head()
# Parse the Date column so pandas can do calendar arithmetic on it.
CitiBike['Date'] = pd.to_datetime(CitiBike['Date'])
CitiBike.head ()
CitiBike.dtypes
# Classify each row by day of the week and by month.
CitiBike['Day of Week'] = CitiBike['Date'].dt.weekday_name
CitiBike.head()
CitiBike['Month'] = CitiBike['Date'].dt.month
CitiBike.head()
print ("The number of rows and columns are ", CitiBike.shape, "respectively")
# Index by date so the data can be sliced/analyzed by calendar day.
CitiBike = CitiBike.set_index ('Date')
CitiBike.head()
# Friendlier column names.
titles = ['Total Trips', 'Total Miles', 'Day of Week', 'Month']
CitiBike.columns = titles
CitiBike.head()
# Average trip length per day = daily miles / daily trips.
CitiBike['Average Miles per Trip'] = CitiBike['Total Miles'] / CitiBike['Total Trips']
CitiBike.head()
CitiBike.shape
# Per-day-of-week and per-month views of the data.
CitiBike [CitiBike['Day of Week'] == 'Sunday']
CitiBike [CitiBike['Day of Week'] == 'Monday']
CitiBike [CitiBike['Day of Week'] == 'Tuesday']
CitiBike [CitiBike['Day of Week'] == 'Wednesday']
CitiBike [CitiBike['Day of Week'] == 'Thursday']
CitiBike [CitiBike['Day of Week'] == 'Friday']
CitiBike [CitiBike['Day of Week'] == 'Saturday']
CitiBike [CitiBike['Month'] == 1]
CitiBike [CitiBike['Month'] == 2].head ()
# Weekend vs. weekday partitions.
Weekend_List = ['Saturday', 'Sunday']
Weekday_List = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
CitiBike [CitiBike ['Day of Week'].isin (Weekend_List)].head ()
CitiBike [CitiBike ['Day of Week'].isin (Weekday_List)].head ()
# Average trip length on weekends vs. weekdays.
Weekend_Chart = CitiBike [CitiBike ['Day of Week'].isin (Weekend_List)]
Weekend_Average = Weekend_Chart[['Average Miles per Trip']].mean ()
# BUG FIX: the original chained .head() here, so the "weekday average" was
# computed from only the first five weekday rows; now it uses all weekday
# rows, consistent with the weekend computation above.
Weekday_Chart = CitiBike [CitiBike ['Day of Week'].isin (Weekday_List)]
Weekday_Average = Weekday_Chart[['Average Miles per Trip']].mean ()
print ("The average miles riders cover on the weekend are", Weekend_Average)
print ("The average miles riders cover on weekdays are", Weekday_Average)
# Bar chart comparing the weekday and weekend trip-length averages.
Average_Mileage = pd.DataFrame ({'Weekdays' : Weekday_Average, 'Weekends' : Weekend_Average})
Average_Mileage = Average_Mileage [['Weekdays', 'Weekends']]
print (Average_Mileage)
fig, ax = plt.subplots(1)
Average_Mileage.plot(ax=ax, kind = 'bar', title = 'Average Miles on weekends vs. Weekdays Q1')
ax.legend(['Weekdays', 'Weekends'], loc = 'best')
ax.set_ylabel('Miles')
ax.set_ylim (0,3.5)
# Total miles traveled per month (Jan, Feb, Mar).
January_Miles = CitiBike [CitiBike['Month'] == 1]
January_Miles_Total = January_Miles [['Total Miles']].sum ()
February_Miles = CitiBike [CitiBike['Month'] == 2]
February_Miles_Total = February_Miles [['Total Miles']].sum ()
March_Miles = CitiBike [CitiBike['Month'] == 3]
March_Miles_Total = March_Miles [['Total Miles']].sum ()
print (January_Miles_Total)
print (February_Miles_Total)
print (March_Miles_Total)
# Assemble the monthly totals in calendar order and chart them.
Total_Mileage = pd.DataFrame ({'January' : January_Miles_Total,
                               'February' : February_Miles_Total,
                               'March' : March_Miles_Total})
Total_Mileage = Total_Mileage[['January', 'February', 'March']]
print (Total_Mileage)
fig, ax = plt.subplots(1)
Total_Mileage.plot(ax=ax, kind = 'bar', title = 'Total Miles Covered per Month Q1')
ax.legend(['JAN', 'FEB', 'MAR'], loc='best')
ax.set_xlabel('Month')
ax.set_ylabel('Total Miles')
ax.set_ylim (0,2100000)
# Average trip length for each individual day of the week.
# Monday
Monday_Data = CitiBike [CitiBike['Day of Week'] == 'Monday']
Monday_Miles = Monday_Data[['Average Miles per Trip']].mean ()
# Tuesday
Tuesday_Data = CitiBike [CitiBike['Day of Week'] == 'Tuesday']
Tuesday_Miles = Tuesday_Data[['Average Miles per Trip']].mean ()
# Wednesday
Wednesday_Data = CitiBike [CitiBike['Day of Week'] == 'Wednesday']
Wednesday_Miles = Wednesday_Data[['Average Miles per Trip']].mean ()
# Thursday
Thursday_Data = CitiBike [CitiBike['Day of Week'] == 'Thursday']
Thursday_Miles = Thursday_Data[['Average Miles per Trip']].mean ()
# Friday
Friday_Data = CitiBike [CitiBike['Day of Week'] == 'Friday']
Friday_Miles = Friday_Data[['Average Miles per Trip']].mean ()
# Saturday
Saturday_Data = CitiBike [CitiBike['Day of Week'] == 'Saturday']
Saturday_Miles = Saturday_Data[['Average Miles per Trip']].mean ()
# Sunday
Sunday_Data = CitiBike [CitiBike['Day of Week'] == 'Sunday']
Sunday_Miles = Sunday_Data[['Average Miles per Trip']].mean ()
print (Monday_Miles) # to confirm that code is working as intended and returning desired results
# Group the per-day averages into weekday and weekend frames, in day order.
Weekday_Daily_Mileage = pd.DataFrame ({'Monday' : Monday_Miles,
                                       'Tuesday' : Tuesday_Miles,
                                       'Wednesday' : Wednesday_Miles,
                                       'Thursday' : Thursday_Miles,
                                       'Friday' : Friday_Miles})
Weekend_Daily_Mileage = pd.DataFrame ({'Saturday' : Saturday_Miles,
                                       'Sunday' : Sunday_Miles})
Weekday_Daily_Mileage = Weekday_Daily_Mileage[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']]
Weekend_Daily_Mileage = Weekend_Daily_Mileage[['Saturday', 'Sunday']]
print (Weekday_Daily_Mileage)
print (Weekend_Daily_Mileage)
Weekday_Daily_Mileage.head ()
# Weekday bar chart of average miles per trip.
fig, ax = plt.subplots(1)
Weekday_Daily_Mileage.plot(ax=ax, kind = 'bar', title = 'Daily Weekday Average Miles per Rider Q1')
ax.legend(['MON', 'TUE', 'WED', 'THU', 'FRI'], loc='best')
ax.set_xlabel('Days of the week (Weekday)')
ax.set_ylabel('Average number of miles')
ax.set_ylim (0,3.0)
# Weekend horizontal bar chart.
fig, ax = plt.subplots(1)
Weekend_Daily_Mileage.plot(ax=ax,
                           kind = 'barh',
                           title = 'Daily Weekend Average Miles per Rider Q1')
ax.legend(['SAT', 'SUN'], loc='best')
ax.set_ylabel('Day')
ax.set_xlabel('Average number of miles')
ax.set_xlim (0,3.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ejercicio Weighted Networks
Step2: Imprima la matriz de adyasencia
|
<ASSISTANT_TASK:>
Python Code:
# Undirected edge list; each edge is stored once, as a single ordered tuple.
edges = set([(1,2), (2,3), (2,4), (2,5), (4,5), (4,6), (5,6), (4,7)])

def get_vecinos(nodo):
    """Return the set of neighbors of `nodo` in the global `edges` set."""
    vecinos = set()  # empty set to collect the neighbors
    for f, t in edges:
        if f == nodo:
            vecinos.add(t)
        if t == nodo:
            vecinos.add(f)
    return vecinos

vecinos = get_vecinos(2)
N = len(vecinos)
# Maximum possible number of links among the neighbors: N choose 2.
posibles_links_entre_vecinos = N*(N-1)/2
posibles_links_entre_vecinos
# Every node that appears in at least one edge.
nodos = set()
for f, t in edges:
    nodos.add(f)
    nodos.add(t)
nodos
vecinos
(1,2) in edges

def get_links_vecinos(vecinos):
    """Count the real edges connecting members of `vecinos` to each other.

    Iterating over all ordered pairs covers both (v, i) and (i, v), so each
    undirected edge stored once in `edges` is counted exactly once.
    """
    enlaces_vecinos = 0
    for v in vecinos:
        for i in vecinos:
            if (v, i) in edges:
                enlaces_vecinos = enlaces_vecinos + 1
    return enlaces_vecinos

def get_clustering_coeficient(nodo):
    """Return the local clustering coefficient C_i of `nodo`.

    C_i = 2 * L_i / (k_i * (k_i - 1)), where k_i is the degree of the node
    and L_i the number of links among its neighbors.

    BUG FIX: the original only guarded degree == 1 and raised
    ZeroDivisionError for nodes with no neighbors (degree 0, including nodes
    absent from the graph); any node with fewer than two neighbors now
    returns 0, since no neighbor pairs exist.
    """
    numero_vecinos = get_vecinos(nodo)
    enlaces_entre_vecinos = get_links_vecinos(numero_vecinos)
    if len(numero_vecinos) < 2:
        ci = 0
    else:
        ci = (2*enlaces_entre_vecinos)/(len(numero_vecinos)*(len(numero_vecinos)-1))
    return ci

get_clustering_coeficient(2)
# Clustering coefficient of every node in the graph.
for j in nodos:
    coeficiente = get_clustering_coeficient(j)
    print("para el nodo: "+ str(j) + " El coeficiente de clustering es: " + str(coeficiente))
import matplotlib
%matplotlib inline
import networkx as nx
# Unweighted version of the same graph, kept for reference:
#edges = set([('a','b'), ('a','c'), ('a','d'), ('a','e'), ('a','f'), ('c','f'), ('b','h'), ('f','j'), ('f','g'), ('j','g'), ('g','k'), ('g','h'), ('k','h')])
# Build the weighted graph. NOTE(review): ('c','f',0.2) appears twice in the
# list; nx.Graph keeps a single c-f edge, so the duplicate is harmless.
G = nx.Graph()
G.add_weighted_edges_from([('a','b',0.3),('a','c',1),('a','d',0.9),('a','e',1),('a','f',0.4),('c','f',0.2),('c','f',0.2),('b','h',0.2),('f','j',0.8),('f','g',0.9),('j','g',0.6),('g','k',0.4),('g','h',0.2),('k','h',1)])
nx.draw_networkx(G)
# Sum of all edge weights.
G.size(weight='weight')
# Weighted adjacency matrix of the graph.
A = nx.adjacency_matrix(G)
print(A.todense())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We begin by defining a set of training points. This is the set which the classifier will use to infer the data classification function. Each row represents a data point, with x,y coordinates and classification value.
Step2: To understand the data set, we can plot the points from both classes (1 and 0). Points of class 1 are in black, and points from class 0 in red.
Step3: The SVC uses pandas data frames to represent data. The data frame is a convenient data structure for tabular data, which enables column labels.
Step4: We need to select the set of columns with the data features. In our example, those are the x and y coordinates.
Step5: We are now able to build and train the classifier.
Step6: The classifier is now trained with the fit points, and is ready to be evaluated with a set of test points, which have a similar structure to the fit points
Step7: We separate the features and values to make clear were the data comes from.
Step8: We build the test points dataframe with the features.
Step9: We can add the values to the dataframe.
Step10: Right now we have a dataframe similar to the one with the fit points. We'll use the classifier to add a fourth column with the predicted values. Our goal is to have the same value in both real_value and predicted_value columns.
Step11: The classifier is pretty successful at predicting values from the x and y coordinates. We may also apply the classifier to the fit points - it's somewhat pointless, because those are the points used to infer the data classification function.
Step12: To better understand the data separation between values 1 and 0, we'll plot both the fit points and the test points.
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import svm
import pandas as pd
import pylab as pl
import seaborn as sns
%matplotlib inline
# Training points: [x, y, class] rows; class is 1 or 0.
fit_points = [
    [2,1,1],
    [1,2,1],
    [3,2,1],
    [4,2,0],
    [4,4,0],
    [5,1,0]
]
# Plot the two classes: class 1 in black, class 0 in red.
sns.set(style="darkgrid")
pl.scatter([point[0] if point[2]==1 else None for point in fit_points],
           [point[1] for point in fit_points],
           color = 'black')
pl.scatter([point[0] if point[2]==0 else None for point in fit_points],
           [point[1] for point in fit_points],
           color = 'red')
pl.grid(True)
pl.show()
# Tabular view of the training data.
df_fit = pd.DataFrame(fit_points, columns=["x", "y", "value"])
print(df_fit)
# Feature columns used to train the classifier.
train_cols = ["x", "y"]
# Train an SVM classifier on the (x, y) -> value mapping.
clf = svm.SVC()
clf.fit(df_fit[train_cols], df_fit.value)
# Test points with known classes, kept separate from the features.
test_points = [
    [5,3],
    [4,5],
    [2,5],
    [2,3],
    [1,1]
]
test_points_values = [0,0,0,1,1]
df_test = pd.DataFrame(test_points, columns=['x','y'])
print(df_test)
df_test['real_value'] = test_points_values
print(df_test)
# Predicted classes; the goal is for this column to match real_value.
df_test['predicted_value'] = clf.predict(test_points)
print(df_test)
# Predictions on the training set itself.
# NOTE(review): the column name is the empty string '' — presumably it was
# meant to be 'predicted_value'; nothing below reads it, so behavior is kept.
df_fit[''] = clf.predict([x[0:2] for x in fit_points])
print(df_fit)
# Overlay fit points (filled) and test points (hollow), colored by class.
sns.set(style="darkgrid")
for i in range(0,2):
    pl.scatter(df_fit[df_fit.value==i].x,
               df_fit[df_fit.value==i].y,
               color = 'black' if i == 1 else 'red')
    pl.scatter(df_test[df_test.predicted_value==i].x,
               df_test[df_test.predicted_value==i].y,
               marker='o',
               facecolor='none',
               color='black' if i == 1 else 'red')
pl.grid(True)
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Réponse
Step2: Réponse
Step3: Réponse
Step4: Question 3
Step5: Question 4
Step6: Réponse
Step7: Question 5
Step8: Réponse
Step9: Expliquez comment prolonger $f$ par continuité aux points $x=0$ et $x=1$.
Step10: Le vecteur $w=(0, -6, -1, -8, 10)$ est-il dans ce sous-espace vectoriel?
|
<ASSISTANT_TASK:>
Python Code:
# Prime factorizations of a few integers.
from sympy import factorint
factorint(6)
factorint(24480)
factorint(88305875025920)
# Pretty-print sympy output with MathJax in the notebook.
from sympy import init_printing
init_printing(use_latex='mathjax')
# Roots of x^3 - 3x^2 - 5, exact then numeric.
from sympy import solve,N
from sympy.abc import x
racines = solve(x**3-3*x**2-5)
racines
for racine in racines:
    print racine.n()
# Parametric 3D surface plot.
%matplotlib inline
from sympy.plotting import plot3d_parametric_surface
from sympy import cos,sin,tan,log,pi
from sympy.abc import u,v
plot3d_parametric_surface(cos(u)*sin(v), sin(u)*sin(v),cos(v)+log(tan(v/2))+u, (u,0,5*pi), (v,0.01,1))
# Study of the quartic p: critical points, concavity, plot, roots, areas.
from sympy.abc import x
p = -x**4+28*x**3-221*x**2+350*x+600
p
from sympy import diff,solve
# Critical points: zeros of p'.
p_optimum = solve(diff(p, x), x)
p_optimum
from sympy import N
map(N, p_optimum)
# Second derivative evaluated at each critical point (sign gives min/max).
p_xx = diff(p, x, x)
[(a, p_xx.subs(x, a).n()) for a in sorted(p_optimum)]
from sympy import plot
plot(p, (x,-5, 20), ylim=(-1000,1000))
p_racines = solve(p, x)
p_racines
# Area under p over two intervals between its roots.
from sympy import integrate
aire = integrate(p, (x,-1,4)) + integrate(p, (x,10, 15))
aire
aire.n()
# Continuity of f(x) = x^(x/(1-x)) at x = 0, 1 and at infinity, via limits
# (direct substitution is undefined at these points).
from sympy import oo
from sympy.abc import x
f = x**(x/(1-x))
f.subs(x,0), f.subs(x,1), f.subs(x,oo)
from sympy import limit
limit(f, x, 0)
limit(f, x, 1)
limit(f, x, oo)
# Row-reduce the span of v1..v5, then test whether w lies in that span.
v1=(-8, 1, -10, 1, 6)
v2=(-6, -10, 2, 10, -3)
v3=(-2, 8, 10, 1, 10)
v4=(-14, -9, -8, 11, 3)
v5=(-2, -3, 5, -8, -6)
from sympy import Matrix
M = Matrix([v1, v2, v3, v4, v5])
M
Mreduite, pivots = M.rref()
Mreduite
w = (0, -6, -1, -8, 10)
M = Matrix([v1, v2, v3, v4, v5, w])
M.transpose().rref()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: step 1. read in the data, create word dictionary, created one-hot vectors for each word
Step2: step 2. create tensorflow word2vec model
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
# load data
# Read the corpus: one sentence per line; strip periods and newlines, lowercase.
with open('darksouls_training.txt', 'r') as fh:
    training = [sent.replace('.','').replace('\n', '').lower() for sent in fh.readlines()]
# with open('darksouls_test.txt', 'r') as fh:
#     test = [sent.replace('.','').replace('\n', '').lower() for sent in fh.readlines()]
# create vocabulary: flatten every sentence into one word list (duplicates kept,
# deduplicated by the set() below)
word_list = []
for sent in training:
    for word in sent.split(' '):
        word_list.append(word)
# for sent in test:
#     for word in sent.split(' '):
#         word_list.append(word)
voc = set(word_list)
# create one-hot vector for each word: bidirectional word <-> integer index maps.
# NOTE(review): `voc` is a set, so the word -> index assignment depends on set
# iteration order and is not reproducible across runs.
word2int = {}
int2word = {}
for ind, word in enumerate(voc):
    word2int[word] = ind
    int2word[ind] = word
# split the sentences into token lists
sent_train = []
for sent in training:
    sent_train.append(sent.split(' '))
# sent_test = []
# for sent in test:
#     sent_test.append(sent.split(' '))
# create word pairs (skip-gram): pair each target word with every neighbour
# within WINDOW_SIZE positions on either side.  The slice end may exceed
# len(sentence), which Python slicing tolerates.
data_train = []
WINDOW_SIZE = 5
for sentence in sent_train:
    for ind, word in enumerate(sentence):
        for nb_word in sentence[max(ind - WINDOW_SIZE, 0) : min(ind + WINDOW_SIZE, len(sentence)) + 1] :
            # NOTE(review): comparing by string also drops pairs where a
            # neighbour happens to be the same word as the target.
            if nb_word != word:
                data_train.append([word, nb_word])
# convert to one-hot
def to_one_hot(data_point_index, vocab_size):
    """Return the one-hot encoding of ``data_point_index`` as a length
    ``vocab_size`` float vector (all zeros except a 1 at the index)."""
    # Row `data_point_index` of the identity matrix is exactly that encoding
    # (float64, matching the original np.zeros-based construction).
    return np.eye(vocab_size)[data_point_index]
# Peek at the first (target, context) pair.
data_train[0]
# Encode every (target, context) pair as one-hot input / label matrices.
vocab_size = len(voc)
x_train = np.asarray([to_one_hot(word2int[target], vocab_size)
                      for target, _ in data_train])
y_train = np.asarray([to_one_hot(word2int[context], vocab_size)
                      for _, context in data_train])
# Placeholders for one-hot inputs and context labels (TensorFlow 1.x graph API).
x = tf.placeholder(dtype=tf.float32, shape=(None, len(voc)))
y_label = tf.placeholder(dtype=tf.float32, shape=(None, len(voc)))
# hidden layer: the learned word embeddings live in W1 (|V| x EMBEDDING_DIM)
EMBEDDING_DIM = 5
W1 = tf.Variable(tf.random_normal([len(voc), EMBEDDING_DIM]))
b1 = tf.Variable(tf.random_normal([EMBEDDING_DIM]))
hidden_rep = tf.add(tf.matmul(x, W1), b1)
# output layer: project the embedding back to vocabulary size, softmax over words
W2 = tf.Variable(tf.random_normal([EMBEDDING_DIM, len(voc), ]))
b2 = tf.Variable(tf.random_normal([len(voc)]))
pred = tf.nn.softmax(tf.add(tf.matmul(hidden_rep, W2), b2))
# run the model
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# loss function: cross entropy between predicted and true context distributions
cross_entropy_loss = tf.reduce_mean(-tf.reduce_sum(y_label * tf.log(pred),
                                                   reduction_indices=1))
# training step
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy_loss)
# epoch number
n_epoch = 10000
# BUG FIX: `xrange` is Python 2 only (NameError under Python 3); use range().
for epoch in range(n_epoch):
    sess.run(train_step,
             feed_dict={x: x_train, y_label:y_train})
    if epoch % 100 == 0:
        print('epoch {}: loss is '.format(epoch), sess.run(cross_entropy_loss,
                                                           feed_dict={x: x_train, y_label: y_train}))
saver = tf.train.Saver()
saver.save(sess, 'dark_souls_word2vec_model.ckpt')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
# Output document for the NCC / NorESM2-LM atmospheric chemistry (atmoschem)
# realm of the CMIP6 model-documentation specialisations.
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lm', 'atmoschem')
# Document authorship -- record with:
#   DOC.set_author("name", "email")
#   DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Publication status: 0 = do not publish, 1 = publish.
DOC.set_publication_status(0)
# ---------------------------------------------------------------------------
# Each DOC.set_id(...) call below selects one property of the atmoschem
# specialisation (the set_id lines themselves must NOT be edited).  After a
# property is selected, record its value(s) with DOC.set_value(...):
#   * free-text properties take a string,
#   * numeric properties take a plain number,
#   * boolean properties take True or False,
#   * enumerated properties take one or more of the choices listed in the
#     adjacent comment ("Other: [Please specify]" is usually permitted).
# Enumeration spellings follow the CMIP6 controlled vocabulary exactly,
# including its own typos (e.g. "troposhere", "heterogenous").
# All values in this notebook are still TODO.
# ---------------------------------------------------------------------------

# -- 1. Key properties ------------------------------------------------------
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# choices: "troposhere", "stratosphere", "mesosphere", "whole atmosphere",
#          "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# choices: "3D mass/mixing ratio for gas", "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')  # integer
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')  # True/False
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')  # True/False

# -- 2. Key properties / software -------------------------------------------
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')

# -- 3. Key properties / timestep framework ---------------------------------
# choices: "Operator splitting", "Integrated", "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')  # True/False
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# choices: "Explicit", "Implicit", "Semi-implicit", "Semi-analytic",
#          "Impact solver", "Back Euler", "Newton Raphson", "Rosenbrock",
#          "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')

# -- 4. Timestep framework / split operator order (integer ranks) -----------
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')

# -- 5. Key properties / tuning applied -------------------------------------
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')

# -- 6./7. Grid and resolution ----------------------------------------------
DOC.set_id('cmip6.atmoschem.grid.overview')
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')  # True/False
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')  # True/False

# -- 8. Transport -----------------------------------------------------------
DOC.set_id('cmip6.atmoschem.transport.overview')
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')  # True/False
DOC.set_id('cmip6.atmoschem.transport.transport_details')

# -- 9./10. Emissions & concentrations / surface emissions ------------------
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# choices: "Vegetation", "Soil", "Sea surface", "Anthropogenic",
#          "Biomass burning", "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# choices: "Climatology", "Spatially uniform mixing ratio",
#          "Spatially uniform concentration", "Interactive",
#          "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')

# -- 11. Emissions & concentrations / atmospheric emissions -----------------
# choices: "Aircraft", "Biomass burning", "Lightning", "Volcanos",
#          "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# choices: same emission-method enumeration as for surface emissions above
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')

# -- 12. Emissions & concentrations / prescribed concentrations -------------
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')

# -- 13. Gas phase chemistry ------------------------------------------------
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# choices: "HOx", "NOy", "Ox", "Cly", "HSOx", "Bry", "VOCs", "isoprene",
#          "H2O", "Other: [Please specify]"
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')  # True/False
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')  # True/False
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')  # True/False

# -- 14. Stratospheric heterogeneous chemistry ------------------------------
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# choices: "Cly", "Bry", "NOy"
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# choices: "Sulphate", "Polar stratospheric ice",
#          "NAT (Nitric acid trihydrate)", "NAD (Nitric acid dihydrate)",
#          "STS (supercooled ternary solution aerosol particule))"
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')  # True/False
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')  # True/False

# -- 15. Tropospheric heterogeneous chemistry -------------------------------
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# choices: "Sulphate", "Nitrate", "Sea salt", "Dust", "Ice", "Organic",
#          "Black carbon/soot", "Polar stratospheric ice",
#          "Secondary organic aerosols", "Particulate organic matter"
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')  # True/False
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')  # True/False

# -- 16./17. Photo chemistry and photolysis ---------------------------------
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# choices: "Offline (clear sky)", "Offline (with clouds)", "Online"
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create/Open a SampleData dataset, and activate verbose mode
Step2: Copy dataset and get class instance synchronized with new dataset
Step3: Create dataset, overwrite existing file, and automatic removal of dataset files at class instance destruction (autodelete option)
Step4: Getting information on datasets
Step5: Command line tools to get information on dataset
Step6: Dataset interactive visualization
Step7: Basic data items
Step8: Group data items
Step9: Data item attributes (metadata)
Step10: Data arrays
Step11: String arrays
Step12: Structured arrays
Step13: Remove data items
Step14: Image Groups and Image fields
Step16: Creating image groups from image objects
Step18: Creating empty images
Step19: Get image object from Image Group
Step20: Creating and getting image Fields
Step21: Creating a field time serie
Step22: Mesh Groups and Mesh Fields
Step23: Creating a Mesh Group in a dataset
Step24: Creating and getting Mesh Fields
Step25: Getting Mesh objects
Step26: Data Compression
|
<ASSISTANT_TASK:>
Python Code:
from pymicro.core.samples import SampleData as SD
# CREATE dataset: the file `filename` must not exist. Verbose mode OFF
data = SD(filename='my_first_dataset', verbose=False)
# OPEN dataset: the file `filename` must exist. Verbose mode ON
data = SD(filename='my_first_dataset', verbose=True)
# COPY dataset: duplicate an existing dataset and return a SampleData
# instance synchronised with the copy (get_object=True)
data = SD.copy_sample(src_sample_file='source_dataset', dst_sample_file='destination_dataset', get_object=True)
# Create new dataset and overwrite already existing dataset files
data = SD(filename='my_first_dataset', verbose=True, overwrite_hdf5=True)
# Create new dataset with autodelete option ON (dataset files are removed
# when the class instance is destroyed)
data = SD(filename='my_first_dataset', verbose=True, autodelete=True)
# Set autodelete option on an existing instance
data.autodelete = True
# Print dataset index
data.print_index() # --> no option = local root '/' and max depth 3
data.print_index(max_depth=2, local_root='local_root_name') # --> with specified local root and depth
# Print dataset content (list all groups, nodes with detailed information)
data.print_dataset_content() # detailed output, printed in standard output
data.print_dataset_content(short=True, to_file='dataset_information.txt') # short output, written in text file
# Print both index and dataset content in short version --> class string representation
print(data)
# Print only grid groups informations
data.print_grids_info()
# Print content of XDMF file
data.print_xdmf()
# Get the memory disk size of the HDF5 dataset
size, unit = data.get_file_disk_size(convert=True, print_flag=False)
# value not printed, returned in method output and converted to most readable memory unit
# Command-line inspection (IPython shell escapes, not Python code):
# recursive (-r) and detailed (-d) output of h5ls --> also print the content of the data arrays
!h5ls -rd ../data/test_sampledata_ref.h5
# ptdump (PyTables): dump the dataset structure
!ptdump -d ../data/test_sampledata_ref.h5
# detailed (-d) and verbose (-v) output of ptdump
!ptdump -dv ../data/test_sampledata_ref.h5
# Print information on data items content
data.print_node_info(nodename='node_name') # detailed information on a data item (group or array node)
data.print_group_content('group_name') # print information on group childrens
data.print_group_content('group_name', recursive=True) # print information on group childrens recursively
# get data item disk size
size, unit = data.get_node_disk_size(nodename='node_name', print_flag=False, convert=False)
# Visualize dataset organization and content with Vitables
data.pause_for_visualization(Vitables=True, Vitables_path='Path_to_Vitables_executable')
# Visualize spatially organized data with Paraview
data.pause_for_visualization(Paraview=True, Paraview_path='Path_to_Paraview_executable')
# Three equivalent ways to access a data item:
# Dictionary like access
data['data_item_name']
# Attribute like access
data.data_item_name
# generic getter method
data.get_node('data_item_name') # --> returns a Pytables Group or Node object
data.get_node('data_item_name', as_numpy=True) # --> for array data items, returns a numpy array
# Create a group in a dataset with name `group_name`, stored in the group `parent_name`
data.add_group(groupname='group_name', location='parent_name', indexname='group_indexname')
# Create a group and overwrite pre-existing group with the same name + get the created Pytables Group object
group = data.add_group(groupname='group_name', location='parent_name', indexname='group_indexname', replace=True)
# Add attributes from a python dictionary (metadata_dictionary)
data.add_attributes(metadata_dictionary, nodename='node_name')
# get data item attributes (metadata)
data.print_node_attributes(nodename='node_name') # print all attributes of the node
attribute_value = data.get_attribute(attrname='attribute_name', nodename='node_name') # get value of one attribute
mesh_attrs = data.get_dic_from_attributes(nodename='node_name') # get all attributes as a dictionary
# set and get specific `description` attribute for node `node_name`
data.set_description(description="Write your description text here.", node='node_name')
data.get_description('node_name')
# add a numpy array `array` in data item `node_name`
data.add_data_array(location='parent_name', name='node_name', indexname='array_indexname', array=array)
# replace = True allows to overwrite preexisting field with same name
data.add_data_array(location='parent_name', name='node_name', indexname='array_indexname', array=array, replace= True)
# get data array from data item `node_name` -- four equivalent forms
array_node = data.get_node('node_name') # --> returns a Pytables Node object
array = data.get_node('node_name', as_numpy=True) # --> returns a Numpy array
# BUG FIX: the original read `data.['node_name']`, which is a SyntaxError;
# dict-like access uses plain subscription.
array = data['node_name'] # --> returns a Numpy array
array = data.node_name # --> returns a Numpy array
# Add list of strings `List` as a string array `node_name`
data.add_string_array(name='node_name', location='parent_name', indexname='Sarray_indexname', data=List)
# get and decode binary strings stored in a String array (stored as bytes,
# hence the explicit utf-8 decode)
sarray = data['Sarray_indexname']
for string in sarray:
    print(string.decode('utf-8'), end=' ') # prints each string of the String array
# Add structured array from Numpy structured array `structured_array` with Numpy.dtype `table_type`
data.add_table(name='node_name', location='parent_name', indexname='table_indexname', description=table_type,
               data=structured_array)
# Add lines to a structured array node from Numpy array `structured_array` (same dtype as the table)
data.append_table(name='table_indexname', data=structured_array)
# Add columns to a structured array node from a Numpy array `structured_array` with Numpy.dtype `cols_dtype`
data.add_tablecols(tablename='table_indexname', description=cols_dtype, data=structured_array)
# Structured arrays are retrieved just like data arrays (see above).
# Remove data items:
data.remove_node('node_name') # removes a group without childrens or a data array item
data.remove_node('group_name', recursive=True) # remove a Group data item and all its childrens recursively
# remove one or a list of attributes (metadata) from a node
data.remove_attribute(attrname='attribute_name', nodename='node_name')
data.remove_attributes(attr_list=['list','of','attributes','to','remove','from','node'], nodename='node_name')
# Create an Image Group from a Numpy array `field_array` interpreted as a pixel/voxel wise constant scalar field
data.add_image_from_field(field_array=field_array, fieldname='node_name', imagename='group_name',
                          indexname='image_indexname', location='parent_name',
                          description="Write image group description here.", origin=[0.,10.], spacing=[2.,2.])
# Create an Image Group from a Numpy array `field_array` interpreted as a node value scalar field
# (is_elemField=False: values attached to grid nodes rather than cells)
data.add_image_from_field(field_array=field_array, fieldname='node_name', imagename='group_name',
                          indexname='image_indexname', location='parent_name', is_elemField=False,
                          description="Write image group description here.", origin=[0.,10.], spacing=[2.,2.])
# Create an Image Group from a Numpy array `field_array` interpreted as a non scalar field
data.add_image_from_field(field_array=field_array, fieldname='node_name', imagename='group_name',
                          indexname='image_indexname', location='parent_name', is_scalar=False,
                          description="Write image group description here.", origin=[0.,10.], spacing=[2.,2.])
# Set image position and dimensions after creation
data.set_voxel_size(image_group='image_indexname', voxel_size=np.array([4.,4.]))
data.set_origin(image_group='image_indexname', origin=np.array([10.,0.]))
# import BasicTools image object `ConstantRectilinearMesh`
from BasicTools.Containers.ConstantRectilinearMesh import ConstantRectilinearMesh
# Initialize image object, image dimension, origin and pixel/voxel size
image_object = ConstantRectilinearMesh(dim=3)
image_object.SetDimensions((50,50,3))
image_object.SetOrigin([0.,0.,0.])
image_object.SetSpacing([1.,1.,1.]) # pixel/voxel size in each dimension
# Create Image Group in dataset
# (description must be a string literal -- the unquoted text here was a SyntaxError)
data.add_image(image_object, imagename='group_name', indexname='image_indexname', location='parent_name',
               description="Write image group description here.")
data.add_image(imagename='group_name', indexname='image_indexname', location='parent_name',
               description="Write image group description here.")
# Get BasicTools image object from SampleData image group `group_name` including image group fields data arrays
im_object = data.get_image('group_name', with_fields=True)
# Creating a field for image group `group_name` from Numpy array `tensor_field`
data.add_field(gridname='group_name', fieldname='node_name', location='parent_name', indexname='field_indexname',
array=tensor_field, replace=True) # replace = True allows to overwrite preexisting field with same name
# Getting image fields
# --> field returned as Numpy array
field = data.get_field('node_name')
field = data.get_node('node_name', as_numpy=True)
field = data['node_name']
field = data.node_name
# --> field returned as a Pytables Node object
field = data.get_node('node_name')
instants = [1.,10., 100.]
# Add three temporal values for the field `node_name` in image group `group_name` for 3 different time values given in
# `instants` array. Field values are stored in *Numpy* arrays temporal_field_0, temporal_field_1, temporal_field_2
data.add_field(gridname='group_name', fieldname='node_name', location='parent_name', indexname='Field',
array=temporal_field_0, time=instants[0])
# instant 1
data.add_field(gridname='group_name', fieldname='node_name', location='parent_name', indexname='Field',
array=temporal_field_1, time=instants[1])
# instant 2
data.add_field(gridname='group_name', fieldname='node_name', location='parent_name', indexname='Field',
array=temporal_field_2, time=instants[2])
# Import Basictools mesh creation tools
import BasicTools.Containers.UnstructuredMeshCreationTools as UMCT
# Create a Node and Connectivity (elements) array, then create a mesh:
mesh = UMCT.CreateMeshOfTriangles(mesh_nodes, mesh_elements) # mesh of triangles
mesh = UMCT.CreateMeshOf(mesh_nodes, mesh_elements, elemName='tet4') # mesh of tetrahedra
# Create a mesh of a cube with tetrahedron elements
mesh = UMCT.CreateCube(dimensions=[5,5,5],spacing=[2.,2.,2.],ofTetras=True)
# adding node and element tags to the mesh
mesh.nodesTags.CreateTag('nodetag_name', False).SetIds(nodetag_Id_list) # Node tag
mesh.GetElementsOfType('tri3').GetTag('elemtag_name').SetIds(elemtag_Id_list) # Element tag ( of type `tri3`)
# adding fields
mesh.nodeFields['nodal_fieldname'] = nodal_field_array
mesh.elemFields['element_fieldname'] = elem_field_array
# Creating Mesh Group from Mesh object
# mesh is a Basictools mesh object. bin_fields_from_sets options allows to load node and element tags in mesh Group
data.add_mesh(mesh_object=mesh, meshname='meshname', indexname='mesh_indexname', location='mesh_parent',
bin_fields_from_sets=True)
# Creating Mesh group from file
data.add_mesh(file=meshfile_name, meshname='meshname', indexname='mesh_indexname', location='mesh_parent',
bin_fields_from_sets=True)
# creation of the mesh field
data.add_field(gridname='meshname', fieldname='fieldname', array=field_data_array, indexname='field_indexname')
# Creation of a field that is part of a time series (the time value is passed via the `time` keyword)
data.add_field(gridname='meshname', fieldname='fieldname', array=field_data_array, indexname='field_indexname',
               time=time_value)
# Force element field to be defined on boundary elements if the mesh has same number of bulk and boundary elements
# NOTE(review): the bare `time)` here was a SyntaxError; a keyword selecting boundary elements was likely lost
# during extraction -- confirm the intended argument against the SampleData.add_field API.
data.add_field(gridname='meshname', fieldname='fieldname', array=field_data_array, indexname='field_indexname',
               time=time_value)
# getting the inputed array --> no options
field_data_array = data.get_field('fieldname')
# getting the visualization array of an integration point field
field_data_array = data.get_field('fieldname', get_visualisation_field=True)
# getting the unpadded visualization array of an integration point field
field_data_array = data.get_field('fieldname', unpad_field=False, get_visualisation_field=True)
# Get a Basictools mesh object with all content of Mesh group 'meshname' (fields, tags, nodes, elements)
mesh = data.get_mesh('meshname')
# Get a Basictools mesh object without fields (tags, nodes, elements) from Mesh group 'meshname'
mesh = data.get_mesh('meshname', with_fields=False)
# Get a Basictools mesh object without fields and tags (just nodes, elements) from Mesh group 'meshname'
mesh = data.get_mesh('meshname', with_fields=False, with_tags=False)
# Set chunckshape and compression settings for one data item
compression_options = {'complib':'zlib', 'complevel':1, 'shuffle':True}
chunkshape = c_shap # tuple
data.set_chunkshape_and_compression(nodename='nodename_to_compress', compression_options=compression_options,
chunkshape=chunkshape)
# Set chunckshape and compression settings forseveral nodes
compression_options = {'complib':'zlib', 'complevel':1, 'shuffle':True}
data.set_nodes_compression_chunkshape(node_list=['nodename_to_compress1', 'nodename_to_compress2',...],
compression_options=compression_options,
chunkshape=chunkshape)
# Apply lossy compression
compression_options = {'complib':'zlib', 'complevel':1, 'shuffle':True, 'least_significant_digit':2}
data.set_chunkshape_and_compression(nodename='nodename_to_compress', compression_options=compression_options,
chunkshape=chunkshape)
# Apply lossy compression with normalization
compression_options = {'complib':'zlib', 'complevel':1, 'shuffle':True, 'least_significant_digit':2,
normalization='standard'}
data.set_chunkshape_and_compression(nodename='nodename_to_compress', compression_options=compression_options,
chunkshape=chunkshape)
# Apply lossy compression with per-component normalization
compression_options = {'complib':'zlib', 'complevel':1, 'shuffle':True, 'least_significant_digit':2,
normalization='standard-per-component'}
data.set_chunkshape_and_compression(nodename='nodename_to_compress', compression_options=compression_options,
chunkshape=chunkshape)
# Create an array with predefined chunkshape and compression settings
data.add_data_array(name='arrayname', indexname='array_indexname', location='parent_name', array=array,
chunkshape=chunkshape, compression_options=compression_options)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='red'>Python 3 Alert!</font>
Step2: We could also "cast" the type using a function that basically turns integers into floats. This function, unsurprisingly, is called float().
Step3: We will go over functions in much more detail later on in this course, so don't worry if you are confused by the syntax here. Consider this a sneak preview.
Step4: When you import division from the future you won't need to worry about classic division occurring anymore anywhere in your code!
Step5: Variable Assignments
Step6: Now if I call a in my Python script, Python will treat it as the number 5.
Step7: What happens on reassignment? Will Python let us write it over?
Step8: Yes! Python allows you to write over assigned variable names. We can also use the variables themselves when doing the reassignment. Here is an example of what I mean
Step9: The names you use when creating these labels need to follow a few rules
|
<ASSISTANT_TASK:>
Python Code:
# Addition
2+1
# Subtraction
2-1
# Multiplication
2*2
# Division
3/2
# Specifying one of the numbers as a float
3.0/2
# Works for either number
3/2.0
# We can use this float() function to cast integers as floats:
float(3)/2
from __future__ import division
3/2
# Powers
2**3
# Can also do roots this way
4**0.5
# Order of Operations followed in Python
2 + 10 * 10 + 3
# Can use parenthesis to specify orders
(2+10) * (10+3)
# Let's create an object called "a" and assign it the number 5
a = 5
# Adding the objects
a+a
# Reassignment
a = 10
# Check
a
# Check
a
# Use A to redefine A
a = a + a
# Check
a
# Use object names to keep better track of what's going on in your code!
my_income = 100
tax_rate = 0.1
my_taxes = my_income*tax_rate
# Show my taxes!
my_taxes
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The full mapping function can be used to perform either iterative or fragment-based mapping, or a combination of both.
Step2: And for the second side of the read
Step3: Fragment-based mapping
|
<ASSISTANT_TASK:>
Python Code:
from pytadbit.mapping.full_mapper import full_mapping
r_enz = 'HindIII'
! mkdir -p results/iterativ/$r_enz
! mkdir -p results/iterativ/$r_enz/01_mapping
# for the first side of the reads
full_mapping(gem_index_path='/media/storage/db/reference_genome/Homo_sapiens/hg38/hg38.gem',
out_map_dir='results/iterativ/{0}/01_mapping/mapped_{0}_r1/'.format(r_enz),
fastq_path='/media/storage/FASTQs/K562_%s_1.fastq' % (r_enz),
r_enz='hindIII', frag_map=False, clean=True, nthreads=20,
windows=((1,25),(1,30),(1,35),(1,40),(1,45),(1,50),(1,55),(1,60),(1,65),(1,70),(1,75)),
temp_dir='results/iterativ/{0}/01_mapping/mapped_{0}_r1_tmp/'.format(r_enz))
# for the second side of the reads
full_mapping(gem_index_path='/media/storage/db/reference_genome/Homo_sapiens/hg38/hg38.gem',
out_map_dir='results/iterativ/{0}/01_mapping/mapped_{0}_r2/'.format(r_enz),
fastq_path='/media/storage/FASTQs/K562_%s_2.fastq' % (r_enz),
r_enz=r_enz, frag_map=False, clean=True, nthreads=20,
windows=((1,25),(1,30),(1,35),(1,40),(1,45),(1,50),(1,55),(1,60),(1,65),(1,70),(1,75)),
temp_dir='results/iterativ/{0}/01_mapping/mapped_{0}_r2_tmp/'.format(r_enz))
! mkdir -p results/fragment/$r_enz
! mkdir -p results/fragment/$r_enz/01_mapping
# for the first side of the reads
full_mapping(gem_index_path='/media/storage/db/reference_genome/Homo_sapiens/hg38/hg38.gem',
out_map_dir='results/fragment/{0}/01_mapping/mapped_{0}_r1/'.format(r_enz),
fastq_path='/media/storage/FASTQs/K562_%s_1.fastq' % (r_enz),
r_enz=r_enz, frag_map=True, clean=True, nthreads=20,
temp_dir='results/fragment/{0}/01_mapping/mapped_{0}_r1_tmp/'.format(r_enz))
# for the second side of the reads
full_mapping(gem_index_path='/media/storage/db/reference_genome/Homo_sapiens/hg38/hg38.gem',
out_map_dir='results/fragment/{0}/01_mapping/mapped_{0}_r2/'.format(r_enz),
fastq_path='/media/storage/FASTQs/K562_%s_2.fastq' % (r_enz),
r_enz=r_enz, frag_map=True, clean=True, nthreads=20,
temp_dir='results/fragment/{0}/01_mapping/mapped_{0}_r2_tmp/'.format(r_enz))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two common-mid-point (CMP) gathers
Step2: Step 2
Step3: Step 3
Step4: Step 4
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from geoscilabs.seismic.NMOwidget import ViewWiggle, InteractClean, InteractNosiy, NMOstackthree
from SimPEG.utils import download
# Define path to required data files
synDataFilePath = 'http://github.com/geoscixyz/geosci-labs/raw/main/assets/seismic/syndata1.npy'
obsDataFilePath = 'https://github.com/geoscixyz/geosci-labs/raw/main/assets/seismic/obsdata1.npy'
timeFilePath= 'https://github.com/geoscixyz/geosci-labs/raw/main/assets/seismic/time1.npy'
# Download the data
synData = download(synDataFilePath,overwrite=True,verbose=False)
obsData = download(obsDataFilePath,overwrite=True,verbose=False)
timeData = download(timeFilePath,overwrite=True,verbose=False)
# Plot the data
ViewWiggle(synData, obsData)
# Fit hyperbola to clean data
clean = InteractClean(synData,timeData)
clean
noisy = InteractNosiy(obsData,timeData)
noisy
NMOstackthree(obsData, noisy.kwargs["t0"], noisy.kwargs["v"]-200., noisy.kwargs["v"], noisy.kwargs["v"]+200.,timeData)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data generation
Step2: First we set a number of parameters for the run.
Step3: Data analysis
Step4: Trajectory analysis and assignment
Step5: MSM
Step6: MSM
Step7: The first mode captured by $\lambda_1$ is significantly slower than the others. That mode, which is described by the right eigenvector $\psi^R_1$ as the transition between the two main minima. Next plots correspond to the eigenvector and its projection on the mueller potential.
Step8: HDBSCAN clustering
Step9: MSM validation
Step10: RATE matrix analysis
Step11: COUNT and TRANSITION matrices
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
import time
import numpy as np
import h5py
import scipy.stats
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks", color_codes=True, font_scale=1.5)
sns.set_style({"xtick.direction": "in", "ytick.direction": "in"})
from collections import OrderedDict
import mueller
import simtk.openmm as mm
from simtk.unit import kelvin, picosecond, femtosecond, nanometer, dalton
# each particle is totally independent, propagating under the same potential
mass = 1.0*dalton
temperature = 750 * kelvin
friction = 50/picosecond
timestep = 10.0*femtosecond
# Choose starting conformations uniform on the grid between (-1.5, -0.2) and (1.2, 2)
nParticles = 4
startingPositions = (np.random.rand(nParticles, 3)*np.array([2.7, 1.8, 1])) \
+ np.array([-1.5, -0.2, 0])
system = mm.System()
mullerforce = mueller.MullerForce()
for i in range(nParticles):
system.addParticle(mass)
mullerforce.addParticle(i, [])
system.addForce(mullerforce)
integrator = mm.LangevinIntegrator(temperature, friction, timestep)
context = mm.Context(system, integrator)
context.setPositions(startingPositions)
context.setVelocitiesToTemperature(temperature)
import cProfile, pstats, io
from pstats import SortKey
# Run the simulation
traj = []
for i in range(int(1e7)):
coords = context.getState(getPositions=True).getPositions(asNumpy=True).value_in_unit(nanometer)
integrator.step(50)
traj.append(coords)
for i in range(nParticles):
data = np.vstack([x[i] for x in traj])
h5file = "data/mueller_test_n%i.h5"%i
with h5py.File(h5file, "w") as hf:
hf.create_dataset("data", data=data)
import h5py
i = 0
h5file = "data/mueller_test_n%g.h5"%i
f = h5py.File(h5file, 'r')
data = np.array(f['data'])
f.close()
fig, ax = plt.subplots(figsize=(4,4))
mullerforce.plot(ax=ax, cmap='binary_r', levels=np.arange(-150,50,10))
for i in range(nParticles):
h5file = "data/mueller_test_n%g.h5"%i
f = h5py.File(h5file, 'r')
data = np.array(f['data'])
f.close()
ax.plot(data[:-1:10,0], data[:-1:10,1], lw=0.1)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.tight_layout()
def discretize(data):
    """Map 2-D trajectory points onto a coarse 5x5 grid of state labels.

    Parameters
    ----------
    data : numpy.ndarray, shape (n_frames, >=2)
        Trajectory coordinates; column 0 is x, column 1 is y
        (any extra columns are ignored).

    Returns
    -------
    numpy.ndarray, shape (n_frames,)
        One linearized bin index per frame, using scipy's flattened
        numbering (which reserves outer bins for out-of-range points).
    """
    # Only the bin assignment is used; the 'count' statistic and the bin
    # edges are discarded. The values argument is irrelevant for 'count',
    # so pass the x column instead of allocating a dummy zeros array.
    _, _, _, binnumber = scipy.stats.binned_statistic_2d(
        data[:, 0], data[:, 1], data[:, 0], 'count',
        bins=[np.linspace(-1.5, 1, 6), np.linspace(-0.5, 2, 6)],
        expand_binnumbers=False)
    return binnumber
data.shape
binned_trajs = []
fig, ax = plt.subplots(3,1, figsize=(8,4), sharex=True)
for i in range(nParticles):
h5file = "data/mueller_test_n%g.h5"%i
f = h5py.File(h5file, 'r')
data = np.array(f['data'])
f.close()
print (data.shape)
binnumber = discretize(data)
binned_trajs.append(list(binnumber))
ax[0].plot(data[:,0], lw=0.2)
ax[1].plot(data[:,1], lw=0.2)
ax[2].plot(binnumber, lw=0.2)
#ax[0].set_xlim(0, 500000)
ax[0].set_ylabel('x')
ax[0].set_ylabel('y')
plt.tight_layout()
data[:,1]
len(binnumber), len(data)
binnumber
# MASTER EQUATION MODEL
from mastermsm.trajectory import traj
trajectories = [traj.TimeSeries(distraj=binned_trajs[i], dt=1) \
for i in range(nParticles)]
[trajectories[i].find_keys() for i in range(nParticles)]
from mastermsm.msm import msm
msm_2D = msm.SuperMSM(trajectories,sym=False)
msm_2D.convergence_test(time=[1, 2, 5, 10, 20, 50], error=False)
fig, ax = plt.subplots()
for i in range(0,6):
tau_vs_lagt = np.array([[x, msm_2D.msms[x].tauT[i]] \
for x in sorted(msm_2D.msms.keys())])
ax.plot(tau_vs_lagt[:,0], tau_vs_lagt[:,1], marker='o', ms=7)
ax.set_xlabel(r'$\Delta$t ', fontsize=16)
ax.set_ylabel(r'$\tau_i$', fontsize=16)
ax.set_xscale('log')
ax.set_yscale('log')
ax.fill_between(10**np.arange(-1,4,0.2), 1e-1, 10**np.arange(-1,4,0.2), facecolor='lightgray', alpha=0.5)
ax.set_ylim(1e-1,1000)
ax.set_xlim(0.7,1400)
plt.tight_layout()
lagt=10
msm_2D.do_msm(lagt)
msm_2D.msms[lagt].do_trans(evecs=True)
# VALIDATION - AUTOCORRELATION FUNCTION (acf)
acf = msm_2D.msms[lagt].acf_mode()
time = np.arange(len(acf[1]))*msm_2D.data[0].dt
fig, ax = plt.subplots()
ax.plot(time, acf[1], 'o', label='ACF(left-eig)')
ax.plot(time,np.exp(-time*1./msm_2D.msms[lagt].tauT[0]), \
label='exp(-t/%5.3f)' % msm_2D.msms[lagt].tauT[0])
ax.set_ylim(0,1)
ax.set_xlim(0,1300)
ax.set_xlabel('Time')
ax.set_ylabel('C$_{11}$(t)')
ax.legend()
plt.show
# Spectrum of relaxation times
fig, ax = plt.subplots()
ax.plot(range(1,len(msm_2D.msms[lagt].tauT)+1), \
np.log(msm_2D.msms[lagt].tauT), 'x-', markersize=12, linewidth=4)
ax.set_xlabel('Eigenvalue')
ax.set_ylabel(r'log $\tau_i$')
plt.tight_layout()
plt.show
fig, ax = plt.subplots(figsize=(10,4))
ax.plot(msm_2D.msms[lagt].rvecsT[:,1])
ax.fill_between(range(len(msm_2D.msms[lagt].rvecsT[:,1])), 0, \
msm_2D.msms[lagt].rvecsT[:,1], \
where=msm_2D.msms[lagt].rvecsT[:,1]>0,\
facecolor='c', interpolate=True,alpha=.4)
ax.fill_between(range(len(msm_2D.msms[lagt].rvecsT[:,1])), 0, \
msm_2D.msms[lagt].rvecsT[:,1], \
where=msm_2D.msms[lagt].rvecsT[:,1]<0,\
facecolor='g', interpolate=True,alpha=.4)
ax.set_ylabel("$\Psi^R_1$")
plt.show
# 2D projection
binnumber2 = np.zeros(len(data[:,1]))
for i in range(len(binnumber)-1):
j = binnumber[i]
for k in range(len(msm_2D.msms[lagt].keep_keys)-1):
if j==msm_2D.msms[lagt].keep_keys[k]:
binnumber2[i] = np.log(msm_2D.msms[lagt].peqT[k])
break
y = np.linspace(-0.5,2.,200)
x = np.linspace(-2.,1.1,200)
M = np.zeros((len(x), len(y)))
lines = []
for i, x_i in enumerate(x):
for j, y_i in enumerate(y):
aux = fmueller(x_i,y_i)
if aux<100: M[i, j] = aux
X, Y = np.meshgrid(x,y)
fig, ax = plt.subplots()
levels = np.arange(-130.0, 40.0, 20)
CS = ax.contour(X, Y, M.T, levels)
ax.clabel(CS, inline=0.9, fontsize=6)
cmaps = OrderedDict()
cmaps['Diverging'] = ['PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']
plt.scatter(data[:,1],data[:,2],c=binnumber2,cmap=plt.get_cmap('seismic'))
plt.colorbar()
plt.title('log of equilibrium distribution')
plt.show
import hdbscan
X = data[:,[1,2]]
hb = hdbscan.HDBSCAN(min_cluster_size=200).fit(X)
labels = hb.labels_
for i, x_i in enumerate(labels):
if hb.probabilities_[i] < 0.1:
labels[i] = -1
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
colors = ['royalblue', 'maroon', 'forestgreen', 'mediumorchid', \
'tan', 'deeppink', 'olive', 'goldenrod', 'lightcyan', 'lightgray']
vectorizer = np.vectorize(lambda x: colors[x % len(colors)])
plt.scatter(X[:,0],X[:,1], c=vectorizer(labels))
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show
hb.condensed_tree_.plot()
plt.title('tree')
plt.show
# remove noise (label=-1) to obtain a trajectory for MasterMSM
last = labels[0]
for i, x_i in enumerate(labels):
if x_i == -1:
labels[i] = last
else:
last = x_i
labels[0] = labels[1]
cluster = traj.TimeSeries(distraj=list(labels), dt=1)
cluster.find_keys()
cluster.keys.sort()
msm_cluster = msm.SuperMSM([cluster],sym=True)
lagt=5 # lag time
msm_cluster.do_msm(lagt)
msm_cluster.msms[lagt].do_trans(evecs=True)
fig, ax = plt.subplots()
ax.plot(range(1,len(msm_cluster.msms[lagt].tauT)+1), \
np.log(msm_cluster.msms[lagt].tauT), 'o-', label='HDBSCAN')
# Note for msm_2D we chose lagt=10
ax.plot(range(1,len(msm_cluster.msms[lagt].tauT)+1), \
np.log(msm_2D.msms[10].tauT[:len(msm_cluster.msms[lagt].tauT)]), \
'x-', label='2D binning')
ax.set_xlabel('Eigenvalue')
ax.set_ylabel(r'log $\tau_i$')
plt.tight_layout()
plt.legend()
plt.title('log_mueller_relax_times.png')
plt.show
msm_cluster.do_lbrate()
msm_cluster.msms[lagt].do_rate(method='MLPB', evecs=True, init=msm_cluster.lbrate)
fig, ax = plt.subplots(2,1, figsize=(6,4), sharex=False)
plt.subplots_adjust(wspace=0.9, hspace=0.9)
ax[0].plot(msm_cluster.msms[lagt].rvecsK[:,1])
ax[0].fill_between(range(len(msm_cluster.msms[lagt].rvecsK[:,1])), \
0, msm_cluster.msms[lagt].rvecsK[:,1], alpha=.4)
ax[1].plot(msm_cluster.msms[lagt].lvecsK[:,1])
ax[1].axhline(0,0,25, c='k', ls='--', lw=1)
ax[1].fill_between(range(len(msm_cluster.msms[lagt].lvecsK[:,1])), \
msm_cluster.msms[lagt].lvecsK[:,1], 0, alpha=.4)
ax[0].set_ylabel("$\Psi^R_1$")
ax[1].set_ylabel("$\Psi^L_1$")
plt.tight_layout(h_pad=0)
ax[0].set_title('1st_eigenvectors_R_L.png')
plt.show
plt.figure()
plt.imshow(np.log10(msm_cluster.msms[lagt].count), \
interpolation='none', cmap='viridis_r', origin='lower')
plt.ylabel('$\it{j}$')
plt.xlabel('$\it{i}$')
plt.colorbar()
plt.title('Count matrix (log), $\mathbf{N}$')
plt.show
plt.imshow(np.log10(msm_cluster.msms[lagt].trans), interpolation='none', \
cmap='viridis_r', origin='lower')
plt.ylabel('$\it{j}$')
plt.xlabel('$\it{i}$')
plt.colorbar()
plt.title('Transition matrix (log), $\mathbf{T}$')
plt.show
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Neural Network Layers
Step2: The output can be treated like normalized probability when the input of output layer is calculated by probability.
Step3: Hidden Layers
Step4: To create a hidden layer object, there are several attributes need to be specified
Step5: This layer mapped input of size 4 to output of size 3.
Step6: Which can be deemed as a one-dimensional image with three channels.
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from deep_learning4e import *
from notebook4e import *
layer = OutputLayer(size=4)
example = [1,2,3,4]
print(layer.forward(example))
layer = InputLayer(size=3)
example = [1,2,3]
print(layer.forward(example))
s = sigmoid()
print("Sigmoid at 0:", s.f(0))
print("Deriavation of sigmoid at 0:", s.derivative(0))
layer = DenseLayer(in_size=4, out_size=3, activation=sigmoid())
example = [1,2,3,4]
print(layer.forward(example))
layer = ConvLayer1D(size=3, kernel_size=3)
example = [[1]*3 for _ in range(3)]
print(layer.forward(example))
layer = MaxPoolingLayer1D(size=3, kernel_size=3)
example = [[1,2,3,4], [2,3,4,1],[3,4,1,2]]
print(layer.forward(example))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Загрузка датасета digits с помощью функции load_digits из sklearn.datasets и подготовка матрицы признаков X и ответов на обучающей выборке y (потребуются поля data и target в объекте, который возвращает load_digits).
Step2: Для оценки качества далее нужно использовать cross_val_score из sklearn.cross_validation с параметром cv=10. Эта функция реализует k-fold cross validation c k равным значению параметра cv. Предлагается использовать k=10, чтобы полученные оценки качества имели небольшой разброс, и было проще проверить полученные ответы. На практике же часто хватает и k=5. Функция cross_val_score будет возвращать numpy.ndarray, в котором будет k чисел - качество в каждом из k экспериментов k-fold cross validation. Для получения среднего значения (которое и будет оценкой качества работы) вызовите метод .mean() у массива, который возвращает cross_val_score.
Step3: 2.
Step4: 3.
Step5: 4.
Step6: 5.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import cross_val_score, validation_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
import numpy as np
import seaborn as sns
digits = load_digits()
print(digits.data.shape)
print(digits.DESCR)
plt.gray()
plt.plot(digits.images[0])
plt.show()
X = digits.data
y = digits.target
dt_classifier = DecisionTreeClassifier()
cvs = cross_val_score(dt_classifier, X, y, cv=10)
print(cvs)
print('Mean model quality value: ' + str(cvs.mean()))
with open("answer1.txt", "w") as fout:
fout.write(str(cvs.mean()))
bagging = BaggingClassifier(dt_classifier, n_estimators=100)
cvs = cross_val_score(bagging, X, y, cv=10)
print(cvs)
print('Mean model quality value: ' + str(cvs.mean()))
with open("answer2.txt", "w") as fout:
fout.write(str(cvs.mean()))
n_features = digits.data.shape[1]
bagging = BaggingClassifier(dt_classifier, n_estimators=100, max_features=int(np.sqrt(n_features)))
cvs = cross_val_score(bagging, X, y, cv=10)
print(cvs)
print('Mean model quality value: ' + str(cvs.mean()))
with open("answer3.txt", "w") as fout:
fout.write(str(cvs.mean()))
dt_classifier = DecisionTreeClassifier(max_features=int(np.sqrt(n_features)))
bagging = BaggingClassifier(dt_classifier, n_estimators=100)
cvs = cross_val_score(bagging, X, y, cv=10)
print(cvs)
print('Mean model quality value: ' + str(cvs.mean()))
with open("answer4.txt", "w") as fout:
fout.write(str(cvs.mean()))
rf_classifier = RandomForestClassifier()
bagging = BaggingClassifier(rf_classifier, n_estimators=100)
cvs = cross_val_score(bagging, X, y, cv=10)
print(cvs)
print('Mean model quality value: ' + str(cvs.mean()))
with open("answer5.txt", "w") as fout:
answer = str(2) + ' ' + str(3) + ' ' + str(4) + ' ' + str(7)
fout.write(answer)
param_range = np.array([10, 50, 100, 150])
train_scores, test_scores = validation_curve(bagging, X, y, param_name="n_estimators", param_range=param_range, cv=10, scoring="accuracy")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
print(train_scores_mean, test_scores_mean)
param_range = np.array([5, 10, 20, 40])
train_scores, test_scores = validation_curve(bagging, X, y, param_name="max_features", param_range=param_range, cv=10, scoring="accuracy")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
print(train_scores_mean, test_scores_mean)
param_range = np.array([5, 10, 50, 100])
train_scores, test_scores = validation_curve(bagging, X, y, param_name="base_estimator__max_depth", param_range=param_range, cv=10, scoring="accuracy")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
print(train_scores_mean, test_scores_mean)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading a NetCDF file into a dataset
Step2: Inspecting and selecting from dataset
Step3: Now if we are only interested in soil moisture at the upper depth at a specific time (don't forget that the time is in UTC unless the timezone is explicit), we can pull out just that one data point
Step4: And if we are only interested in the actual value rather than all the attributes
Step5: Concatenate files
Step6: Convert to pandas dataframe
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
Image(url='http://xray.readthedocs.org/en/latest/_images/dataset-diagram.png', embed=True, width=950, height=300)
import os
import posixpath # similar to os, but less dependant on operating system
import numpy as np
import pandas as pd
import xray
NETCDF_DIR = os.getcwd().replace('\\','/')+'/raw_netCDF_output/'
datafile = 'soil'
nc_file = os.listdir(NETCDF_DIR+datafile)[-1]
nc_path = posixpath.join(NETCDF_DIR, datafile, nc_file)
ds = xray.open_dataset(nc_path)
ds
ds.sel(site='Open_W').coords
print ds.VW_05cm_Avg.sel(site='Open_W', time='2015-06-02T06:10:00')
print ds.VW_05cm_Avg.sel(site='Open_W', time='2015-06-02T06:10:00').values
ds_dict = {}
nc_files = os.listdir(NETCDF_DIR+datafile)
for nc_file in nc_files:
nc_path = posixpath.join(NETCDF_DIR, datafile, nc_file)
ds = xray.open_dataset(nc_path)
date = nc_file.split('Tower_')[1].split('.')[0]
ds_dict.update({date: ds})
ds_dict.keys()
xray.concat(ds_dict.values(), dim='time')
df = ds.to_dataframe()
for i in range(len(df.index.levels)):
print 'df.index.levels[{i}]\n{index}\n'.format(i=i, index=df.index.levels[i])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic rich display
Step2: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn about how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
from IPython.display import HTML
from IPython.display import display
assert True # leave this to grade the import statements
Image(url='http://images.mentalfloss.com/sites/default/files/styles/insert_main_wide_image/public/einstein1_7.jpg', embed=True, width=600, height=600)
assert True # leave this to grade the image display
%%html
<caption>Quarks</caption>
<table>
<thead>
<tr>
<th>Name</th>
<th>Symbol</th>
<th>Antiparticle</th>
<th>Charge (e)</th>
<th>Mass ($MeV/c^2$)</th>
</tr>
</thead>
<tbody>
<tr>
<td>up</td>
<td>$u$</td>
<td>$\overline{u}$</td>
<td>$+\frac{2}{3}$</td>
<td>$1.5-3.3$</td>
</tr>
<tr>
<td>down</td>
<td>$d$</td>
<td>$\overline{d}$</td>
<td>$-\frac{1}{3}$</td>
<td>$3.5-6.0$</td>
</tr>
<tr>
<td>charm</td>
<td>$c$</td>
<td>$\overline{c}$</td>
<td>$+\frac{2}{3}$</td>
<td>$1,160-1,340$</td>
</tr>
<tr>
<td>strange</td>
<td>$s$</td>
<td>$\overline{s}$</td>
<td>$-\frac{1}{3}$</td>
<td>$70-130$</td>
</tr>
<tr>
<td>top</td>
<td>$t$</td>
<td>$\overline{t}$</td>
<td>$+\frac{2}{3}$</td>
<td>$169,100-173,300$</td>
</tr>
<tr>
<td>bottom</td>
<td>$b$</td>
<td>$\overline{b}$</td>
<td>$-\frac{1}{3}$</td>
<td>$4,130-4,370$</td>
</tr>
</tbody>
assert True # leave this here to grade the quark table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two challenges
Step2: Colin will provide more details about function, such as variable scope, and multiple return values.
|
<ASSISTANT_TASK:>
Python Code:
# Packages
from urllib import request
import os
import pandas as pd
# Constants used in analysis
TRIP_DATA = "https://data.seattle.gov/api/views/tw7j-dfaw/rows.csv?accessType=DOWNLOAD"
TRIP_FILE = "pronto_trips.csv"
WEATHER_DATA = "http://uwseds.github.io/data/pronto_weather.csv"
WEATHER_FILE = "pronto_weather.csv"
# Get the URL data
#request.urlretrieve(TRIP_DATA, TRIP_FILE)
!ls -lh
# Example function
def xyz(input):
    """Demo function: coerce the single argument to int and return it plus one."""
    value = int(input)
    return value + 1
print (xyz("3"))
#a = xyz(3)
#print (xyz(a))
def addTwo(input1, input2):
    """Return the sum (via the + operator) of the two arguments."""
    total = input1 + input2
    return total
#
addTwo(1, 2)
# Function to download from a URL
def download(url, filename):
    """Announce the download of *filename* (the network fetch is disabled for the demo)."""
    status = "Downloading"
    print(status, filename)
    # Actual retrieval intentionally left disabled:
    # request.urlretrieve(url, filename)
download(TRIP_DATA, TRIP_FILE)
# Enhancing function to detect file already present
import os.path
def download(url, filename):
    """Download *url* to *filename*, skipping the fetch if the file already exists."""
    if not os.path.isfile(filename):
        # File is missing: report the (currently disabled) download.
        print("Downloading %s" % filename)
        # request.urlretrieve(url, filename)
        return
    print("Already present %s." % filename)
download(TRIP_DATA, "none.csv")
import download
download.download_file(TRIP_DATA, "none.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Similarly, the rate parameters can automatically be given exponential priors
Step2: PyMC includes most of the probability density functions (for continuous variables) and probability mass functions (for discrete variables) used in statistical modeling. Continuous variables are represented by a specialized subclass of Distribution called Continuous and discrete variables by the Discrete subclass.
Step3: As we previewed in the introduction, Distribution has a class method dist that returns a probability distribution of that type, without being wrapped in a PyMC random variable object. Sometimes we wish to use a particular statistical distribution, without using it as a variable in a model; for example, to generate random numbers from the distribution. This class method allows that.
Step5: Manual
Step6: A couple of things to notice
Step7: The ObservedRV Class
Step8: We have already seen manual instantiation, from the melanoma survial model where the exponential survival likelihood was implemented manually
Step9: so rate's value can be computed exactly from the values of its parents early_mean, late_mean and switchpoint.
Step10: Or, let's say we wanted to use the mean of the early_mean and late_mean variables somehere in our model
Step11: These are called anonymous variables because we did not wrap it with a call to Determinstic, which gives it a name as its first argument. We simply specified the variable as a Python (or, Theano) expression. This is therefore the simplest way to construct a determinstic variable. The only caveat is that the values generated by anonymous determinstics at every iteration of a MCMC algorithm, for example, are not recorded to the resulting trace. So, this approach is only appropriate for intermediate values in your model that you do not wish to obtain posterior estimates for, alongside the other variables in the model.
Step12: Factor Potentials
Step13: The function takes just a name as its first argument and an expression returning the appropriate log-probability as the second argument.
Step14: This parameterization would not be compatible with an observed stochastic, because the err term would become fixed in the likelihood and not be allowed to change during sampling.
Step15: Sampling with MCMC
Step16: If we are sampling more than one Markov chain from our model, it is often recommended to initialize each chain to different starting values, so that lack of convergence can be more easily detected (see Model Checking section).
Step17: This MultiTrace object is a data structure that stores the samples from an MCMC run in a tabular structure. By default, sample will create a new MultiTrace object that stores its samples in memory, as a NumPy ndarray. We can override the default behavior by specifying the trace argument. There are three options
Step18: We will look at the various database backends in greater detail in the next section.
Step19: Running $n$ iterations with $c$ chains will result in $n \times c$ samples.
Step20: If you want to specify different arguments for each chain, a list of argument values can be passed to sample as appropriate. For example, if we want to initialize random variables to particular (e.g. dispersed) values, we can pass a list of dictionaries to start
Step21: Generating several chains is generally recommended because it aids in model checking, allowing statistics such as the potential scale reduction factor ($\hat{R}$) and effective sample size to be calculated.
Step22: Setting the same seed for another run of the same model will generate the same sequence of samples
Step23: Step methods
Step method classes handle individual stochastic variables, or sometimes groups of them. They are responsible for making the variables they handle take single MCMC steps conditional on the rest of the model. Each PyMC step method (usually subclasses of ArrayStep) implements a method called astep(), which is called iteratively by sample.
Step24: There is rarely a reason to use HamiltonianMC rather than NUTS. It is the default sampler for continuous variables in PyMC3.
Step25: PyMC3 also includes an implementation of adaptive transitional Markov chain Monte Carlo (ATMCMC, Ching & Chen 2007), which we will not cover here. Consult the documentation for details.
Step26: To estimate these values in PyMC, we cast the data to a masked array. These are specialised NumPy arrays that contain a matching True or False value for each element to indicate if that value should be excluded from any computation. Masked arrays can be generated using NumPy's ma.masked_equal function
Step27: This masked array, in turn, can then be passed to one of PyMC's data stochastic variables, which recognizes the masked array and replaces the missing values with stochastic variables of the desired type. For the coal mining disasters problem, recall that disaster events were modeled as Poisson variates
Step28: Here, we have used the masked_array function, rather than
Step29: Generalized Linear Models
Step30: The model can then be very concisely specified in one line of code.
Step31: The error distribution, if not specified via the family argument, is assumed to be normal. In the case of logistic regression, this can be modified by passing in a Binomial family object.
Step32: Backends
Step33: The stored trace can then later be loaded using the load command
|
<ASSISTANT_TASK:>
Python Code:
import pymc3 as pm
with pm.Model() as disaster_model:
switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=110)
with disaster_model:
early_mean = pm.Exponential('early_mean', lam=1)
late_mean = pm.Exponential('late_mean', lam=1)
switchpoint.distribution.defaults
pm.Exponential.dist(1)
import numpy as np
with pm.Model():
def uniform_logp(value, lower=0, upper=111):
The switchpoint for the rate of disaster occurrence.
return pm.switch((value > upper) | (value < lower), -np.inf, -np.log(upper - lower + 1))
switchpoint = pm.DensityDist('switchpoint', logp=uniform_logp, dtype='int64')
switchpoint.logp({'switchpoint':4})
switchpoint.logp({'switchpoint': 44})
switchpoint.logp({'switchpoint':-1})
from pymc3.distributions import Continuous
import theano.tensor as tt
from theano import as_op
class Beta(Continuous):
    # Custom continuous distribution centred at ``mu``.
    # NOTE(review): despite the name, the log-density below is not the Beta
    # distribution's -- it is an unnormalised heavy-tailed bell curve (see
    # ``beta_logp``); the name follows the tutorial it was copied from.
    def __init__(self, mu, *args, **kwargs):
        super(Beta, self).__init__(*args, **kwargs)
        self.mu = mu  # location (centre) of the distribution
        self.mode = mu  # used by PyMC3 as the default test value
    def logp(self, value):
        """Return the log-probability of ``value`` (a Theano tensor)."""
        mu = self.mu
        return beta_logp(value - mu)
@as_op(itypes=[tt.dscalar], otypes=[tt.dscalar])  # wrap the NumPy calc as a Theano op
def beta_logp(value):
    # Unnormalised heavy-tailed log-density: -1.5 * log(1 + value**2).
    return -1.5 * np.log(1 + (value)**2)
with pm.Model() as model:
beta = Beta('slope', mu=0, testval=0)
with disaster_model:
disasters = pm.Poisson('disasters', mu=3, observed=[3,4,1,2,0,2,2])
with disaster_model:
rate = pm.Deterministic('rate', pm.switch(switchpoint >= np.arange(112), early_mean, late_mean))
with disaster_model:
rate = pm.switch(switchpoint >= np.arange(112), early_mean, late_mean)
with disaster_model:
mean_of_means = (early_mean + late_mean)/2
with disaster_model:
rate = pm.Deterministic('rate', pm.switch(switchpoint >= np.arange(112), early_mean, late_mean))
disaster_model.named_vars
with disaster_model:
rate_constraint = pm.Potential('rate_constraint', pm.switch(pm.abs_(early_mean-late_mean)>1, -np.inf, 0))
y = np.array([15, 10, 16, 11, 9, 11, 10, 18, 11])
x = np.array([1, 2, 4, 5, 6, 8, 19, 18, 12])
with pm.Model() as arma_model:
sigma = pm.HalfCauchy('sigma', 5)
beta = pm.Normal('beta', 0, sd=2)
mu = pm.Normal('mu', 0, sd=10)
err = y - (mu + beta*x)
like = pm.Potential('like', pm.Normal.dist(0, sd=sigma).logp(err))
# Log dose in each group
log_dose = [-.86, -.3, -.05, .73]
# Sample size in each group
n = 5
# Outcomes
deaths = [0, 1, 3, 5]
## Write your answer here
from pymc3.examples.gelman_bioassay import model as bioassay_model
with bioassay_model:
start = pm.find_MAP()
start
with bioassay_model:
trace = pm.sample(100, step=pm.Metropolis(), start=start)
trace
with bioassay_model:
db_trace = pm.sample(100, trace='sqlite')
with bioassay_model:
ptrace = pm.sample(100, njobs=4)
ptrace['alpha'].shape
with bioassay_model:
ptrace = pm.sample(100, njobs=2, start=[{'alpha':-2}, {'alpha':2}])
[chain[:5] for chain in ptrace.get_values('alpha', combine=False)]
with bioassay_model:
rtrace = pm.sample(100, random_seed=42)
rtrace['beta', -5:]
with bioassay_model:
rtrace = pm.sample(100, random_seed=42)
rtrace['beta', -5:]
with bioassay_model:
trace_90 = pm.sample(100, step=pm.NUTS(target_accept=0.9))
%matplotlib inline
pm.traceplot(trace_90, varnames=['alpha']);
with bioassay_model:
trace_99 = pm.sample(100, step=pm.NUTS(target_accept=0.99))
pm.traceplot(trace_99, varnames=['alpha']);
%matplotlib inline
with bioassay_model:
slice_trace = pm.sample(5000, step=pm.Slice())
pm.traceplot(slice_trace[1000:], varnames=['alpha','beta']);
disasters_missing = np.array([ 4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6,
3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5,
2, 2, 3, 4, 2, 1, 3, -999, 2, 1, 1, 1, 1, 3, 0, 0,
1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1,
0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
3, 3, 1, -999, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4,
0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1])
disasters_masked = np.ma.masked_values(disasters_missing, value=-999)
disasters_masked
with pm.Model() as missing_data_model:
# Prior for distribution of switchpoint location
switchpoint = pm.DiscreteUniform('switchpoint', lower=0, upper=len(disasters_masked))
# Priors for pre- and post-switch mean number of disasters
early_mean = pm.Exponential('early_mean', lam=1.)
late_mean = pm.Exponential('late_mean', lam=1.)
# Allocate appropriate Poisson rates to years before and after current
# switchpoint location
idx = np.arange(len(disasters_masked))
rate = pm.Deterministic('rate', pm.switch(switchpoint >= idx, early_mean, late_mean))
# Data likelihood
disasters = pm.Poisson('disasters', rate, observed=disasters_masked)
with missing_data_model:
trace_missing = pm.sample(2000)
missing_data_model.vars
from pymc3 import forestplot
pm.forestplot(trace_missing, varnames=['disasters_missing'])
import pandas as pd
# Generate data
size = 50
true_intercept = 1
true_slope = 2
x = np.linspace(0, 1, size)
y = true_intercept + x*true_slope + np.random.normal(scale=.5, size=size)
data = pd.DataFrame(dict(x=x, y=y))
from pymc3.glm import glm
with pm.Model() as model:
glm('y ~ x', data)
fit = pm.advi(n=50000)
fit[0]
from pymc3.glm.families import Binomial
df_logistic = pd.DataFrame({'x': x, 'y': y > np.median(y)})
with pm.Model() as model_glm_logistic:
glm('y ~ x', df_logistic, family=Binomial())
from pymc3.backends import SQLite
with pm.Model() as model_glm_logistic:
glm('y ~ x', df_logistic, family=Binomial())
backend = SQLite('trace.sqlite')
trace = pm.sample(2000, trace=backend)
from pymc3.backends.sqlite import load
with model_glm_logistic:
trace_loaded = load('trace.sqlite')
trace_loaded
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Array slicing
Step2: nD array slicing
Step3: Array dicing
Step4: Thus, you specify
Step5: Array broadcasting
Step6: Deep copy
Step7: Notice they are same obj in memory
Step8: Notice they are different objects in memory. Thus changing arr3_subset will not affect its source
Step9: Array searching
Step10: Get all numbers greater than 15
Step11: just the condition returns a boolean matrix of same dimension as the one being queried
Step12: Array operations
Step13: Similarly, you can add a scalar to an array and NumPy will broadcast that operation on all the elements.
Step14: Caveats
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
arr1 = np.random.randint(10,30, size=8)
arr1
arr2 = np.random.randint(20,200,size=50).reshape(5,10) #method chaining - numbers from 0 to 50
arr2
arr1[0]
arr1[3]
arr1[:3] #get the first 3 elements. Gets lower bounds inclusive, upper bound exclusive
arr1[2:] #lower bound inclusive
arr1[2:5] #get elements at index 2,3,4
arr2
arr2[0,0] #style 1 - you pass in a list of indices
arr2[0][0] #style 2 - parse it as list of lists - not so popular
arr2[1] # get a full row
#get the second column
arr2[:,1]
#get the 3rd row
arr2[2,:] #which is same as arr2[2]
#get the center 3,3 elements - columns 4,5,6 and rows 1,2,3
arr2[1:4, 4:7]
arr2
arr2_subset = arr2[1:4, 4:7]
arr2_subset
arr2_subset[:,:] = 999 #assign this entire numpy the same values
arr2_subset
arr2 #notice the 999 in the middle
arr2_subset_a = arr2_subset
arr2_subset_a is arr2_subset
arr3_subset = arr2_subset.copy()
arr3_subset
arr3_subset is arr2_subset
arr3_subset[:,:] = 0.1
arr2_subset
arr1
arr1[arr1 > 15]
arr1[arr1 > 12]
arr1 > 12
arr2[arr2 > 50] #looses the original shape as its impossible to keep the 2D shape
arr2[arr2 < 30]
arr_sum = arr1 + arr1
arr_sum
arr_cubed = arr1 ** 2
arr_cubed
arr_cubed - 100
arr_cubed[0] = 0
arr_cubed
arr_cubed / 0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating an initial query
Step2: With the above, we extract information of interest about the syllable, the word it is in, the utterance it is in, the
|
<ASSISTANT_TASK:>
Python Code:
from polyglotdb import CorpusContext
with CorpusContext('pg_tutorial') as c:
q = c.query_graph(c.syllable)
q = q.filter(c.syllable.stress == '1')
q = q.filter(c.syllable.begin == c.syllable.word.begin)
q = q.filter(c.syllable.word.end == c.syllable.word.utterance.end)
q = q.columns(c.syllable.label.column_name('syllable'),
c.syllable.duration.column_name('syllable_duration'),
c.syllable.word.label.column_name('word'),
c.syllable.word.begin.column_name('word_begin'),
c.syllable.word.end.column_name('word_end'),
c.syllable.word.num_syllables.column_name('word_num_syllables'),
c.syllable.word.stress_pattern.column_name('word_stress_pattern'),
c.syllable.word.utterance.speech_rate.column_name('utterance_speech_rate'),
c.syllable.speaker.name.column_name('speaker'),
c.syllable.speaker.gender.column_name('speaker_gender'),
c.syllable.discourse.name.column_name('file'),
)
q = q.limit(10)
results = q.all()
print(results)
export_path = '/mnt/e/pg_tutorial.csv'
with CorpusContext('pg_tutorial') as c:
q = c.query_graph(c.syllable)
q = q.filter(c.syllable.stress == 1)
q = q.filter(c.syllable.begin == c.syllable.word.begin)
q = q.filter(c.syllable.word.end == c.syllable.word.utterance.end)
q = q.columns(c.syllable.label.column_name('syllable'),
c.syllable.duration.column_name('syllable_duration'),
c.syllable.word.label.column_name('word'),
c.syllable.word.begin.column_name('word_begin'),
c.syllable.word.end.column_name('word_end'),
c.syllable.word.num_syllables.column_name('word_num_syllables'),
c.syllable.word.stress_pattern.column_name('word_stress_pattern'),
c.syllable.word.utterance.speech_rate.column_name('utterance_speech_rate'),
c.syllable.speaker.name.column_name('speaker'),
c.syllable.speaker.gender.column_name('speaker_gender'),
c.syllable.discourse.name.column_name('file'),
)
q.to_csv(export_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot the phantom data, lowpassed to get rid of high-frequency artifacts.
Step2: Now we can clean the data with OTP, lowpass, and plot. The flux jumps have
Step3: We can also look at the effect on single-trial phantom localization.
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import mne
import numpy as np
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
print(__doc__)
dipole_number = 1
data_path = bst_phantom_elekta.data_path()
raw = read_raw_fif(
op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif'))
raw.crop(40., 50.).load_data()
order = list(range(160, 170))
raw.copy().filter(0., 40.).plot(order=order, n_channels=10)
raw_clean = mne.preprocessing.oversampled_temporal_projection(raw)
raw_clean.filter(0., 40.)
raw_clean.plot(order=order, n_channels=10)
def compute_bias(raw):
    """Fit single-trial dipoles to phantom data and return localisation errors.

    Args:
        raw: MNE Raw recording of the Elekta phantom.

    Returns:
        Array of per-trial distances (in millimetres) between each fitted
        dipole position and the known phantom dipole position.
    """
    events = find_events(raw, 'STI201', verbose=False)
    events = events[1:]  # first one has an artifact
    tmin, tmax = -0.2, 0.1
    epochs = mne.Epochs(raw, events, dipole_number, tmin, tmax,
                        baseline=(None, -0.01), preload=True, verbose=False)
    # Spherical conductor model centred at the origin (phantom geometry).
    sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None,
                                   verbose=False)
    cov = mne.compute_covariance(epochs, tmax=0, method='oas',
                                 rank=None, verbose=False)
    # Fit at a single latency (36 ms post-stimulus) across all trials.
    idx = epochs.time_as_index(0.036)[0]
    data = epochs.get_data()[:, :, idx].T
    evoked = mne.EvokedArray(data, epochs.info, tmin=0.)
    dip = fit_dipole(evoked, cov, sphere, n_jobs=1, verbose=False)[0]
    actual_pos = mne.dipole.get_phantom_dipoles()[0][dipole_number - 1]
    # Euclidean distance in metres, scaled to millimetres.
    misses = 1000 * np.linalg.norm(dip.pos - actual_pos, axis=-1)
    return misses
bias = compute_bias(raw)
print('Raw bias: %0.1fmm (worst: %0.1fmm)'
% (np.mean(bias), np.max(bias)))
bias_clean = compute_bias(raw_clean)
print('OTP bias: %0.1fmm (worst: %0.1fmm)'
% (np.mean(bias_clean), np.max(bias_clean),))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Vertex constants
Step12: Labeling constants
Step13: Hardware Accelerators
Step14: Container (Docker) image
Step15: Machine Type
Step16: Tutorial
Step17: Dataset
Step18: Now save the unique dataset identifier for the Dataset resource instance you created.
Step19: Data preparation
Step20: Quick peek at your data
Step21: Import data
Step22: Export dataset index
Step23: Train a model
Step24: Prepare your disk specification
Step25: Assemble a job specification
Step26: Examine the training package
Step27: Store training script on your Cloud Storage bucket
Step28: Train the model
Step29: Now get the unique identifier for the custom job you created.
Step30: Get information on a custom job
Step31: Deployment
Step32: Load the saved model
Step33: Evaluate the model
Step34: Perform the model evaluation
Step35: Upload the model for serving
Step36: Upload the model
Step37: Get Model resource information
Step38: Deploy the Model resource
Step39: Now get the unique identifier for the Endpoint resource you created.
Step40: Compute instance scaling
Step41: Deploy Model resource to the Endpoint resource
Step42: Make a online prediction request
Step43: Prepare the request content
Step44: Send the prediction request
Step45: Undeploy the Model resource
Step46: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Tabular Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml"
# Tabular Labeling type
LABEL_SCHEMA = (
"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml"
)
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_job_client():
    """Return a JobServiceClient bound to the regional API endpoint."""
    client = aip.JobServiceClient(client_options=client_options)
    return client
def create_dataset_client():
    """Return a DatasetServiceClient bound to the regional API endpoint."""
    client = aip.DatasetServiceClient(client_options=client_options)
    return client
def create_model_client():
    """Return a ModelServiceClient bound to the regional API endpoint."""
    client = aip.ModelServiceClient(client_options=client_options)
    return client
def create_endpoint_client():
    """Return an EndpointServiceClient bound to the regional API endpoint."""
    client = aip.EndpointServiceClient(client_options=client_options)
    return client
def create_prediction_client():
    """Return a PredictionServiceClient bound to the regional API endpoint."""
    client = aip.PredictionServiceClient(client_options=client_options)
    return client
return client
clients = {}
clients["job"] = create_job_client()
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
TIMEOUT = 90
def create_dataset(name, schema, labels=None, timeout=TIMEOUT):
    """Create a Vertex AI Dataset resource and wait for the operation.

    Args:
        name: display name for the new dataset.
        schema: metadata schema URI (e.g. the tabular schema).
        labels: optional dict of resource labels.
        timeout: seconds to wait for the create operation to complete.

    Returns:
        The created Dataset proto on success, or None on failure.
    """
    start_time = time.time()
    try:
        dataset = aip.Dataset(
            display_name=name, metadata_schema_uri=schema, labels=labels
        )
        operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
        print("Long running operation:", operation.operation.name)
        # BUG FIX: honour the caller-supplied ``timeout`` instead of always
        # using the module-level TIMEOUT constant.
        result = operation.result(timeout=timeout)
        print("time:", time.time() - start_time)
        print("response")
        print(" name:", result.name)
        print(" display_name:", result.display_name)
        print(" metadata_schema_uri:", result.metadata_schema_uri)
        print(" metadata:", dict(result.metadata))
        print(" create_time:", result.create_time)
        print(" update_time:", result.update_time)
        print(" etag:", result.etag)
        print(" labels:", dict(result.labels))
        return result
    except Exception as e:
        print("exception:", e)
        return None
result = create_dataset("iris-" + TIMESTAMP, DATA_SCHEMA)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
IMPORT_FILE = "gs://cloud-samples-data/tables/iris_1000.csv"
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
def import_data(dataset, gcs_sources, schema):
    """Import data files into an existing Dataset resource.

    Args:
        dataset: full resource name of the target dataset.
        gcs_sources: list of ``gs://`` URIs to import.
        schema: import schema URI describing the file format/labels.

    Returns:
        The completed operation on success, or None on failure.
    """
    config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
    # BUG FIX: use the ``dataset`` argument instead of the module-level
    # ``dataset_id`` global, so the function works for any dataset passed in.
    print("dataset:", dataset)
    start_time = time.time()
    try:
        operation = clients["dataset"].import_data(
            name=dataset, import_configs=config
        )
        print("Long running operation:", operation.operation.name)
        result = operation.result()
        print("result:", result)
        print("time:", int(time.time() - start_time), "secs")
        print("error:", operation.exception())
        print("meta :", operation.metadata)
        print(
            "after: running:",
            operation.running(),
            "done:",
            operation.done(),
            "cancelled:",
            operation.cancelled(),
        )
        return operation
    except Exception as e:
        print("exception:", e)
        return None
import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)
EXPORT_FILE = BUCKET_NAME + "/export"
def export_data(dataset_id, gcs_dest):
    """Export the dataset's index to Cloud Storage.

    Args:
        dataset_id: full resource name of the dataset (shadows the module
            global of the same name; the parameter is what is used here).
        gcs_dest: ``gs://`` prefix to write the export under.

    Returns:
        (operation, result) on success, or (None, None) on failure.
    """
    config = {"gcs_destination": {"output_uri_prefix": gcs_dest}}
    start_time = time.time()
    try:
        operation = clients["dataset"].export_data(
            name=dataset_id, export_config=config
        )
        print("Long running operation:", operation.operation.name)
        result = operation.result()
        print("result:", result)
        print("time:", int(time.time() - start_time), "secs")
        print("error:", operation.exception())
        print("meta :", operation.metadata)
        print(
            "after: running:",
            operation.running(),
            "done:",
            operation.done(),
            "cancelled:",
            operation.cancelled(),
        )
        return operation, result
    except Exception as e:
        print("exception:", e)
        return None, None
_, result = export_data(dataset_id, EXPORT_FILE)
if TRAIN_GPU:
machine_spec = {
"machine_type": TRAIN_COMPUTE,
"accelerator_type": TRAIN_GPU,
"accelerator_count": TRAIN_NGPU,
}
else:
machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0}
DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard]
DISK_SIZE = 200 # GB
disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE}
JOB_NAME = "custom_job_" + TIMESTAMP
MODEL_DIR = "{}/{}".format(BUCKET_NAME, JOB_NAME)
if not TRAIN_NGPU or TRAIN_NGPU < 2:
TRAIN_STRATEGY = "single"
else:
TRAIN_STRATEGY = "mirror"
EPOCHS = 20
STEPS = 100
PARAM_FILE = BUCKET_NAME + "/params.txt"
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
"--distribute=" + TRAIN_STRATEGY,
]
worker_pool_spec = [
{
"replica_count": 1,
"machine_spec": machine_spec,
"disk_spec": disk_spec,
"python_package_spec": {
"executor_image_uri": TRAIN_IMAGE,
"package_uris": [BUCKET_NAME + "/trainer_iris.tar.gz"],
"python_module": "trainer.task",
"args": CMDARGS,
},
}
]
if DIRECT:
job_spec = {"worker_pool_specs": worker_pool_spec}
else:
job_spec = {
"worker_pool_specs": worker_pool_spec,
"base_output_directory": {"output_uri_prefix": MODEL_DIR},
}
custom_job = {"display_name": JOB_NAME, "job_spec": job_spec}
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Iris tabular classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_iris.tar.gz
def create_custom_job(custom_job):
    """Submit *custom_job* to the Vertex job service and echo its metadata."""
    job = clients["job"].create_custom_job(parent=PARENT, custom_job=custom_job)
    for field in ("name", "display_name", "state", "create_time", "update_time"):
        print("%s:" % field, getattr(job, field))
    return job
response = create_custom_job(custom_job)
# The full unique ID for the custom job
job_id = response.name
# The short numeric ID for the custom job
job_short_id = job_id.split("/")[-1]
print(job_id)
def get_custom_job(name, silent=False):
    """Fetch a CustomJob by resource *name*; print its metadata unless *silent*."""
    job = clients["job"].get_custom_job(name=name)
    if not silent:
        for field in ("name", "display_name", "state", "create_time", "update_time"):
            print("%s:" % field, getattr(job, field))
    return job
response = get_custom_job(job_id)
while True:
response = get_custom_job(job_id, True)
if response.state != aip.JobState.JOB_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_path_to_deploy = None
if response.state == aip.JobState.JOB_STATE_FAILED:
break
else:
if not DIRECT:
MODEL_DIR = MODEL_DIR + "/model"
model_path_to_deploy = MODEL_DIR
print("Training Time:", response.update_time - response.create_time)
break
time.sleep(60)
print("model_to_deploy:", model_path_to_deploy)
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_DIR)
# NOTE(review): this evaluation cell looks copied from an *image*
# classification tutorial -- it references ``export_data_items``, ``json``,
# ``np`` and ``cls2label``, none of which are defined earlier in this
# (tabular iris) notebook, and it decodes image files although the dataset
# imported above is tabular CSV. Confirm against the original tutorial.
x_test = []
y_test = []
data_items = export_data_items[-10:]  # last 10 exported items used for eval
for data_item in data_items:
    data_item = json.loads(data_item)  # each exported line is a JSON record
    print("FILE", data_item["imageGcsUri"])
    with tf.io.gfile.GFile(data_item["imageGcsUri"], "rb") as f:
        data = f.read()
    image = tf.io.decode_image(data)
    image = tf.image.resize(image, (128, 128))  # presumed model input size -- confirm
    image = (image.numpy() / 255.0).astype(np.float32)  # scale pixels to [0, 1]
    cls = data_item["classificationAnnotation"]["displayName"]
    label = cls2label[cls]  # map class name -> integer label
    x_test.append(image)
    y_test.append(label)
x_test = np.asarray(x_test)
y_test = np.asarray(y_test)
model.evaluate(x_test, y_test)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
IMAGE_URI = DEPLOY_IMAGE
def upload_model(display_name, image_uri, model_uri):
    """Upload a serving container + artifacts as a Vertex Model resource.

    Blocks on the long-running operation (180 s timeout) and returns the
    fully-qualified resource name of the uploaded model.
    """
    container_spec = {
        "image_uri": image_uri,
        "command": [],
        "args": [],
        "env": [{"name": "env_name", "value": "env_value"}],
        "ports": [{"container_port": 8080}],
        "predict_route": "",
        "health_route": "",
    }
    payload = {
        "display_name": display_name,
        "metadata_schema_uri": "",
        "artifact_uri": model_uri,
        "container_spec": container_spec,
    }
    lro = clients["model"].upload_model(parent=PARENT, model=payload)
    print("Long running operation:", lro.operation.name)
    result = lro.result(timeout=180)
    print("upload_model_response")
    print(" model:", result.model)
    return result.model
model_to_deploy_id = upload_model("iris-" + TIMESTAMP, IMAGE_URI, model_path_to_deploy)
def get_model(name):
    """Print the full Model resource identified by *name*."""
    print(clients["model"].get_model(name=name))
get_model(model_to_deploy_id)
ENDPOINT_NAME = "iris_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
    """Create a Vertex Endpoint, print its key fields, and return it.

    Blocks on the long-running operation (300 s timeout).
    """
    lro = clients["endpoint"].create_endpoint(
        parent=PARENT, endpoint={"display_name": display_name}
    )
    print("Long running operation:", lro.operation.name)
    endpoint = lro.result(timeout=300)
    print("result")
    for field in ("name", "display_name", "description", "labels",
                  "create_time", "update_time"):
        print(f" {field}:", getattr(endpoint, field))
    return endpoint
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "iris_deployed-" + TIMESTAMP
def deploy_model(
    model, deployed_model_display_name, endpoint, traffic_split=None
):
    """Deploy an uploaded model to an endpoint and wait for completion.

    Args:
        model: Fully-qualified resource name of the uploaded Model.
        deployed_model_display_name: Display name for this deployment.
        endpoint: Fully-qualified resource name of the target Endpoint.
        traffic_split: Optional mapping of deployed-model id to traffic
            percentage.  Defaults to routing 100% of traffic here.

    Returns:
        The id of the newly deployed model.
    """
    # A dict literal as a default argument is a single shared object across
    # calls; use a None sentinel and build the default per call instead.
    if traffic_split is None:
        traffic_split = {"0": 100}
    if DEPLOY_GPU:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_type": DEPLOY_GPU,
            "accelerator_count": DEPLOY_NGPU,
        }
    else:
        machine_spec = {
            "machine_type": DEPLOY_COMPUTE,
            "accelerator_count": 0,
        }
    deployed_model = {
        "model": model,
        "display_name": deployed_model_display_name,
        # Dedicated resources autoscale between MIN_NODES and MAX_NODES.
        "dedicated_resources": {
            "min_replica_count": MIN_NODES,
            "max_replica_count": MAX_NODES,
            "machine_spec": machine_spec,
        },
        "disable_container_logging": False,
    }
    response = clients["endpoint"].deploy_model(
        endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
    )
    print("Long running operation:", response.operation.name)
    result = response.result()
    print("result")
    deployed_model = result.deployed_model
    print(" deployed_model")
    print(" id:", deployed_model.id)
    print(" model:", deployed_model.model)
    print(" display_name:", deployed_model.display_name)
    print(" create_time:", deployed_model.create_time)
    return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
# Last data item in exported dataset index
data_items = export_data_items[-1:]
data_item = json.loads(data_items[0])
image_path = data_item["imageGcsUri"]
print("IMAGE PATH", image_path)
! gsutil cp $image_path tmp.jpg
import base64
import cv2
test_image = cv2.imread("tmp.jpg", cv2.IMREAD_COLOR)
print("before:", test_image.shape)
test_image = cv2.resize(test_image, (128, 128))
print("after:", test_image.shape)
cv2.imwrite("tmp.jpg", test_image.astype(np.uint8))
# bytes = tf.io.read_file('tmp.jpg')
with open("tmp.jpg", "rb") as f:
bytes = f.read()
b64str = base64.b64encode(bytes).decode("utf-8")
def predict_data(data, endpoint, parameters_dict):
    """Send *data* to a Vertex Endpoint for online prediction and print results."""
    parameters = json_format.ParseDict(parameters_dict, Value())
    # Each instance must conform to the deployed model's prediction input schema.
    instances = [json_format.ParseDict({serving_input: data.tolist()}, Value())]
    response = clients["prediction"].predict(
        endpoint=endpoint, instances=instances, parameters=parameters
    )
    print("response")
    print(" deployed_model_id:", response.deployed_model_id)
    print("predictions")
    for prediction in response.predictions:
        print(" prediction:", prediction)
predict_data(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
    """Undeploy a model from an endpoint, emptying its traffic split entry."""
    result = clients["endpoint"].undeploy_model(
        endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
    )
    print(result)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='cond_dens_def'></a>
Step2: The following simulates many $X, Y$ pairs. Note that unconditionally there are 9 possible values.
Step3: However, we only want to consider pairs for which $X+Y$ is 3. We accomplish this through conditioning as in the following code. Note
Step4: The previous code illustrates the basic functionality of conditioning. RVs and simulations with conditioning can be manipulated using the simulation tools just as those without conditioning. For example, to find and plot the conditional distribution of $X$ given $X+Y=3$
Step5: <a id='event'></a>
Step6: Example. Soccer teams X and Y score goals independently of each other, and the number of goals each team scores has a Poisson distribution with mean 1 for X and mean 2 for Y. Find the expected number of goals that team X scores given that they score at least as many goals as team Y.
Step7: <a id='combine'></a>
Step8: <a id='outcome'></a>
Step9: Note that FirstCard is an RV, but it takes non-numerical values ('clubs', etc). The following conditions on the FirstCard and SecondCard RVs taking the value 'hearts'.
Step10: <a id='continuous'></a>
Step11: We can also find the conditional joint distribution of $X$ and $Y$ given $Y=1$.
Step12: In this example, $Y$ and $Z$ are independent.
Step13: But $Y$ and $Z$ are not conditionally independent given $X = 2$.
Step14: <a id='conditional'></a>
Step15: Use the joint distribution to simulate the conditional distribution of $Y$ given $X=0.3$ (and compare to Binomial(10, 0.3)).
Step16: Simulate the conditional distribution of $X$ given $Y=3$.
|
<ASSISTANT_TASK:>
Python Code:
from symbulate import *
%matplotlib inline
X, Y = RV(Binomial(2, 0.5) ** 2)
(X & Y).sim(10000).tabulate()
((X & Y) | (X + Y == 3)).sim(10000).tabulate()
x_given_y3 = (X | (X + Y == 3)).sim(10000)
x_given_y3.tabulate(normalize=True)
x_given_y3.plot()
X = RV(Exponential(1))
(X - 5 | (X > 5)).sim(10000).plot()
RV(Exponential(1)).sim(10000).plot()
X, Y = RV(Poisson(1) * Poisson(2))
(X | (X >= Y)).sim(10000).mean()
X, Y = RV(Exponential(scale=0.5) ** 2)
(X | ((X < 2) & (X + Y > 2)) ).sim(10000).plot()
cards = ['clubs', 'diamonds', 'hearts', 'spades'] * 13 # 13 cards of each suit
FirstCard, SecondCard, ThirdCard = RV(BoxModel(cards, size=3, replace=False))
(FirstCard & SecondCard & ThirdCard | ((FirstCard == 'hearts') & (SecondCard == 'hearts')) ).sim(100000).tabulate()
mu = [1, 2, 3]
Sigma = [[1, 1, -2],
[1, 4, 0],
[-2, 0, 9]]
X, Y, Z = RV(MultivariateNormal(mean=mu, cov=Sigma))
(X | (abs(Y - 1) < 0.01)).sim(1000).plot()
(X & Z | (abs(Y - 1) < 0.01)).sim(1000).plot()
yz = (Y & Z).sim(10000)
yz.plot()
print('The correlation coefficient of Y and Z is approximately {:.3f}'.format(yz.corr()))
yz_given_x = (Y & Z | (abs(X - 2) < 0.01)).sim(1000)
yz_given_x.plot()
print('The correlation coefficient of Y and Z given X equals 2 is approximately {:.3f}'.format(yz_given_x.corr()))
def binomial_given_uniform():
    """Draw p ~ Uniform(0, 1), then y ~ Binomial(10, p); return the pair (p, y)."""
    p = Uniform(0, 1).draw()
    return p, Binomial(10, p).draw()
X, Y = RV(ProbabilitySpace(binomial_given_uniform))
(X & Y).sim(10000).plot(jitter=True, alpha=0.1)
(Y | (abs(X - 0.3) < 0.01) ).sim(10000).plot()
RV(Binomial(10, 0.3)).sim(10000).plot(jitter=True)
(X | (Y == 3) ).sim(10000).plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.sparse import csr_matrix
np.random.seed(10)
arr = np.random.randint(4,size=(988,988))
sA = csr_matrix(arr)
col = sA.getcol(0)
n = col.shape[0]
val = col.data
for i in range(n-len(val)):
val = np.append(val,0)
Median, Mode = np.median(val), np.argmax(np.bincount(val))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Cleaning
Step2: Random Forest
Step3: Random Forest Results
Step4: SVM
Step5: mean
Step6: Gradient Boosting
Step7: BEST PARAMS
Step8: Adaptive Boosting
|
<ASSISTANT_TASK:>
Python Code:
test.info()
train.describe()
# train.Cabin.str.split().str.get(-1).str[0]
# train.Cabin.str.split(expand=True)
# train.Ticket.str.split().str.get(0).str.extract
train.Ticket.str.split()[0:].str[0].head()
print train[train['Survived']==1]["Age"].mean(),
print train[train['Survived']==0]["Age"].mean(),
print test.Age.mean()
def clean_data(titanic):
    """Return a feature-engineered copy of a raw Titanic passenger DataFrame.

    Expects the Kaggle columns (Name, Sex, Age, Fare, Cabin, Ticket,
    Embarked, SibSp, Parch, ...).  The input frame is left unmodified.

    Transformations:
      * Age / Fare: missing values imputed with the column median, then
        scaled to [0, 1] by the column maximum.
      * Cabin: reduced to the deck letter (first char of the last token),
        then label-encoded (missing -> -1).
      * Ticket: reduced to its first whitespace-separated token, then
        label-encoded.
      * Sex: male -> -10, female -> 10.
      * Embarked: missing filled with 'S', then label-encoded.
      * Title: honorific extracted from Name, mapped to hand-tuned scores.
      * AgeSex / AgeSexFare: interaction features.
      * Name, SibSp, Parch: dropped.
    """
    titanic = titanic.copy()
    # Impute missing numeric values with the median.
    titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
    titanic["Fare"] = titanic["Fare"].fillna(titanic["Fare"].median())
    # Deck letter: first character of the last token ('C85' -> 'C').
    titanic['Cabin'] = titanic['Cabin'].str.split().str.get(-1).str[0]
    # Ticket prefix: first token ('A/5 21171' -> 'A/5').  The original had
    # a redundant [0:] slice of the whole Series here.
    titanic['Ticket'] = titanic.Ticket.str.split().str[0]
    titanic.loc[titanic["Sex"] == "male", "Sex"] = -10
    titanic.loc[titanic["Sex"] == "female", "Sex"] = 10
    titanic["Embarked"] = titanic["Embarked"].fillna("S")
    # Honorific from 'Surname, Title. Given' -> 'Title.'.
    titanic['Title'] = titanic['Name'].apply(lambda x: x.split(',')[1].split()[0])
    # Hand-tuned numeric score per title ('the' covers 'the Countess').
    title_scores = {'Mr.': 28, 'Mrs.': 80, 'Miss.': 50, 'Master.': 28, 'Don.': 40,
                    'Rev.': 60, 'Dr.': 60, 'Mme.': 80, 'Ms.': 50, 'Major.': 60,
                    'Lady.': 70, 'Sir.': 40, 'Mlle.': 50, 'Col.': 60, 'Capt.': 60,
                    'the': 28, 'Jonkheer.': 28, 'Dona.': 70}
    # Plain assignment rather than a chained inplace replace, which is a
    # chained-assignment pitfall in newer pandas.
    titanic['Title'] = titanic['Title'].replace(title_scores)
    # Label-encode the remaining categoricals (missing values become -1).
    for colname in ('Embarked', 'Cabin', 'Ticket'):
        titanic[colname] = pd.Categorical(titanic[colname]).codes
    # Scale to [0, 1].
    titanic.Age = titanic.Age / titanic.Age.max()
    titanic.Fare = titanic.Fare / titanic.Fare.max()
    # Interaction features.
    titanic['AgeSex'] = titanic.Age * titanic.Sex
    titanic['AgeSexFare'] = titanic.Age * titanic.Sex * titanic.Fare
    # Drop by explicit column labels; the original passed a DataFrame to
    # drop(), which is not a valid labels argument on modern pandas.
    titanic.drop(['Name', 'SibSp', 'Parch'], axis=1, inplace=True)
    return titanic
df = clean_data(train)
df_train = df.copy()
df_train.drop('PassengerId', axis=1, inplace=True)
df_test = clean_data(test)
df.describe().T
df_train.info()
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
train[train['Survived']==1]["Age"].hist(bins=20, label='survived')
plt.title('Survived')
plt.subplot(1,2,2)
train[train['Survived']==0]["Age"].hist(bins=20)
plt.title('Did not survive')
df.head()
y = df_train.pop('Survived').values
X = df_train.values
X_test = df_test.values
rf = RandomForestClassifier(40, n_jobs=-1)
rf.fit(X,y)
feat_rank = np.argsort(rf.feature_importances_)[::-1]
feat_rank
df_train.columns[feat_rank]
df_features = pd.DataFrame(rf.feature_importances_,df_train.columns, columns = ['feature_value'])
df_features.sort_values('feature_value', ascending=False)
scores = np.zeros((feat_rank.shape[0],2))
for i in range(1,feat_rank.shape[0]+1):
features = [df_train.columns[feat_rank][x] for x in range(i)]
scores[i-1:] = (i,(cross_val_score(rf, df[features], df['Survived'], cv=10)).mean())
scores
plt.plot(scores[:,:1],scores[:,1:2])
cross_val_score(rf, df[features], df['Survived'], cv=10).mean()
importances = rf.feature_importances_
std = np.std([tree.feature_importances_ for tree in rf.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure(figsize=(12,5))
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), df_train.columns[indices])
plt.xlim([-1, X.shape[1]])
plt.show()
features = [df_train.columns[feat_rank][x] for x in range(9)]
features
# features = [df_train.columns[indices][x] for x in range(9)]
# features
X = df_train[features].values
X
def create_submission(model, train, test, features, filename):
    """Fit *model* on the training data, predict on the test set, and write
    a Kaggle submission CSV (PassengerId, Survived) to *filename*.

    Fitting here lets callers pass a freshly constructed, unfitted
    estimator; with the fit commented out (as before), such calls raised
    NotFittedError at predict time.  Matches svm_submission's behavior.
    """
    model.fit(train[features], train['Survived'])
    predictions = model.predict(test[features])
    submission = pd.DataFrame({
        "PassengerId": test["PassengerId"],
        "Survived": predictions
    })
    submission.to_csv(filename, index=False)
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# build a classifier
clf = RandomForestClassifier()
# Utility function to report best scores
def report(grid_scores, n_top=3):
    """Print the *n_top* best CV results, ranked by mean validation score."""
    best = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
    for rank, score in enumerate(best, start=1):
        print("Model with rank: {0}".format(rank))
        print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
            score.mean_validation_score,
            np.std(score.cv_validation_scores)))
        print("Parameters: {0}".format(score.parameters))
        print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 6),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
'n_estimators': [10, 40, 50, 60],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search, n_jobs=-1)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {'max_depth': [1, 2, 4, None],
'max_features': ['sqrt', 'log2', None],
'min_samples_split': [1, 2, 6, 8, 10],
'min_samples_leaf': [1, 2, 4, 6],
'bootstrap': [True, False],
'n_estimators': [30, 40, 50, 60, 100],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid, n_jobs=-1)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
grid_search.best_estimator_
create_submission(grid_search.best_estimator_,
df, df_test, features, "../submissions/rf_submission.csv")
trees_accuracy = []
for i in xrange(1,X.shape[1]):
rf = RandomForestClassifier(50, max_features = i, min_samples_split=4, min_samples_leaf=2)
rf.fit(X, y)
trees_accuracy.append(rf.score(X,y))
plt.plot(range(1, X.shape[1]), trees_accuracy, '-o')
pipeline = Pipeline([('scaler', StandardScaler()),
('svc', SVC(kernel='linear'))])
pipeline.fit(X, y)
parameters = {'kernel':['linear','rbf'],
'C':np.linspace(.001,10,5),'degree':np.linspace(0,10,5)}
gsCV = GridSearchCV(estimator=pipeline.steps[1][1],
param_grid=parameters,scoring='accuracy', cv=5)
X = pipeline.steps[0][1].fit_transform(X)
gsCV.fit(X,y)
gsCV.grid_scores_, gsCV.best_params_
def svm_submission(model, train, test, features, filename):
    """Train *model* on the given features and save a submission CSV."""
    model.fit(train[features], train['Survived'])
    preds = model.predict(test[features])
    pd.DataFrame({
        "PassengerId": test["PassengerId"],
        "Survived": preds,
    }).to_csv(filename, index=False)
svm_features = [df_train.columns[feat_rank][x] for x in range(8)]
svm_features
create_submission(Pipeline([('scaler', StandardScaler()),
('svc', SVC(kernel='rbf', C=2.5, degree=2.5))]), \
df, df_test, svm_features, "../submissions/svm_submission.csv")
X = df_train
X.head()
gdb = GradientBoostingClassifier(
n_estimators=3000,
learning_rate = 0.01,
max_depth = 4,
max_features = 0.1,
min_samples_leaf = 17)
gdb.fit(X,y)
feat_rank = np.argsort(gdb.feature_importances_)[::-1]
feat_rank
df_train.columns[feat_rank]
boost_features = [df_train.columns[feat_rank][x] for x in range(8)]
boost_features
df_train[boost_features].head()
X = df_train[boost_features]
X.head()
param_grid = {'learning_rate': [0.1, 0.05, 0.02, 0.01],
'max_depth': [4, 6],
'min_samples_leaf': [3, 5, 9, 17],
'max_features': [1.0, 0.3, 0.1]}
gdb_grid = GradientBoostingClassifier(n_estimators=6000)
gs_cv = GridSearchCV(gdb_grid, param_grid).fit(X,y)
gs_cv.best_params_
gs_cv.grid_scores_
create_submission(GradientBoostingClassifier(
n_estimators=3000,
learning_rate = 0.01,
max_depth = 4,
max_features = 0.1,
min_samples_leaf = 9),
df, df_test, boost_features, "../submissions/gdboost_submission.csv")
X = df_train
X.head()
ada = AdaBoostClassifier(
n_estimators=3000,
learning_rate = 0.01)
ada.fit(X,y)
feat_rank = np.argsort(ada.feature_importances_)[::-1]
ada_features = [df_train.columns[feat_rank][x] for x in range(6)]
ada_features
X = df_train[ada_features]
X.head()
param_grid = {'learning_rate': [1, 0.1, 0.05, 0.02, 0.01]}
ada_grid = AdaBoostClassifier(n_estimators=6000)
ada_cv = GridSearchCV(ada_grid, param_grid).fit(X,y)
ada_cv.best_params_
create_submission(AdaBoostClassifier(
n_estimators=3000,
learning_rate = 0.01),
df, df_test, ada_features, "../submissions/adaboost_submission.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Everything is setup and we are ready to apply some transformations but first, as a sanity check we ensure that dark skin properly converts back to CIE XYZ tristimulus values
Step2: We multiply dark skin colour by green $n$ times and scale the result to obtain displayable values
Step3: The CIE XYZ colourspace tristimulus values are not equal!
Step4: The chromaticity coordinates are also out of the boundaries of their respective colourspaces
Step5: Finally, we mathematically quantify the difference by calculating the colour difference $\Delta_{E_{ab}}$
Step6: This more than two times the just noticable threshold ($JND=2.3$).
Step7: We can then compute the colour difference $\Delta_{E_{ab}}$ for each sample
Step8: As you can see, depending the samples colour difference $\Delta_{E_{ab}}$ can reach dramatic values. The average colour difference on the whole colour rendition chart is as follows
Step9: With Rec. 2020...
Step10: We again compute the colour difference $\Delta_E$
Step11: Finally, we compute the average colour difference
Step12: Analytical Explanation
Step13: They match each other very well, which is the expected and correct behavior, now we re-plot the functions while only incrementing the $R$ component
Step14: The slopes are obviously very different now, ACES RGB is steeper than sRGB. This is the result of the respective colourspaces basis vectors not being orthogonal. In this case it leads to colours approaching the spectral locus boundaries in ACES RGB faster than sRGB. It is the reason why in some instances secondary bounces quickly lead to very saturated colours.
Step15: Conclusion
Step16: While most of this is tied to basic linear algebra, it has been overlooked by most of VFX industry veterans. We were assuming wrongly that rendering engines are colourspace agnostic.
Step17: Unfortunately the same code was also used for the tristimulus values conversion post-multiplication operation
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pylab
import colour
from colour.utilities.verbose import message_box
name, data, illuminant = colour.COLOURCHECKERS['ColorChecker 2005']
sRGB_w = colour.sRGB_COLOURSPACE.whitepoint
sRGB_XYZ_to_RGB = colour.sRGB_COLOURSPACE.XYZ_to_RGB_matrix
sRGB_RGB_to_XYZ = colour.sRGB_COLOURSPACE.RGB_to_XYZ_matrix
adobe98_w = colour.ADOBE_RGB_1998_COLOURSPACE.whitepoint
adobe98_XYZ_to_RGB = colour.ADOBE_RGB_1998_COLOURSPACE.XYZ_to_RGB_matrix
adobe98_RGB_to_XYZ = colour.ADOBE_RGB_1998_COLOURSPACE.RGB_to_XYZ_matrix
# Preparing *dark skin* colour in various colourspaces.
index, name, x, y, Y = data[0]
XYZ_r1 = colour.xyY_to_XYZ((x, y, Y))
# *sRGB* colourspace 0-255 values with OETF applied.
sRGB_rd1 = colour.XYZ_to_sRGB(XYZ_r1, illuminant)
# *sRGB* colourspace linear values.
sRGB_r1 = colour.XYZ_to_RGB(XYZ_r1,
illuminant,
sRGB_w,
sRGB_XYZ_to_RGB)
# *Adobe RGB 1998* colourspace linear values.
adobe98_r1 = colour.XYZ_to_RGB(XYZ_r1,
illuminant,
adobe98_w,
adobe98_XYZ_to_RGB)
message_box(('Reference "dark skin" "CIE XYZ" colourspace tristimulus '
'values:\n'
'\t{0}\n'
'\n"sRGB" colourspace values (OETF):\n'
'\n\t{1}\n'
'\n"sRGB" and "Adobe RGB 1998" colourspaces (Linear):\n'
'\tsRGB: {2}\n\tAdobe RGB 1998: {3}').format(
XYZ_r1,
np.around(sRGB_rd1 * 255),
sRGB_r1,
adobe98_r1))
# Preparing *green* colour in various colourspaces.
index, name, x, y, Y = data[13]
XYZ_r2 = colour.xyY_to_XYZ((x, y, Y))
# *sRGB* colourspace 0-255 values with OETF applied.
sRGB_rd2 = colour.XYZ_to_sRGB(XYZ_r2, illuminant)
# *sRGB* colourspace linear values.
sRGB_r2 = colour.XYZ_to_RGB(XYZ_r2,
illuminant,
sRGB_w,
sRGB_XYZ_to_RGB)
# *Adobe RGB 1998* colourspace linear values.
adobe98_r2 = colour.XYZ_to_RGB(XYZ_r2,
illuminant,
adobe98_w,
adobe98_XYZ_to_RGB)
message_box(('Reference "green" "CIE XYZ" colourspace tristimulus '
'values:\n'
'\t{0}\n'
'\n"sRGB" colourspace values (OETF):\n'
'\t{1}\n'
'\n"sRGB" and "Adobe RGB 1998" colourspaces (Linear):\n'
'\tsRGB: {2}\n\tAdobe RGB 1998: {3}').format(
XYZ_r2,
np.around(sRGB_rd2 * 255),
sRGB_r2,
adobe98_r2))
XYZ_sRGB1 = colour.RGB_to_XYZ(sRGB_r1,
sRGB_w,
illuminant,
sRGB_RGB_to_XYZ)
XYZ_adobe981 = colour.RGB_to_XYZ(adobe98_r1,
adobe98_w,
illuminant,
adobe98_RGB_to_XYZ)
message_box(('Converting back "dark skin" "CIE XYZ" colourspace '
'tristimulus values from "sRGB" and "Adobe RGB 1998" '
'colourspaces:\n'
'\tFrom sRGB: {0}\n\tFrom Adobe RGB 1998: {1}\n'
'\nEverything looks fine!').format(
XYZ_sRGB1, XYZ_adobe981))
k = np.array([500, 500, 500])
sRGB_k = colour.XYZ_to_RGB(k,
illuminant,
sRGB_w,
sRGB_XYZ_to_RGB)
adobe98_k = colour.XYZ_to_RGB(k,
illuminant,
adobe98_w,
adobe98_XYZ_to_RGB)
sRGB_m = sRGB_r1 * sRGB_r2 * sRGB_r2 * sRGB_r2 * sRGB_k
adobe98_m = adobe98_r1 * adobe98_r2 * adobe98_r2 * adobe98_r2 * adobe98_k
XYZ_sRGB_m1 = colour.RGB_to_XYZ(sRGB_m,
sRGB_w,
sRGB_w,
sRGB_RGB_to_XYZ)
XYZ_adobe98_m1 = colour.RGB_to_XYZ(adobe98_m,
adobe98_w,
adobe98_w,
adobe98_RGB_to_XYZ)
message_box(('Multiplying "dark skin" with "green" and converting back to '
'"CIE XYZ" colourspace tristimulus values from "sRGB" and '
'"Adobe RGB 1998" colourspaces:\n'
'\tFrom sRGB: {0}\n\tFrom Adobe RGB 1998: {1}\n'
'\nHouston? We have a problem!').format(
XYZ_sRGB_m1, XYZ_adobe98_m1))
from colour.plotting import *
sRGB_m1 = colour.XYZ_to_sRGB(XYZ_sRGB_m1)
adobe98_m1 = colour.XYZ_to_sRGB(XYZ_adobe98_m1)
sRGB_difference = sRGB_m1 - adobe98_m1
# The difference is producing negative values thus we clip the result, although the
# remaining data should illustrate the effect.
sRGB_difference = np.clip(sRGB_difference, 0, 1)
single_colour_plot(colour_parameter('sRGB - Adobe RGB 1998', sRGB_difference), text_size=24)
colourspaces_CIE_1931_chromaticity_diagram_plot(
['sRGB', 'Adobe RGB 1998'],
standalone=False,
title='"dark skin" Colour Computation')
for name, XYZ in (('"dark skin"', XYZ_r1),
('"dark skin" * "green" - sRGB', XYZ_sRGB_m1),
('"dark skin" * "green" - Adobe RGB 1998', XYZ_adobe98_m1)):
xy = colour.XYZ_to_xy(XYZ)
pylab.plot(xy[0], xy[1], 'o', color='white')
pylab.annotate(name,
xy=xy,
xytext=(50, 30),
textcoords='offset points',
arrowprops=dict(arrowstyle='->', connectionstyle='arc3, rad=0.2'))
display(standalone=True)
Lab1 = colour.XYZ_to_Lab(XYZ_sRGB_m1, illuminant)
Lab2 = colour.XYZ_to_Lab(XYZ_adobe98_m1, illuminant)
print(colour.delta_E_CIE2000(Lab1, Lab2))
from pprint import pprint
aces_w = colour.ACES_RGB_COLOURSPACE.whitepoint
aces_XYZ_to_RGB = colour.ACES_RGB_COLOURSPACE.XYZ_to_RGB_matrix
aces_RGB_to_XYZ = colour.ACES_RGB_COLOURSPACE.RGB_to_XYZ_matrix
aces_r2 = colour.XYZ_to_RGB(XYZ_r2,
illuminant,
aces_w,
aces_XYZ_to_RGB)
aces_k = colour.XYZ_to_RGB(k,
illuminant,
aces_w,
aces_XYZ_to_RGB)
XYZs_m = []
for index, name, x, y, Y in data:
xyY = np.array([x, y, Y])
sRGB_r1 = colour.XYZ_to_RGB(
colour.xyY_to_XYZ(xyY),
illuminant,
sRGB_w,
sRGB_XYZ_to_RGB)
sRGB_m = sRGB_r1 * sRGB_r2 * sRGB_r2 * sRGB_r2 * sRGB_k
aces_r1 = colour.XYZ_to_RGB(
colour.xyY_to_XYZ(xyY),
illuminant,
aces_w,
aces_XYZ_to_RGB)
aces_m = aces_r1 * aces_r2 * aces_r2 * aces_r2 * aces_k
XYZ_sRGB_m1 = colour.RGB_to_XYZ(sRGB_m,
sRGB_w,
sRGB_w,
sRGB_RGB_to_XYZ)
XYZ_aces_m1 = colour.RGB_to_XYZ(aces_m,
aces_w,
aces_w,
aces_RGB_to_XYZ)
XYZs_m.append((XYZ_sRGB_m1, XYZ_aces_m1))
pprint(XYZs_m)
deltas_E = []
for i, (XYZ1, XYZ2) in enumerate(XYZs_m):
Lab1 = colour.XYZ_to_Lab(XYZ1, illuminant)
Lab2 = colour.XYZ_to_Lab(XYZ2, illuminant)
deltas_E.append((data[i][1], colour.delta_E_CIE2000(Lab1, Lab2)))
pprint(deltas_E)
np.average([delta_E[1] for delta_E in deltas_E])
from pprint import pprint
rec2020_w = colour.REC_2020_COLOURSPACE.whitepoint
rec2020_XYZ_to_RGB = colour.REC_2020_COLOURSPACE.XYZ_to_RGB_matrix
rec2020_RGB_to_XYZ = colour.REC_2020_COLOURSPACE.RGB_to_XYZ_matrix
rec2020_r2 = colour.XYZ_to_RGB(XYZ_r2,
illuminant,
rec2020_w,
rec2020_XYZ_to_RGB)
rec2020_k = colour.XYZ_to_RGB(k,
illuminant,
aces_w,
aces_XYZ_to_RGB)
XYZs_m = []
for index, name, x, y, Y in data:
xyY = np.array([x, y, Y])
sRGB_r1 = colour.XYZ_to_RGB(
colour.xyY_to_XYZ(xyY),
illuminant,
sRGB_w,
sRGB_XYZ_to_RGB)
sRGB_m = sRGB_r1 * sRGB_r2 * sRGB_r2 * sRGB_r2 * sRGB_k
rec2020_r1 = colour.XYZ_to_RGB(
colour.xyY_to_XYZ(xyY),
illuminant,
rec2020_w,
rec2020_XYZ_to_RGB)
rec2020_m = rec2020_r1 * rec2020_r2 * rec2020_r2 * rec2020_r2 * rec2020_k
XYZ_sRGB_m1 = colour.RGB_to_XYZ(sRGB_m,
sRGB_w,
sRGB_w,
sRGB_RGB_to_XYZ)
XYZ_rec2020_m1 = colour.RGB_to_XYZ(rec2020_m,
rec2020_w,
rec2020_w,
rec2020_RGB_to_XYZ)
XYZs_m.append((XYZ_sRGB_m1, XYZ_rec2020_m1))
pprint(XYZs_m)
deltas_E = []
for i, (XYZ1, XYZ2) in enumerate(XYZs_m):
Lab1 = colour.XYZ_to_Lab(XYZ1, illuminant)
Lab2 = colour.XYZ_to_Lab(XYZ2, illuminant)
deltas_E.append((data[i][1], colour.delta_E_CIE2000(Lab1, Lab2, l=1)))
pprint(deltas_E)
np.average(np.average([delta_E[1] for delta_E in deltas_E]))
k = np.linspace(0, 1, 10)
X_sRGB = lambda x: 0.41238656 * x + 0.35759149 * x + 0.18045049 * x
X_ACES = lambda x: 0.9525524 * x + 0.00009368 * x
pylab.plot(k, tuple(map(X_sRGB, k)), 'o-', color='red', label='X - sRGB')
pylab.plot(k, tuple(map(X_ACES, k)), 'o-', color='green', label='X - ACES RGB')
settings = {'title': 'sRGB - ACES RGB - Uniformly Spaced Incrementing RGB',
'x_label': 'k',
'y_label': 'X',
'x_tighten': True,
'legend': True,
'legend_location': 'upper left',
'x_ticker': True,
'y_ticker': True}
decorate(**settings)
display(**settings)
k = np.linspace(0, 1, 10)
X_sRGB = lambda x: 0.41238656 * x
X_ACES = lambda x: 0.9525524 * x
pylab.plot(k, tuple(map(X_sRGB, k)), 'o-', color='red', label='X - sRGB')
pylab.plot(k, tuple(map(X_ACES, k)), 'o-', color='green', label='X - ACES RGB')
settings = {'title': 'sRGB - ACES RGB - Uniformly Spaced Incrementing R',
'x_label': 'k',
'y_label': 'X',
'x_tighten': True,
'legend': True,
'legend_location': 'upper left',
'x_ticker': True,
'y_ticker': True}
decorate(**settings)
display(**settings)
pylab.plot(X_sRGB(0.172906) * X_sRGB(0.05440562), 0, 'o', color='red', label='X - "dark skin" * "green" sRGB')
pylab.plot(X_ACES(0.11758989) * X_ACES(0.15129818), 0, 'o', color='green', label='X - "dark skin" * "green" ACES RGB')
settings = {'title': 'sRGB - ACES RGB - X Value - "dark skin" * "green"',
'x_label': 'X Value',
'x_tighten': True,
'legend': True,
'legend_location': 'upper left',
'x_ticker': True,
'y_ticker': True}
decorate(**settings)
display(**settings)
colourspaces_CIE_1931_chromaticity_diagram_plot(
['Pointer Gamut', 'sRGB', 'Rec. 2020', 'ACES RGB'])
XYZ_sRGB1 = colour.RGB_to_XYZ(sRGB_r1,
sRGB_w,
illuminant,
sRGB_RGB_to_XYZ)
XYZ_sRGB_m1 = colour.RGB_to_XYZ(sRGB_m,
sRGB_w,
illuminant,
sRGB_RGB_to_XYZ)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: WWW Data download
Step2: 描述网络
Step3: 网络直径
Step4: 密度
Step5: 作业:
Step6: Spacing in Math Mode
Step7: Degree centrality measures.(度中心性)
Step8: 度分布
Step9: 网络科学理论简介
Step10: ER随机网络
Step11: 小世界网络
Step12: BA网络
Step13: 作业:
Step14: More
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import networkx as nx
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph() # G = nx.DiGraph() # 有向网络
# 添加(孤立)节点
G.add_node("spam")
# 添加节点和链接
G.add_edge(1,2)
print(G.nodes())
print(G.edges())
# 绘制网络
nx.draw(G, with_labels = True)
G = nx.Graph()
n = 0
with open ('/Users/chengjun/bigdata/www.dat.gz.txt') as f:
for line in f:
n += 1
if n % 10**4 == 0:
flushPrint(n)
x, y = line.rstrip().split(' ')
G.add_edge(x,y)
nx.info(G)
G = nx.karate_club_graph()
clubs = [G.node[i]['club'] for i in G.nodes()]
colors = []
for j in clubs:
if j == 'Mr. Hi':
colors.append('r')
else:
colors.append('g')
nx.draw(G, with_labels = True, node_color = colors)
G.node[1] # 节点1的属性
G.edge.keys()[:3] # 前三条边的id
nx.info(G)
G.nodes()[:10]
G.edges()[:3]
G.neighbors(1)
nx.average_shortest_path_length(G)
nx.diameter(G)#返回图G的直径(最长最短路径的长度)
nx.density(G)
nodeNum = len(G.nodes())
edgeNum = len(G.edges())
2.0*edgeNum/(nodeNum * (nodeNum - 1))
cc = nx.clustering(G)
cc.items()[:5]
plt.hist(cc.values(), bins = 15)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
# M. E. J. Newman, Mixing patterns in networks Physical Review E, 67 026126, 2003
nx.degree_assortativity_coefficient(G) #计算一个图的度匹配性。
Ge=nx.Graph()
Ge.add_nodes_from([0,1],size=2)
Ge.add_nodes_from([2,3],size=3)
Ge.add_edges_from([(0,1),(2,3)])
print(nx.numeric_assortativity_coefficient(Ge,'size'))
# plot degree correlation
from collections import defaultdict
import numpy as np
l=defaultdict(list)
g = nx.karate_club_graph()
for i in g.nodes():
k = []
for j in g.neighbors(i):
k.append(g.degree(j))
l[g.degree(i)].append(np.mean(k))
#l.append([g.degree(i),np.mean(k)])
x = l.keys()
y = [np.mean(i) for i in l.values()]
#x, y = np.array(l).T
plt.plot(x, y, 'r-o', label = '$Karate\;Club$')
plt.legend(loc=1,fontsize=10, numpoints=1)
plt.xscale('log'); plt.yscale('log')
plt.ylabel(r'$<knn(k)$> ', fontsize = 20)
plt.xlabel('$k$', fontsize = 20)
plt.show()
dc = nx.degree_centrality(G)
closeness = nx.closeness_centrality(G)
betweenness= nx.betweenness_centrality(G)
fig = plt.figure(figsize=(15, 4),facecolor='white')
ax = plt.subplot(1, 3, 1)
plt.hist(dc.values(), bins = 20)
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
ax = plt.subplot(1, 3, 2)
plt.hist(closeness.values(), bins = 20)
plt.xlabel('$Closeness \, Centrality$', fontsize = 20)
ax = plt.subplot(1, 3, 3)
plt.hist(betweenness.values(), bins = 20)
plt.xlabel('$Betweenness \, Centrality$', fontsize = 20)
plt.tight_layout()
plt.show()
fig = plt.figure(figsize=(15, 8),facecolor='white')
for k in betweenness:
plt.scatter(dc[k], closeness[k], s = betweenness[k]*1000)
plt.text(dc[k], closeness[k]+0.02, str(k))
plt.xlabel('$Degree \, Centrality$', fontsize = 20)
plt.ylabel('$Closeness \, Centrality$', fontsize = 20)
plt.show()
def plotDegreeDistribution(G):
plt.plot(nx.degree_histogram(G) , 'ro', markersize = 10) #返回图中所有节点的度分布序列
plt.legend(['Degree'])
plt.xlabel('$Degree$', fontsize = 20)
plt.ylabel('$Number \, of \, nodes$', fontsize = 20)
plt.title('$Degree\,Distribution$', fontsize = 20)
plt.xscale('log')
plt.yscale('log')
plt.show()
plotDegreeDistribution(G)
import networkx as nx
import matplotlib.pyplot as plt
RG = nx.random_graphs.random_regular_graph(3,200) #生成包含20个节点、每个节点有3个邻居的规则图RG
pos = nx.spectral_layout(RG) #定义一个布局,此处采用了spectral布局方式,后变还会介绍其它布局方式,注意图形上的区别
nx.draw(RG,pos,with_labels=False,node_size = 30) #绘制规则图的图形,with_labels决定节点是非带标签(编号),node_size是节点的直径
plt.show() #显示图形
plotDegreeDistribution(RG)
import networkx as nx
import matplotlib.pyplot as plt
ER = nx.random_graphs.erdos_renyi_graph(200,0.05) #生成包含20个节点、以概率0.2连接的随机图
pos = nx.shell_layout(ER) #定义一个布局,此处采用了shell布局方式
nx.draw(ER,pos,with_labels=False,node_size = 30)
plt.show()
plotDegreeDistribution(ER)
import networkx as nx
import matplotlib.pyplot as plt
WS = nx.random_graphs.watts_strogatz_graph(200,4,0.3) #生成包含200个节点、每个节点4个近邻、随机化重连概率为0.3的小世界网络
pos = nx.circular_layout(WS) #定义一个布局,此处采用了circular布局方式
nx.draw(WS,pos,with_labels=False,node_size = 30) #绘制图形
plt.show()
plotDegreeDistribution(WS)
nx.diameter(WS)
cc = nx.clustering(WS)
plt.hist(cc.values(), bins = 10)
plt.xlabel('$Clustering \, Coefficient, \, C$', fontsize = 20)
plt.ylabel('$Frequency, \, F$', fontsize = 20)
plt.show()
import numpy as np
np.mean(cc.values())
import networkx as nx
import matplotlib.pyplot as plt
BA= nx.random_graphs.barabasi_albert_graph(200,2) #生成n=20、m=1的BA无标度网络
pos = nx.spring_layout(BA) #定义一个布局,此处采用了spring布局方式
nx.draw(BA,pos,with_labels=False,node_size = 30) #绘制图形
plt.show()
plotDegreeDistribution(BA)
Ns = [i*10 for i in [1, 10, 100, 1000]]
ds = []
for N in Ns:
print N
BA= nx.random_graphs.barabasi_albert_graph(N,2)
d = nx.average_shortest_path_length(BA)
ds.append(d)
plt.plot(Ns, ds, 'r-o')
plt.xlabel('$N$', fontsize = 20)
plt.ylabel('$<d>$', fontsize = 20)
plt.xscale('log')
plt.show()
# subgraph
G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
G.add_path([0,1,2,3])
H = G.subgraph([0,1,2])
G.edges(), H.edges()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data from the publication
Step2: Create and fit a receptive field model
Step3: Investigate model coefficients
Step4: Create and fit a stimulus reconstruction model
Step5: Visualize stimulus reconstruction
Step6: Investigate model coefficients
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Nicolas Barascud <nicolas.barascud@ens.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from os.path import join
import mne
from mne.decoding import ReceptiveField
from sklearn.model_selection import KFold
from sklearn.preprocessing import scale
path = mne.datasets.mtrf.data_path()
decim = 2
data = loadmat(join(path, 'speech_data.mat'))
raw = data['EEG'].T
speech = data['envelope'].T
sfreq = float(data['Fs'])
sfreq /= decim
speech = mne.filter.resample(speech, down=decim, npad='auto')
raw = mne.filter.resample(raw, down=decim, npad='auto')
# Read in channel positions and create our MNE objects from the raw data
montage = mne.channels.make_standard_montage('biosemi128')
info = mne.create_info(montage.ch_names, sfreq, 'eeg').set_montage(montage)
raw = mne.io.RawArray(raw, info)
n_channels = len(raw.ch_names)
# Plot a sample of brain and stimulus activity
fig, ax = plt.subplots()
lns = ax.plot(scale(raw[:, :800][0].T), color='k', alpha=.1)
ln1 = ax.plot(scale(speech[0, :800]), color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['EEG', 'Speech Envelope'], frameon=False)
ax.set(title="Sample activity", xlabel="Time (s)")
mne.viz.tight_layout()
# Define the delays that we will use in the receptive field
tmin, tmax = -.2, .4
# Initialize the model
rf = ReceptiveField(tmin, tmax, sfreq, feature_names=['envelope'],
estimator=1., scoring='corrcoef')
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Prepare model data (make time the first dimension)
speech = speech.T
Y, _ = raw[:] # Outputs for the model
Y = Y.T
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
scores = np.zeros((n_splits, n_channels))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
rf.fit(speech[train], Y[train])
scores[ii] = rf.score(speech[test], Y[test])
# coef_ is shape (n_outputs, n_features, n_delays). we only have 1 feature
coefs[ii] = rf.coef_[:, 0, :]
times = rf.delays_ / float(rf.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_scores = scores.mean(axis=0)
# Plot mean prediction scores across all channels
fig, ax = plt.subplots()
ix_chs = np.arange(n_channels)
ax.plot(ix_chs, mean_scores)
ax.axhline(0, ls='--', color='r')
ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)")
mne.viz.tight_layout()
# Print mean coefficients across all time delays / channels (see Fig 1)
time_plot = 0.180 # For highlighting a specific time.
fig, ax = plt.subplots(figsize=(4, 8))
max_coef = mean_coefs.max()
ax.pcolormesh(times, ix_chs, mean_coefs, cmap='RdBu_r',
vmin=-max_coef, vmax=max_coef, shading='gouraud')
ax.axvline(time_plot, ls='--', color='k', lw=2)
ax.set(xlabel='Delay (s)', ylabel='Channel', title="Mean Model\nCoefficients",
xlim=times[[0, -1]], ylim=[len(ix_chs) - 1, 0],
xticks=np.arange(tmin, tmax + .2, .2))
plt.setp(ax.get_xticklabels(), rotation=45)
mne.viz.tight_layout()
# Make a topographic map of coefficients for a given delay (see Fig 2C)
ix_plot = np.argmin(np.abs(time_plot - times))
fig, ax = plt.subplots()
mne.viz.plot_topomap(mean_coefs[:, ix_plot], pos=info, axes=ax, show=False,
vmin=-max_coef, vmax=max_coef)
ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot)
mne.viz.tight_layout()
# We use the same lags as in :footcite:`CrosseEtAl2016`. Negative lags now
# index the relationship
# between the neural response and the speech envelope earlier in time, whereas
# positive lags would index how a unit change in the amplitude of the EEG would
# affect later stimulus activity (obviously this should have an amplitude of
# zero).
tmin, tmax = -.2, 0.
# Initialize the model. Here the features are the EEG data. We also specify
# ``patterns=True`` to compute inverse-transformed coefficients during model
# fitting (cf. next section and :footcite:`HaufeEtAl2014`).
# We'll use a ridge regression estimator with an alpha value similar to
# Crosse et al.
sr = ReceptiveField(tmin, tmax, sfreq, feature_names=raw.ch_names,
estimator=1e4, scoring='corrcoef', patterns=True)
# We'll have (tmax - tmin) * sfreq delays
# and an extra 2 delays since we are inclusive on the beginning / end index
n_delays = int((tmax - tmin) * sfreq) + 2
n_splits = 3
cv = KFold(n_splits)
# Iterate through splits, fit the model, and predict/test on held-out data
coefs = np.zeros((n_splits, n_channels, n_delays))
patterns = coefs.copy()
scores = np.zeros((n_splits,))
for ii, (train, test) in enumerate(cv.split(speech)):
print('split %s / %s' % (ii + 1, n_splits))
sr.fit(Y[train], speech[train])
scores[ii] = sr.score(Y[test], speech[test])[0]
# coef_ is shape (n_outputs, n_features, n_delays). We have 128 features
coefs[ii] = sr.coef_[0, :, :]
patterns[ii] = sr.patterns_[0, :, :]
times = sr.delays_ / float(sr.sfreq)
# Average scores and coefficients across CV splits
mean_coefs = coefs.mean(axis=0)
mean_patterns = patterns.mean(axis=0)
mean_scores = scores.mean(axis=0)
max_coef = np.abs(mean_coefs).max()
max_patterns = np.abs(mean_patterns).max()
y_pred = sr.predict(Y[test])
time = np.linspace(0, 2., 5 * int(sfreq))
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(time, speech[test][sr.valid_samples_][:int(5 * sfreq)],
color='grey', lw=2, ls='--')
ax.plot(time, y_pred[sr.valid_samples_][:int(5 * sfreq)], color='r', lw=2)
ax.legend([lns[0], ln1[0]], ['Envelope', 'Reconstruction'], frameon=False)
ax.set(title="Stimulus reconstruction")
ax.set_xlabel('Time (s)')
mne.viz.tight_layout()
time_plot = (-.140, -.125) # To average between two timepoints.
ix_plot = np.arange(np.argmin(np.abs(time_plot[0] - times)),
np.argmin(np.abs(time_plot[1] - times)))
fig, ax = plt.subplots(1, 2)
mne.viz.plot_topomap(np.mean(mean_coefs[:, ix_plot], axis=1),
pos=info, axes=ax[0], show=False,
vmin=-max_coef, vmax=max_coef)
ax[0].set(title="Model coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.plot_topomap(np.mean(mean_patterns[:, ix_plot], axis=1),
pos=info, axes=ax[1],
show=False, vmin=-max_patterns, vmax=max_patterns)
ax[1].set(title="Inverse-transformed coefficients\nbetween delays %s and %s"
% (time_plot[0], time_plot[1]))
mne.viz.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [convolutional.UpSampling3D.1] size 2x2x2 upsampling on 2x2x2x3 input, data_format='channels_first'
Step2: [convolutional.UpSampling3D.2] size 1x3x2 upsampling on 2x1x3x2 input, data_format='channels_last'
Step3: [convolutional.UpSampling3D.3] size 2x1x2 upsampling on 2x1x3x3 input, data_format='channels_first'
Step4: [convolutional.UpSampling3D.4] size 2 upsampling on 2x1x3x2 input, data_format='channels_last'
Step5: export for Keras.js tests
|
<ASSISTANT_TASK:>
Python Code:
data_in_shape = (2, 2, 2, 3)
L = UpSampling3D(size=(2, 2, 2), data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(260)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling3D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 2, 2, 3)
L = UpSampling3D(size=(2, 2, 2), data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(261)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling3D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 1, 3, 2)
L = UpSampling3D(size=(1, 3, 2), data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(252)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling3D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 1, 3, 3)
L = UpSampling3D(size=(2, 1, 2), data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(253)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling3D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (2, 1, 3, 2)
L = UpSampling3D(size=2, data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(254)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['convolutional.UpSampling3D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, import your solutions from hw5_answers.py. The following code looks a bit redundant. However, we do this to allow reloading the hw5_answers.py in case you made some changes. Normally, Python assumes that modules don't change and therefore does not try to import them again.
Step2: The Employees, Territory, Customers, and Orders tables are the same as those we used in class.
Step3: Problem 1
Step4: Shape of resulting table
Step5: Shape of resulting table
Step6: Shape of resulting table
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import hw5_answers
reload(hw5_answers)
from hw5_answers import *
Employees = pd.read_excel('/home/data/AdventureWorks/Employees.xls')
Territory = pd.read_excel('/home/data/AdventureWorks/SalesTerritory.xls')
Customers = pd.read_excel('/home/data/AdventureWorks/Customers.xls')
Orders = pd.read_excel('/home/data/AdventureWorks/ItemsOrdered.xls')
df1 = get_manager(Employees)
print "Shape of resulting table: ", df1.shape
print "Columns: ", ', '.join(df1.columns)
df1.head()
df2 = get_spend_by_order(Orders, Customers)
print "Shape of resulting table: ", df2.shape
print "Columns: ", ', '.join(df2.columns)
df2.head()
df3 = get_order_location(Orders, Customers, Territory)
print "Shape of resulting table: ", df3.shape
print "Columns: ", ', '.join(df3.columns)
df3.head()
df4 = employee_info(Employees)
print "Shape of resulting table: ", df4.shape
print "Columns: ", ', '.join(df4.columns)
df4.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Host and user information
Step2: List of file prefixes for micrograph images and XML metadata
Step3: For each name in the list
|
<ASSISTANT_TASK:>
Python Code:
import mdcs
user='admin'
pswd='admin'
host='http://127.0.0.1:8000'
template_name='DiffusionDemo'
name_list=[
"GE-DiffusionCouple-IN100-IN718",
"GE-DiffusionCouple-IN718-R95",
"GE-DiffusionCouple-R95-R88",
"GE-DiffusionCouple-R88-IN100"
]
for name in name_list:
xml_name=name+".xml"
tif_name=name+".tif"
print "Uploading:",tif_name
url = mdcs.blob.upload(tif_name,host,user,pswd)
print "Reading:",xml_name
with open(xml_name, 'r') as f:
content = f.read()
content = content.replace("http://127.0.0.1:8000/rest/blob?id=REPLACE-ME-BLOB-ID",url)
print "Uploading:",xml_name
response = mdcs.curate_as(xml_name,name,host,user,pswd,template_title=template_name,content=content)
print "Response:",response
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first two lines enable graphs (generated via matplotlib) to be displayed inline within this notebook; the remaining two lines import matplotlib's sublibrary pyplot under the alias plt.
Step2: <h3 style="color
Step3: A slower but easier to understand version of this function is shown below. The tag slow is explained shortly after.
Step4: <h3>Step 3
Step5: The choice for the interpolation is obvious
Step6: <h3>Step 4
Step7: Although the plot suggests that the interpolation works, a visual proof can be deceptive. It is best to calculate the error between the exact and interpolated solution. Here we use an $l^2$-norm
Step8: For reasons that will become clearer later, we want to consider other interpolation schemes
Step9: <h3 style="color
Step10: <h3>Step 5
Step11: The discretization of the time derivative is crude. A better discretization is the 2<sup>nd</sup>-order Runge-Kutta
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# plots graphs within the notebook
%config InlineBackend.figure_format='svg' # not sure what this does, may be default images to svg format
import matplotlib.pyplot as plt #calls the plotting library hereafter referred as to plt
import numpy as np
L = 8*np.pi
N = 200
dx = L/N
u_0 = 1.
phi = np.zeros(N)
F = np.zeros(N+1)
u = u_0*np.ones(N+1)
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
def init_simulation(x_phi, N):
    """Initial condition: a raised-cosine bump of unit amplitude centered at L/2.

    Parameters
    ----------
    x_phi : ndarray
        Coordinates at which the field is evaluated.
    N : int
        Unused; kept for signature compatibility (the output length
        follows ``x_phi`` — callers pass both N and N+1 points).

    Returns
    -------
    ndarray
        phi = 1 + cos(x - L/2) where |x - L/2| <= pi, and 0 elsewhere.

    Notes
    -----
    Relies on the module-level domain length ``L``.
    """
    # The original pre-allocation ``phi = np.zeros(N)`` was a dead store
    # (immediately overwritten) and mismatched callers that pass N+1
    # points; it has been removed.
    phi = 1. + np.cos(x_phi - L/2.)
    phi[np.abs(x_phi - L/2.) > np.pi] = 0.
    return phi
phi = init_simulation(x_phi,N)
plt.plot(x_phi,phi,lw=2)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$\phi$', fontdict = font)
plt.xlim(0,L)
plt.show()
def init_simulation_slow(u, phi, x_phi, N):
    """Loop-based (slow) variant of ``init_simulation``.

    Fills ``phi`` in place with a raised-cosine bump centered at L/2
    and returns it.  ``u`` is unused but kept for signature
    compatibility.  Relies on the module-level domain length ``L``.
    """
    center = L / 2.
    for i in range(N):
        offset = x_phi[i] - center
        # The bump's support is |x - L/2| <= pi; zero outside it.
        phi[i] = 0. if np.abs(offset) > np.pi else 1. + np.cos(offset)
    return phi
phi = init_simulation_slow(u,phi,x_phi,N)
plt.plot(x_phi,phi,lw=2)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$\phi$', fontdict = font)
plt.xlim(0,L)
plt.show()
%%timeit
flux0 = np.zeros(N+1)
for i in range(1,N):
flux0[i] = 0.5*(phi[i-1]+phi[i])*u[i]
%%timeit
flux1 = np.zeros(N+1)
flux1[1:N] = 0.5*(phi[0:N-1]+phi[1:N])*u[1:N]
def compute_flux(a, v, N):
    """Advective flux on the N+1 cell faces.

    Cell-centered values ``a`` (length N) are interpolated onto the
    interior faces with a two-point average and multiplied by the face
    velocity ``v`` (length N+1).  The two boundary faces copy their
    nearest interior face value.
    """
    flux = np.zeros(N + 1)
    # Central (second-order) interpolation on interior faces 1..N-1.
    flux[1:N] = 0.5 * (a[0:N - 1] + a[1:N]) * v[1:N]
    # Extrapolate to the boundary faces.
    flux[0], flux[N] = flux[1], flux[N - 1]
    return flux
F_exact = np.zeros(N+1)
F_exact = init_simulation(x_u,N+1)
F = compute_flux(phi,u,N)
plt.plot(x_u,F_exact,lw=2,label="exact")
plt.plot(x_u,F,'r--',lw=2,label="interpolated")
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$\phi$', fontdict = font)
plt.xlim(0,L)
plt.legend(loc="upper left", bbox_to_anchor=[0, 1],
ncol=1, shadow=True, fancybox=True)
plt.show()
N = 200
phi = np.zeros(N)
F_exact = np.zeros(N+1)
F = np.zeros(N+1)
u = u_0*np.ones(N+1)
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
phi = init_simulation(x_phi,N)
F_exact = init_simulation(x_u,N+1)
F = compute_flux(phi,u,N)
error = np.sqrt(np.sum(np.power(F-F_exact,2)))
errorx = np.power(F-F_exact,2)
plt.plot(x_u,errorx)
plt.show()
print('error norm L 2= %1.4e' %error)
Nerror = 3
Narray = np.array([10, 100, 200])
delta = L/Narray
error = np.zeros(Nerror)
order = np.zeros(Nerror)
for ierror in range(Nerror):
N = Narray[ierror]
phi = np.zeros(N)
F_exact = np.zeros(N+1)
F = np.zeros(N+1)
u = u_0*np.ones(N+1)
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
phi = init_simulation(x_phi,N)
F_exact = init_simulation(x_u,N+1)
F = compute_flux(phi,u,N)
error[ierror] = np.linalg.norm(F-F_exact)
#error[ierror] = np.sqrt(np.sum(np.power(F-F_exact,2)))
print('error norm L 2= %1.4e' %error[ierror])
order = 0.1*delta**(2)
plt.loglog(delta,error,lw=2,label='interpolate')
plt.loglog(delta,order,lw=2,label='$\propto\Delta x^2$')
plt.legend(loc="upper left", bbox_to_anchor=[0, 1],
ncol=1, shadow=True, fancybox=True)
plt.xlabel('$\Delta x$', fontdict = font)
plt.ylabel('$\Vert F\Vert_2$', fontdict = font)
plt.show
Nscheme = 4
Scheme = np.array(['CS','US1','US2','US3'])
g_1 = np.array([1./2.,0.,0.,3./8.])
g_2 = np.array([0.,0.,1./2.,1./8.])
def compute_flux_advanced(a,v,N,num_scheme):
    """Face fluxes f = phi_face * u under the scheme named ``num_scheme``.

    The interpolation weights g1, g2 are looked up in the module-level
    arrays ``Scheme``, ``g_1``, ``g_2`` ('CS', 'US1', 'US2', 'US3').
    """
    # np.where returns an index tuple, so g1/g2 come out as length-1
    # arrays that broadcast in the expressions below.
    imask = np.where(Scheme == num_scheme)
    g1 = g_1[imask]
    g2 = g_2[imask]
    f=np.zeros(N+1)
    # General three-point stencil on interior faces 2..N-1:
    # f_i = ((1 - g1 + g2)*a_{i-1} + g1*a_i - g2*a_{i-2}) * v_i
    f[2:N] = ((1.-g1+g2)*a[1:N-1]+g1*a[2:N]-g2*a[0:N-2])*v[2:N]
    if (num_scheme == 'US2') or (num_scheme == 'US3'):
        # Three-point schemes fall back to a two-point formula at face 1,
        # where a[i-2] does not exist.
        f[1] = ((1.-g1)*a[0]+g1*a[1])*v[1]
    # NOTE(review): for 'CS' and 'US1', f[1] is never assigned and stays
    # 0 (hence f[0] = 0 as well), unlike compute_flux which fills faces
    # 1..N-1 -- confirm this boundary treatment is intentional.
    f[0] = f[1]
    f[N] = f[N-1]
    return f
table = ListTable()
table.append(['Scheme', '$g_1$', '$g_2$'])
for i in range(4):
table.append([Scheme[i],g_1[i], g_2[i]])
table
Nerror = 3
Narray = np.array([10, 100, 200])
delta = L/Narray
error = np.zeros((Nerror,Nscheme))
order = np.zeros((Nerror,Nscheme))
for ischeme in range(Nscheme):
num_scheme = Scheme[ischeme]
for ierror in range(Nerror):
N = Narray[ierror]
dx = L/N
phi = np.zeros(N)
F_exact = np.zeros(N+1)
F = np.zeros(N+1)
u = u_0*np.ones(N+1)
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
phi = init_simulation(x_phi,N)
F_exact = init_simulation(x_u,N+1)
F = compute_flux_advanced(phi,u,N,num_scheme)
error[ierror,ischeme] = np.linalg.norm(F-F_exact)
#print('error norm L 2= %1.4e' %error[ierror,ischeme])
for ischeme in range(Nscheme):
plt.loglog(delta,error[:,ischeme],lw=2,label=Scheme[ischeme])
order = 2.0*(delta/delta[0])
plt.loglog(delta,order,'k:',lw=2,label='$\propto\Delta x$')
order = 0.1*(delta/delta[0])**(2)
plt.loglog(delta,order,'k-',lw=2,label='$\propto\Delta x^2$')
order = 0.1*(delta/delta[0])**(3)
plt.loglog(delta,order,'k--',lw=2,label='$\propto\Delta x^3$')
plt.legend(loc=2, bbox_to_anchor=[0, 1],
ncol=3, shadow=True, fancybox=True)
plt.xlabel('$\Delta x$', fontdict = font)
plt.ylabel('$\Vert F\Vert_2$', fontdict = font)
plt.xlim(L/300,L/9.)
plt.ylim(1e-5,1e2)
plt.show
def flux_divergence(f, N, dx):
    """Cell-centered divergence of the face fluxes.

    Returns (f[i+1] - f[i]) / dx for each of the N cells, given the
    N+1 face fluxes ``f`` and the uniform cell width ``dx``.
    """
    return (f[1:N + 1] - f[0:N]) / dx
N=200
Simulation_time = 5.
dx = L/N
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
u_0 = 1.
num_scheme = 'CS'
u = u_0*np.ones(N+1)
phi = np.zeros(N)
flux = np.zeros(N+1)
divflux = np.zeros(N)
phi = init_simulation(x_phi,N)
phi_init = phi.copy()
number_of_iterations = 100
dt = Simulation_time/number_of_iterations
t = 0.
for it in range (number_of_iterations):
flux = compute_flux_advanced(phi,u,N,num_scheme)
divflux = flux_divergence(flux,N,dx)
phi -= dt*divflux
t += dt
plt.plot(x_phi,phi,lw=2,label='simulated')
plt.plot(x_phi,phi_init,lw=2,label='initial')
plt.legend(loc=2, bbox_to_anchor=[0, 1],
ncol=2, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$\phi$', fontdict = font)
plt.xlim(0,L)
plt.show()
N=200
Simulation_time = 5.
dx = L/N
x_phi = np.linspace(dx/2.,L-dx/2.,N)
x_u = np.linspace(0.,L,N+1)
u_0 = 1.
num_scheme = 'CS'
u = u_0*np.ones(N+1)
phi = np.zeros(N)
flux = np.zeros(N+1)
divflux = np.zeros(N)
phiold = np.zeros(N)
phi = init_simulation(x_phi,N)
phi_init = phi.copy()
rk_coef = np.array([0.5,1.])
number_of_iterations = 100
dt = Simulation_time/number_of_iterations
t = 0.
for it in range (number_of_iterations):
phiold = phi
for irk in range(2):
flux = compute_flux_advanced(phi,u,N,num_scheme)
divflux = flux_divergence(flux,N,dx)
phi = phiold-rk_coef[irk]*dt*divflux
t += dt
plt.plot(x_phi,phi,lw=2,label='simulated')
plt.plot(x_phi,phi_init,lw=2,label='initial')
plt.legend(loc=2, bbox_to_anchor=[0, 1],
ncol=2, shadow=True, fancybox=True)
plt.xlabel('$x$', fontdict = font)
plt.ylabel('$\phi$', fontdict = font)
plt.xlim(0,L)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PairGrid
Step2: pairplot
Step3: Facet Grid
Step4: JointGrid
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
iris = sns.load_dataset('iris')
iris.head()
# Just the Grid
sns.PairGrid(iris)
# Then you map to the grid
g = sns.PairGrid(iris)
g.map(plt.scatter)
# Map to upper,lower, and diagonal
g = sns.PairGrid(iris)
g.map_diag(plt.hist)
g.map_upper(plt.scatter)
g.map_lower(sns.kdeplot)
sns.pairplot(iris)
sns.pairplot(iris,hue='species',palette='rainbow')
tips = sns.load_dataset('tips')
tips.head()
# Just the Grid
g = sns.FacetGrid(tips, col="time", row="smoker")
g = sns.FacetGrid(tips, col="time", row="smoker")
g = g.map(plt.hist, "total_bill")
g = sns.FacetGrid(tips, col="time", row="smoker",hue='sex')
# Notice hwo the arguments come after plt.scatter call
g = g.map(plt.scatter, "total_bill", "tip").add_legend()
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = sns.JointGrid(x="total_bill", y="tip", data=tips)
g = g.plot(sns.regplot, sns.distplot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: まずは、距離関数と各種重みを構築する関数を定義します。
Step2: 進捗の可視化のため、コールバック関数を定義しておきます。
Step3: シミュレーティッドアニーリングの実施
Step4: 量子アニーリングの実施
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import collections
import time
import matplotlib.pyplot as plt
import numpy as np
import anneal
from anneal.models import ClassicalIsingModel, QuantumIsingModel
from anneal.annealers import SimulatedAnnealer, QuantumAnnealer
POSITIONS = np.array((
(24050.0000, 123783),
(24216.6667, 123933),
(24233.3333, 123950),
(24233.3333, 124016),
(24250.0000, 123866),
(24300.0000, 123683),
(24316.6667, 123900),
(24316.6667, 124083),
(24333.3333, 123733),
))
def dist(a, b):
    """Euclidean distance between the points ``a`` and ``b``.

    Both arguments may be any array-likes of equal shape.
    """
    delta = np.asarray(a) - np.asarray(b)
    return np.linalg.norm(delta)
def build_weights(positions, coeff=1):
    """Build Ising/QUBO weights (j, h, c) for a travelling-salesman instance.

    The binary variable s[t, a] = 1 encodes "city a is visited at time
    step t".  The couplings combine the (cyclic) tour length with
    one-hot constraint penalties of strength A = coeff * max distance.

    Parameters
    ----------
    positions : sequence of (x, y)
        City coordinates.
    coeff : float, optional
        Scale factor for the constraint penalty A.

    Returns
    -------
    j : collections.defaultdict
        Quadratic couplings keyed by (t1, a, t2, b).
    h : ndarray of shape (n, n)
        Linear terms, 2*A per variable.
    c : float
        Constant energy offset, -2*A*n.
    """
    n_cities = len(positions)
    j = collections.defaultdict(int)
    # Pairwise distances do not depend on the time step t; the original
    # code recomputed dist() inside the t loop (an O(n^3) redundancy),
    # so the matrix is now built once up front.
    dmat = [[dist(positions[a], positions[b]) for b in range(n_cities)]
            for a in range(n_cities)]
    max_dist = max((d for row in dmat for d in row), default=0)
    # Tour-length couplings between consecutive steps (cyclic tour).
    for t in range(n_cities):
        for a in range(n_cities):
            for b in range(n_cities):
                j[t, a, (t + 1) % n_cities, b] = -dmat[a][b]
    A = max_dist * coeff
    # Penalty: at most one city per time step.
    for t in range(n_cities):
        for a in range(n_cities):
            for b in range(n_cities):
                if a != b:
                    j[t, a, t, b] -= 2*A
    # Penalty: each city visited at most once.
    for a in range(n_cities):
        for t1 in range(n_cities):
            for t2 in range(n_cities):
                if t1 != t2:
                    j[t1, a, t2, a] -= 2*A
    # Linear reward completing the one-hot constraints (replaces the
    # original element-wise += loop with an equivalent broadcast).
    h = np.zeros((n_cities, n_cities))
    h += 2*A
    c = -2*A*n_cities
    return j, h, c
class Callback(object):
def __init__(self):
self.history = []
def __call__(self, annealer, state_is_updated, model_is_updated):
objective_value = annealer.model.objective_value()
self.history.append(objective_value)
if annealer.iter_count % 1 == 0:
if isinstance(annealer.model, QuantumIsingModel):
state = annealer.model.observe_best()
else:
state = annealer.model.state
print("{}: {}'th iter, beta: {}, gamma: {}, objective_value: {}, is_valid: {}".format(
annealer.__class__.__name__,
annealer.iter_count,
annealer.model.beta,
annealer.model.__dict__.get('gamma', 0),
objective_value,
self.check_constraints(state)
))
def check_constraints(self, state):
return (
(state.to_array().sum(axis=1) == 1).all()
and (state.to_array().sum(axis=0) == 1).all()
)
j, h, c = build_weights(POSITIONS)
c_history = []
min_energy = float('inf')
best_annealer = None
iter = 0
start = time.time()
for i in range(16):
print('{}th challenge.'.format(i))
callback = Callback()
c_model = ClassicalIsingModel(j, h, c, state_shape=h.shape)
c_annealer = SimulatedAnnealer(c_model, beta_factor=2) # max_iter が小さいので、 beta_factor は大きく取っている
c_annealer.anneal(iter_callback=callback, max_iter=10)
c_history.append(callback.history)
energy = c_model.objective_value()
iter += c_annealer.iter_count
if energy < min_energy:
min_energy = energy
best_annealer = c_annealer
best_model = best_annealer.model
print('annealing time: {}'.format(time.time() - start))
print('annealer: {}'.format(best_annealer))
print('iterations(average): {}'.format(iter/16))
print('objective: {}'.format(best_model.objective_value()))
print('state: {}'.format(best_model.state.to_array()))
start = time.time()
callback = Callback()
q_model = QuantumIsingModel(j, h, c, state_shape=h.shape, n_trotter=100)
q_annealer = QuantumAnnealer(q_model, gamma_factor=0.5) # max_iter が小さいので、 gamma_factor は小さく取っている
q_annealer.anneal(iter_callback=callback, max_iter=10)
observed = q_model.observe_best()
print('annealing time: {}'.format(time.time() - start))
print('annealer: {}'.format(q_annealer))
print('iterations: {}'.format(q_annealer.iter_count))
print('objective: {}'.format(q_model.objective_value()))
print('best state: {}'.format(observed.to_array()))
print('energy of observed: {}'.format(c_model.energy(observed)))
def plot_route(state, positions):
    """Plot the tour encoded by *state* and print its total length.

    Each row of the state array one-hot encodes which city is visited at
    that step; the tour is closed by returning to the starting city.
    """
    stops = [positions[np.argmax(one_hot)] for one_hot in state.to_array()]
    stops.append(stops[0])  # close the loop back to the start
    stops = np.array(stops)
    total = 0
    for k in range(1, len(stops)):
        total += dist(stops[k - 1], stops[k])
    print(total)
    plt.plot(stops[:, 0], stops[:, 1], '-o')
# Visualize the best tour found by each method.
print('Simulated Annealing')
plot_route(best_model.state, POSITIONS)
plt.show()
print('Quantum Annealing')
plot_route(observed, POSITIONS)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we load the first input image and show it's origin, spacing, etc.
Step2: Now let's split it into two overlapping images.
Step3: Now let's merge the images without registration
Step4: Now rotate and translate one image
Step5: Now run merge with registration
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import numpy
import itk
from itk import TubeTK as ttk
from itkwidgets import view
import matplotlib.pyplot as plt
from matplotlib import cm
%matplotlib inline
# 2D float image type used throughout the demo.
ImageType=itk.Image[itk.F,2]
# NOTE(review): backslash in a non-raw string; prefer r"Data\ES0015_Large.mha"
# or a forward slash to avoid escape-sequence surprises.
input_image1 = "Data\ES0015_Large.mha"
reader0 = itk.ImageFileReader[ImageType].New(FileName=input_image1)
reader0.Update()
# Downsample by 2x in each dimension to speed up the demo.
resampler0 = ttk.ResampleImage[itk.F,2].New(Input=reader0.GetOutput(), ResampleFactor=(0.5,0.5))
resampler0.Update()
im0 = resampler0.GetOutput()
print("Origin:")
print(im0.GetOrigin())
print("Spacing:")
print(im0.GetSpacing())
view(im0)
# Split the image into two overlapping crops.
CropImageFilterType = ttk.CropImage[ImageType,ImageType]
crop1 = CropImageFilterType.New(Input=im0, Min=(0,0), Max=(200,200))
crop1.Update()
im1 = crop1.GetOutput()
view(im1)
crop2 = CropImageFilterType.New(Input=im0, Min=(50,100), Max=(255,255))
crop2.Update()
im2 = crop2.GetOutput()
view(im2)
# Merge without registration (MaxIterations=0 disables the optimizer).
MAIFilterType = ttk.MergeAdjacentImages[ImageType]
merge1 = MAIFilterType.New(Input1=im1, Input2=im2)
merge1.SetMaxIterations(0)
merge1.Update()
im12 = merge1.GetOutput()
view(im12)
# Rotate the second crop slightly so the naive merge misaligns.
TransformType = itk.AffineTransform[itk.D, 2]
trans = TransformType.New()
trans.Rotate2D( 3.1415926 * 0.02 )
ResampleFilterType = ttk.ResampleImage[itk.F, 2]
resample2 = ResampleFilterType.New(Input=im2, Transform=trans)
resample2.SetLoadTransform( True )
resample2.Update()
im2T = resample2.GetOutput()
# Merge again without registration to show the misalignment.
merge2 = MAIFilterType.New(Input1=im1, Input2=im2T)
merge2.SetBlendUsingAverage( True )
merge2.SetMaxIterations(0)
merge2.Update()
im12T = merge2.GetOutput()
view(im12T)
# Merge with registration enabled: the optimizer searches for the rotation
# and offset that best aligns the overlapping region.
merge3 = MAIFilterType.New(Input1=im1, Input2=im2T)
merge3.SetMaxIterations(1000)
merge3.SetExpectedRotation(0.1)
merge3.SetExpectedOffset(1)
merge3.SetSamplingRatio(0.5)
merge3.SetPadding((10,10))
merge3.Update()
im12TR = merge3.GetOutput()
view(im12TR)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: where n refers to the size of the population of cells, a is the number of active cells at any instance in time, s is the number of actual synapses on a dendritic segment, and θ is the threshold for NMDA spikes. Following (Ahmad & Hawkins, 2015), the numerator counts the number of possible ways θ or more cells can match a fixed set of s synapses. The denominator counts the number of ways a cells out of n can be active.
Step2: Table 1B
Step3: Table 1C
Step4: Table 1D
Step5: Charts for SDR Paper
Step6: Small sparsity is insufficient
Step7: A small subsample can be very reliable (but not too small)
Step8: Impact of noise on false negatives
Step9: Impact of noise on first-order TMs trained on one sequence
Step10: Charts for BAMI
Step11: Union Property Math
Step12: Expected performance for first-order TMs with varying sparsity
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: this section uses Python 2 print statements.
# Symbolic false-match probability for subsampled SDRs: with n cells, a
# active, s synapses on a segment and NMDA threshold theta, sum over the
# number of matching synapses b from theta to s.
oxp = Symbol("Omega_x'")
b = Symbol("b")
n = Symbol("n")
theta = Symbol("theta")
s = Symbol("s")
a = Symbol("a")
subsampledOmega = (binomial(s, b) * binomial(n - s, a - b)) / binomial(n, a)
subsampledFpF = Sum(subsampledOmega, (b, theta, s))
# "Slow" variant keeps the denominator outside the Sum (numerically friendlier).
subsampledOmegaSlow = (binomial(s, b) * binomial(n - s, a - b))
subsampledFpFSlow = Sum(subsampledOmegaSlow, (b, theta, s))/ binomial(n, a)
display(subsampledFpF)
display(subsampledFpFSlow)
display("n=10000, a=64, s=24, theta=12", subsampledFpF.subs(s,24).subs(n, 10000).subs(a, 64).subs(theta, 12).evalf())
display("n=10000, a=300, s=24, theta=12", subsampledFpFSlow.subs(theta, 12).subs(s, 24).subs(n, 10000).subs(a, 300).evalf())
display("n=2000, a=40, s=20, theta=10", subsampledFpF.subs(theta, 15).subs(s, 20).subs(n, 2000).subs(a, 40).evalf(100))
# Table 1B: exact match required (theta equals s).
T1B = subsampledFpFSlow.subs(n, 100000).subs(a, 2000).subs(theta,s).evalf()
print "n=100000, a=2000, theta=s"
display("s=6",T1B.subs(s,6).evalf())
display("s=8",T1B.subs(s,8).evalf())
display("s=10",T1B.subs(s,10).evalf())
# Table 1C: half of the synapses must match (s = 2*theta).
T1C = subsampledFpFSlow.subs(n, 100000).subs(a, 2000).subs(s,2*theta).evalf()
print "n=10000, a=300, s=2*theta"
display("theta=6",T1C.subs(theta,6).evalf())
display("theta=8",T1C.subs(theta,8).evalf())
display("theta=10",T1C.subs(theta,10).evalf())
display("theta=12",T1C.subs(theta,12).evalf())
# Table 1D: scale the synapse count by a multiplier m (s = 2*m*theta).
m = Symbol("m")
T1D = subsampledFpF.subs(n, 100000).subs(a, 2000).subs(s,2*m*theta).evalf()
print "n=100000, a=2000, s=2*m*theta"
display("theta=10, m=2",T1D.subs(theta,10).subs(m,2).evalf())
display("theta=10, m=4",T1D.subs(theta,10).subs(m,4).evalf())
display("theta=10, m=6",T1D.subs(theta,10).subs(m,6).evalf())
display("theta=20, m=6",T1D.subs(theta,20).subs(m,6).evalf())
# Sweep the population size n to show that sparsity alone drives reliability.
eq1 = subsampledFpFSlow.subs(s, 64).subs(theta, 12)
print "a=64 cells active, s=16 synapses on segment, dendritic threshold is theta=8\n"
errorList = []
nList = []
for n0 in range(300,20100,200):
    error = eq1.subs(n, n0).subs(a,64).evalf()
    errorList += [error]
    nList += [n0]
    print "population n = %5d, sparsity = %5.2f%%, probability of false match = "%(n0, 64/n0), error
print errorList
print nList
# Same sparsity (2%), different absolute population sizes.
print ("2% sparsity with n=400")
print subsampledFpFSlow.subs(s, 4).subs(a, 8).subs(theta, 2).subs(n,400).evalf()
print ("2% sparsity with n=4000")
print subsampledFpFSlow.subs(s, 4).subs(a, 400).subs(theta, 2).subs(n,4000).evalf()
# Sweep the subsample size s (theta = s/2) at fixed n and a.
eq2 = subsampledFpFSlow.subs(n, 10000).subs(a, 300)
print "a=200 cells active out of population of n=10000 cells\n"
errorList = []
sList = []
for s0 in range(2,31,1):
    print "synapses s = %3d, theta = s/2 = %3d, probability of false match = "%(s0,s0/2), eq2.subs(s, s0).subs(theta,s0/2).evalf()
    errorList += [eq2.subs(s, s0).subs(theta,s0/2).evalf()]
    sList += [s0]
print errorList
print sList
# False-negative probability under noise: with a active cells, s synapses
# and v noisy (flipped) cells, the segment misses when fewer than theta
# synapses still match.
b = Symbol("b")
v = Symbol("v")
theta = Symbol("theta")
s = Symbol("s")
a = Symbol("a")
overlapSetNoise = (binomial(s, b) * binomial(a - s, v - b)) / binomial(a, v)
noiseFN = Sum(overlapSetNoise, (b, s-theta+1, s))
eqn = noiseFN.subs(s, 30).subs(a, 128)
print "a=128 cells active with segment containing s=30 synapses (n doesn't matter here)\n"
# Sweep noise level from 5% to 85% for a few thresholds.
for t in range(8,20,4):
    print "theta = ",t
    errorList = []
    noiseList = []
    noisePct = 0.05
    while noisePct <= 0.85:
        noise = int(round(noisePct*128,0))
        errorList += [eqn.subs(v, noise).subs(theta,t).evalf()]
        noiseList += [noise/128.0]
        noisePct += 0.05
    print errorList
    print noiseList
eqn = noiseFN
# NOTE(review): `c` is not defined in this cell — the original notebook
# presumably had c = Symbol("c") earlier; as written this line raises
# NameError. Verify against the source notebook.
eqn = eqn.subs(s, 20).subs(a, 40).subs(c, 40)
# Same sweep, also tracking Jaccard similarity (1 - error).
for t in range(8, 20, 4):
    print "theta = ",t
    errorList = []
    jaccardSimilarityList = []
    noiseList = []
    noisePct = 0.00
    while noisePct <= 1:
        noise = int(round(noisePct*40,0))
        error = eqn.subs(v, noise).subs(theta,t).evalf()
        errorList.append(error)
        jaccardSimilarity = 1 - error
        jaccardSimilarityList.append(jaccardSimilarity)
        noiseList += [noise/40.0]
        noisePct += 0.05
    print errorList
    print jaccardSimilarityList
    print noiseList
# First-order TM false-match rate with 50% density (w0 = n0/2, theta = w0/2).
w0 = 32
print "a=%d cells active, s=%d synapses on segment, dendritic threshold is s/2\n" % (w0,w0)
errorList = []
nList = []
for n0 in range(50,500,50):
    w0 = n0/2
    eq1 = subsampledFpFSlow.subs(s, w0).subs(theta, w0/2)
    error = eq1.subs(n, n0).subs(a,w0).evalf()
    errorList += [error]
    nList += [n0]
    print "population n = %5d, sparsity = %7.4f%%, probability of false match = "%(n0, float(w0)/n0), error
print errorList
print nList
# Union property: probability that a union of m random SDRs falsely matches
# a new pattern. p is the chance a given cell stays off after m unions.
oxd = Symbol("omega")
n = Symbol("n")
a = Symbol("a")
b = Symbol("b")
theta = Symbol("theta")
s = Symbol("s")
m = Symbol("m")
q = Symbol("q")
p = (1 - a/n) ** m
ss = Min(floor((1 - (1 - s/n)**m)*n), a)
expectedUnionOverlap = binomial(((1 - p)*n), b) * binomial(((n - (1 - p)*n)), a - b) / binomial(n, a)
expectedUnionFP = Sum(expectedUnionOverlap, (b, theta, ss))
display(expectedUnionFP)
eq1 = expectedUnionFP.subs(a, 40).subs(n, 4000).subs(theta, 15).subs(s, 30)
display(eq1)
for num_patterns in range(100):
    eq2 = eq1.subs(m, num_patterns)
    #display(eq2)
    error_prob = eq2.evalf(100)
    print num_patterns, error_prob
# Expected fraction of distinct cells as patterns accumulate.
eq1 = p.subs(a, 40).subs(n, 4000)
for num_patterns in range(100):
    expected_distinct = eq1.subs(m, num_patterns).evalf(10)
    print num_patterns, expected_distinct, (1 - expected_distinct)
# Expected first-order TM performance across sparsities: a cell with many
# segments falsely fires if any segment matches.
eq1 = subsampledFpF
expected_num_segments_per_cell = (19*1000)*a/n
eq2 = 1 - ((1 - subsampledFpF)**expected_num_segments_per_cell)
jaccard = a/(a + eq2*(n - a))
display(jaccard)
jaccard2 = jaccard.subs(a, 64).subs(theta, 10).subs(s, 25)
display(jaccard2)
print "["
for i in range(300, 4100, 100):
    eq4 = jaccard2.subs(n, i)
    print i, str(eq4.evalf(10)), expected_num_segments_per_cell.subs(a, 64).subs(n, i).evalf(), eq2.subs(a, 64).subs(theta, 10).subs(s, 25).subs(n, i).evalf()
print "]"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Print the docstring
Step2: So as you can see, Nastran has many matrix formats.
Step3: There are more ways to read an OP4
|
<ASSISTANT_TASK:>
Python Code:
import os
from pyNastran.utils import print_bad_path
from pyNastran.op4.op4 import read_op4
import numpy as np
from numpy import float32, float64, int32, int64, product
# decrease output precision
np.set_printoptions(precision=3, threshold=20)
# Show the read_op4 docstring.
help(read_op4)
# read the op4, will pop open a dialog box
#matrices = read_op4()
op4_filename = r'C:\Users\Steve\Desktop\ISat_Launch_Sm_4pt.op4'
# The message expression runs only if the assert fails.
assert os.path.exists(op4_filename), print_bad_path(op4_filename)
#specify the file
matrices = read_op4(op4_filename)
# only 1 matrix
matrices = read_op4(op4_filename, matrix_names='FLAMA', debug=False)
# 1 or more matrices
matrices = read_op4(op4_filename, matrix_names=['FLAMA','UGEXT'])
# extract a matrix; each entry is a (form, matrix) tuple
form, flama = matrices['FLAMA']
print("form = %s" % form)
print("type = %s" % type(flama))
print("keys = %s" % matrices.keys())
print(matrices.keys())
form_flama, flama = matrices['FLAMA']
print("shape = %s" % str(flama.shape))
print("flamat nvals = %s" % flama.size)
form_ugext, ugext = matrices['UGEXT']
print("form_ugext=%s type=%s" % (form_ugext, type(ugext[0,0])))
#print "ugext", ugext
print("ugext.shape = %s" % str(ugext.shape))
print("ugext nvals = %s" % ugext.size)
print(ugext[:,:])
#print(flama)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Binary representation of an integer
Step2: with bin()
Step3: Binary representation of a float
Step4: Representation errors
|
<ASSISTANT_TASK:>
Python Code:
import sys
import math
import ctypes
import struct
# REPL-style exploration of numeric representations; bare expressions rely on
# the notebook echoing their values.
sys.float_info
sys.int_info
format(0.125, '.12g')
# 0.101 has no exact binary representation, unlike 0.125.
format(0.101, '.12g')
0.3
0.1
# Classic floating-point surprise: the sum is not exactly 0.3.
0.1 + 0.1 + 0.1 == 0.3
# Binary formatting of an integer via an f-string.
f"{0x1234:b}"
a = 1
"{0:b}".format(a)
bin(a)
sys.float_info
# Reinterpret the bits of a float32 two ways: via ctypes and via struct.
a = 1.0
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
a = 2.0
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
a = 4.0
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
a = 8.0
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
# NOTE(review): 2.0**500 exceeds float32 range — ctypes yields inf while
# struct.pack('!f', ...) raises OverflowError; confirm intended behavior.
a = 2.0**500
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
# Negative value flips the sign bit.
a = -2.0
a
bin(ctypes.c_uint.from_buffer(ctypes.c_float(a)).value)
bin(struct.unpack('!i',struct.pack('!f',a))[0])
# math.fsum accumulates without intermediate rounding error, unlike sum.
sum([0.1] * 10) == 1.0
math.fsum([0.1] * 10) == 1.0
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create time-series features and determine label based on market movement
Step2: Label engineering
Step3: TODO
Step5: Add time series features
Step6: Compute percentage change, then self join with prices from S&P index.
Step7: Compute normalized price change (shown above).
Step8: Verify results
Step9: <h3><font color="#4885ed">Join with S&P 500 table and Create labels
Step10: TODO
Step11: The dataset is still quite large and the majority of the days the market STAYs. Let's focus our analysis on dates where earnings per share (EPS) information is released by the companies. The EPS data has 3 key columns surprise, reported_EPS, and consensus_EPS
Step12: The surprise column indicates the difference between the expected (consensus expected eps by analysts) and the reported eps. We can join this table with our derived table to focus our analysis during earnings periods
Step14: Feature exploration
Step15: TODO Use the get_aggregate_stats from above to visualize the normalized_change column.
Step16: Let's look at results by day-of-week, month, etc.
|
<ASSISTANT_TASK:>
Python Code:
PROJECT = 'your-gcp-project' # Replace with your project ID.
import pandas as pd
from google.cloud import bigquery
from IPython.core.magic import register_cell_magic
from IPython import get_ipython
bq = bigquery.Client(project = PROJECT)
# Allow you to easily have Python variables in SQL query.
@register_cell_magic('with_globals')
def with_globals(line, cell):
    """Cell magic: substitute notebook globals into the cell, then run it."""
    rendered = cell.format(**globals())
    if 'print' in line:
        print(rendered)
    get_ipython().run_cell(rendered)
def create_dataset():
    """Create the `stock_market` BigQuery dataset if it does not already exist."""
    dataset = bigquery.Dataset(bq.dataset("stock_market"))
    try:
        bq.create_dataset(dataset) # Will fail if dataset already exists.
        print("Dataset created")
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        print("Dataset already exists")
create_dataset()
%%with_globals
%%bigquery --project {PROJECT}
--# TODO
%%with_globals
%%bigquery --project {PROJECT}
--# TODO
%%with_globals print
%%bigquery df --project {PROJECT}
CREATE OR REPLACE TABLE `stock_market.price_history_delta`
AS
(
WITH shifted_price AS
(
SELECT *,
(LAG(close, 1) OVER (PARTITION BY symbol order by Date DESC)) AS tomorrow_close
FROM `stock_src.price_history`
WHERE Close > 0
)
SELECT a.*,
(tomorrow_close - Close) AS tomo_close_m_close
FROM shifted_price a
)
%%with_globals
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.price_history_delta
ORDER by Date
LIMIT 100
%%with_globals print
%%bigquery --project {PROJECT}
SELECT
--# TODO: verify the stock market is going up -- on average.
FROM
stock_market.price_history_delta
def get_window_fxn(agg_fxn, n_days):
    """Generate a time-series feature.

    E.g., Compute the average of the price over the past 5 days.

    Args:
        agg_fxn: SQL aggregate function name, e.g. 'AVG' or 'MAX'.
        n_days: size of the trailing window, in rows (trading days).

    Returns:
        A SQL snippet (string). The '(# TODO)' markers are deliberate
        exercise placeholders for the student to fill in.
    """
    SCALE_VALUE = 'close'
    sql = '''
    ({agg_fxn}(close) OVER (PARTITION BY (# TODO)
    ORDER BY (# TODO)
    ROWS BETWEEN {n_days} (# TODO)))/{scale}
    AS close_{agg_fxn}_prior_{n_days}_days'''.format(
        agg_fxn=agg_fxn, n_days=n_days, scale=SCALE_VALUE)
    return sql
WEEK = 5
MONTH = 20
YEAR = 52*5
agg_funcs = ('MIN', 'MAX', 'AVG', 'STDDEV')
lookbacks = (WEEK, MONTH, YEAR)
sqls = []
for fxn in agg_funcs:
for lookback in lookbacks:
sqls.append(get_window_fxn(fxn, lookback))
time_series_features_sql = ','.join(sqls) # SQL string.
def preview_query():
print(time_series_features_sql[0:1000])
preview_query()
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.price_features_delta
AS
SELECT *
FROM
(SELECT *,
{time_series_features_sql},
-- Also get the raw time-series values; will be useful for the RNN model.
(ARRAY_AGG(close) OVER (PARTITION BY symbol
ORDER BY Date
ROWS BETWEEN 260 PRECEDING AND 1 PRECEDING))
AS close_values_prior_260,
ROW_NUMBER() OVER (PARTITION BY symbol ORDER BY Date) AS days_on_market
FROM stock_market.price_history_delta)
WHERE days_on_market > {YEAR}
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.price_features_delta
ORDER BY symbol, Date
LIMIT 10
# Worked example: normalize a stock's daily percent change by subtracting
# the S&P 500's percent change for the same day.
scaled_change = (50.59 - 50.69) / 50.69
scaled_s_p = (939.38 - 930.09) / 930.09
normalized_change = scaled_change - scaled_s_p
print('''
scaled change: {:2.3f}
scaled_s_p: {:2.3f}
normalized_change: {:2.3f}
'''.format(scaled_change, scaled_s_p, normalized_change))
# Ticker symbol used for the S&P 500 index in the price table.
snp500_index = 'gspc'
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.price_features_norm_per_change
AS
WITH
all_percent_changes AS
(
SELECT *, (tomo_close_m_close / Close) AS scaled_change
FROM `stock_market.price_features_delta`
),
s_p_changes AS
(SELECT
scaled_change AS s_p_scaled_change,
date
FROM all_percent_changes
WHERE symbol="{snp500_index}")
SELECT all_percent_changes.*,
s_p_scaled_change,
(# TODO) AS normalized_change
FROM
all_percent_changes LEFT JOIN s_p_changes
--# Add S&P change to all rows
ON all_percent_changes.date = s_p_changes.date
%%with_globals print
%%bigquery df --project {PROJECT}
SELECT *
FROM stock_market.price_features_norm_per_change
LIMIT 10
df.head()
# Normalized-change thresholds for labeling DOWN (< -1%) and UP (> +1%).
down_thresh = -0.01
up_thresh = 0.01
%%with_globals print
%%bigquery df --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.percent_change_sp500
AS
SELECT *,
CASE
--# TODO
END AS direction
FROM stock_market.price_features_norm_per_change features
INNER JOIN `stock_src.snp500`
USING (symbol)
%%with_globals print
%%bigquery --project {PROJECT}
SELECT direction, COUNT(*) as cnt
FROM stock_market.percent_change_sp500
GROUP BY direction
%%with_globals print
%%bigquery df --project {PROJECT}
SELECT *
FROM stock_market.percent_change_sp500
LIMIT 20
df.columns
%%with_globals print
%%bigquery --project {PROJECT}
SELECT *
FROM `stock_src.eps`
LIMIT 10
%%with_globals print
%%bigquery --project {PROJECT}
CREATE OR REPLACE TABLE stock_market.eps_percent_change_sp500
AS
SELECT a.*, b.consensus_EPS, b.reported_EPS, b.surprise
FROM stock_market.percent_change_sp500 a
INNER JOIN `stock_src.eps` b
ON a.Date = b.date
AND a.symbol = b.symbol
%%with_globals print
%%bigquery --project {PROJECT}
SELECT *
FROM stock_market.eps_percent_change_sp500
LIMIT 20
%%with_globals print
%%bigquery --project {PROJECT}
SELECT direction, COUNT(*) as cnt
FROM stock_market.eps_percent_change_sp500
GROUP BY direction
def get_aggregate_stats(field, round_digit=2):
    """Run SELECT ... GROUP BY field, rounding to nearest digit."""
    df = bq.query('''
    SELECT {field}, COUNT(*) as cnt
    FROM
    (SELECT ROUND({field}, {round_digit}) AS {field}
    FROM stock_market.eps_percent_change_sp500) rounded_field
    GROUP BY {field}
    ORDER BY {field}'''.format(field=field,
                               round_digit=round_digit,
                               PROJECT=PROJECT)).to_dataframe()
    # NOTE(review): PROJECT is passed to format() but unused by the template.
    return df.dropna()
# Histogram of a time-series feature, clipped to tame outliers and
# weighted so the y-axis reads as a percentage.
field = 'close_AVG_prior_260_days'
CLIP_MIN, CLIP_MAX = 0.1, 4.
df = get_aggregate_stats(field)
values = df[field].clip(CLIP_MIN, CLIP_MAX)
counts = 100 * df['cnt'] / df['cnt'].sum() # Percentage.
ax = values.hist(weights=counts, bins=30, figsize=(10, 5))
ax.set(xlabel=field, ylabel="%");
field = 'normalized_change'
# TODO
# Date parts accepted by BigQuery's EXTRACT().
VALID_GROUPBY_KEYS = ('DAYOFWEEK', 'DAY', 'DAYOFYEAR',
                      'WEEK', 'MONTH', 'QUARTER', 'YEAR')
# BigQuery EXTRACT(DAYOFWEEK ...) returns 1 (Sunday) through 7 (Saturday).
# Bug fix: 7 previously mapped to 'Sun' — it is Saturday.
DOW_MAPPING = {1: 'Sun', 2: 'Mon', 3: 'Tues', 4: 'Wed',
               5: 'Thur', 6: 'Fri', 7: 'Sat'}
def groupby_datetime(groupby_key, field):
    """Average `field` grouped by a date part extracted from the `date` column.

    Args:
        groupby_key: one of VALID_GROUPBY_KEYS.
        field: name of the numeric column to average.

    Returns:
        A DataFrame indexed by the date part, with NaN rows dropped.

    Raises:
        Exception: if `groupby_key` is not a supported date part.
    """
    if groupby_key not in VALID_GROUPBY_KEYS:
        raise Exception('Please use a valid groupby_key.')
    sql = '''
    SELECT {groupby_key}, AVG({field}) as avg_{field}
    FROM
    (SELECT {field},
    EXTRACT({groupby_key} FROM date) AS {groupby_key}
    FROM stock_market.eps_percent_change_sp500) foo
    GROUP BY {groupby_key}
    ORDER BY {groupby_key} DESC'''.format(groupby_key=groupby_key,
                                          field=field,
                                          PROJECT=PROJECT)
    print(sql)
    df = bq.query(sql).to_dataframe()
    if groupby_key == 'DAYOFWEEK':
        df.DAYOFWEEK = df.DAYOFWEEK.map(DOW_MAPPING)
    return df.set_index(groupby_key).dropna()
# Average normalized change by day of week.
field = 'normalized_change'
df = groupby_datetime('DAYOFWEEK', field)
ax = df.plot(kind='barh', color='orange', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
# Average closing price by day of week.
field = 'close'
df = groupby_datetime('DAYOFWEEK', field)
ax = df.plot(kind='barh', color='orange', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
# Average normalized change by month.
field = 'normalized_change'
df = groupby_datetime('MONTH', field)
ax = df.plot(kind='barh', color='blue', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
# Average normalized change by quarter.
field = 'normalized_change'
df = groupby_datetime('QUARTER', field)
ax = df.plot(kind='barh', color='green', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
# Average closing price by year.
field = 'close'
df = groupby_datetime('YEAR', field)
ax = df.plot(kind='line', color='purple', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
# Average normalized change by year.
field = 'normalized_change'
df = groupby_datetime('YEAR', field)
ax = df.plot(kind='line', color='purple', alpha=0.7)
ax.grid(which='major', axis='y', linewidth=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph regularization for Twitter rumour veracity classification using natural graphs
Step2: Dataset description
Step4: Convert rumor annotations to labels
Step7: Load annotated rumours from dataset
Step9: Load tokenizers, model architectures and pre-trained weights
Step14: Tokenization and model inference functions
Step15: Testing tokenization and model inference with an example
Step17: Hyperparameters for the classifier to be trained
Step21: Process dataset in batch or single inference mode
Step22: Load and inspect dataset
Step23: Compute and store textual embeddings
Step25: Create train and test datasets
Step28: Create train and test tf.data.TFRecordDataset
Step29: Inspecting train dataset
Step30: Inspecting test dataset
Step32: Multi-layer perceptron classification model
Step34: Functional MLP model
Step37: tf.Keras.Model subclass MLP
Step38: Base MLP model
Step39: Compile and train the base MLP model
Step41: Evaluate base model on test dataset
Step42: Create, compile and train MLP model with graph regularization
Step43: Evaluate MLP model with graph regularization on test dataset
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --quiet neural-structured-learning
!pip install --quiet transformers
!pip install --quiet tokenizers
!pip install --quiet sentencepiece
import os
import json
import random
import pprint
import numpy as np
import neural_structured_learning as nsl
import tensorflow as tf
import tensorflow_datasets as tfds
import sentencepiece
from tokenizers import BertWordPieceTokenizer
from transformers import BertConfig
from transformers import BertTokenizer, TFBertModel
from transformers import XLNetTokenizer, TFXLNetModel
from transformers import RobertaTokenizer, TFRobertaModel
from transformers import AlbertTokenizer, TFAlbertModel
from transformers import T5Tokenizer, TFT5Model
from transformers import ElectraTokenizer, TFElectraModel
# Resets notebook state
tf.keras.backend.clear_session()
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print(
"GPU is",
"available" if tf.config.list_physical_devices("GPU") else "NOT AVAILABLE")
!mkdir /tmp/PHEME
!wget --quiet -P /tmp/PHEME https://ndownloader.figshare.com/files/11767817
!ls -l /tmp/PHEME
!tar -C /tmp/PHEME -xvzf /tmp/PHEME/11767817
!ls /tmp/PHEME/all-rnr-annotated-threads/
# Python 3 function to convert rumour annotations into True, False, Unverified.
def convert_annotations(annotation, string=True):
    """Map a PHEME rumour annotation dict to a veracity label.

    Args:
        annotation: dict that may contain 'misinformation' and/or 'true'
            flags (ints or numeric strings).
        string: if True return "true"/"false"/"unverified"; otherwise return
            the integer codes 1/0/2 respectively.

    Returns:
        The label, or None for inconsistent or missing annotations.
    """
    if 'misinformation' in annotation.keys() and 'true' in annotation.keys():
        if int(annotation['misinformation']) == 0 and int(annotation['true']) == 0:
            label = "unverified" if string else 2
        elif int(annotation['misinformation']) == 0 and int(annotation['true']) == 1:
            label = "true" if string else 1
        elif int(annotation['misinformation']) == 1 and int(annotation['true']) == 0:
            label = "false" if string else 0
        elif int(annotation['misinformation']) == 1 and int(annotation['true']) == 1:
            # Contradictory annotation: flagged as both true and misinformation.
            print("OMG! They both are 1!")
            print(annotation['misinformation'])
            print(annotation['true'])
            label = None
    elif 'misinformation' in annotation.keys() and 'true' not in annotation.keys():
        # All instances have a misinformation label but may lack a 'true' label.
        if int(annotation['misinformation']) == 0:
            label = "unverified" if string else 2
        elif int(annotation['misinformation']) == 1:
            label = "false" if string else 0
    elif 'true' in annotation.keys() and 'misinformation' not in annotation.keys():
        print('Has true not misinformation')
        label = None
    else:
        print('No annotations')
        label = None
    return label
def load_pheme_data(parent_dir):
    """Loads and returns tweets and their replies from input `parent_dir`.

    Args:
        parent_dir: A string with full path to directory where the data lies in.

    Returns:
        A tuple (annotation, structure, source_tweets, reactions) such that
        each element is a dictionary directly read from the underlying JSON
        structure provided in the PHEME dataset.
    """
    with open(parent_dir + '/annotation.json') as f:
        annotation = json.load(f)
    with open(parent_dir + '/structure.json') as f:
        structure = json.load(f)
    source_tweets = {}
    for f in os.scandir(parent_dir + '/source-tweets'):
        if f.name[0] != '.':  # skip hidden files such as .DS_Store
            with open(f.path) as json_file:
                source_tweets[f.name.split('.json')[0]] = json.load(json_file)
    reactions = {}
    for f in os.scandir(parent_dir + '/reactions'):
        if f.name[0] != '.':
            with open(f.path) as json_file:
                reactions[f.name.split('.json')[0]] = json.load(json_file)
    return annotation, structure, source_tweets, reactions
def load_labels_and_texts(topics):
    """Reads verified rumour tweets, replies and labels for input `topics`.

    Non-rumour or unverified tweet threads aren't included in the returned
    dataset.

    Args:
        topics: A list of strings, each containing the full path to a topic
            to be read.

    Returns:
        A list of dictionaries such that each entry E contains:
            E['label']: (integer) the rumour veracity annotation.
            E['source_text']: (string) the source tweet text.
            E['reactions']: (list of strings) the texts from the tweet replies.
    """
    labels_and_texts = []
    for t in topics:
        rumours = [
            f.path for f in os.scandir(t + '/rumours') if f.is_dir()
        ]
        for r in rumours:
            annotation, structure, source_tweets, reactions = load_pheme_data(r)
            for source_tweet in source_tweets.values():
                labels_and_texts.append({
                    'label' : convert_annotations(annotation, string = False),
                    'source_text' : source_tweet['text'],
                    'reactions' : [reaction['text'] for reaction in reactions.values()]
                })
    print('Read', len(labels_and_texts), 'annotated rumour tweet threads')
    # Label 2 means "unverified"; keep only verified (true/false) threads.
    verified_labels_and_texts = list(
        filter(lambda d : d['label'] != 2, labels_and_texts))
    print('Returning', len(verified_labels_and_texts),
          'verified rumour tweet threads')
    return verified_labels_and_texts
# Collect one directory per rumour topic from the extracted PHEME archive.
topics = [
    f.path
    for f in os.scandir('/tmp/PHEME/all-rnr-annotated-threads/')
    if f.is_dir()
]
topics
configuration = BertConfig()
# Uncomment any tokenizer-model pair for experimentation with other models.
TOKENIZERS = {
    # 'bert_base' : BertTokenizer.from_pretrained('bert-base-uncased'),
    # 'bert_large' : BertTokenizer.from_pretrained('bert-large-uncased'),
    # 'xlnet_base' : XLNetTokenizer.from_pretrained('xlnet-base-cased'),
    # 'xlnet_large' : XLNetTokenizer.from_pretrained('xlnet-large-cased'),
    # 'roberta_base' : RobertaTokenizer.from_pretrained('roberta-base'),
    # 'roberta_large' : RobertaTokenizer.from_pretrained('roberta-large'),
    'albert_base' : AlbertTokenizer.from_pretrained('albert-base-v2'),
    # 'albert_large' : AlbertTokenizer.from_pretrained('albert-large-v2'),
    # 'albert_xlarge' : AlbertTokenizer.from_pretrained('albert-xlarge-v2'),
    # 'albert_xxlarge' : AlbertTokenizer.from_pretrained('albert-xxlarge-v2'),
    # 't5_small' : T5Tokenizer.from_pretrained('t5-small'),
    # 't5_base' : T5Tokenizer.from_pretrained('t5-base'),
    # 't5_large' : T5Tokenizer.from_pretrained('t5-large'),
    # 'electra_small' : ElectraTokenizer.from_pretrained('google/electra-small-discriminator'),
    # 'electra_large' : ElectraTokenizer.from_pretrained('google/electra-large-discriminator'),
}
PRETRAINED_MODELS = {
    # 'bert_base' : TFBertModel.from_pretrained('bert-base-uncased'),
    # 'bert_large' : TFBertModel.from_pretrained('bert-large-uncased'),
    # 'xlnet_base' : TFXLNetModel.from_pretrained('xlnet-base-cased'),
    # 'xlnet_large' : TFXLNetModel.from_pretrained('xlnet-large-cased'),
    # 'roberta_base' : TFRobertaModel.from_pretrained('roberta-base'),
    # 'roberta_large' : TFRobertaModel.from_pretrained('roberta-large'),
    'albert_base' : TFAlbertModel.from_pretrained('albert-base-v2'),
    # 'albert_large' : TFAlbertModel.from_pretrained('albert-large-v2'),
    # 'albert_xlarge' : TFAlbertModel.from_pretrained('albert-xlarge-v2'),
    # 'albert_xxlarge' : TFAlbertModel.from_pretrained('albert-xxlarge-v2'),
    # 't5_small' : TFT5Model.from_pretrained('t5-small'),
    # 't5_base' : TFT5Model.from_pretrained('t5-base'),
    # 't5_large' : TFT5Model.from_pretrained('t5-large'),
    # 'electra_small' : TFElectraForPreTraining.from_pretrained('google/electra-small-discriminator'),
    # 'electra_large' : TFElectraForPreTraining.from_pretrained('google/electra-large-discriminator')
}
# Given that tweets are short, use a small value for reduced inference time
TOKENIZER_MAX_SEQ_LENGTH = 64
def bert_embedding_model_inference(model, tokenizer, text):
    """Tokenizes and computes BERT `model` inference for input `text`.

    Returns the [CLS] token embedding from the last hidden layer.
    """
    input_ids = tf.constant(
        tokenizer.encode(text, add_special_tokens=True))[None, :]  # Batch size 1
    outputs = model(input_ids)
    # The last hidden-state is the first element of the output tuple
    last_hidden_states = outputs[0]
    cls_embedding = last_hidden_states[0][0]
    return cls_embedding
def albert_embedding_model_inference(model, tokenizer, text):
    """Tokenizes and computes ALBERT `model` inference for input `text`.

    Returns the [CLS] token embedding from the last hidden layer.
    """
    encoded_text = tf.constant(tokenizer.encode(text))[None, :]
    outputs = model(encoded_text)
    # The last hidden-state is the first element of the output tuple
    last_hidden_states = outputs[0]
    cls_embedding = last_hidden_states[0][0]
    return cls_embedding
def albert_embedding_batch_model_inference(model, tokenizer, text_batch):
    """Tokenizes and computes ALBERT `model` inference for input `text_batch`.

    Each text is padded/truncated to TOKENIZER_MAX_SEQ_LENGTH so the encodings
    can be stacked into a single batch tensor; returns one [CLS] embedding
    per input text, in order.
    """
    encoded_text = [
        tf.constant(tokenizer.encode(
            t, max_length=TOKENIZER_MAX_SEQ_LENGTH, pad_to_max_length=True))
        for t in text_batch
    ]
    encoded_batch = tf.stack(encoded_text)
    # Bug fix: run the model on the stacked batch tensor; the original passed
    # the raw Python list, leaving `encoded_batch` unused.
    outputs = model(encoded_batch)
    cls_embeddings = []
    for last_hidden_state in outputs[0]:
        cls_embeddings.append(last_hidden_state[0])
    return cls_embeddings
def t5_embedding_model_inference(model, tokenizer, text):
    """Tokenizes and computes T5 `model` inference for input `text`.

    Returns the first token embedding from the last hidden layer.
    """
    inputs = tokenizer.encode(text, return_tensors="tf")  # Batch size 1
    outputs = model(inputs, decoder_input_ids=inputs)
    # The last hidden-state is the first element of the output tuple
    last_hidden_states = outputs[0]
    cls_embedding = last_hidden_states[0][0]
    return cls_embedding
'''
More model inference functions can be added, following documentation on
https://huggingface.co/transformers/
'''
# Dispatch table: model family name -> inference function defined above.
INFERENCE_MODELS = {
    'bert' : bert_embedding_model_inference,
    'albert' : albert_embedding_model_inference,
    'albert_batch' : albert_embedding_batch_model_inference,
    't5' : t5_embedding_model_inference
}
# Active model/tokenizer selection for the rest of the notebook.
PRETRAINED_MODEL = 'albert_base'
INFERENCE = 'albert'
# Sanity-check inference on a toy sentence; also determines the embedding size.
test_inference = INFERENCE_MODELS[INFERENCE](
    PRETRAINED_MODELS[PRETRAINED_MODEL],
    TOKENIZERS[PRETRAINED_MODEL],
    "Why is the sky blue"
)
print('Example embedding', test_inference)
embedding_dim = test_inference.shape[0]
class HParams(object):
    """Hyperparameters used for training."""

    def __init__(self):
        ### dataset parameters
        self.num_classes = 2
        # Dimensionality of the pretrained text embeddings (set above).
        self.embedding_dim = embedding_dim
        ### neural graph learning parameters
        self.distance_type = nsl.configs.DistanceType.L2
        # Weight of the graph-regularization term in the total loss.
        self.graph_regularization_multiplier = 0.1
        # Max number of tweet replies used as graph neighbors per source.
        self.num_neighbors = 5
        ### model architecture
        self.num_fc_units = [64, 64]
        ### training parameters
        self.train_epochs = 50
        self.batch_size = 128
        # Batch size used when running the pretrained encoder for embeddings.
        self.encoder_inference_batch_size = 32
        self.dropout_rate = 0.2
        ### eval parameters
        self.eval_steps = None  # All instances in the test set are evaluated.
HPARAMS = HParams()
def add_batch_embeddings(labels_and_texts):
    """Splits `labels_and_texts` into batches, performs tokenization and
    converts tweet text into its corresponding embedding.

    Args:
        labels_and_texts: A List of dictionaries such that each entry E contains:
            E['label']: (integer) the rumour veracity annotation.
            E['source_text']: (string) the source tweet text.
            E['reactions']: (List of strings) the texts from the tweet replies.

    Each entry E from labels_and_texts is updated, adding the following key,
    value pairs:
        E['source_embedding']: (Tensor of floats) embeddings for E['source_text']
        E['reaction_embedding']: (List of float Tensors) embeddings for
            E['reactions'], up to HPARAMS.num_neighbors and in the
            corresponding E['reactions'] order.
    """
    # Flatten all texts into one list: each source text followed by its
    # (truncated) reactions.  The same order is relied upon below when
    # scattering the computed embeddings back onto the entries.
    inputs = []
    print('Accumulating inputs')
    for e in labels_and_texts:
        inputs.append(e['source_text'])
        for r in e['reactions'][:HPARAMS.num_neighbors]:
            # Alternative ways to select neighbors within a tweet thread can be used.
            inputs.append(r)

    def generate_batches(inputs, batch_size):
        """Splits `inputs` list into chunks of (up to) `batch_size` length."""
        for i in range(0, len(inputs), batch_size):
            yield inputs[i: i + batch_size]

    inferences = []
    for i, batch in enumerate(generate_batches(
            inputs, HPARAMS.encoder_inference_batch_size)):
        print('Running model inference for batch', i)
        batch_inference = INFERENCE_MODELS[INFERENCE](
            PRETRAINED_MODELS[PRETRAINED_MODEL],
            TOKENIZERS[PRETRAINED_MODEL],
            batch)
        for inference in batch_inference:
            inferences.append(inference)
    # Walk the flat `inferences` list in the same order it was built.
    i = 0
    for e in labels_and_texts:
        e['source_embedding'] = inferences[i]
        i += 1
        e['reaction_embedding'] = []
        for r in e['reactions'][:HPARAMS.num_neighbors]:
            e['reaction_embedding'].append(inferences[i])
            i += 1
def add_embeddings(labels_and_texts):
    """Performs tokenization and model inference for each element of
    labels_and_texts, updating it with computed embeddings.

    Unlike add_batch_embeddings, inference runs one text at a time.

    Args:
        labels_and_texts: A List of dictionaries such that each entry E contains:
            E['label']: (integer) the rumour veracity annotation.
            E['source_text']: (string) the source tweet text.
            E['reactions']: (List of strings) the texts from the tweet replies.

    Each entry E from labels_and_texts is updated, adding the following key,
    value pairs:
        E['source_embedding']: (Tensor of floats) embeddings for E['source_text']
        E['reaction_embedding']: (List of float Tensors) embeddings for
            E['reactions'], up to HPARAMS.num_neighbors and in the
            corresponding E['reactions'] order.
    """
    for index, label_and_texts in enumerate(labels_and_texts):
        if index % 100 == 0:
            print('Computing embeddings for item', index)
        label_and_texts['source_embedding'] = INFERENCE_MODELS[INFERENCE](
            PRETRAINED_MODELS[PRETRAINED_MODEL],
            TOKENIZERS[PRETRAINED_MODEL],
            label_and_texts['source_text'])
        label_and_texts['reaction_embedding'] = []
        for r in label_and_texts['reactions'][:HPARAMS.num_neighbors]:
            # Alternative ways to select neighbors within a tweet thread can be used.
            label_and_texts['reaction_embedding'].append(INFERENCE_MODELS[INFERENCE](
                PRETRAINED_MODELS[PRETRAINED_MODEL],
                TOKENIZERS[PRETRAINED_MODEL],
                r))
labels_and_texts = load_labels_and_texts(topics)
for e in random.sample(labels_and_texts, 3):
pprint.pprint(e)
if 'batch' in INFERENCE:
add_batch_embeddings(labels_and_texts)
else:
add_embeddings(labels_and_texts)
# Alternative ways to split include a split by time or by news event.
random.shuffle(labels_and_texts)
train_size = int(0.8 * len(labels_and_texts))
TRAIN_DATA = labels_and_texts[:train_size]
TEST_DATA = labels_and_texts[train_size:]
# Constants used to identify neighbor features in the input.
NBR_FEATURE_PREFIX = 'NL_nbr_'
NBR_WEIGHT_SUFFIX = '_weight'
def create_np_tensors_from_datum(datum, propagate_label=True):
    """Convert one datum into node and neighbor numpy feature tensors.

    Args:
        datum: dict with 'label', 'source_embedding' and 'reaction_embedding'
            (a list of neighbor embeddings) entries.
        propagate_label: if True, copy the node label onto every neighbor.

    Returns:
        A (node_tensor, neighbor_tensors) pair: node_tensor is a dict with
        float32 'embedding' and 'label' keys; neighbor_tensors is a list of
        dicts each holding a float32 'embedding' (plus 'label' when
        propagated).
    """
    label = datum['label']

    def to_float32(values):
        """Cast an embedding to a float32 numpy array."""
        return np.array(values, dtype='f')

    node_tensor = {
        'embedding': to_float32(datum['source_embedding']),
        'label': label,
    }
    neighbor_tensors = []
    for reaction in datum['reaction_embedding']:
        neighbor = {'embedding': to_float32(reaction)}
        if propagate_label:
            neighbor['label'] = label
        neighbor_tensors.append(neighbor)
    return node_tensor, neighbor_tensors
TRAIN_TENSORS = [
create_np_tensors_from_datum(d) for d in TRAIN_DATA
]
TEST_TENSORS = [
create_np_tensors_from_datum(d) for d in TEST_DATA
]
TRAIN_TENSORS[0][0], TEST_TENSORS[0][0]
def make_dataset(tf_features, training=False):
    """Creates a batched `tf.data.Dataset` from node/neighbor feature tuples.

    Args:
        tf_features: List of (node_tensor, neighbor_tensors) tuples.
        training: Boolean indicating if we are in training mode.  When True,
            neighbor embeddings and per-neighbor weights (needed for graph
            regularization) are added to every example's features.

    Returns:
        A `tf.data.Dataset` of (features, label) pairs, batched with
        HPARAMS.batch_size.
    """
    def get_tf_examples_with_nsl_signals(node_feature, neighbor_features):
        """Merges `neighbor_features` and `node_feature`.

        Args:
            node_feature: A dictionary of `tf.train.Feature`.
            neighbor_features: A list of `tf.train.Feature` dictionaries.

        Returns:
            A pair whose first value is a dictionary containing relevant
            features and whose second value contains the ground truth label.
        """
        feature_dict = dict(node_feature)
        # We also extract corresponding neighbor features in a similar manner to
        # the features above during training.
        if training:
            for i in range(HPARAMS.num_neighbors):
                nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, i, 'embedding')
                nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, i,
                                                 NBR_WEIGHT_SUFFIX)
                if i < len(neighbor_features):
                    nf = neighbor_features[i]
                    feature_dict[nbr_feature_key] = nf['embedding']
                    feature_dict[nbr_weight_key] = 1.0
                else:
                    # Pad missing neighbors with a zero embedding and zero
                    # weight so they do not contribute to the graph loss.
                    feature_dict[nbr_feature_key] = np.zeros(
                        HPARAMS.embedding_dim, dtype='f')
                    feature_dict[nbr_weight_key] = 0.0
        label = feature_dict.pop('label')
        return feature_dict, label

    print('Adding NSL features for entries')
    # Transpose the per-example dicts into one dict of per-feature columns,
    # as required by Dataset.from_tensor_slices.
    tensors_with_nsl = {}
    labels = []
    for (node, neighbors) in tf_features:
        feature_dict, label = get_tf_examples_with_nsl_signals(node, neighbors)
        for k, v in feature_dict.items():
            if k not in tensors_with_nsl:
                tensors_with_nsl[k] = []
            tensors_with_nsl[k].append(v)
        labels.append(label)
    print('Creating tf.data.Dataset from tensors')
    dataset = tf.data.Dataset.from_tensor_slices((tensors_with_nsl, labels))
    dataset = dataset.batch(HPARAMS.batch_size)
    return dataset
train_dataset = make_dataset(TRAIN_TENSORS, training=True)
test_dataset = make_dataset(TEST_TENSORS)
for feature_batch, label_batch in train_dataset.take(1):
print('Feature list:', list(feature_batch.keys()))
print('Batch of inputs:', feature_batch['embedding'])
nbr_feature_key = '{}{}_{}'.format(NBR_FEATURE_PREFIX, 0, 'embedding')
nbr_weight_key = '{}{}{}'.format(NBR_FEATURE_PREFIX, 0, NBR_WEIGHT_SUFFIX)
print('Batch of neighbor inputs:', feature_batch[nbr_feature_key])
print('Batch of neighbor weights:',
tf.reshape(feature_batch[nbr_weight_key], [-1]))
print('Batch of labels:', label_batch)
for feature_batch, label_batch in test_dataset.take(1):
print('Feature list:', list(feature_batch.keys()))
print('Batch of inputs:', feature_batch['embedding'])
print('Batch of labels:', label_batch)
def make_mlp_sequential_model(hparams):
    """Creates a sequential multi-layer perceptron model.

    One Dense+Dropout pair is stacked per entry in hparams.num_fc_units,
    followed by a softmax classification head.
    """
    model = tf.keras.Sequential()
    model.add(
        tf.keras.layers.InputLayer(
            input_shape=(hparams.embedding_dim,), name='embedding'))
    for num_units in hparams.num_fc_units:
        model.add(tf.keras.layers.Dense(num_units, activation='relu'))
        # For sequential models, by default, Keras ensures that the 'dropout'
        # layer is invoked only during training.
        model.add(tf.keras.layers.Dropout(hparams.dropout_rate))
    model.add(tf.keras.layers.Dense(hparams.num_classes, activation='softmax'))
    return model
def make_mlp_functional_model(hparams):
    """Creates a functional API-based multi-layer perceptron model.

    One Dense+Dropout pair is stacked per entry in hparams.num_fc_units,
    followed by a softmax classification head.
    """
    # The inputs are dense float embedding vectors, so the input dtype must
    # be floating point; the original 'int64' would only be appropriate for
    # integer token ids.
    inputs = tf.keras.Input(
        shape=(hparams.embedding_dim,), dtype='float32', name='embedding')
    cur_layer = inputs
    for num_units in hparams.num_fc_units:
        cur_layer = tf.keras.layers.Dense(num_units, activation='relu')(cur_layer)
        # For functional models, by default, Keras ensures that the 'dropout'
        # layer is invoked only during training.
        cur_layer = tf.keras.layers.Dropout(hparams.dropout_rate)(cur_layer)
    outputs = tf.keras.layers.Dense(
        hparams.num_classes, activation='softmax')(
            cur_layer)
    model = tf.keras.Model(inputs, outputs=outputs)
    return model
def make_mlp_subclass_model(hparams):
    """Creates a multi-layer perceptron subclass model in Keras."""
    class MLP(tf.keras.Model):
        """Subclass model defining a multi-layer perceptron."""

        def __init__(self):
            super(MLP, self).__init__()
            self.dense_layers = [
                tf.keras.layers.Dense(num_units, activation='relu')
                for num_units in hparams.num_fc_units
            ]
            self.dropout_layer = tf.keras.layers.Dropout(hparams.dropout_rate)
            self.output_layer = tf.keras.layers.Dense(
                hparams.num_classes, activation='softmax')

        def call(self, inputs, training=False):
            # `inputs` is the feature dict produced by make_dataset; only the
            # node embedding is used by the forward pass.
            cur_layer = inputs['embedding']
            for dense_layer in self.dense_layers:
                cur_layer = dense_layer(cur_layer)
            # Dropout is active only when training=True is passed by Keras.
            cur_layer = self.dropout_layer(cur_layer, training=training)
            outputs = self.output_layer(cur_layer)
            return outputs

    return MLP()
# Create a base MLP model using the functional API.
# Alternatively, you can also create a sequential or subclass base model using
# the make_mlp_sequential_model() or make_mlp_subclass_model() functions
# respectively, defined above. Note that if a subclass model is used, its
# summary cannot be generated until it is built.
base_model_tag, base_model = 'FUNCTIONAL', make_mlp_functional_model(HPARAMS)
base_model.summary()
base_model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
base_model.fit(train_dataset, epochs=HPARAMS.train_epochs, verbose=1)
# Helper function to print evaluation metrics.
def print_metrics(model_desc, eval_metrics):
    """Prints evaluation metrics.

    Args:
        model_desc: A description of the model.
        eval_metrics: A dictionary mapping metric names to corresponding
            values. It must contain the loss and accuracy metrics; the graph
            loss is printed only when present.
    """
    print('\n')
    for metric_name in ('accuracy', 'loss'):
        print('Eval %s for ' % metric_name, model_desc, ': ',
              eval_metrics[metric_name])
    if 'graph_loss' in eval_metrics:
        print('Eval graph loss for ', model_desc, ': ', eval_metrics['graph_loss'])
eval_results = dict(
zip(base_model.metrics_names,
base_model.evaluate(test_dataset, steps=HPARAMS.eval_steps)))
print_metrics('Base MLP model', eval_results)
# Build a new base MLP model.
base_reg_model_tag, base_reg_model = 'FUNCTIONAL', make_mlp_functional_model(
HPARAMS)
# Wrap the base MLP model with graph regularization.
graph_reg_config = nsl.configs.make_graph_reg_config(
max_neighbors=HPARAMS.num_neighbors,
multiplier=HPARAMS.graph_regularization_multiplier,
distance_type=HPARAMS.distance_type,
sum_over_axis=-1)
graph_reg_model = nsl.keras.GraphRegularization(base_reg_model,
graph_reg_config)
graph_reg_model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
graph_reg_model.fit(train_dataset, epochs=HPARAMS.train_epochs, verbose=1)
eval_results = dict(
zip(graph_reg_model.metrics_names,
graph_reg_model.evaluate(test_dataset, steps=HPARAMS.eval_steps)))
print_metrics('MLP + graph regularization', eval_results)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Интересные нам всплески потребления кончаются где-то на 10000-ной миллисекунде (их пять подряд, мы моргали лампочкой пять раз).
Step6: Функции для парсинга событий из лога и поиска точки синхронизации
|
<ASSISTANT_TASK:>
Python Code:
df_r1000 = df.groupby(df.index//1000).mean()
fig = sns.plt.figure(figsize=(16, 6))
ax = sns.plt.subplot()
df_r1000.plot(ax=ax)
fig = sns.plt.figure(figsize=(16, 6))
ax = sns.plt.subplot()
df_r1000[:12000].plot(ax=ax)
import numpy as np
import pandas as pd
from scipy import signal
from scipy import interpolate
from scipy.stats import pearsonr
import logging
log = logging.getLogger(__name__)
def torch_status(lines):
    """Yield (timestamp, status) pairs parsed from torch event log lines.

    A line containing "newStatus=2" yields status 1 and one containing
    "newStatus=1" yields status 0; other lines are skipped.  The timestamp
    is taken from the line's second whitespace-separated token, parsed as
    "%H:%M:%S.%f".
    """
    markers = (("newStatus=2", 1), ("newStatus=1", 0))
    for line in lines:
        for marker, status in markers:
            if marker in line:
                timestamp = datetime.strptime(line.split()[1], "%H:%M:%S.%f")
                yield (timestamp, status)
                break
def parse_torch_events(filename, sps=1000):
    """Parse torch on/off events from `filename` into a DataFrame.

    The "offset" column is converted from timestamps to integer sample
    indices relative to the first event, at `sps` samples per second.
    """
    log.info("Parsing torch events...")
    with open(filename) as eventlog:
        events = pd.DataFrame.from_records(
            torch_status(eventlog), columns=["offset", "status"])
    start_time = events["offset"][0]
    events["offset"] = events["offset"].map(
        lambda ts: int(np.round((ts - start_time).total_seconds() * sps)))
    return events
def ref_signal(torch, trailing_zeros=1000):
    """Generate square reference signal with trailing zeroes.

    `torch` is a DataFrame with integer sample "offset" and 0/1 "status"
    columns (as returned by parse_torch_events).  The statuses are
    step-interpolated onto a dense per-sample grid, then `trailing_zeros`
    zero samples are appended.
    """
    log.info("Generating ref signal...")
    # kind="zero" gives zero-order-hold (step) interpolation between events.
    f = interpolate.interp1d(torch["offset"], torch["status"], kind="zero")
    X = np.linspace(0, torch["offset"].values[-1], torch["offset"].values[-1])
    return np.append(f(X), np.zeros(trailing_zeros))
def cross_correlate(sig, ref, first=30000):
    """Calculate cross-correlation with lag. Take only first n lags.

    For each lag, Pearson's correlation is computed between `ref` and the
    equally long window of `sig` starting at that lag.

    Returns:
        A DataFrame with "corr" and "p_value" columns, one row per lag.
    """
    log.info("Calculating cross-correlation...")
    lags = np.arange(len(sig) - len(ref))
    if len(lags) > first:
        lags = lags[:first]
    return pd.DataFrame.from_records(
        [pearsonr(sig[lag:lag+len(ref)], ref) for lag in lags],
        columns=["corr", "p_value"])
def sync(sig, eventlog, sps=1000, trailing_zeros=1000, first=30000):
    """Find the sample offset that aligns `sig` with the torch event log.

    Builds a square reference signal from `eventlog`, cross-correlates it
    against `sig` (over at most `first` lags), and returns the lag with the
    highest Pearson correlation.

    Raises:
        RuntimeError: if the best correlation is not statistically
            significant (p-value above 0.05).
    """
    rs = ref_signal(
        parse_torch_events(eventlog, sps=sps),
        trailing_zeros=trailing_zeros)
    # Forward `first`; the original call silently ignored this parameter.
    cc = cross_correlate(sig, rs, first=first)
    sync_point = np.argmax(cc["corr"])
    if cc["p_value"][sync_point] > 0.05:
        # %f, not %d: p-values are floats in [0, 1] and an integer format
        # would always truncate them to 0.
        raise RuntimeError("P-value is too big: %f" % cc["p_value"][sync_point])
    log.info(
        "Pearson's coef: %f, p-value: %f",
        cc["corr"][sync_point],
        cc["p_value"][sync_point])
    return sync_point
te = parse_torch_events("browser_download.log", sps=1000)
rs = ref_signal(te)
cc = cross_correlate(df_r1000[0], rs)
fig = sns.plt.figure(figsize=(16, 6))
ax = sns.plt.subplot()
sns.plt.plot(df_r1000[0][:20000], label="signal")
sns.plt.plot(cc["corr"][:20000]*1000 + 500, label="cross-correlation")
sns.plt.plot(np.append(np.zeros(sync_point), rs * 500 + 500), label="reference")
#sns.plt.plot(cc["p_value"][:20000]*1000, label="p-value")
sync_point = np.argmax(cc["corr"])
sns.plt.axvline(sync_point)
ax.legend()
fig = sns.plt.figure(figsize=(10, 6))
ax = sns.plt.subplot()
sns.plt.scatter(np.arange(0, 30, 2), np.zeros(15), label="Одно")
sns.plt.scatter(np.arange(1, 31, 2), np.zeros(15), label="Другое", color="red")
ax.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: В качестве градиентного спуска возьмём самый простой вариант реализации со скоростью обучения 1 и 100 итераций.
Step2: Получили точность 90,96% на отложенной выборке. Посмотрим, получится ли её улучшить позже.
Step3: Как видим на графике, точность (к тому же, и Q, так как мы штрафуем некорректные классификации) с отличными от 0 коэффициентами регуляризации лишь понижается, притом значительно. Вполне возможно, что для конкретной архитектуры и данных выигрыша стратегия использования регуляризации в целом не даёт.
Step4: Заметим, что по сравнению с однослойной моделью качество выросло значительно — примерно на 5,2 процента.
Step5: Видим, что точность сильно растёт при увеличении размера до $\approx 2^7$, однако затем она значительно не меняется, а время обучения растёт.
Step6: Явную зависимость от числа слоём мы вывести не можем, однако при 7 слоях достигается оптимальная точность. Стоит, однако, заметить, что при ином виде оптимизации график выглядит иначе
Step7: Получаем, что при увеличении числа слоёв точность снижается, особенно заметно снижение при 10 слоях (оверфит?).
Step8: Имеем точность хоть ненамного, но большую, чем у предыдущего решения (на 2,33% больше стандартного)
Step9: Выведем на экран обобщения наших чисел, которые получились в результате работы автоэнкодера, а затем проверим, насколько после полученного обобщения повысится качество классификации, используя поверх модели построенную ранее.
Step10: Результат даже ухудшился. Вывод можно сделать один
Step11: Воспользовались автоэнкодером иначе
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import Input
from keras.optimizers import SGD, Adam, RMSprop, Adadelta, Adam, Nadam
from keras.utils import np_utils
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# add column of ones for single layer network
X_train_single, X_test_single = np.hstack([np.ones((X_train.shape[0],1)),X_train]),np.hstack([np.ones((X_test.shape[0],1)),X_test])
def sm_single_layer_train_gd(w, X, Y, learn_rate, iter_num):
    """Train softmax-regression weights `w` by full-batch gradient descent.

    Performs `iter_num` in-place updates on `w` and returns it.
    """
    n_samples = X.shape[0]
    for _ in range(iter_num):
        logits = X @ w
        # Subtract the row-wise max for numerical stability of exp().
        logits -= np.amax(logits, axis=1, keepdims=True)
        exp_logits = np.exp(logits)
        softmax = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
        gradient = (X.T @ (softmax - Y)) / n_samples
        w -= learn_rate * gradient
    return w
def predict(w, X):
    """Return the predicted class index for each row of `X` under weights `w`."""
    scores = X @ w
    # The softmax transform is monotonic, so shifting by the row max and
    # normalising cannot change the argmax; kept for parity with training.
    scores -= np.amax(scores, axis=1, keepdims=True)
    probabilities = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)
    return np.argmax(probabilities, axis=1)


def calculate_acc(w, test_features, test_labels):
    """Return the fraction of `test_labels` predicted correctly by `w`."""
    predictions = predict(w, test_features)
    n_wrong = np.sum(predictions != test_labels)
    return 1 - n_wrong / test_labels.shape[0]
w_start=np.zeros((X_train_single.shape[1],10))
w_res=sm_single_layer_train_gd(w_start,X_train_single,Y_train,1,100)
print(calculate_acc(w_res,X_test_single,y_test))
def sm_single_layer_train_gd_reg(w, X, Y, learn_rate, l1coeff, l2coeff, iter_num):
    """Train softmax-regression weights with elastic-net (L1 + L2) penalties.

    The data gradient is augmented with `l1coeff * sign(w)` and
    `l2coeff * 2 * w` before each in-place update; `w` is returned.
    """
    n_samples = X.shape[0]
    for _ in range(iter_num):
        logits = X @ w
        # Subtract the row-wise max for numerical stability of exp().
        logits -= np.amax(logits, axis=1, keepdims=True)
        exp_logits = np.exp(logits)
        softmax = exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
        data_grad = (X.T @ (softmax - Y)) / n_samples
        penalty_grad = l1coeff * np.sign(w) + l2coeff * 2 * w
        w -= learn_rate * (data_grad + penalty_grad)
    return w
reg_coeffs=np.arange(0,1,5e-2)
xgrid, ygrid = np.meshgrid(reg_coeffs, reg_coeffs)
zgrid=np.empty_like(xgrid)
for i in range(len(reg_coeffs)):
for j in range(len(reg_coeffs)):
w_start=np.zeros((X_train_single.shape[1],10))
w_res=sm_single_layer_train_gd_reg(w_start,X_train_single,Y_train,1,reg_coeffs[i],reg_coeffs[j],10)
zgrid[i][j]=calculate_acc(w_res,X_test_single,y_test)
plt.figure()
plt.xlabel('L1 coeff')
plt.ylabel('L2 coeff')
cont=plt.contourf(xgrid, ygrid, zgrid,1000, cmap=plt.cm.jet)
cbar = plt.colorbar(cont)
cbar.ax.set_ylabel('Accuracy')
plt.show()
batch_size = 128
nb_epoch = 50
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
acc=[]
for layer_size in range(1,12):
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(2**layer_size, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
acc.append(score[1])
ax=plt.gca()
plt.plot(list((2**i for i in range (1,12))),acc)
ax.set_xscale("log",basex=2)
plt.show()
acc=[]
for i in range(10):
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
for layer in range(i):
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
acc.append(score[1])
plt.plot(list(range(1,11)),acc)
plt.show()
acc=[]
for i in range(10):
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
for layer in range(i):
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
acc.append(score[1])
plt.plot(list(range(1,11)),acc)
plt.show()
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.1))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', score[1])
np.random.seed(1337) # for reproducibility
encoding_dim = 32
input_img = Input(shape=(784,))
encoded = Dense(encoding_dim, activation='relu')(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input=input_img, output=decoded)
encoder = Model(input=input_img, output=encoded)
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
autoencoder.compile(optimizer='adadelta', loss='mean_squared_error')
autoencoder.fit(X_train, X_train,nb_epoch=nb_epoch,batch_size=128,verbose=0,shuffle=True,validation_data=(X_test, X_test))
encoded_imgs = encoder.predict(X_train)
decoded_imgs = decoder.predict(encoded_imgs)
encoded_test = encoder.predict(X_test)
decoded_test = decoder.predict(encoded_test)
import matplotlib.pyplot as plt
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, i + 1)
plt.imshow(X_train[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
np.random.seed(1337) # for reproducibility
model = Sequential()
model.add(Dense(512,input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.1))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=Adadelta(), metrics=['accuracy'])
history = model.fit(decoded_imgs, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=0, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', score[1])
layers = [
Dense(encoding_dim, input_shape=(784,)),
Activation('relu')
]
autoencoder = [Dense(784, input_shape=(encoding_dim,))]
model = Sequential(layers + autoencoder)
model.compile(loss='mean_squared_error',optimizer=Adadelta())
model.fit(X_train, X_train,batch_size=batch_size, nb_epoch=nb_epoch,verbose=0)
imgs = model.predict(X_train)
layers[0].trainable=False
layers[1].trainable=False
layers.append(Dense(512,input_shape=(encoding_dim,)))
layers.append(Activation('relu'))
layers.append(Dropout(0.2))
layers.append(Dense(512))
layers.append(Activation('relu'))
layers.append(Dense(512))
layers.append(Activation('tanh'))
layers.append(Dropout(0.1))
layers.append(Dense(10))
layers.append(Activation('softmax'))
model = Sequential(layers)
model.compile(loss='categorical_crossentropy',optimizer=Adadelta(),metrics=['accuracy'])
model.fit(X_train, Y_train,batch_size=batch_size,nb_epoch=nb_epoch, verbose=1,validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
import matplotlib.pyplot as plt
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
ax = plt.subplot(2, n, i + 1)
plt.imshow(X_train[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Demonstrations
Step2: Calculating BetaT for Multiple Temperatures
Step3: Using the RhoT model
Step4: Calculating RhoT for Multiple Temperatures
Step5: Using the RhoTP model
Step6: Calculating RhoTP at Multiple Pressures
Step7: RhoTPx model
Step8: Calculating RhoTP as a Function of Composition
|
<ASSISTANT_TASK:>
Python Code:
# import some tools to use in this example
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# import the model class
from auxi.tools.materialphysicalproperties.idealgas import BetaT
# create a model object
βT = BetaT()
# define the state of the gas
T = 500.0 # [K]
# calculate the gas density
β = βT(T=T)
print ("β =", β, βT.units)
# calculate the gas density
Ts = list(range(400, 1550, 50)) # [K]
β = [βT(T=T) for T in Ts]
# plot a graph
plt.plot(Ts, β, "bo", alpha = 0.5)
plt.xlabel('$T$ [K]')
plt.ylabel('$%s$ [%s]' % (βT.display_symbol, βT.units))
plt.show()
# import the molar mass function
from auxi.tools.chemistry.stoichiometry import molar_mass as mm
# import the model class
from auxi.tools.materialphysicalproperties.idealgas import RhoT
# create a model object
# Since the model only calculates as a function temperature, we need to specify
# pressure and average molar mass when we create it.
ρT = RhoT(molar_mass=mm('CO2'), P=101325.0)
# define the state of the gas
T = 500.0 # [K]
# calculate the gas density
ρ = ρT.calculate(T=T)
print(ρT.symbol, "=", ρ, ρT.units)
# calculate the gas density
Ts = list(range(400, 1550, 50)) # [K]
ρs = [ρT(T=T) for T in Ts]
# plot a graph
plt.plot(Ts, ρs, "bo", alpha = 0.7)
plt.xlabel('$T$ [K]')
plt.ylabel('$%s$ [%s]' % (ρT.display_symbol, ρT.units))
plt.show()
# import the model class
from auxi.tools.materialphysicalproperties.idealgas import RhoTP
# create a model object
# Since the model only calculates as a function of temperature and pressure,
# we need to specify an average molar mass when we create it.
ρTP = RhoTP(mm('CO2'))
# define the state of the gas
T = 500.0 # [K]
P = 101325.0 # [Pa]
# calculate the gas density
ρ = ρTP.calculate(T=T,P=P)
print(ρTP.symbol, "=", ρ, ρTP.units)
# define the state of the gas
T = 700.0 # [K]
Ps = np.linspace(0.5*101325, 5*101325) # [Pa]
# calculate the gas density
ρs = [ρTP(T=T, P=P) for P in Ps]
# plot a graph
plt.plot(Ps, ρs, "bo", alpha = 0.7)
plt.xlabel('$P$ [Pa]')
plt.ylabel('$%s$ [%s]' % (ρTP.display_symbol, ρTP.units))
plt.show()
# import the model class
from auxi.tools.materialphysicalproperties.idealgas import RhoTPx
# create a model object
ρTPx = RhoTPx()
# define the state of the gas
T = 700.0 # [K]
P = 101325.0 # [Pa]
x = {'H2':0.5, 'Ar':0.5} # [mole fraction]
# calculate the gas density
ρ = ρTPx(T=700, P=101000, x=x)
print(ρTPx.symbol, "=", ρ, ρTPx.units)
# define the state of the gas
T = 700.0 # [K]
P = 101325.0 # [Pa]
xs_h2 = np.arange(0,1.1,0.1) # [mole fraction H2]
# calculate density as a function of composition for a binary Ar-H2 gas mixture
ρs = [ρTPx(T=700, P=101325 ,x={'Ar':1-x, 'H2':x}) for x in xs_h2]
# plot a graph
plt.plot(xs_h2, ρs, "bo", alpha = 0.7)
plt.xlim((0,1))
plt.xlabel('$x_{H_2}$ [mol]')
plt.ylabel('$%s$ [%s]' % (ρTPx.display_symbol, ρTPx.units))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we plot a single example of both classes, to show what the data looks like. First the pulsar example.
Step2: It is clear that the peak is not in the centre. For most examples it is, but not for all. How about for the non-pulsar examples?
Step4: The non-pulsar example doesn't appear to be correctly centred either. So we centre the data using a simple function. We define this function below
Step5: Now we execute this centering function.
Step7: Now the data is correctly loaded and centred, we can move on.
Step8: It's clear that the function is producing values very close to those expected from the theory. It is also clear that our function is giving the same answers to the numpy function. So it appears to be working well. Now for another test, this time on the uniform distribution.
Step9: The data looks good, so we can continue. Next we split the data into test and training samples. This is important so we can build and test a machine learning classifier on the data. To do this, we use functions built in to the scikit-learn machine learning library.
Step10: Now that we have generated some example data, lets see what happens when we build a classifier to operate on this data. We will use a simple Bayesian classifier for this data.
Step11: Now lets see what happens as the data distributions diverge, causing i.i.d violations. Here we shift the target class distributions only, in some new test data. In particular we change only the mean of the target class feature distributions in this data. This code can take around 30 seconds to execute.
Step12: The plot above shows how a change in the mean of the target class feature distributions (for $f^{1}_{i}, f^{2}_{i}, f^{3}_{i}$), alters classification performance. Performance can drop below 50%, which is no better than random guessing! This is clearly not good. But what happens if both classes experience change in their feature distributions? Suppose the non-target class now experiences change in $\sigma$ in a new independent sample.
Step14: As you can see, the result is even more stark, with accuracy being heavily impacted as the i.i.d assumption is violated. So you may wonder is this really a significant problem? Whilst what we have shown here is somewhat contrived, the effects are real. The i.i.d assumption is often violated in real-world problems.
Step15: Next we observe the performance when the data isn't scaled differently. The following cell takes approximately 20-30 seconds to execute.
Step16: Now we build and test the classifier.
Step17: We can see that when the data belongs to the same distribution/data ranges, even a simple classifier can do well using only 4 features. Now we use the scale function to convert some of the data into different data ranges. We then re-run this experiment.
Step18: Now train and test as before.
Step19: We can see that accuracy has degraded. This example shows that it is important to ensure our data is always pre-processed in the same way. Indeed, even if data is produced at different telescopes, it can still help if the data is pre-processed in the same way. Doing this is certainly better than doing nothing at all. Indeed, in Lyon et. al. 2016, the authors were able to train an accurate classifier for the LOFAR telescope, using only data from the Parkes telescope. Whilst the results where far from perfect, they were very good.
Step20: Now with the data sets created, lets train then test as before.
|
<ASSISTANT_TASK:>
Python Code:
# Import the libraries to be used throughout.
%pylab inline
import matplotlib.pyplot as plt
# The HTRU 2 profile data is split - one file containing the real pulsar
# profiles, one file containing noise/interference profiles. We load both
# these data sources here. First we construct relative paths to the files.
data_dir = 'data/HTRU2'
pulsar_file = data_dir + '/HTRU2_pulsar.csv'
nonpulsar_file = data_dir + '/HTRU2_nonpulsar.csv'
# Now simply load the data.
pulsar_data = genfromtxt(pulsar_file, dtype=np.int,delimiter=',')
non_pulsar_data = genfromtxt(nonpulsar_file, dtype=np.int,delimiter=',')
# Print overview details.
print ('\n\nTotal number of pulsar profiles: ', len(pulsar_data))
print ('Total number of noise/RFI profiles: ', len(non_pulsar_data))
figure(1)
plot(pulsar_data[7], 'r')
xlabel('Bin')
ylabel('Normalised Intensity')
title('Example Integrated Profile for a pulsar')
show()
figure(2)
plot(non_pulsar_data[0], 'b')
xlabel('Bin')
ylabel('Normalised Intensity')
title('Example Integrated Profile for a non-pulsar')
show()
import operator
def centre_on_peak(data):
    """Centre the data so that its maximum value sits in the middle bin.

    The profile is circularly shifted (rotated) such that the bin holding
    the maximum intensity is moved to the midpoint of the array.

    Parameters
    ----------
    data : array_like
        The 1-D profile data to be centred.

    Returns
    -------
    numpy.ndarray
        The circularly-shifted (centred) data array.
    """
    data = np.asarray(data)

    # Bin currently holding the peak value (first occurrence, as with
    # max(enumerate(data), key=...)).
    peak_index = int(np.argmax(data))

    # Target bin: the midpoint of the profile.
    midpoint = int(len(data) / 2)

    # np.roll performs the circular shift that moves the peak to the
    # midpoint; it is equivalent to concatenating data[-a:] and data[:-a]
    # with a = (midpoint - peak_index) % len(data).
    return np.roll(data, midpoint - peak_index)
# Here we simply loop over each item in the data arrays,
# and update their values.
for i in range(0, len(pulsar_data)):
pulsar_data[i] = centre_on_peak(pulsar_data[i])
for i in range(0, len(non_pulsar_data)):
non_pulsar_data[i] = centre_on_peak(non_pulsar_data[i])
figure(3)
plot(pulsar_data[7], 'r')
xlabel('Bin')
ylabel('Normalised Intensity')
title('Example Integrated Profile for a pulsar - Centred')
show()
figure(4)
plot(non_pulsar_data[0], 'b')
xlabel('Bin')
ylabel('Normalised Intensity')
title('Example Integrated Profile for a non-pulsar - Centred')
show()
def compute_features(data):
    """Compute machine learning feature values for the supplied data array.

    The four features are the mean, the sample standard deviation, the
    skewness, and the excess kurtosis of the data distribution.

    Parameters
    ----------
    data : array_like or None
        A 1-D data array.

    Returns
    -------
    list
        The computed features as [mean, stdev, skew, kurtosis]. Degenerate
        inputs (None, empty, or data summing to zero) yield [0, 0, 0, 0].
    """
    # Guard against missing or empty input.
    if data is None or len(data) == 0:
        return [0, 0, 0, 0]

    arr = np.asarray(data, dtype=float)
    n = len(arr)

    # Data whose values sum to exactly zero is treated as "no data".
    if arr.sum() == 0.0:
        return [0, 0, 0, 0]

    mean_value = arr.sum() / float(n)
    deviations = arr - mean_value

    # Central-moment sums used by the standard deviation, skew and kurtosis.
    m2 = np.sum(deviations ** 2)
    m3 = np.sum(deviations ** 3)
    m4 = np.sum(deviations ** 4)

    # A single sample has no spread; avoid the division by (n - 1).
    if n < 2:
        return [mean_value, 0, 0, 0]

    # Sample (Bessel-corrected) standard deviation.
    stdev = np.sqrt(m2 / (n - 1.0))

    # Population skewness and excess kurtosis (so that a normal
    # distribution maps to a kurtosis of zero).
    kurt = (m4 / n) / np.power(m2 / n, 2) - 3
    skew = (m3 / n) / np.power(np.sqrt(m2 / n), 3)

    return [mean_value, stdev, skew, kurt]
# Import the random library again, just incase
# this notebook is executed out of order.
import random as rnd
# Set a simple seed value - ensures the results
# are reproducible.
np.random.seed(12345678)
X = [] # Stores the feature data.
Y = [] # Stores the class labels.
# Generate the feature data for the target class examples.
f1 = np.random.normal(0, 1.0, 1000)
f2 = np.random.normal(0, 1.0, 1000)
f3 = np.random.normal(0, 1.0, 1000)
# Now show how the data looks...
figure(5)
count, bins, ignored = hist(f1, 50, normed=True)
# Since we now what the mu and sigma values are, we
# plot a theoretical curve. We can then compare the
# distribution to this curve.
mu = 0.0
sigma = 1.0
# Plot theoretical curve
plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ),linewidth=2, color='r')
ylabel('Probability Density')
xlabel('Bin')
title('Distribution of feature 1 data - Target class (mu=0.0, sigma=1.0)')
show()
# Now store the feature values and labels, in the correct
# sets. Remember X contains the feature data, Y the true
# class labels. Here the true class label is always 1, as
# this data represents the target class.
for x, y, z in zip(f1, f2, f3):
X.append([x,y,z])
Y.append(1)
# Now generate the non-target data.
f1 = np.random.normal(0.1, 2.0, 1000)
f2 = np.random.normal(0.2, 2.5, 1000)
f3 = np.random.normal(0.3, 3.0, 1000)
for x, y, z in zip(f1, f2, f3):
X.append([x,y,z])
Y.append(0)
# Now show how the data looks...
figure(6)
count, bins, ignored = hist(f1, 50, normed=True)
# Since we now what the mu and sigma values are, we
# plot a theoretical curve. We can then compare the
# distribution to this curve.
mu = 0.1
sigma = 2.0
# Plot theoretical curve
plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) * np.exp( - (bins - mu)**2 / (2 * sigma**2) ),linewidth=2, color='r')
ylabel('Probability Density')
xlabel('Bin')
title('Distribution of feature 1 data - Non-target class (mu=0.1, sigma=2.0)')
show()
# Some cleanup
f1 = None
f2 = None
f3 = None
mu = None
sigma = None
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.5)
print ('Examples in training set: ' , str(len(x_train)))
print ('Examples in testing set: ' , str(len(x_test)))
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
# First train the classifier with call to fit.
classifier.fit(x_train, y_train)
# Now obtain the classifiers 'score'
accuracy = classifier.score(x_test, y_test)
print ("Naive Bayes Classifier accuracy: ", (100* accuracy), "%.")
# Store the accuracy recorded for each
# distributional change.
recorded_accuracies = []
# Some simple displacements we'll apply
# to the feature distributions the classifier
# was trained upon.
displacements = np.arange(-2.0,2.0,0.05)
# Stores the non-i.i.d used during this experiment.
x_test_non_iid = []
y_test_non_iid = []
# For each displacement
for d in displacements:
# Used to compute classifier accuracy after each run.
aggregate_sum = 0.0
aggregate_accuracy = 0.0
n = 25
# For n iterations...
for x in np.arange(0,n,1.0):
x_test_non_iid = []
y_test_non_iid = []
# Generate some new example data using the
# displacement values to move the feature distributions.
f1 = np.random.normal(0.1+d, 1.0, 1000)
f2 = np.random.normal(0.1+d, 1.0, 1000)
f3 = np.random.normal(0.1+d, 1.0, 1000)
for x, y, z in zip(f1, f2, f3):
x_test_non_iid.append([x,y,z])
y_test_non_iid.append(1)
# Now generate the non-target data.
f1 = np.random.normal(0.1, 2.0, 1000)
f2 = np.random.normal(0.2, 2.5, 1000)
f3 = np.random.normal(0.3, 3.0, 1000)
#noise_1 = np.random.normal(0.4+d, 2.0, 1000)
#noise_2 = np.random.normal(0.5+d, 2.5, 1000)
#noise_3 = np.random.normal(0.6+d, 3.0, 1000)
for x, y, z in zip(f1, f2, f3):
x_test_non_iid.append([x,y,z])
y_test_non_iid.append(0)
accuracy = classifier.score(x_test_non_iid, y_test_non_iid)
aggregate_sum += accuracy
#print "NB accuracy: ", accuracy # Uncomment if you wish to see the values
recorded_accuracies.append(aggregate_sum/float(n))
# Some cleanup
f1 = None
f2 = None
f3 = None
x_test_non_iid = None
y_test_non_iid = None
# Now plot the change observed in classifier accuracy over time.
plt.plot(recorded_accuracies,label='Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('n')
plt.title('Accuracy as test distribution drifts (changing target class mu)')
plt.legend(loc='upper left')
plt.show()
# Store the accuracy recorded for each
# distributional change.
recorded_accuracies = []
# Some simple displacements we'll apply
# to the feature distributions the classifier
# was trained upon.
displacements = np.arange(-2.0,2.0,0.05)
# Stores the non-i.i.d used during this experiment.
x_test_non_iid = []
y_test_non_iid = []
# For each displacement
for d in displacements:
# Used to compute classifier accuracy after each run.
aggregate_sum = 0.0
aggregate_accuracy = 0.0
n = 25
# For n iterations...
for x in np.arange(0,n,1.0):
x_test_non_iid = []
y_test_non_iid = []
# Generate some new example data using the
# displacement values to move the feature distributions.
f1 = np.random.normal(0.1+d, 1.0, 1000)
f2 = np.random.normal(0.1+d, 1.0, 1000)
f3 = np.random.normal(0.1+d, 1.0, 1000)
for x, y, z in zip(f1, f2, f3):
x_test_non_iid.append([x,y,z])
y_test_non_iid.append(1)
# Now generate the non-target data.
f1 = np.random.normal(0.1, 2.0+d, 1000)
f2 = np.random.normal(0.2, 2.5+d, 1000)
f3 = np.random.normal(0.3, 3.0+d, 1000)
for x, y, z in zip(f1, f2, f3):
x_test_non_iid.append([x,y,z])
y_test_non_iid.append(0)
accuracy = classifier.score(x_test_non_iid, y_test_non_iid)
aggregate_sum += accuracy
#print "NB accuracy: ", accuracy # Uncomment if you wish to see the values
recorded_accuracies.append(aggregate_sum/float(n))
# Some cleanup
f1 = None
f2 = None
f3 = None
x_test_non_iid = None
y_test_non_iid = None
# Now plot the change observed in classifier accuracy over time.
plt.plot(recorded_accuracies,label='Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('n')
plt.title('Accuracy as test distribution drifts (changing target class mu, non-target class sigma)')
plt.legend(loc='upper left')
plt.show()
def scale(data, new_min, new_max):
    """Linearly rescale data to lie within the range [new_min, new_max].

    Parameters
    ----------
    data : sequence
        The data to scale.
    new_min : float
        The new minimum value for the data range.
    new_max : float
        The new maximum value for the data range.

    Returns
    -------
    list
        A new list with the data scaled to within [new_min, new_max].
    """
    min_ = min(data)
    max_ = max(data)
    span = max_ - min_

    # Constant-valued data cannot be rescaled meaningfully; map every
    # point to the bottom of the new range instead of dividing by zero.
    if span == 0:
        return [new_min for _ in data]

    # new_min * (1 - t) + new_max * t, with t = (value - min_) / span,
    # simplifies to the expression below.
    return [new_min + (new_max - new_min) * (value - min_) / span
            for value in data]
# Now scale the first half of each data set to [0,1],
# and add to the test and training data sets.
from sklearn.model_selection import train_test_split
X = [] # Stores the feature data.
Y = [] # Stores the class labels.
# Add pulsar examples.
for i in range(0, len(pulsar_data)):
# Now here we extract the features with the call
# to compute_features().
X.append(compute_features(pulsar_data[i]))
Y.append(1)
# Add non-pulsar examples.
for i in range(0, len(non_pulsar_data)):
# Now here we extract the features with the call
# to compute_features().
X.append(compute_features(non_pulsar_data[i]))
Y.append(0)
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.5)
print ('\nExamples in training set: ' , str(len(x_train)))
print ('Examples in testing set: ' , str(len(x_test)))
# There should be 4 features per example. Lets just check this is
# the case.
print ('Dimensions of training set: ' , str(np.asarray(x_train).shape))
print ('Dimensions of testing set: ' , str(np.asarray(x_test).shape))
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
# First train the classifier with call to fit.
classifier.fit(x_train, y_train)
# Now obtain the classifiers 'score'
accuracy = classifier.score(x_test, y_test)
print ("Naive Bayes Classifier accuracy: ", (100* accuracy), "%.")
# Get fresh data sets to prevent making mistakes
# Also keeps the cells modular.
X = [] # Stores the feature data.
Y = [] # Stores the class labels.
# Add pulsar examples.
for i in range(0, len(pulsar_data)):
X.append(pulsar_data[i])
Y.append(1)
# Add non-pulsar examples.
for i in range(0, len(non_pulsar_data)):
X.append(non_pulsar_data[i])
Y.append(0)
# Get a whole new split.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.5)
# Now scale the training data set to [0,1],
# then the test data set to [0,255].
for i in range(0, len(x_train)):
x_train[i] = compute_features(scale(x_train[i],0,1))
for i in range(0, len(x_test)):
x_test[i] = compute_features(scale(x_test[i],0,255))
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
# First train the classifier with call to fit.
classifier.fit(x_train, y_train)
# Now obtain the classifiers 'score'
accuracy = classifier.score(x_test, y_test)
print ("Naive Bayes Classifier accuracy: ", (100* accuracy), "%.")
from scipy import signal
# Get fresh data sets to prevent making mistakes
# Also keeps the cells modular.
X = [] # Stores the feature data.
Y = [] # Stores the class labels.
# Add pulsar examples.
for i in range(0, len(pulsar_data)):
X.append(pulsar_data[i])
Y.append(1)
# Add non-pulsar examples.
for i in range(0, len(non_pulsar_data)):
X.append(non_pulsar_data[i])
Y.append(0)
# Get a whole new split.
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.5)
# Now scale the training data set to [0,1],
for i in range(0, len(x_train)):
x_train[i] = compute_features(scale(x_train[i],0,1))
for i in range(0, len(x_test)):
# First get the data back to [0,1]
x = scale(x_test[i],0,1)
x_downsampled = signal.resample(x, 64)
x_test[i] = compute_features(x_downsampled)
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
# First train the classifier with call to fit.
classifier.fit(x_train, y_train)
# Now obtain the classifiers 'score'
accuracy = classifier.score(x_test, y_test)
print ("Naive Bayes Classifier accuracy: ", (100* accuracy), "%.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: bicultural
Step2: mixed race
Step3: polyculturalism has no frequency in NYT
|
<ASSISTANT_TASK:>
Python Code:
words = ['biracial', 'biethnic', 'bicultural', 'interracial']
plot(words)
_ = plt.xlim(1890, 2015)
_ = plt.ylim(10e-7, 10e-2)
savefig('NYT2.png')
words = ['mixed race', 'mixed ethnicity', 'other race', 'other ethnicity']
plot(words)
_ = plt.ylim(2e-4, 3e-1)
savefig('NYT3.png')
words = ['multiracial', 'multiethnic', 'multicultural', 'polyracial', 'polyethnic', 'polycultural']
plot(words)
_ = plt.xlim(1950, 2015)
_ = plt.ylim(1e-4, 3e-1)
savefig('NYT4.png')
words = ['multiculturalism', 'polyculturalism', 'cultural pluralism']
plot(words)
_ = plt.xlim(1920, 2015)
_ = plt.ylim(3e-4, 3e-1)
savefig('NYT5.png')
words = ['monocultural', 'bicultural', 'interracial','multiracial', 'multicultural','mixed race']
plot(words)
_ = plt.ylim(3e-4, 3e-1)
savefig('NYT6.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Generate list of data
Step3: !!! NMF TEST !!!
Step4: Rank Subgraphs Based on Pos/Neg Expression
Step5: Plot an example of relative expression
Step6: Subgraph Motion-Detection
Step7: Plot Ranked System Subgraph Matrices
Step8: Subgraph Topology
Step9: Expression Constrasts
Step10: Lo vs Hi
Step11: Task Performance
Step12: Plot Correlation Distributions
Step13: Regional Contribution to Modulation in Performance
|
<ASSISTANT_TASK:>
Python Code:
try:
%load_ext autoreload
%autoreload 2
%reset
except:
print 'NOT IPYTHON'
from __future__ import division
import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
import sys
import glob
import numpy as np
import pandas as pd
#import seaborn as sns
from statsmodels.sandbox.stats import multicomp
import scipy.stats as stats
import scipy.io as io
import h5py
import matplotlib
import matplotlib.pyplot as plt
echobase_path = '/Users/akhambhati/Developer/hoth_research/Echobase'
#echobase_path = '/data/jag/akhambhati/hoth_research/Echobase'
sys.path.append(echobase_path)
import Echobase
convert_conn_vec_to_adj_matr = Echobase.Network.Transforms.configuration.convert_conn_vec_to_adj_matr
convert_adj_matr_to_cfg_matr = Echobase.Network.Transforms.configuration.convert_adj_matr_to_cfg_matr
rcParams = Echobase.Plotting.fig_format.update_rcparams(matplotlib.rcParams)
path_Remotes = '/Users/akhambhati/Remotes'
#path_Remotes = '/data/jag/bassett-lab/akhambhati'
path_CoreData = path_Remotes + '/CORE.fMRI_cogcontrol.medaglia'
path_PeriphData = path_Remotes + '/RSRCH.NMF_CogControl'
path_InpData_Netw = path_PeriphData + '/e01-FuncNetw'
path_InpData_Subg = path_PeriphData + '/e02-FuncSubg'
path_ExpData = path_PeriphData + '/e03-FuncSubg_Dynamics'
path_Figures = './e03-Figures/'
for path in [path_CoreData, path_PeriphData, path_ExpData]:
if not os.path.exists(path):
print('Path: {}, does not exist'.format(path))
os.makedirs(path)
%matplotlib inline
# Load Functional Data
df_cfg = np.load('{}/Population.Configuration_Matrix.npz'.format(path_InpData_Netw))
df_subgraph = np.load('{}/Yeo_Subgraph.All.npz'.format(path_InpData_Subg))
df_to_yeo = np.load('{}/Lausanne125_to_Yeo.npz'.format(path_InpData_Netw))
df_nmf = np.load("{}/NMF_Consensus.Param.All.npz".format(path_InpData_Subg),
mmap_mode='r')
n_subj, _, _, _, _, n_block = df_cfg['cfg_obs_lut'].shape
n_fac = len(df_subgraph['system_subgraph'])
surr_coef = np.array([np.load(pth, mmap_mode='r')['fac_coef'][...]
for pth in glob.glob('{}/NMF_Surrogate.Param.*.npz'.format(path_InpData_Subg))])
surr_coef = surr_coef.reshape(-1, surr_coef.shape[-1])
# Load Behavioral Data
df_blk = io.loadmat('{}/BlockwiseDataCorrectTrialsOnly.mat'.format(path_CoreData))
bad_subj_ix = [1, 6]
good_subj_ix = np.setdiff1d(np.arange(n_subj+2), bad_subj_ix)
# Column layout of the blockwise behavioural data:
# 'high control accuracy', 'low control accuracy', 'high control mean RT',
# 'high control median RT', 'low control mean RT', 'low control median RT'
df_perf = {'Stroop': {'lo': {'accuracy': df_blk['StroopData'][good_subj_ix, 1, :],
'meanRT': df_blk['StroopData'][good_subj_ix, 4, :],
'medianRT': df_blk['StroopData'][good_subj_ix, 5, :]},
'hi': {'accuracy': df_blk['StroopData'][good_subj_ix, 0, :],
'meanRT': df_blk['StroopData'][good_subj_ix, 2, :],
'medianRT': df_blk['StroopData'][good_subj_ix, 3, :]}
},
'Navon' : {'lo': {'accuracy': df_blk['NavonData'][good_subj_ix, 1, :],
'meanRT': df_blk['NavonData'][good_subj_ix, 4, :],
'medianRT': df_blk['NavonData'][good_subj_ix, 5, :]},
'hi': {'accuracy': df_blk['NavonData'][good_subj_ix, 0, :],
'meanRT': df_blk['NavonData'][good_subj_ix, 2, :],
'medianRT': df_blk['NavonData'][good_subj_ix, 3, :]}
}
}
# Load Motion Data
df_motion = {'Stroop': io.loadmat('{}/StroopMove.mat'.format(path_CoreData))['move'][:, 0],
'Navon': io.loadmat('{}/NavonMove.mat'.format(path_CoreData))['move'][:, 0]}
cfg_matr = df_cfg['cfg_matr'].copy()
#cfg_matr = (df_cfg['cfg_matr'].T * df_cfg['cfg_L2']).T
rank = 13
alpha = 1e-7
beta = 1e-7
n_fac = rank
# Grab the task ID of the current job (and the associated parameter dictionary)
fac_subnet = np.random.uniform(low=0, high=1.0, size=(rank, cfg_matr.shape[1]))
fac_coef = np.random.uniform(low=0, high=1.0, size=(rank, cfg_matr.shape[0]))
# Run NMF Algorithm
fac_subnet, fac_coef, err = Echobase.Network.Partitioning.Subgraph.nmf.snmf_bcd(
cfg_matr, alpha=alpha, beta=beta, fac_subnet_init=fac_subnet, fac_coef_init=fac_coef,
max_iter=20, sparse_dim='conn', verbose=True)
import string
abcd = list(string.ascii_uppercase)
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
# Re-rank subgraphs based on positive/negative expression
del_expr_mean = []
del_expr_stdv = []
for fac_ii in xrange(n_fac):
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, :, :, :, 0, :]
neg_expr = sel_fac_coef[:, :, :, :, 1, :]
del_expr = (pos_expr-neg_expr).mean(axis=-1).mean(axis=-1).mean(axis=-1).mean(axis=-1)
del_expr_mean.append(del_expr.mean())
del_expr_stdv.append(del_expr.std() / np.sqrt(n_subj))
del_expr_mean = np.array(del_expr_mean)
del_expr_stdv = np.array(del_expr_stdv)
sort_fac = np.argsort(del_expr_mean)[::-1]
# Create a sorted dictionary
sort_fac_dict = {}
for ltr, fac_ii in zip(abcd, sort_fac):
sort_fac_dict[ltr] = fac_ii
# Plot distribution of mean relative expression
plt.figure(dpi=300)
ax = plt.subplot(111)
ax.bar(xrange(n_fac), del_expr_mean[sort_fac], yerr=del_expr_stdv[sort_fac], lw=0)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(n_fac)+0.4);
ax.set_xticklabels(np.sort(sort_fac_dict.keys()));
ax.set_xlim([0, n_fac])
ax.set_xlabel('Subgraphs')
ax.set_ylabel('Mean Relative Expression')
plt.savefig('{}/Subgraph_Relative_Expression.svg'.format(path_Figures))
plt.show()
fac_key = 'K'
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, :, :, :, 0, :]
neg_expr = sel_fac_coef[:, :, :, :, 1, :]
# Plot distribution of mean relative expression
plt.figure(dpi=300)
ax = plt.subplot(111)
ax.plot(pos_expr[27, ...].reshape(-1), color='r')
ax.plot(neg_expr[27, ...].reshape(-1), color='b')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Blocks')
ax.set_ylabel('Expression')
plt.savefig('{}/Example_Pos_Neg.Subgraph_{}.svg'.format(path_Figures, fac_key))
plt.show()
plt.close()
alpha = 0.05
motion_rv = []
motion_pv = []
motion_color = []
motion_fac = []
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
for fac_key in np.sort(sort_fac_dict.keys()):
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
fac_expr_subj = sel_fac_coef.reshape(n_subj, -1).mean(axis=-1)
rv, pv = stats.spearmanr(0.5*(df_motion['Stroop'] + df_motion['Navon']),
fac_expr_subj)
motion_rv.append(rv)
motion_pv.append(pv)
is_sig = multicomp.multipletests(motion_pv, alpha=alpha, method='fdr_bh')[0]
for sig_bool, fac_key in zip(is_sig, np.sort(sort_fac_dict.keys())):
if sig_bool:
motion_color.append('r')
motion_fac.append(fac_key)
else:
motion_color.append('k')
motion_rv = np.array(motion_rv)
motion_color = np.array(motion_color)
motion_fac = np.array(motion_fac)
# Identify Subgraphs that correlate with motion
plt.figure(dpi=300)
ax = plt.subplot(111)
ax.bar(xrange(n_fac), motion_rv,
lw=0, color=motion_color)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(n_fac)+0.4);
ax.set_xticklabels(np.sort(sort_fac_dict.keys()));
ax.set_ylim([-1, 1])
ax.set_xlim([0, n_fac])
ax.set_xlabel('Subgraphs')
ax.set_ylabel('Spearman rho(Expression, Motion)')
plt.savefig('{}/Subgraph_Motion_Score.svg'.format(path_Figures))
plt.show()
%matplotlib inline
n_laus = len(df_to_yeo['yeo_lbl'])
n_yeo = len(df_to_yeo['yeo_name'])
fsize = 5.5
fig = plt.figure(figsize=(6, 6), dpi=300)
for ii, fac_key in enumerate(np.sort(sort_fac_dict.keys())):
fac_ii = sort_fac_dict[fac_key]
sel_fac_subnet = df_subgraph['system_subgraph'][fac_ii]['subnet_roi']
fac_vec = convert_adj_matr_to_cfg_matr(np.expand_dims(sel_fac_subnet, axis=0))[0, :]
vmin, vmax = fac_vec.min(), fac_vec.max()
ax = fig.add_subplot(3, 4, ii+1)
mat = ax.matshow(sel_fac_subnet, cmap='magma', vmin=vmin, vmax=vmax)
for xx in df_to_yeo['yeo_adj_demarc']:
ax.vlines(xx, 0, n_laus, color='w', lw=0.25)
ax.hlines(xx, 0, n_laus, color='w', lw=0.25)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_tick_params(width=0)
ax.xaxis.set_tick_params(width=0)
ax.grid(False)
ax.tick_params(axis='both', which='major', pad=-3)
ax.set_title(fac_key)
ax.set_xticks((df_to_yeo['yeo_adj_demarc'][:-1] + (np.diff(df_to_yeo['yeo_adj_demarc']) * 0.5)));
ax.set_xticklabels(df_to_yeo['yeo_name'], fontsize=5.0, rotation=45)
ax.set_yticks((df_to_yeo['yeo_adj_demarc'][:-1] + (np.diff(df_to_yeo['yeo_adj_demarc']) * 0.5)));
ax.set_yticklabels(df_to_yeo['yeo_name'], fontsize=5.0, rotation=45)
fig.tight_layout(pad=0.01, h_pad=0.01, w_pad=0.01)
fig.savefig('{}/Ranked_System_Subgraph.svg'.format(path_Figures))
fig.show()
%matplotlib inline
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
mean_rel_expr = {}
corep_index = {}
n_laus = len(df_to_yeo['yeo_lbl'])
n_yeo = len(df_to_yeo['yeo_name'])
alpha = 0.05 / (2*n_yeo)
fsize = 5.5
fig = plt.figure(figsize=(6, 6), dpi=300)
for ii, fac_key in enumerate(np.sort(sort_fac_dict.keys())):
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, :, :, :, 0, :]
neg_expr = sel_fac_coef[:, :, :, :, 1, :]
del_expr = (pos_expr-neg_expr).mean(axis=-1).mean(axis=-1).mean(axis=-1).mean(axis=-1).mean(axis=-1)
mean_rel_expr[fac_key] = del_expr
intra_sys = df_subgraph['system_subgraph'][fac_ii]['intra_sys']
inter_sys = df_subgraph['system_subgraph'][fac_ii]['inter_sys']
null_intra_sys = df_subgraph['system_subgraph'][fac_ii]['null_intra_sys']
null_inter_sys = df_subgraph['system_subgraph'][fac_ii]['null_inter_sys']
sig_intra_sys = np.mean(null_intra_sys > intra_sys, axis=0) < alpha
sig_inter_sys = np.mean(null_inter_sys > inter_sys, axis=0) < alpha
intra_sys_thresh = intra_sys.copy()
inter_sys_thresh = inter_sys.copy()
intra_sys_thresh[~sig_intra_sys] = 0
inter_sys_thresh[~sig_inter_sys] = 0
corep_index[fac_key] = np.nanmean((intra_sys_thresh - inter_sys_thresh) / \
(intra_sys_thresh + inter_sys_thresh))
intra_colors = np.array(['k' for xx in xrange(n_yeo)])
inter_colors = np.array(['k' for xx in xrange(n_yeo)])
intra_colors[sig_intra_sys] = 'r'
inter_colors[sig_inter_sys] = 'r'
ax = fig.add_subplot(3, 4, ii+1)
ax.barh(np.arange(n_yeo), intra_sys, color=intra_colors, lw=0)
ax.barh(np.arange(n_yeo), -1*inter_sys, color=inter_colors, lw=0)
ax.plot([0.0, 0.0], [0.0, n_yeo], color='r', lw=1.0)
for x_ii, (low_x, high_x) in enumerate(zip(np.percentile(null_intra_sys, 100-100*alpha, axis=0),
-1*np.percentile(null_inter_sys, 100-100*alpha, axis=0))):
ax.fill_between([low_x, high_x],
x_ii-0.1, x_ii+0.9,
color=[0.1, 0.1, 0.1], lw=0, alpha=0.25)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(axis='both', which='major', pad=0)
ax.set_ylim([0, n_yeo])
ax.set_yticks(np.arange(n_yeo) + 0.4);
ax.set_yticklabels(df_to_yeo['yeo_name'], fontsize=5.0, rotation=45)
ax.set_xlim([-0.015, 0.015])
ax.set_xticks(np.linspace(-0.015, 0.015, 3));
ax.set_title(fac_key)
ax.invert_yaxis()
plt.savefig('{}/Ranked_System_Subgraph.CorePeriphery.svg'.format(path_Figures))
plt.show()
plt.figure(figsize=(2,2), dpi=300.0)
ax = plt.subplot(111)
print(stats.spearmanr(corep_index.values(), mean_rel_expr.values()))
mm, yy, _, _, _ = stats.linregress(corep_index.values(), mean_rel_expr.values())
ax.scatter(corep_index.values(), mean_rel_expr.values(),
lw=0, alpha=0.75, color=[0.25, 0.25, 0.25])
ax.plot(np.array([-1.0, 1.0]), mm*np.array([-1.0, 1.0])+yy, color='k')
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(axis='both', which='major', pad=1)
ax.set_xlim([-1.1, 1.1])
ax.set_xticks(np.linspace(-1.0, 1.0, 5));
ax.set_xlabel('Core-Periphery')
min_val, max_val = -3.5, 8
ax.set_ylim([min_val, max_val])
ax.set_ylabel('Mean Relative Expression')
plt.savefig('{}/Relative_Expression.CorePeriphery.svg'.format(path_Figures))
plt.show()
%matplotlib inline
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
alpha = 0.05
del_expr_mean_stroop = []
del_expr_stdv_stroop = []
del_expr_mean_navon = []
del_expr_stdv_navon = []
tv = []
pv = []
clr = []
for ii, fac_key in enumerate(np.sort(sort_fac_dict.keys())):
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, :, :, :, 0, :]
neg_expr = sel_fac_coef[:, :, :, :, 1, :]
del_expr = (pos_expr-neg_expr).mean(axis=-1)[:, :, :, 1].mean(axis=-1)
del_expr_mean = del_expr.mean(axis=0)
del_expr_stdv = del_expr.std(axis=0) / np.sqrt(n_subj)
del_expr_mean_stroop.append(del_expr_mean[0])
del_expr_stdv_stroop.append(del_expr_stdv[0])
del_expr_mean_navon.append(del_expr_mean[1])
del_expr_stdv_navon.append(del_expr_stdv[1])
# Within-experiment Stats
tv_expr, pv_expr = stats.ttest_rel(*del_expr.T)
tv.append(tv_expr)
pv.append(pv_expr)
#
is_sig = multicomp.multipletests(pv, alpha=alpha, method='fdr_bh')[0]
for sig_bool in is_sig:
if sig_bool:
clr.append('r')
else:
clr.append([0.2, 0.2, 0.2])
print(stats.spearmanr(del_expr_mean_stroop, del_expr_mean_navon))
plt.figure(figsize=(3,3), dpi=300)
ax = plt.subplot(111)
for ii in xrange(n_fac):
xx = del_expr_mean_stroop[ii]
yy = del_expr_mean_navon[ii]
ax.scatter(xx, yy, lw=0, color=clr[ii], s=10.0)
ax.plot([xx, 1/2*(xx+yy)],
[yy, 1/2*(xx+yy)], color=clr[ii])
min_val, max_val = -3.5, 8
ax.plot([min_val, max_val],
[min_val, max_val], 'k', alpha=0.1)
ax.vlines(0, min_val, max_val, 'r')
ax.hlines(0, min_val, max_val, 'r')
ax.set_xlim([min_val, max_val])
ax.set_ylim([min_val, max_val])
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Stroop: Relative Expression')
ax.set_ylabel('Navon: Relative Expression')
plt.savefig('{}/Stroop_vs_Navon.svg'.format(path_Figures))
plt.show()
plt.close()
%matplotlib inline
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
for expr_ii, expr_id in enumerate(df_cfg['cfg_key_label'][()]['Experiment_ID']):
del_expr_mean_lo = []
del_expr_stdv_lo = []
del_expr_mean_hi = []
del_expr_stdv_hi = []
pv = []
tv = []
clr = []
for ii, fac_key in enumerate(np.sort(sort_fac_dict.keys())):
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, expr_ii, :, :, :, :][:, :, :, 0, :][:, :, 1, :]
neg_expr = sel_fac_coef[:, expr_ii, :, :, :, :][:, :, :, 1, :][:, :, 1, :]
del_expr = (pos_expr-neg_expr).mean(axis=-1)
del_expr_mean = del_expr.mean(axis=0)
del_expr_stdv = del_expr.std(axis=0) / np.sqrt(n_subj)
del_expr_mean_lo.append(del_expr_mean[0])
del_expr_stdv_lo.append(del_expr_stdv[0])
del_expr_mean_hi.append(del_expr_mean[1])
del_expr_stdv_hi.append(del_expr_stdv[1])
# Within-experiment Stats
tv_expr, pv_expr = stats.ttest_rel(*del_expr.T)
pv.append(pv_expr)
print(tv_expr, pv_expr)
for is_sig in Echobase.Statistics.FDR.fdr.bhp(pv, alpha=0.05, dependent=True):
if is_sig:
clr.append('r')
else:
clr.append([0.2, 0.2, 0.2])
print(stats.spearmanr(del_expr_mean_lo, del_expr_mean_hi))
plt.figure(figsize=(3,3), dpi=300)
ax = plt.subplot(111)
for ii in xrange(n_fac):
xx = del_expr_mean_lo[ii]
yy = del_expr_mean_hi[ii]
ax.scatter(xx, yy, lw=0, color=clr[ii], s=10.0)
ax.plot([xx, 1/2*(xx+yy)],
[yy, 1/2*(xx+yy)], color=clr[ii])
min_val, max_val = -3.5, 8
ax.plot([min_val, max_val],
[min_val, max_val], 'k', alpha=0.1)
ax.vlines(0, min_val, max_val, 'r')
ax.hlines(0, min_val, max_val, 'r')
ax.set_xlim([min_val, max_val])
ax.set_ylim([min_val, max_val])
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xlabel('Low Demand: Relative Expression')
ax.set_ylabel('High Demand: Relative Expression')
plt.savefig('{}/{}.Lo_vs_Hi.svg'.format(path_Figures, expr_id))
plt.show()
plt.close()
%matplotlib inline
coef_ix = np.array(df_cfg['cfg_obs_lut'], dtype=int)
perf_id = 'meanRT'
subgraph_perf = {}
for expr_ii, expr_id in enumerate(df_cfg['cfg_key_label'][()]['Experiment_ID']):
perf_lo = df_perf[expr_id]['lo'][perf_id].mean(axis=-1)
perf_hi = df_perf[expr_id]['hi'][perf_id].mean(axis=-1)
expr_perf = {}
for cnd_ii, cnd_id in enumerate(df_cfg['cfg_key_label'][()]['Condition_ID']):
cnd_perf = {}
for ii, fac_key in enumerate(np.sort(sort_fac_dict.keys())):
fac_ii = sort_fac_dict[fac_key]
sel_fac_coef = df_subgraph['system_subgraph'][fac_ii]['expr_coef'][coef_ix]
pos_expr = sel_fac_coef[:, expr_ii, :, :, :, :][:, cnd_ii, 1, 0, :]
neg_expr = sel_fac_coef[:, expr_ii, :, :, :, :][:, cnd_ii, 1, 1, :]
rel_expr = pos_expr-neg_expr
cnd_perf[fac_key] = stats.spearmanr(rel_expr.mean(axis=-1),
perf_hi-perf_lo)
expr_perf[cnd_id] = cnd_perf
subgraph_perf[expr_id] = expr_perf
# Subgraph Node Strengths
fac_adj = np.array([df_subgraph['system_subgraph'][sort_fac_dict[fac_key]]['subnet_roi']
for fac_key in np.sort(sort_fac_dict.keys())])
fac_cfg = convert_adj_matr_to_cfg_matr(fac_adj)
subgraph_ns = fac_adj.mean(axis=1)
subgraph_ns_null = []
for pp in xrange(10000):
fac_adj_null = np.array([convert_conn_vec_to_adj_matr(np.random.permutation(cfg))
for cfg in fac_cfg])
subgraph_ns_null.append(fac_adj_null.mean(axis=1))
subgraph_ns_null = np.array(subgraph_ns_null)
# Iterate over Subgraph Performance Predictor
subgraph_partc = {}
for expr_ii, expr_id in enumerate(subgraph_perf.keys()):
expr_partc = {}
for cnd_ii, cnd_id in enumerate(subgraph_perf[expr_id].keys()):
sel_spear = np.array([subgraph_perf[expr_id][cnd_id][fac_key]
for fac_key in np.sort(sort_fac_dict.keys())])
## Performance Plot
plt.figure(figsize=(3,3), dpi=300.0)
ax = plt.subplot(111)
clr = np.array(['k' for ff in xrange(n_fac)])
for ix, pv in enumerate(sel_spear[:, 1]):
if pv < 0.05:
clr[ix] = 'r'
ax.bar(xrange(n_fac), sel_spear[:, 0], lw=0, color=clr)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(n_fac)+0.4);
ax.set_xticklabels(np.sort(sort_fac_dict.keys()));
ax.set_xlim([0, n_fac])
ax.set_ylim([-0.55, 0.55])
ax.set_xlabel('Subgraphs')
ax.set_ylabel('rho(Relative Expression, RT Cost)')
plt.savefig('{}/Subgraph_Performance.{}.{}.svg'.format(path_Figures, expr_id, cnd_id))
plt.show()
## Subgraph participation score
expr_partc[cnd_id] = {'real': np.dot(np.arctanh(sel_spear[:, 0]), subgraph_ns),
'null': np.array([np.dot(np.arctanh(sel_spear[:, 0]), s_ns)
for s_ns in subgraph_ns_null])}
subgraph_partc[expr_id] = expr_partc
from mayavi import mlab
import nibabel as nib

cmap = 'RdBu'
# Camera [azimuth, elevation] for the two sagittal views rendered below.
view_angle = {'Sag_PA': [0.0, 90.0],
              'Sag_AP': [180.0, 90.0]}

# Get the pial surface recons (FreeSurfer geometry) for both hemispheres.
pial_hemi = {'LH': {},
             'RH': {}}
pial_hemi['LH']['vert'], pial_hemi['LH']['tria'] = nib.freesurfer.io.read_geometry('{}/BrainRenderSubject15/surf/lh.pial'.format(path_CoreData))
pial_hemi['RH']['vert'], pial_hemi['RH']['tria'] = nib.freesurfer.io.read_geometry('{}/BrainRenderSubject15/surf/rh.pial'.format(path_CoreData))

# Get the Lausanne label files for each ROI, in Yeo-sorted order, so that
# label_files[roi_ii] lines up with rows of the sorted ROI table below.
label_files = []
for roi in df_to_yeo['df_laus_yeo'][df_to_yeo['sort_laus_to_yeo']]:
    laus_lbl = roi[1].lower()
    hemi = roi[2].lower()
    # Parse the atlas name and find the label file if it exists
    lbl_file = '{}.{}.label'.format(hemi, laus_lbl)
    lbl_file = lbl_file.replace(' ', '')  # label filenames contain no spaces
    label_files.append('{}/BrainRenderSubject15/label/regenerated_{}_125/{}'.format(path_CoreData, hemi, lbl_file))
# Render significant participation scores onto the pial surface, one figure
# per (expression, condition) pair, screenshotting two sagittal views per
# hemisphere with mayavi and assembling them into a 2x2 matplotlib montage.
subgraph_partc_pixmap = {}
for expr_ii, expr_id in enumerate(subgraph_partc.keys()):
    xmax = [0.005, 0.01]  # colormap half-range per expression type (indexed by expr_ii)
    for cnd_id in subgraph_partc[expr_id].keys():
        real_partc = subgraph_partc[expr_id][cnd_id]['real'].copy()
        null_partc = subgraph_partc[expr_id][cnd_id]['null'].copy()
        # Two-sided permutation p-value per ROI, FDR-corrected; zero out
        # non-significant ROIs so they render at the colormap midpoint.
        pv = np.mean(np.abs(null_partc) > np.abs(real_partc), axis=0)
        is_sig = Echobase.Statistics.FDR.fdr.bhp(pv, alpha=0.05, dependent=True)
        real_partc[~is_sig] = 0

        # Iterate over hemisphere of the pial surface
        for hemi in pial_hemi.keys():
            n_vert = len(pial_hemi[hemi]['vert'])
            pial_scalars = np.zeros(n_vert)

            # Assign subcortical vertices: everything not covered by a cortical
            # (non-CRB/SUB) parcel label gets the mean significant SUB score.
            nonparc_lbl = []
            for roi_ii, roi in enumerate(df_to_yeo['df_laus_yeo'][df_to_yeo['sort_laus_to_yeo']]):
                lbl_file = label_files[roi_ii]
                sys_lbl = roi[3]
                if roi[2] != hemi:
                    continue
                if (sys_lbl == 'CRB') or (sys_lbl == 'SUB'):
                    continue
                nonparc_lbl.append([parc_ix for parc_ix in nib.freesurfer.io.read_label(lbl_file)])
            nonparc_lbl = np.array(nonparc_lbl)
            parc_subcort = np.setdiff1d(np.arange(n_vert), nonparc_lbl)
            sys_lbl = df_to_yeo['yeo_lbl'][df_to_yeo['sort_laus_to_yeo']]
            pial_scalars[parc_subcort] = np.sum(real_partc[sys_lbl == 'SUB']) / np.sum(real_partc[sys_lbl == 'SUB'] != 0)

            # Iterate over cortical brain regions of this hemisphere.
            for roi_ii, roi in enumerate(df_to_yeo['df_laus_yeo'][df_to_yeo['sort_laus_to_yeo']]):
                lbl_file = label_files[roi_ii]
                sys_lbl = roi[3]
                if roi[2] != hemi:
                    continue
                if (sys_lbl == 'CRB') or (sys_lbl == 'SUB'):
                    continue
                # Load the file and add scalar to the vertices
                parc_lbl = nib.freesurfer.io.read_label(lbl_file)
                pial_scalars[parc_lbl] = real_partc[roi_ii]

            # Plot the colored brain surface (RdBu, reversed lookup table).
            fig = mlab.figure(bgcolor=(1.0, 1.0, 1.0))
            src = mlab.pipeline.triangular_mesh_source(pial_hemi[hemi]['vert'][:,0],
                                                       pial_hemi[hemi]['vert'][:,1],
                                                       pial_hemi[hemi]['vert'][:,2],
                                                       pial_hemi[hemi]['tria'], scalars=pial_scalars, opacity=0.75, figure=fig)
            norms = mlab.pipeline.poly_data_normals(src, figure=fig)
            norms.filter.splitting = False
            surf = mlab.pipeline.surface(norms, figure=fig)
            surf.parent.scalar_lut_manager.set(lut_mode=cmap, data_range=[-xmax[expr_ii], xmax[expr_ii]], use_default_range=False)
            lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()[::-1, :]
            surf.module_manager.scalar_lut_manager.lut.table = lut

            # Rotate the view and save a screenshot for each sagittal angle.
            pixmap = {}
            for ang in view_angle.keys():
                mlab.view(azimuth=view_angle[ang][0],
                          elevation=view_angle[ang][1])
                pixmap[ang] = mlab.screenshot(mode='rgba')
            mlab.close(all=True)
            subgraph_partc_pixmap[hemi] = pixmap

        # 2x2 montage: both hemispheres, both views (mirrored on bottom row).
        fig = plt.figure(figsize=(6,6), dpi=300.0)
        ax = fig.add_subplot(2,2,1); ax.imshow(subgraph_partc_pixmap['RH']['Sag_PA']); ax.set_axis_off()
        ax = fig.add_subplot(2,2,2); ax.imshow(subgraph_partc_pixmap['LH']['Sag_AP']); ax.set_axis_off()
        ax = fig.add_subplot(2,2,3); ax.imshow(subgraph_partc_pixmap['RH']['Sag_AP'][:, ::-1, :]); ax.set_axis_off()
        ax = fig.add_subplot(2,2,4); ax.imshow(subgraph_partc_pixmap['LH']['Sag_PA'][:, ::-1, :]); ax.set_axis_off()
        fig.savefig('{}/Subgraph_Participation.{}.{}.svg'.format(path_Figures, expr_id, cnd_id))
        plt.show()
# Stand-alone colorbar figure for the RdBu colormap used in the renders above.
# Fix: the original called plt.figure(figsizedpi=300.0) — "figsizedpi" is a
# typo merging the figsize and dpi keywords and is not a valid Figure kwarg.
plt.figure(figsize=(3, 3), dpi=300.0)
ax = plt.subplot(111)
mat = ax.imshow(subgraph_partc_pixmap['RH']['Sag_PA'], cmap='RdBu');
plt.colorbar(mat, ax=ax)
plt.savefig('{}/RdBu_Colorbar.svg'.format(path_Figures))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <blockquote style="margin-right
Step2: The more complicated a combination is, the more difficult it will be for a model to learn, like this formula for an engine's "displacement", a measure of its power
Step3: Data visualization can suggest transformations, often a "reshaping" of a feature through powers or logarithms. The distribution of WindSpeed in US Accidents is highly skewed, for instance. In this case the logarithm is effective at normalizing it
Step4: Check out our lesson on normalization in Data Cleaning where you'll also learn about the Box-Cox transformation, a very general kind of normalizer.
Step5: You could also use a dataframe's built-in methods to create boolean values. In the Concrete dataset are the amounts of components in a concrete formulation. Many formulations lack one or more components (that is, the component has a value of 0). This will count how many components are in a formulation with the dataframe's built-in greater-than gt method
Step6: Building-Up and Breaking-Down Features
Step7: You could also join simple features into a composed feature if you had reason to believe there was some interaction in the combination
Step8: <blockquote style="margin-right
Step9: The mean function is a built-in dataframe method, which means we can pass it as a string to transform. Other handy methods include max, min, median, var, std, and count. Here's how you could calculate the frequency with which each state occurs in the dataset
Step10: You could use a transform like this to create a "frequency encoding" for a categorical feature.
|
<ASSISTANT_TASK:>
Python Code:
#$HIDE_INPUT$
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

# Consistent plot styling for the whole notebook.
plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
plt.rc(
    "axes",
    labelweight="bold",
    labelsize="large",
    titleweight="bold",
    titlesize=14,
    titlepad=10,
)

# Load the four example datasets used in the feature-engineering cells below.
accidents = pd.read_csv("../input/fe-course-data/accidents.csv")
autos = pd.read_csv("../input/fe-course-data/autos.csv")
concrete = pd.read_csv("../input/fe-course-data/concrete.csv")
customer = pd.read_csv("../input/fe-course-data/customer.csv")
# Ratio feature: stroke-to-bore, a standard engine-shape descriptor.
autos["stroke_ratio"] = autos.stroke / autos.bore
autos[["stroke", "bore", "stroke_ratio"]].head()

# Engine displacement from cylinder geometry: pi * (bore/2)^2 * stroke * cylinders.
autos["displacement"] = (
    np.pi * ((0.5 * autos.bore) ** 2) * autos.stroke * autos.num_of_cylinders
)

# If the feature has 0.0 values, use np.log1p (log(1+x)) instead of np.log
accidents["LogWindSpeed"] = accidents.WindSpeed.apply(np.log1p)
# Plot a comparison of the raw (skewed) vs. log-transformed distributions.
fig, axs = plt.subplots(1, 2, figsize=(8, 4))
sns.kdeplot(accidents.WindSpeed, shade=True, ax=axs[0])
sns.kdeplot(accidents.LogWindSpeed, shade=True, ax=axs[1]);

# Count feature: number of roadway attributes present at each accident.
roadway_features = ["Amenity", "Bump", "Crossing", "GiveWay",
    "Junction", "NoExit", "Railway", "Roundabout", "Station", "Stop",
    "TrafficCalming", "TrafficSignal"]
accidents["RoadwayFeatures"] = accidents[roadway_features].sum(axis=1)
accidents[roadway_features + ["RoadwayFeatures"]].head(10)

# Count feature: how many non-zero components each concrete formulation has.
components = [ "Cement", "BlastFurnaceSlag", "FlyAsh", "Water",
               "Superplasticizer", "CoarseAggregate", "FineAggregate"]
concrete["Components"] = concrete[components].gt(0).sum(axis=1)
concrete[components + ["Components"]].head(10)

customer[["Type", "Level"]] = (  # Create two new features
    customer["Policy"]           # from the Policy feature
    .str                         # through the string accessor
    .split(" ", expand=True)     # by splitting on " "
    # and expanding the result into separate columns
)
customer[["Policy", "Type", "Level"]].head(10)

# Interaction feature: combine make and body style into one categorical.
autos["make_and_style"] = autos["make"] + "_" + autos["body_style"]
autos[["make", "body_style", "make_and_style"]].head()
# Group transform: annotate each customer with their state-level mean income.
customer["AverageIncome"] = (
    customer.groupby("State")  # for each state
    ["Income"]                 # select the income
    .transform("mean")         # and compute its mean
)
customer[["State", "Income", "AverageIncome"]].head(10)

# Frequency encoding: fraction of all rows belonging to each state.
customer["StateFreq"] = (
    customer.groupby("State")
    ["State"]
    .transform("count")
    / customer.State.count()
)
customer[["State", "StateFreq"]].head(10)

# Leakage-safe group statistic: learn the statistic on a training split only,
# then merge the learned values into the validation split.
# Create splits
df_train = customer.sample(frac=0.5)
df_valid = customer.drop(df_train.index)

# Create the average claim amount by coverage type, on the training set
df_train["AverageClaim"] = df_train.groupby("Coverage")["ClaimAmount"].transform("mean")

# Merge the values into the validation set
df_valid = df_valid.merge(
    df_train[["Coverage", "AverageClaim"]].drop_duplicates(),
    on="Coverage",
    how="left",
)
df_valid[["Coverage", "AverageClaim"]].head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Estados ligados
Step2: Para efectos numéricos el infinito se traslada a una longitud grande comparativamente al ancho del pozo, la cual se designará como $L$. En el caso de que $a = L$, corresponde justamente al pozo infinito, de manera que la simulación de estos dos casos requiere un solo control y es basado en el potencial finito.
Step3: Potencial armonico
Step4: Potencial arbitrario
|
<ASSISTANT_TASK:>
Python Code:
from tecnicas_numericas import *
import tecnicas_numericas
print(dir(tecnicas_numericas))
def V_inf(x):
    """Potential inside an infinite square well: identically zero.

    The infinite walls are enforced by the solver's boundary conditions,
    not by this function, so the interior potential is simply 0 at every x.
    """
    return 0
def V_fin(V_0, a, x):
    """Finite square-well potential centred at x = 0.

    Parameters
    ----------
    V_0 : barrier height outside the well.
    a   : full width of the well.
    x   : position at which to evaluate the potential.

    Returns 0 inside the well (|x| < a/2) and V_0 at or beyond the walls.
    """
    inside_well = abs(x) < a/2
    return 0 if inside_well else V_0
# Widgets for the finite square well: base controls plus a well-width slider 'a'.
control_pozo = fun_contenedor_base()
agregar_control(control_pozo, FloatSlider(value = 5.2, min = .5, max= 10., step= .1, description='a'))
# Link the L slider's minimum to the current well width a (L >= a).
pozo_link = link((control_pozo.children[1], 'min'), (control_pozo.children[4], 'value'))
boton_pozo = Button(description='Simular pozo')

def click_pozo(boton):
    # Read current widget values and solve the Schrodinger equation for the well.
    V_max = control_pozo.children[0].value
    L = control_pozo.children[1].value
    N = control_pozo.children[2].value
    n = control_pozo.children[3].value
    a = control_pozo.children[4].value
    Vx = lambda x: V_fin(V_max, a, x)
    Solve_Schr(Vx, V_max, L, N, n)
    clear_output(wait=True)

boton_pozo.on_click(click_pozo)
display(control_pozo, boton_pozo)
def V_arm(omega, x):
    """Harmonic-oscillator potential V(x) = omega**2 * x**2 / 4.

    `omega` is the oscillator's angular frequency and `x` the position.
    """
    curvature = omega ** 2
    return curvature * x ** 2 / 4
# Widgets for the harmonic potential: base controls plus an omega slider.
control_arm = fun_contenedor_base()
agregar_control(control_arm, FloatSlider(value = 1., min = .1, max= 4., step= .1, description='$\omega$'))
boton_arm = Button(description='Simular potencial')

def click_arm(boton):
    # Read current widget values and solve for the harmonic potential.
    E_max = control_arm.children[0].value
    L = control_arm.children[1].value
    N = control_arm.children[2].value
    n = control_arm.children[3].value
    omega = control_arm.children[4].value
    Vx = lambda x: V_arm(omega, x)
    Solve_Schr(Vx, E_max, L, N, n)
    clear_output(wait=True)

boton_arm.on_click(click_arm)
display(control_arm, boton_arm)
# Widgets for an arbitrary, user-typed potential V(x).
control_arb = fun_contenedor_base()
E_max = control_arb.children[0]
L = control_arb.children[1]
N = control_arb.children[2]
n = control_arb.children[3]
n.value = 300
L.value = 20.
# Enter a Python expression in 'x' only.
# NOTE(review): the text is passed to eval(); acceptable for a local notebook
# widget, but never expose this to untrusted input.
str_potencial = Text(value='x**2 / 4 + x**3 / 50', description= 'Potencial')
str_potencial.funcion = lambda x: eval(str_potencial.value)
agregar_control(control_arb, str_potencial)

def ingreso_potencial(str_potencial):
    # Rebuild V(x) from the text box, then rescale the E_max slider so it
    # spans the potential's range sampled over the domain [-L/2, L/2].
    str_potencial.funcion = lambda x: eval(str_potencial.value)
    Vx = str_potencial.funcion
    h = L.value / n.value
    V_vec = [Vx(-L.value/2 + h*i) for i in range(n.value + 1)]
    V_min = min(V_vec)
    V_max = max(V_vec)
    dV = (V_max - V_min) / 50
    E_max.step = dV
    E_max.min = V_min
    E_max.max = V_max + (V_max - V_min)
    E_max.value = V_max

ingreso_potencial(str_potencial)
boton_arb = Button(description='Simular potencial')

def click_arbitrario(boton):
    # Solve using the currently parsed potential and widget values.
    Vx = str_potencial.funcion
    Solve_Schr(Vx, E_max.value, L.value, N.value, n.value)
    clear_output(wait=True)

str_potencial.on_submit(ingreso_potencial)
boton_arb.on_click(click_arbitrario)
display(control_arb, boton_arb)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: BS, buy- sell can be predicted with 85% accuracy
Step3: Dataset
Step4: Regression
Step5: ARCH
Step6: Logistic Regression
Step7: Decision Trees
Step8: Random Forest
Step9: KNN
Step10: Ada Boosting binary Classification
Step11: Gradient Tree Boosting binary Classification
Step12: Quadratic Discriminant Analysis binary Classification
Step13: SVM
Step14: Saving
Step15: graphical
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import csv
import glob
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults
import pickle
#from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
def get_csv_pd(path):
    """Load a tick-quote CSV into a DataFrame.

    The first column is parsed as a DatetimeIndex; the four quote columns
    (askPrice, askSize, bidPrice, bidSize) are read as float32 to reduce
    memory use.
    """
    quote_dtypes = {'askPrice': np.float32, 'askSize': np.float32,
                    'bidPrice': np.float32, 'bidSize': np.float32}
    return pd.read_csv(path, sep=',', dtype=quote_dtypes,
                       index_col=0, parse_dates=True)
def preprocessing(df):
    """Clean raw quote ticks and derive microstructure features.

    Forward-fills zero quotes, drops rows with prices more than one standard
    deviation below the mean, then adds: mid, size-weighted vwap, spread,
    60-tick velocity ``v``, 12-tick momentum ``mom`` scaled by ``v``, 1-tick
    ask/bid return, rolling spread volatility ``sigma`` and 5-tick rolling
    high/low.  Mutates and returns ``df``.
    """
    # Zero prices/sizes are missing quotes: carry the last valid value forward.
    df.bidPrice = df.loc[:, 'bidPrice'].replace(to_replace=0, method='ffill')
    df.bidSize = df.loc[:, 'bidSize'].replace(to_replace=0, method='ffill')
    df.askPrice = df.loc[:, 'askPrice'].replace(to_replace=0, method='ffill')
    df.askSize = df.loc[:, 'askSize'].replace(to_replace=0, method='ffill')
    df = df.dropna()
    # to exclude 0: drop residual bad ticks more than 1 std below the mean.
    df = df[df['bidPrice'] > df.bidPrice.mean() - df.bidPrice.std()]
    df = df[df['askPrice'] > df.askPrice.mean() - df.askPrice.std()]
    df['mid'] = (df.askPrice + df.bidPrice) / 2
    # Size-weighted mid of the inside quote.
    df['vwap'] = ((df.loc[:, 'bidPrice'] * df.loc[:, 'bidSize']) + (df.loc[:, 'askPrice'] * df.loc[:, 'askSize'])) / (df.loc[:, 'bidSize'] + df.loc[:, 'askSize'])
    df['spread'] = df.vwap - df.mid
    # 60-tick price velocity.
    df['v'] = (df.mid - df.mid.shift(60))
    # 12-tick move relative to the 60-tick move, guarded against 0/0.
    # NOTE(review): np.where still evaluates the division where v == 0, which
    # can emit divide-by-zero warnings even though 0 is selected there.
    df['mom'] = np.where(np.logical_and((df.mid - df.mid.shift(12)) != 0, df.v != 0), (df.mid - df.mid.shift(12)) / df.v, 0)
    df['return'] = (df.askPrice / df.bidPrice.shift(1)) - 1
    df['sigma'] = df.spread.rolling(60).std()
    df['high'] = df.askPrice.rolling(5).max()
    df['low'] = df.bidPrice.rolling(5).min()
    return df
'''
def normalise(df,window_length=60):
dfn=(df-df.rolling(window_length).min())/(df.rolling(window_length).max()-df.rolling(window_length).min())
return dfn
def de_normalise(data,df,window_length=60):
dn=(df*(data.rolling(window_length).max()-data.rolling(window_length).min()))+data.rolling(window_length).min()
return dn
#https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(l, n):
Yield successive n-sized chunks from l.
for i in range(0, len(l), n):
yield l[i:i + n]
##### ARIMA
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARIMAResults
###ARIMA preprocessing
def arima_processing(df):
#data=df[['vwap','mid']]
df=df.dropna()
df['Lvwap']=np.log(df.vwap)
df['Lmid']=np.log(df.mid)
df['LDvwap']=df.Lvwap-df.Lvwap.shift(60)
df['LDmid']=df.Lmid-df.Lmid.shift(60)
df=df.dropna()
return df
###Model is already saved from "/Dropbox/DataScience/ARIMA_model_saving.ipynb". Here loaded and added to "df_ml"
def ARIMA_(data):
### load model
data=data.dropna()
predictions_mid=ARIMA_mid(data.LDmid)
predictions_vwap=ARIMA_vwap(data.LDvwap)
vwap_arima=np.exp(predictions_vwap+data.Lvwap.shift(60))
mid_arima=np.exp(predictions_mid+data.Lmid.shift(60))
df_ml['arima']=data.mid+vwap_arima-mid_arima
def ARIMA_mid(data):
### load model
mid_arima_loaded = ARIMAResults.load('mid_arima.pkl')
predictions_mid = mid_arima_loaded.predict()
return predictions_mid
def ARIMA_vwap(data):
### load model
vwap_arima_loaded = ARIMAResults.load('vwap_arima.pkl')
predictions_vwap = vwap_arima_loaded.predict()
return predictions_vwap
#### KALMAN moving average
##KF moving average
#https://github.com/pykalman/pykalman
# Import a Kalman filter and other useful libraries
from pykalman import KalmanFilter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import poly1d
def kalman_ma(data):
#x=data.mid
x=data.mid
# Construct a Kalman filter
kf = KalmanFilter(transition_matrices = [1],
observation_matrices = [1],
initial_state_mean = 248,
initial_state_covariance = 1,
observation_covariance=1,
transition_covariance=.01)
# Use the observed values of the price to get a rolling mean
state_means, _ = kf.filter(x.values)
state_means = pd.Series(state_means.flatten(), index=x.index)
df_ml['km']=state_means
### Linear Regression, sklearn, svm:SVR,linear_model
import pickle
#from sklearn.cross_validation import train_test_split
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
## loading model saved from /Dropbox/DataScience/REG_model_saving.ipynb
filename_rgr = 'rgr.sav'
filename_svr = 'svr.sav'
# load the model from disk
loaded_rgr_model = pickle.load(open(filename_rgr, 'rb'))
loaded_svr_model = pickle.load(open(filename_svr, 'rb'))
def strat_lr(data,df):
df=df.dropna()
data=data.dropna()
X=df[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
y=df.mid
predict_regr=loaded_rgr_model.predict(X)
predict_svr=loaded_svr_model.predict(X)
df['predict_regr']=predict_regr
df['predict_svr']=predict_svr
df_ml['REG']=de_normalise(data.mid,df.predict_regr)
df_ml['SVR']=de_normalise(data.mid,df.predict_svr)
#### loading classification model from /Dropbox/DataScience/ML_20Sep
filename_svm_model_up = 'svm_model_up.sav'
filename_lm_model_up = 'lm_model_up.sav'
filename_svm_model_dn = 'svm_model_dn.sav'
filename_lm_model_dn = 'lm_model_dn.sav'
# load the model from disk
loaded_svm_up_model = pickle.load(open(filename_svm_model_up, 'rb'))
loaded_lm_up_model = pickle.load(open(filename_lm_model_up, 'rb'))
loaded_svm_dn_model = pickle.load(open(filename_svm_model_dn, 'rb'))
loaded_lm_dn_model = pickle.load(open(filename_lm_model_dn, 'rb'))
def classification_up_dn(data):
X=data[['askPrice','askSize','bidPrice','bidSize','vwap','spread','v','return','sigma']]
y1=data.U
y2=data.D
predict_svm_up=loaded_svm_up_model.predict(X)
predict_lm_up=loaded_lm_up_model.predict(X)
predict_svm_dn=loaded_svm_dn_model.predict(X)
predict_lm_dn=loaded_lm_dn_model.predict(X)
data['predict_svm_up']=predict_svm_up
data['predict_lm_up']=predict_lm_up
data['predict_svm_dn']=predict_svm_dn
data['predict_lm_dn']=predict_lm_dn
data['predict_svm']=data.predict_svm_up+data.predict_svm_dn
data['predict_lm']=data.predict_lm_up+data.predict_lm_dn
data['UD']=np.where(np.logical_and(data.predict_svm>0,data.predict_lm>0),1,np.where(np.logical_and(data.predict_svm<0,data.predict_lm<0),-1,0))
df_ml['UD']=data.UD
### LSTM
#df.loc[:, cols].prod(axis=1)
def lstm_processing(df):
df=df.dropna()
df_price=df[['mid','vwap','arima','km','REG','SVR']]
#normalization
dfn=normalise(df_price,12)
dfn['UD']=df.UD
return dfn
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import numpy
import matplotlib.pyplot as plt
import pandas
import math
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import load_model
model = load_model('21sep.h5')
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
b = dataset[i:(i+look_back), 1]
c = dataset[i:(i+look_back), 2]
d = dataset[i:(i+look_back), 3]
e= dataset[i:(i+look_back), 4]
f = dataset[i:(i+look_back), 5]
g= dataset[i:(i+look_back), 6]
dataX.append(np.c_[b,c,d,e,f,g])
#dataX.append(b)
#dataX.append(c)
#dataX.append(d)
#dataX.append(e)
#dataX.concatenate((a,bT,cT,dT,eT),axis=1)
dataY.append(dataset[i + look_back,0])
return np.array(dataX), np.array(dataY)
def strat_LSTM(df_ml):
#normalization
df_lstm=lstm_processing(df_ml)
df_lstm=df_lstm.dropna()
dataset=df_lstm.values
dataset = dataset.astype('float32')
# reshape into X=t and Y=t+1
look_back = 3
X_,Y_ = create_dataset(dataset,look_back)
# reshape input to be [samples, time steps, features]
X_ = numpy.reshape(X_, (X_.shape[0],X_.shape[1],X_.shape[2]))
# make predictions
predict = model.predict(X_)
df_lstm=df_lstm.tail(len(predict))
df_lstm['LSTM']=predict
#LSTM=(df_lstm.LSTM*(df_ml.mid.rolling(60).max()-df_ml.midClose.rolling(60).min()))+df_LSTM.Close.rolling(60).min()
LSTM=de_normalise(df_ml.mid,df_lstm.LSTM,window_length=12)
df_lstm['pred']=LSTM
df_lstm=df_lstm.dropna()
df_lstm=df_lstm.tail(len(df_ml))
df_ml['LSTM']=df_lstm.pred
'''
'''
#https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(l, n):
Yield successive n-sized chunks from l.
for i in range(0, len(l), n):
yield l[i:i + n]
'''
# Load one day of SPY ticks, engineer features, and keep the model columns.
filename = '/home/octo/Dropbox'+ '/SPY7Dec.csv'
data = get_csv_pd(filename)
data = preprocessing(data)
df = data.dropna()
df = df[['mid','vwap','spread','v','mom','return','sigma','high','low',]]

# split into train and test sets (chronological 80/20, no shuffling)
train_size = int(len(df) * 0.80)
test_size = len(df) - train_size
train = df[0:train_size]
test = df[train_size:len(df)]
print(len(train), len(test))

# Target is 'mom'; the remaining engineered columns are predictors.
train_X = train[['mid','vwap','spread','v','return','sigma','high','low',]]
train_y = train['mom']
test_X = test[['mid','vwap','spread','v','return','sigma','high','low',]]
test_y = test['mom']
train_X.head()
test_y.head()

from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(train_X, train_y)
predict = regr.predict(test_X)
# Turn predicted momentum into a price forecast:
# predicted mom * velocity + 12-tick-old mid.
dt = test[['mid']]
dt['predict'] = predict
dt['predict'] = dt.predict * test.v + test.mid.shift(12)
pdf = test
pdf['pREG'] = dt.predict
pdf.tail()
from sklearn.svm import SVR

# Fit an RBF support-vector regressor on the same features/target.
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.9)  # kernel='linear' / 'poly' also possible
predict_svr = svr_rbf.fit(train_X, train_y).predict(test_X)
dt1 = test[['mid']]
dt1['predict'] = predict_svr
dt1['predict'] = dt1.predict * test.v + test.mid.shift(12)
pdf['pSVR'] = dt1.predict
pdf.dropna().head()

# Price, rolling band and both model forecasts over the last 300 ticks.
pdf[['mid','high','low','pREG','pSVR']].tail(300).plot(figsize=(15,9))
plt.show()

# look at the results
# NOTE(review): plt.hold() was removed in matplotlib >= 3.0; on modern
# matplotlib these calls raise and can simply be deleted (overlay is default).
plt.scatter(pdf['mid'], test_y, c='k', label='data')
plt.hold('on')
plt.plot(pdf['pREG'], test_y, c='g', label='pREG')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()

# look at the results
plt.scatter(pdf['mid'], pdf['pREG'], c='k', label='pSVR')
plt.hold('on')
plt.plot(pdf['mid'], pdf['pSVR'], c='g', label='pREG')
plt.plot(pdf['mid'], pdf['high'], c='g', label='high')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
# Refit both regressors on the full dataset to produce the pREG/pSVR columns
# consumed by the direction-classification step below.
# NOTE(review): predicting on the same data the models were fit on — these
# columns are in-sample and will look optimistic.
X = df[['mid','vwap','spread','v','return','sigma','high','low',]]
y = df['mom']
len(df)

from sklearn import linear_model
regr = linear_model.LinearRegression()
regr.fit(X.dropna(), y.dropna())
predict = regr.predict(X)
dt = df[['mid']]
dt['predict'] = predict
dt['predict'] = dt.predict * df.v + df.mid.shift(12)
classify_df = df
classify_df['pREG'] = dt.predict

from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.9)
predict_svr = svr_rbf.fit(X, y).predict(X)
dt1 = df[['mid']]
dt1['predict'] = predict_svr
dt1['predict'] = dt1.predict * df.v + df.mid.shift(12)
classify_df['pSVR'] = dt1.predict
def classification(df):
    """Label each row with an up/down/flat direction signal.

    A row is "up" (UD = 1) when mid is above BOTH model forecasts (pREG,
    pSVR) and above the high/low band midpoint; "down" (UD = -1) when mid
    is below both forecasts and below the band midpoint; otherwise 0.
    Adds columns U (0/1), D (0/-1) and UD = U + D.  Mutates and returns df.
    """
    band_mid = (df.high + df.low) / 2
    above_both = np.where(np.logical_and(df.mid > df.pREG, df.mid > df.pSVR), 1, 0)
    below_both = np.where(np.logical_and(df.mid < df.pREG, df.mid < df.pSVR), -1, 0)
    trend = above_both + below_both
    signal = np.where(np.logical_and(df.mid > band_mid, trend == 1), 1,
                      np.where(np.logical_and(df.mid < band_mid, trend == -1), -1, 0))
    df['U'] = np.where(signal == 1, 1, 0)
    df['D'] = np.where(signal == -1, -1, 0)
    df['UD'] = df.U + df.D
    return df
# Label the full feature frame and split chronologically for classification.
data_class = classification(classify_df)
data_class = data_class.dropna()
df = df.dropna()
# both df and data_class have U, D, UD
data_class.head()
df.head()

# split into train and test sets (chronological 80/20)
train_size = int(len(data_class) * 0.80)
test_size = len(data_class) - train_size
train = data_class[0:train_size]
test = data_class[train_size:len(data_class)]
print(len(train), len(test))

train_X = train[['mid','vwap','spread','v','return','sigma','high','low','mom','pREG','pSVR']]
train_y = train['UD']
test_X = test[['mid','vwap','spread','v','return','sigma','high','low','mom','pREG','pSVR']]
test_y = test['UD']
# One-vs-rest targets: U in {0, 1} for up moves, D in {-1, 0} for down moves.
train_U = train['U']
test_U = test['U']
train_D = train['D']
test_D = test['D']
print(len(train_U), len(test_U))
from sklearn import metrics
from sklearn.linear_model import LogisticRegression

# Logistic regression, up-move target.
model = LogisticRegression()
model.fit(train_X, train_U)
print(model)
# make predictions and summarize the fit of the model
expected = test_U
predicted = model.predict(test_X)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))

# Logistic regression, down-move target.
model = LogisticRegression()
model.fit(train_X, train_D)
print(model)
expected = test_D
predicted = model.predict(test_X)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))

from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier

# fit a CART model to the data (up-move target)
model = DecisionTreeClassifier()
model.fit(train_X, train_U)
print(model)
expected = test_U
predicted = model.predict(test_X)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
accuracy = model.score(test_X, test_U)
accuracy

# fit a CART model to the data (down-move target)
model = DecisionTreeClassifier()
model.fit(train_X, train_D)
print(model)
expected = test_D
predicted = model.predict(test_X)
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
accuracy = model.score(test_X, test_D)
accuracy
# Import the random forest model.
from sklearn.ensemble import RandomForestRegressor

# NOTE(review): a *regressor* is fit on the binary U target here, so its
# score() is R^2, not accuracy — not comparable with the classifiers below.
model = RandomForestRegressor(n_estimators=100, min_samples_leaf=10, random_state=1)
model.fit(train_X, train_U)
print(model)
expected = test_U
predicted = model.predict(test_X)
accuracy = model.score(test_X, test_U)
accuracy

from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1)
clf.fit(train_X, train_U)
accuracy = clf.score(test_X, test_U)
print(accuracy)

from sklearn import neighbors
clf = neighbors.KNeighborsClassifier()
clf.fit(train_X, train_U)
accuracy = clf.score(test_X, test_U)
print(accuracy)

from sklearn.ensemble import AdaBoostClassifier
clf = AdaBoostClassifier()
clf.fit(train_X, train_U)
accuracy = clf.score(test_X, test_U)
print(accuracy)

from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier(n_estimators=100)
clf.fit(train_X, train_U)
accuracy = clf.score(test_X, test_U)
print(accuracy)

# NOTE(review): sklearn.qda was removed from scikit-learn long ago; modern
# code uses sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis.
from sklearn.qda import QDA
clf = QDA()
clf.fit(train_X, train_U)
accuracy = clf.score(test_X, test_U)
print(accuracy)
from sklearn import metrics
from sklearn.svm import SVC

# fit a SVM model to the data (up-move target)
model = SVC()
model.fit(train_X, train_U)
print(model)
expected = test_U
predicted = model.predict(test_X)
accuracy = model.score(test_X, test_U)
accuracy

# fit a SVM model to the data (down-move target)
model = SVC()
model.fit(train_X, train_D)
print(model)
expected = test_D
predicted = model.predict(test_X)
accuracy = model.score(test_X, test_D)
accuracy

# NOTE(review): this snippet is broken as pasted — `savemodel`, `fout`,
# `datetime` and `cPickle` (a Python 2 module) are all undefined here.  It
# needs `from datetime import datetime` and `import pickle` (with
# pickle.dump) before it can run.
if savemodel == True:
    fname_out = '{}-{}.pickle'.format(fout, datetime.now())
    with open(fname_out, 'wb') as f:
        cPickle.dump(clf, f, -1)
# plotting
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
%matplotlib inline
#plt.rcParams['figure.figsize'] = 8,6
test.boxplot(column='v')
test.boxplot(by='v')
plt.ylim(245,248)
test.boxplot(by='UD')
#some descriptive statistics
test.describe()
test['v'].plot(kind='hist', grid=True, title='velocity')
test['UD'].plot(kind='hist', grid=True, title='up-down')
test['v'].plot(kind='line', grid=True, title='velocity')
test['UD'].plot(kind='line', grid=True, title='up-down')
# Find 7, 30, 120 day moving averages (very broadly, rolling week, month and quarter)
spy_12 = test.rolling(window=12).mean()
spy_60 = test.rolling(window=60).mean()
spy_360 = test.rolling(window=360).mean()
fig = plt.figure()
fig.autofmt_xdate()
ax = fig.add_subplot(1,1,1)
ax.plot(test.index,test, label='SPY')
ax.plot(spy_12.index, spy_12, label='1 min rolling')
ax.plot(spy_60.index, spy_60, label='5 min rolling')
ax.plot(spy_360.index,spy_360, label='30 min rolling')
ax.grid()
ax.legend(loc=2)
ax.set_xlabel('Date')
plt.title('SPY Closes & Rolling Averages')
plt.show()
#frequency
round(test['mom']).value_counts()
round(test['vwap'],1).hist(bins=50)
test.boxplot(column='mid')
# Build the signal frame used for labeling and analysis.
#signal=df.DataFrame(data=df.mid)
signal=df  # NOTE(review): alias, not a copy -- columns added below also mutate df
#df['time']=df.index.strftime('%H:%M:%S')
time=signal.index.strftime('%H:%M:%S')
# Typical price and rolling upper/lower thresholds over 60 samples.
P=(signal.high+signal.low+signal.mid)/3
# NOTE(review): UT uses the rolling max of both high and low while DT uses
# the rolling min of both -- verify this matches the intended band formula.
signal['UT']=(P+signal.high.rolling(60).max()-signal.low.rolling(60).max())
signal['DT']=(P-signal.high.rolling(60).min()+signal.low.rolling(60).min())
# Label: Buy when mid <= lower threshold, Sell when mid >= upper, else Hold.
signal['BS']=np.where(signal.mid<=df.DT,"B",np.where(signal.mid>=df.UT,"S","H"))
signal=signal.dropna()
signal.head()
# Last 100 rows of thresholds, prices and model predictions.
df[['UT','DT','mid','high','low','pREG','pSVR']].tail(100).plot(figsize=(16, 10))
plt.show()
# Distribution of mid price per Buy/Sell/Hold label.
signal.boxplot(column='mid',by ='BS')
temp1 = round(signal['UD']).value_counts(ascending=True)
# NOTE(review): values='UD' mapped through the {'B','S','H'} dict -- if UD is
# numeric the map yields NaN; confirm the intended column/mapping here.
temp2 = signal.pivot_table(values='UD',index=['BS'],aggfunc=lambda x: x.map({'B':1,'S':-1,'H':0}).mean())
print ('Frequency Table for spread:')
print (temp1)
print ('\nProbility')
print (temp2.tail())
# Stacked bar chart of rounded UD values per BS label.
temp3 = pd.crosstab(round(signal['UD']),signal['BS'])
temp3.plot(kind='bar', stacked=True, color=['red','blue'], grid=False)
# number of missing values in each column as isnull() returns 1, if the value is null.
signal.apply(lambda x: sum(x.isnull()),axis=0)
signal['BS'].value_counts()
signal['UD'].value_counts()
# Median velocity per (BS, UD) cell.
table = signal.pivot_table(values='v', index='BS' ,columns='UD', aggfunc=np.median)
print(table)
#Boolean indexing
# Rows with negative velocity labeled Buy while mid sits below the lower threshold.
signal.loc[(signal['v']<0) & (signal["BS"]=="B") & (signal["DT"]>signal["mid"]), ['mid',"spread","BS","DT"]].head()
train_X.head()
# Build and train a small feed-forward binary classifier with Keras.
from keras.models import Sequential
from keras.layers import Dense
import numpy
# Create model: 11 input features -> 12 -> 8 -> 1 (sigmoid output).
# NOTE: the original used the Keras 1.x kwargs `init=` and `nb_epoch=`,
# which were removed in Keras 2; the modern names are used below.
model = Sequential()
model.add(Dense(12, input_dim=11, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model for binary classification.
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model on the training split.
model.fit(train_X, train_U, epochs=11, batch_size=10)
# Evaluate on the held-out split and report accuracy.
scores = model.evaluate(test_X, test_U)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For reading groundwater level timeseries there is the read_dino function. For river level gauge data there is the read_dino_level_gauge function. These functions are available at the module level. The code below loads these timeseries from CSV files in the data directory.
Step2: Note that these pastas.Timeseries objects contain metadata, i.e. for the river_levels
Step3: The resulting pastas.TimeSeries come with their own plot methods, to quickly visualize the data
Step4: As can be observed in the plot above, something went wrong with the registration of the river levels in the field. The plot shows a huge downward shift at the end of 2014. The negative values from the end of 2014 onwards are assumed to be correct. The positive values were registered incorrectly (missing a minus sign).
Step5: Plot the timeseries again, to see if the applied fix looks reasonable
Step6: Another option for loading river levels is the DinoPeilschaal reader class. This creates a DinoPeilschaal object containing the data and the metadata. These are accessible through the data and meta attributes, respectively.
Step7: View the original data
Step8: The metadata
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import pastas as ps
# Print pastas and dependency versions for reproducibility.
ps.show_versions()
# Load groundwater levels and river level gauge data from DINO CSV exports.
gw_levels = ps.read_dino(r'../data/B49F0555001_1.csv')
river_levels = ps.read_dino_level_gauge(r'../data/P43H0001.csv')
# Metadata carried by the pastas TimeSeries object.
river_levels.metadata
# Quick visual check of both series.
ax = gw_levels.plot()
river_levels.plot()
# Fix a field-registration error: positive river levels were recorded
# without the minus sign, so force all values negative.
river_levels.series_original = river_levels.series_original.abs() * -1 # set positive values to negative
river_levels.update_series()
# Re-plot to verify the applied fix looks reasonable.
gw_levels.plot()
river_levels.plot()
# Alternative reader: DinoPeilschaal object exposing .data and .meta.
river_levels_obj = ps.read.dinoloket.DinoPeilschaal(r'../data/P43H0001.csv')
river_levels_obj.data.head()
river_levels_obj.meta
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To reduce memory consumption and running time, some of the steps are precomputed and read from file
Step2: The data was collected with a CTF 275 system at 2400 Hz and low-pass filtered during acquisition
Step3: In the memory saving mode we use preload=False and use the memory
Step4: Data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference
Step5: For noise reduction, a set of bad segments have been identified and stored
Step6: Here we compute the saccade and EOG projectors for magnetometers and add them to the raw data
Step7: Visually inspect the effects of projections. Click on 'proj' button at the
Step8: Typical preprocessing step is the removal of power line artifact (50 Hz or 60 Hz and its harmonics)
Step9: We also lowpass filter the data at 100 Hz to remove the hf components.
Step10: Epoching and averaging.
Step11: The event timing is adjusted by comparing the trigger times with the sound onsets detected on the audio channel
Step12: We mark a set of bad channels that seem noisier than others. This can also
Step13: The epochs (trials) are created for MEG channels. First we find the picks
Step14: We only use first 40 good epochs from each run. Since we first drop the bad
Step15: The averages for each conditions are computed.
Step16: The averaged responses are low-pass filtered at 40 Hz to remove residual high-frequency noise before plotting
Step17: Here we plot the ERF of standard and deviant conditions. In both conditions
Step18: Show activations as topography figures.
Step19: We can see the MMN effect more clearly by looking at the difference between
Step20: Source estimation.
Step21: The transformation is read from a file. More information about coregistering
Step22: To save time and memory, the forward solution is read from a file. Set
Step23: The sources are computed using dSPM method and plotted on an inflated brain
Step24: Deviant condition.
Step25: Difference.
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: BSD (3-clause)
import os.path as op
import pandas as pd
import numpy as np
import mne
from mne import combine_evoked
from mne.minimum_norm import apply_inverse
from mne.datasets.brainstorm import bst_auditory
from mne.io import read_raw_ctf
print(__doc__)
# Use precomputed results (EOG projector, forward solution) to save
# time and memory.
use_precomputed = True
# Paths to the Brainstorm auditory dataset: two task runs and one
# empty-room (noise) recording.
data_path = bst_auditory.data_path()
subject = 'bst_auditory'
subjects_dir = op.join(data_path, 'subjects')
raw_fname1 = op.join(data_path, 'MEG', 'bst_auditory',
                     'S01_AEF_20131218_01.ds')
raw_fname2 = op.join(data_path, 'MEG', 'bst_auditory',
                     'S01_AEF_20131218_02.ds')
erm_fname = op.join(data_path, 'MEG', 'bst_auditory',
                    'S01_Noise_20131218_01.ds')
# Memory-saving mode: keep data memory-mapped on disk (preload=False).
preload = not use_precomputed
raw = read_raw_ctf(raw_fname1, preload=preload)
n_times_run1 = raw.n_times  # sample offset of run 2 after concatenation
mne.io.concatenate_raws([raw, read_raw_ctf(raw_fname2, preload=preload)])
raw_erm = read_raw_ctf(erm_fname, preload=preload)
# Mark the EOG/ECG channels so MNE treats them as artifact channels.
raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'})
if not use_precomputed:
    # Leave out the two EEG channels for easier computation of forward.
    raw.pick(['meg', 'stim', 'misc', 'eog', 'ecg'])
# Read the manually identified bad segments / saccades for both runs and
# shift the run-2 onsets by the length of run 1.
annotations_df = pd.DataFrame()
offset = n_times_run1
for idx in [1, 2]:
    csv_fname = op.join(data_path, 'MEG', 'bst_auditory',
                        'events_bad_0%s.csv' % idx)
    df = pd.read_csv(csv_fname, header=None,
                     names=['onset', 'duration', 'id', 'label'])
    print('Events from run {0}:'.format(idx))
    print(df)
    df['onset'] += offset * (idx - 1)
    annotations_df = pd.concat([annotations_df, df], axis=0)
# NOTE(review): `df` here is the last loop iteration's frame, so only that
# run's saccades are used -- confirm this is intended.
saccades_events = df[df['label'] == 'saccade'].values[:, :3].astype(int)
# Conversion from samples to times:
onsets = annotations_df['onset'].values / raw.info['sfreq']
durations = annotations_df['duration'].values / raw.info['sfreq']
descriptions = annotations_df['label'].values
annotations = mne.Annotations(onsets, durations, descriptions)
raw.set_annotations(annotations)
del onsets, durations, descriptions
# Epoch around saccades and compute one magnetometer SSP projector for them.
saccade_epochs = mne.Epochs(raw, saccades_events, 1, 0., 0.5, preload=True,
                            reject_by_annotation=False)
projs_saccade = mne.compute_proj_epochs(saccade_epochs, n_mag=1, n_eeg=0,
                                        desc_prefix='saccade')
# EOG projector: read the precomputed one, or compute it from the data.
if use_precomputed:
    proj_fname = op.join(data_path, 'MEG', 'bst_auditory',
                         'bst_auditory-eog-proj.fif')
    projs_eog = mne.read_proj(proj_fname)[0]
else:
    projs_eog, _ = mne.preprocessing.compute_proj_eog(raw.load_data(),
                                                      n_mag=1, n_eeg=0)
# Add both projectors to the raw data (applied on demand).
raw.add_proj(projs_saccade)
raw.add_proj(projs_eog)
del saccade_epochs, saccades_events, projs_eog, projs_saccade  # To save memory
# Interactive inspection; toggle the 'proj' button to see projector effects.
raw.plot(block=True)
# Remove power-line artifact (60 Hz and harmonics) with notch filters.
if not use_precomputed:
    raw.plot_psd(tmax=np.inf, picks='meg')
    notches = np.arange(60, 181, 60)
    raw.notch_filter(notches, phase='zero-double', fir_design='firwin2')
    raw.plot_psd(tmax=np.inf, picks='meg')
# Low-pass at 100 Hz to remove high-frequency components.
if not use_precomputed:
    raw.filter(None, 100., h_trans_bandwidth=0.5, filter_length='10s',
               phase='zero-double', fir_design='firwin2')
# Epoching parameters: -100 ms to 500 ms around each stimulus.
tmin, tmax = -0.1, 0.5
event_id = dict(standard=1, deviant=2)
reject = dict(mag=4e-12, eog=250e-6)
# find events
events = mne.find_events(raw, stim_channel='UPPT001')
# Align trigger times with sound onsets detected on the audio channel.
sound_data = raw[raw.ch_names.index('UADC001-4408')][0][0]
onsets = np.where(np.abs(sound_data) > 2. * np.std(sound_data))[0]
min_diff = int(0.5 * raw.info['sfreq'])
diffs = np.concatenate([[min_diff + 1], np.diff(onsets)])
onsets = onsets[diffs > min_diff]  # keep the first sample of each burst
assert len(onsets) == len(events)
diffs = 1000. * (events[:, 0] - onsets) / raw.info['sfreq']
print('Trigger delay removed (μ ± σ): %0.1f ± %0.1f ms'
      % (np.mean(diffs), np.std(diffs)))
events[:, 0] = onsets
del sound_data, diffs
# Channels judged noisier than the rest.
raw.info['bads'] = ['MLO52-4408', 'MRT51-4408', 'MLO42-4408', 'MLO43-4408']
# Create epochs for MEG (+EOG for rejection); annotated bad segments,
# bad channels and amplitude-rejected trials are excluded.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=['meg', 'eog'],
                    baseline=(None, 0), reject=reject, preload=False,
                    proj=True)
epochs.drop_bad()
# Use only 40 good 'standard' epochs from each run (run 2 starts at 182).
epochs_standard = mne.concatenate_epochs([epochs['standard'][range(40)],
                                          epochs['standard'][182:222]])
epochs_standard.load_data()  # Resampling to save memory.
epochs_standard.resample(600, npad='auto')
epochs_deviant = epochs['deviant'].load_data()
epochs_deviant.resample(600, npad='auto')
del epochs
# Condition averages (evoked responses).
evoked_std = epochs_standard.average()
evoked_dev = epochs_deviant.average()
del epochs_standard, epochs_deviant
# Low-pass the evoked responses at 40 Hz before plotting.
for evoked in (evoked_std, evoked_dev):
    evoked.filter(l_freq=None, h_freq=40., fir_design='firwin')
# Butterfly plots with global field power (GFP).
evoked_std.plot(window_title='Standard', gfp=True, time_unit='s')
evoked_dev.plot(window_title='Deviant', gfp=True, time_unit='s')
# Topographies from 50 to 300 ms in 25 ms steps.
times = np.arange(0.05, 0.301, 0.025)
evoked_std.plot_topomap(times=times, title='Standard', time_unit='s')
evoked_dev.plot_topomap(times=times, title='Deviant', time_unit='s')
# Mismatch negativity (MMN): deviant minus standard.
evoked_difference = combine_evoked([evoked_dev, -evoked_std], weights='equal')
evoked_difference.plot(window_title='Difference', gfp=True, time_unit='s')
# Noise covariance estimated from the empty-room recording.
reject = dict(mag=4e-12)
cov = mne.compute_raw_covariance(raw_erm, reject=reject)
cov.plot(raw_erm.info)
del raw_erm
# Head<->MRI coordinate transformation, read from file (coregistration).
trans_fname = op.join(data_path, 'MEG', 'bst_auditory',
                      'bst_auditory-trans.fif')
trans = mne.read_trans(trans_fname)
# Forward solution: read precomputed, or build source space + BEM and compute.
if use_precomputed:
    fwd_fname = op.join(data_path, 'MEG', 'bst_auditory',
                        'bst_auditory-meg-oct-6-fwd.fif')
    fwd = mne.read_forward_solution(fwd_fname)
else:
    src = mne.setup_source_space(subject, spacing='ico4',
                                 subjects_dir=subjects_dir, overwrite=True)
    model = mne.make_bem_model(subject=subject, ico=4, conductivity=[0.3],
                               subjects_dir=subjects_dir)
    bem = mne.make_bem_solution(model)
    fwd = mne.make_forward_solution(evoked_std.info, trans=trans, src=src,
                                    bem=bem)
# Inverse operator; regularization from an assumed SNR of 3.
inv = mne.minimum_norm.make_inverse_operator(evoked_std.info, fwd, cov)
snr = 3.0
lambda2 = 1.0 / snr ** 2
del fwd
# dSPM source estimate for the standard condition on the inflated brain.
stc_standard = mne.minimum_norm.apply_inverse(evoked_std, inv, lambda2, 'dSPM')
brain = stc_standard.plot(subjects_dir=subjects_dir, subject=subject,
                          surface='inflated', time_viewer=False, hemi='lh',
                          initial_time=0.1, time_unit='s')
del stc_standard, brain
# Deviant condition.
stc_deviant = mne.minimum_norm.apply_inverse(evoked_dev, inv, lambda2, 'dSPM')
brain = stc_deviant.plot(subjects_dir=subjects_dir, subject=subject,
                         surface='inflated', time_viewer=False, hemi='lh',
                         initial_time=0.1, time_unit='s')
del stc_deviant, brain
# Difference (MMN effect) visualized at 150 ms.
stc_difference = apply_inverse(evoked_difference, inv, lambda2, 'dSPM')
brain = stc_difference.plot(subjects_dir=subjects_dir, subject=subject,
                            surface='inflated', time_viewer=False, hemi='lh',
                            initial_time=0.15, time_unit='s')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.