| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, lengths 67–124k) | cleaned_prompt (string, lengths 168–30.3k) |
|---|---|---|
2,800
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import cv2
import matplotlib.pyplot as plt
import ipywidgets as widgets
from IPython import display
# test image stack
arr = []
for i in range(50):
b = np.random.rand(500,500)
b= (b*(2**16-1)).astype('uint16')
arr.append(b)
# snap (MPL)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
img=arr.pop()
plt.imshow(img, cmap='gray')
display.clear_output(wait=True)
display.display(plt.gcf())
button.on_click(on_button_clicked)
# snap (CV2)
button = widgets.Button(description='Snap')
display.display(button)
def on_button_clicked(b):
img=arr.pop()
cv2.imshow('Video',img)
cv2.waitKey(30)
button.on_click(on_button_clicked)
import numpy as np
import cv2
from IPython import display
# test image stack
a = []
for i in range(50):
b = np.zeros((500,500))
b[i:i+100, i:i+100]=1.0
b=b*255
b=b.astype('uint8')
a.append(b)
# video (MPL) (slow, doesn't work well)
# for img in a:
# plt.imshow(img, cmap='gray')
# display.clear_output(wait=True)
# display.display(plt.gcf())
# video (CV2)
cv2.namedWindow('Video',cv2.WINDOW_NORMAL)
for img in a:
b = cv2.imshow('Video',img)
cv2.resizeWindow('Video', 500,500)
cv2.moveWindow('Video',0,0)
display.clear_output(wait=True)
print(np.random.randn(1))
if cv2.waitKey(30) >= 0:
break
cv2.destroyAllWindows()
# video with button (CV2)
button = widgets.Button(description='Live')
display.display(button)
def on_button_clicked(b):
for img in a:
cv2.imshow('Video',img)
cv2.waitKey(30)
display.clear_output(wait=True)
print(np.random.randn(1))
button.on_click(on_button_clicked)
button = widgets.ToggleButton(description='Live', value=False)
def on_click(change):
display.clear_output(wait=True)
print(change['new'])
button.observe(on_click, names='value')
display.display(button)
import time
b1 = widgets.Button(description='b1')
b2 = widgets.Button(description='b2')
def ctrlloop():
def b1_click(b):
for i in range(10):
print('b1', i)
time.sleep(0.5)
def b2_click(b):
for i in range(10):
print('b2', i)
# dl = widgets.jsdlink((button, 'value'), (vid, 'value'))
b1.on_click(b1_click)
b2.on_click(b2_click)
widgets.HBox([b1,b2])
play = widgets.Play(
interval=160,
value=50,
min=0,
max=100,
step=1,
description="Press play",
disabled=False
)
slider = widgets.IntSlider()
widgets.jslink((play, 'value'), (slider, 'value'))
widgets.HBox([play, slider])
f = open('temp.msg','w')  # text mode, since a str is written below
f.write(str(1))
f.close()
# icons are from "font-awesome"
x_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-left')
x_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-right')
y_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon='arrow-up')
y_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-down')
xy_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
xy_cluster = widgets.VBox([ widgets.HBox([x_minus,x_plus]), widgets.HBox([y_minus, y_plus]) ])
z_minus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-up')
z_plus = widgets.Button(
description='',
disabled=False,
button_style='',
icon = 'arrow-down')
z_slider = widgets.VBox([widgets.FloatText(description='speed', width='30%',value=50),widgets.IntSlider(width=100, step=10)])
z_cluster = widgets.VBox([ z_minus, z_plus])
widgets.HBox([xy_cluster, xy_slider, z_cluster, z_slider])
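# Illustrative sketch (an addition, not part of the original notebook): wire the arrow
# buttons above to a simple handler so that clicks update an x/y position counter.
pos = {'x': 0, 'y': 0}
def make_step(axis, delta):
    def on_click(b):
        pos[axis] += delta
        print(pos)
    return on_click
x_minus.on_click(make_step('x', -1))
x_plus.on_click(make_step('x', +1))
y_minus.on_click(make_step('y', -1))
y_plus.on_click(make_step('y', +1))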
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Video
Step2: GUI and BUTTONS
Step3: Arrows
|
2,801
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import urllib.request
import numpy as np
import simplejson as json
import pandas as pd
from netCDF4 import Dataset, date2num, num2date
import ipywidgets as widgets
from IPython.display import display, clear_output
import dateutil.parser
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import time
from po_data_process import get_data_from_raster_API,get_data_from_point_API,get_variables_from_detail_api,read_data_to_json,generate_raster_api_query,get_units
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
dataset_keys = ['cmc_gdwps_wave_model_global','dwd_wam_europe','dwd_wam_global',
'cmems_medsea_wave_analysis_forecast_0042','noaa_ww3_global_0.5d']
server = "http://api.planetos.com/v1/datasets/"
API_key = open('APIKEY').readlines()[0].strip() #'<YOUR API KEY HERE>'
latitude = 'Please add location for point data'
longitude = 'Please add location for point data'
longitude_west = 'Please add location for raster data'
longitude_east = 'Please add location for raster data'
latitude_south = 'Please add location for raster data'
latitude_north = 'Please add location for raster data'
default_point_api_loc = {'cmc_gdwps_wave_model_global':['39','-69'],'dwd_wam_europe':['56.3','17.8'],
'dwd_wam_global':['39','-69'],'cmems_medsea_wave_analysis_forecast_0042':['38.3','5.8'],'noaa_ww3_global_0.5d':['39','-69']}
default_raster_api_loc = {'cmc_gdwps_wave_model_global':['36','39','-62','-70'],'dwd_wam_europe':['55.2','56.5','17.4','20.2'],
'dwd_wam_global':['36','39','-62','-70'],'cmems_medsea_wave_analysis_forecast_0042':['35.8','37.8','16.5','19.5'],
'noaa_ww3_global_0.5d':['36','39','-62','-70']}
droplist0 = list(dataset_keys)
selecter0 = widgets.Dropdown(
options=droplist0,
value=droplist0[0],
description='Select dataset:',
disabled=False,
button_style=''
)
display(selecter0)
dataset_key = selecter0.value
if latitude.isdigit() == False or longitude.isdigit() == False:
latitude = default_point_api_loc[dataset_key][0]
longitude = default_point_api_loc[dataset_key][1]
if latitude_south.isdigit() == False or latitude_north.isdigit() == False or longitude_east.isdigit() == False \
or longitude_west.isdigit() == False:
latitude_south = default_raster_api_loc[dataset_key][0]
latitude_north = default_raster_api_loc[dataset_key][1]
longitude_east = default_raster_api_loc[dataset_key][2]
longitude_west = default_raster_api_loc[dataset_key][3]
def get_sample_var_name(variables, selected_variable, selected_level):
## The human readable variable name is not the same as the compact one in API request
for i in variables:
if i['longName'] == selected_variable:
var = i['name']
elif i["longName"] == selected_variable + "@" + selected_level:
var = i['name']
return var
point_data_frame = get_data_from_point_API(dataset_key,longitude,latitude,API_key)
dataset_variables = get_variables_from_detail_api(server,dataset_key,API_key)
vardict = {}
levelset = []
for i in dataset_variables:
if '@' in i['longName']:
if not i['longName'].split("@")[1] in levelset:
levelset.append(i['longName'].split("@")[1])
varname, varlevel = i['longName'].split("@")
else:
if not 'Ground or water surface' in levelset:
levelset.append('Ground or water surface')
varname, varlevel = i['longName'], 'Ground or water surface'
if not varlevel in vardict:
vardict[varlevel] = []
vardict[varlevel].append(varname)
else:
vardict[varlevel].append(varname)
droplist = list(levelset)
selecter = widgets.Dropdown(
options = droplist,
value = droplist[0],
description = 'Select level type:',
disabled = False,
button_style = ''
)
selecter2 = widgets.Dropdown(
options = sorted(vardict[selecter.value]),
value = vardict[selecter.value][0],
description = 'Select variable:',
disabled = False,
button_style = ''
)
def select_from_list2(sender):
selecter2.options = vardict[selecter.value]
def plot_selected_variable(sender):
clear_output()
sample_var_name = get_sample_var_name(dataset_variables, selecter2.value, selecter.value)
#sample_point_data_json = read_data_to_json(generate_point_api_query(**{'var':sample_var_name,'count':100000}))
sample_point_data_pd = get_data_from_point_API(dataset_key,longitude,latitude,API_key,var=sample_var_name,count=100000)
unit = get_units(dataset_key,sample_var_name,API_key)
fig = plt.figure(figsize=(11,4))
## find how many vertical levels we have
if 'z' in sample_point_data_pd:
zlevels = sample_point_data_pd['z'].unique()
if len(zlevels) != 1:
print("Warning: more than one vertical level detected ", zlevels)
for i in zlevels:
pdata=np.array(sample_point_data_pd[sample_point_data_pd['z'] == i][sample_var_name],dtype=np.float)
if np.sum(np.isnan(pdata)) != pdata.shape[0]:
plt.plot(sample_point_data_pd[sample_point_data_pd['z'] == i]['time'].apply(dateutil.parser.parse),pdata,label=i)
else:
print("Cannot plot all empty values!")
else:
pdata = np.array(sample_point_data_pd[sample_var_name],dtype = np.float)
if np.sum(np.isnan(pdata)) != pdata.shape[0]:
plt.plot(sample_point_data_pd['time'].apply(dateutil.parser.parse), pdata, '*-', label=sample_var_name)
else:
print("Cannot plot all empty values!")
plt.legend()
plt.grid()
fig.autofmt_xdate()
plt.xlabel('Date')
plt.title(sample_var_name)
plt.ylabel(unit)
plt.show()
display(selecter)
display(selecter2)
selecter.observe(select_from_list2)
selecter2.observe(plot_selected_variable, names='value')
sample_var_name = get_sample_var_name(dataset_variables, selecter2.value, selecter.value)
sample_raster_data = get_data_from_raster_API(dataset_key, longitude_west, latitude_south,
longitude_east, latitude_north, API_key,var=sample_var_name,
count=1000)
data_min = np.amin([np.nanmin(np.array(i,dtype=float)) for i in sample_raster_data[sample_var_name]])
data_max = np.amax([np.nanmax(np.array(i,dtype=float)) for i in sample_raster_data[sample_var_name]])
raster_latitude = sample_raster_data['indexAxes'][0][0][1]
raster_longitude = sample_raster_data['indexAxes'][0][1][1]
m = Basemap(projection='merc',llcrnrlat=float(latitude_south),urcrnrlat=float(latitude_north),\
llcrnrlon=float(longitude_west),urcrnrlon=float(longitude_east),lat_ts=(float(latitude_south)+float(latitude_north))/2,resolution='i')
lonmap, latmap = m(np.meshgrid(raster_longitude,raster_latitude)[0],np.meshgrid(raster_longitude, raster_latitude)[1])
def loadimg(k):
unit = get_units(dataset_key,sample_var_name,API_key)
fig=plt.figure(figsize=(7,5))
data = np.ma.masked_invalid(np.array(sample_raster_data[sample_var_name][k],dtype=float))
m.pcolormesh(lonmap,latmap,data,vmax=data_max,vmin=data_min)
m.drawcoastlines()
m.drawcountries()
plt.title(selecter2.value + " " + sample_raster_data['time'][k],fontsize=10)
cbar = plt.colorbar()
cbar.set_label(unit)
print("Maximum: ",np.nanmax(data))
print("Minimum: ",np.nanmin(data))
plt.show()
widgets.interact(loadimg, k=widgets.IntSlider(min=0,max=(len(sample_raster_data)-1),step=1,value=0, layout=widgets.Layout(width='100%')))
loadimg(0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='red'>Please put your datahub API key into a file called APIKEY and place it in the notebook folder, or assign your API key directly to the variable API_key!</font>
Step2: Change latitude and longitude to your desired location. Longitude/latitude west/east are for the raster API example.
Step3: If you don't want to choose a location yourself, default locations will be used.
Step4: Making a dropdown of WAM dataset keys, from which you can choose the WAM model you would like to use. Note that when using the Europe / North-Atlantic / North-Pacific / Mediterranean Sea models, data points have to be from that area!
Step5: Point data API
Step6: Getting variables for the chosen dataset
Step7: Now let's plot the data. We are making two dropdown lists. From the first you have to choose the level type, because the data is separated by contexts. A context here is just a set of spatial and temporal dimensions for a particular variable, like NetCDF dimensions, and each variable has values in only one context. So the NaN's do not come from the API request, but from the current way we create the DataFrame. It is not difficult to filter out variables from the dataframe (see the short sketch after this list), but for any use case besides observing the data, it is more reasonable to query only the right data from the server.
Step8: Raster API
Step9: Longitude and latitude data are necessary for the map; they are defined by the user at the beginning of the notebook.
Step10: Now let's map our data!
Step11: As the time slider for the image above does not work in the GitHub preview, let's make an image from the first time moment. It will, however, work in your local environment.
|
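A small illustrative sketch for the filtering point in Step7 (an addition; it assumes point_data_frame built above is a pandas DataFrame, as the later plotting code suggests, and 'VHM0' is a hypothetical variable name — use one returned by get_variables_from_detail_api):
# drop columns that are entirely NaN, i.e. variables that belong to other contexts
cleaned = point_data_frame.dropna(axis=1, how='all')
# or keep only the rows where one particular variable actually has values
var = 'VHM0'  # hypothetical variable name
subset = point_data_frame[point_data_frame[var].notnull()]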
2,802
|
<ASSISTANT_TASK:>
Python Code:
# Installs the latest version of TFC compatible with the installed TF version.
!pip install tensorflow-compression~=$(pip show tensorflow | perl -p -0777 -e 's/.*Version: (\d\.\d).*/\1.0/sg')
# Downloads the 'models' directory from Github.
![[ -e /tfc ]] || git clone https://github.com/tensorflow/compression /tfc
%cd /tfc/models
# Checks if tfci.py is available.
import tfci
import tensorflow as tf
if not tf.config.list_physical_devices('GPU'):
print('WARNING: No GPU found. Might be slow!')
else:
print('Found GPU.')
import os
import zipfile
from google.colab import files
import collections
from PIL import Image
from IPython.display import Image as DisplayImage
from IPython.display import Javascript
from IPython.core.display import display, HTML
import tfci
import urllib.request
tf.get_logger().setLevel('WARN') # Only show Warnings
FILES_DIR = '/content/files'
OUT_DIR = '/content/out'
DEFAULT_IMAGE_URL = ('https://storage.googleapis.com/hific/clic2020/'
'images/originals/ad249bba099568403dc6b97bc37f8d74.png')
os.makedirs(FILES_DIR, exist_ok=True)
os.makedirs(OUT_DIR, exist_ok=True)
File = collections.namedtuple('File', ['full_path', 'num_bytes', 'bpp'])
def print_html(html):
display(HTML(html + '<br/>'))
def make_cell_large():
display(Javascript(
'''google.colab.output.setIframeHeight(0, true, {maxHeight: 5000})'''))
def get_default_image(output_dir):
output_path = os.path.join(output_dir, os.path.basename(DEFAULT_IMAGE_URL))
print('Downloading', DEFAULT_IMAGE_URL, '\n->', output_path)
urllib.request.urlretrieve(DEFAULT_IMAGE_URL, output_path)
#@title Setup { vertical-output: false, run: "auto", display-mode: "form" }
#@markdown #### Custom Images
#@markdown Tick the following if you want to upload your own images to compress.
#@markdown Otherwise, a default image will be used.
#@markdown
#@markdown **Note**: We support JPG and PNG (without alpha channels).
#@markdown
upload_custom_images = False #@param {type:"boolean", label:"HI"}
if upload_custom_images:
uploaded = files.upload()
for name, content in uploaded.items():
with open(os.path.join(FILES_DIR, name), 'wb') as fout:
print('Writing', name, '...')
fout.write(content)
#@markdown #### Select a model
#@markdown Different models target different bitrates.
model = 'hific-lo' #@param ["hific-lo", "hific-mi", "hific-hi"]
if 'upload_custom_images' not in locals():
print('ERROR: Please run the previous cell!')
# Setting defaults anyway.
upload_custom_images = False
model = 'hific-lo'
all_files = os.listdir(FILES_DIR)
if not upload_custom_images or not all_files:
print('Downloading default image...')
get_default_image(FILES_DIR)
print()
all_files = os.listdir(FILES_DIR)
print(f'Got the following files ({len(all_files)}):')
for file_name in all_files:
img = Image.open(os.path.join(FILES_DIR, file_name))
w, h = img.size
img = img.resize((w // 15, h // 15))
print('- ' + file_name + ':')
display(img)
SUPPORTED_EXT = {'.png', '.jpg'}
all_files = os.listdir(FILES_DIR)
if not all_files:
raise ValueError("Please upload images!")
def get_bpp(image_dimensions, num_bytes):
w, h = image_dimensions
return num_bytes * 8 / (w * h)
def has_alpha(img_p):
im = Image.open(img_p)
return im.mode == 'RGBA'
all_outputs = []
for file_name in all_files:
if os.path.isdir(file_name):
continue
if not any(file_name.endswith(ext) for ext in SUPPORTED_EXT):
print('Skipping', file_name, '...')
continue
full_path = os.path.join(FILES_DIR, file_name)
if has_alpha(full_path):
print('Skipping because of alpha channel:', file_name)
continue
file_name, _ = os.path.splitext(file_name)
compressed_path = os.path.join(OUT_DIR, f'{file_name}_{model}.tfci')
output_path = os.path.join(OUT_DIR, f'{file_name}_{model}.png')
if os.path.isfile(output_path):
print('Exists already:', output_path)
num_bytes = os.path.getsize(compressed_path)
all_outputs.append(
File(output_path, num_bytes,
get_bpp(Image.open(full_path).size, num_bytes)))
continue
print('Compressing', file_name, 'with', model, '...')
tfci.compress(model, full_path, compressed_path)
num_bytes = os.path.getsize(compressed_path)
print(f'Compressed to {num_bytes} bytes.')
print('Decompressing...')
tfci.decompress(compressed_path, output_path)
all_outputs.append(
File(output_path, num_bytes,
get_bpp(Image.open(full_path).size, num_bytes)))
print('All done!')
make_cell_large() # Larger output window.
for file in all_outputs:
print_html('<hr/>')
print(f'Showing {file.full_path} | {file.num_bytes//1000}kB | {file.bpp:.4f}bpp')
display(Image.open(file.full_path))
print_html('<hr/>')
ZIP = '/content/images.zip'
with zipfile.ZipFile(ZIP, 'w') as zf:
for f in all_outputs:
path_with_bpp = f.full_path.replace('.png', f'-{f.bpp:.3f}bpp.png')
zf.write(f.full_path, os.path.basename(path_with_bpp))
files.download(ZIP)
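# Illustrative sketch (an addition, not part of the original colab): compare each
# compressed size with the size of the original input file. It assumes the originals
# are still in FILES_DIR with a .png or .jpg extension.
for f in all_outputs:
    stem = os.path.basename(f.full_path).replace(f'_{model}.png', '')
    for ext in ('.png', '.jpg'):
        orig_path = os.path.join(FILES_DIR, stem + ext)
        if os.path.isfile(orig_path):
            orig_bytes = os.path.getsize(orig_path)
            print(f'{stem}{ext}: {orig_bytes} -> {f.num_bytes} bytes '
                  f'({f.num_bytes / orig_bytes:.1%} of original)')
            break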
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enabling GPU
Step2: Imports and Definitions
Step3: Load files
Step4: Compress images
Step5: Show output
Step6: Download all compressed images.
|
2,803
|
<ASSISTANT_TASK:>
Python Code:
# Importing standard libraries
import numpy as np
import pandas as pd
from ionchannelABC import IonChannelModel
icat = IonChannelModel('icat',
'models/Generic_iCaT.mmt',
vvar='membrane.V',
logvars=['environment.time',
'icat.G_CaT',
'icat.i_CaT'])
import data.icat.data_icat as data
from ionchannelABC import (Experiment,
ExperimentData,
ExperimentStimProtocol)
vsteps, peak_curr, errs, N = data.IV_Nguyen()
nguyen_data = ExperimentData(x=vsteps, y=peak_curr,
N=N, errs=errs,
err_type='SEM') # this flag is currently not used but may change in future version
stim_times = [5000, 300, 500] # describes the course of one voltage step in time
stim_levels = [-75, vsteps, -75] # each entry of levels corresponds to the time above
def max_icat(data):
return max(data[0]['icat.i_CaT'], key=abs)
nguyen_protocol = ExperimentStimProtocol(stim_times,
stim_levels,
measure_index=1, # index from `stim_times` and `stim_levels`
measure_fn=max_icat)
nguyen_conditions = dict(Ca_o=5000, # extracellular Ca2+ concentration of 5000uM
Ca_subSL=0.2, # sub-sarcolemmal (i.e. intracellular) Ca2+ concentration of 0.2uM
T=295) # experiment temperature of 295K
nguyen_experiment = Experiment(nguyen_protocol, nguyen_data, nguyen_conditions)
icat.add_experiments([nguyen_experiment])
test = icat.sample({}) # empty dictionary as we are not overwriting any of the parameters in the model definition yet
import matplotlib.pyplot as plt
import seaborn as sns
from ionchannelABC import plot_sim_results
%matplotlib inline
plot_sim_results(test, obs=icat.get_experiment_data())
from channels.icat_generic import icat as model
test = model.sample({})
plot_sim_results(test, obs=model.get_experiment_data())
from pyabc import (RV, Distribution) # we use two classes from the pyabc library for this definition
limits = dict(g_CaT=(0, 2), # these parameter keys are specific to the icat model being investigated
v_offset=(0, 500),
Vhalf_b=(-100, 100),
k_b=(0, 10),
c_bb=(0, 10),
c_ab=(0, 100),
sigma_b=(0, 100),
Vmax_b=(-100, 100),
Vhalf_g=(-100, 100),
k_g=(-10, 0),
c_bg=(0, 50),
c_ag=(0, 500),
sigma_g=(0, 100),
Vmax_g=(-100, 100))
prior = Distribution(**{key: RV("uniform", a, b - a)
for key, (a,b) in limits.items()})
from ionchannelABC import (IonChannelDistance, plot_distance_weights)
measurements = model.get_experiment_data()
obs = measurements.to_dict()['y']
exp = measurements.to_dict()['exp']
errs = measurements.to_dict()['errs']
distance_fn = IonChannelDistance(obs=obs, exp_map=exp, err_bars=errs, err_th=0.1)
plot_distance_weights(model, distance_fn)
import tempfile, os
db_path = ("sqlite:///" +
os.path.join(tempfile.gettempdir(), "example.db"))
print(db_path)
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
cv_logger = logging.getLogger('CV Estimation')
cv_logger.setLevel(logging.DEBUG)
from pyabc import ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.populationstrategy import ConstantPopulationSize
from pyabc.sampler import MulticoreEvalParallelSampler
from ionchannelABC import (ion_channel_sum_stats_calculator,
IonChannelAcceptor,
IonChannelDistance,
EfficientMultivariateNormalTransition)
abc = ABCSMC(models=model,
parameter_priors=prior,
distance_function=IonChannelDistance(
obs=obs,
exp_map=exp,
err_bars=errs,
err_th=0.1),
population_size=ConstantPopulationSize(1000),
summary_statistics=ion_channel_sum_stats_calculator,
transitions=EfficientMultivariateNormalTransition(),
eps=MedianEpsilon(),
sampler=MulticoreEvalParallelSampler(n_procs=12),
acceptor=IonChannelAcceptor())
from pyabc import History
history = History('sqlite:///results/icat-generic/hl-1_icat-generic.db')
history.all_runs()
df, w = history.get_distribution(m=0)
evolution = history.get_all_populations()
sns.relplot(x='t', y='epsilon', size='samples', data=evolution[evolution.t>=0])
from ionchannelABC import plot_parameters_kde
plot_parameters_kde(df, w, limits, aspect=12, height=0.8)
n_samples = 10 # increasing this number will produce a better approximation to the true output, recommended: >= 100
# we keep 10 to keep running time low
parameter_samples = df.sample(n=n_samples, weights=w, replace=True)
parameter_samples.head()
parameter_samples = parameter_samples.to_dict(orient='records')
samples = pd.DataFrame({})
for i, theta in enumerate(parameter_samples):
output = model.sample(pars=theta, n_x=50) # n_x changes the resolution of the independent variable
# sometimes this can cause problems with output tending to zero/inf at
# (e.g.) exact reversal potential of the channel model
output['sample'] = i
output['distribution'] = 'posterior'
samples = samples.append(output, ignore_index=True)
g = plot_sim_results(samples, obs=measurements)
xlabels = ["voltage, mV", "voltage, mV", "voltage, mV", "time, ms", "time, ms","voltage, mV"]
ylabels = ["current density, pA/pF", "activation", "inactivation", "recovery", "normalised current","current density, pA/pF"]
for ax, xl in zip(g.axes.flatten(), xlabels):
ax.set_xlabel(xl)
for ax, yl in zip(g.axes.flatten(), ylabels):
ax.set_ylabel(yl)
peak_curr_mean = np.mean(samples[samples.exp==0].groupby('sample').min()['y'])
peak_curr_std = np.std(samples[samples.exp==0].groupby('sample').min()['y'])
print('Peak current density: {0:4.2f} +/- {1:4.2f} pA/pF'.format(peak_curr_mean, peak_curr_std))
peak_curr_V_indices = samples[samples.exp==0].groupby('sample').idxmin()['y']
peak_curr_V_mean = np.mean(samples.iloc[peak_curr_V_indices]['x'])
peak_curr_V_std = np.std(samples.iloc[peak_curr_V_indices]['x'])
print('Voltage of peak current density: {0:4.2f} +/- {1:4.2f} mV'.format(peak_curr_V_mean, peak_curr_V_std))
distance_fn = IonChannelDistance(obs=obs,
exp_map=exp,
err_bars=errs,
err_th=0.1)
parameters = ['icat.'+k for k in limits.keys()]
print(parameters)
from ionchannelABC import (calculate_parameter_sensitivity,
plot_parameter_sensitivity,
plot_regression_fit)
fitted, regression_fit, r2 = calculate_parameter_sensitivity(
model,
parameters,
distance_fn,
sigma=0.05, # affects how far parameters are perturbed from original values to test sensitivity
n_samples=20) # set to reduced value for demonstration, typically around 1000 in practical use
plot_parameter_sensitivity(fitted, plot_cutoff=0.05)
plot_regression_fit(regression_fit, r2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up an ion channel model and experiments
Step2: Now that we have loaded a cell model, we need to specify how we will test it to compare with experimental data. We use the ExperimentData and ExperimentStimProtocol classes to specify the experimental dataset and experimental protocol respectively. These are then combined in the Experiment class. The data is specified in a separate .py file with functions to return the x, y and, if available, error bars extracted from graphs.
Step3: The stimulation protocol is defined from the experimental methods of the data source. It should be replicated as closely as possible to reproduce the experimental conditions. This example shows a standard 'I-V curve' testing peak current density at different voltage steps from a resting potential. The transmembrane potential is held at a resting potential of -75mV for sufficient time for the channel to reach its steady state (we assume 5000ms here), it is stepped to each test potential for 300ms, and then returned to the resting potential.
Step4: Having defined what we are doing with the model, we need to define what we do with the simulation data and which part of the protocol (i.e. index of stim_times and stim_levels) we are interested in extracting the data from. The simulation will return a list of pandas.Dataframe containing each of logvars defined in the ion channel model declaration. Here, we want to reduce this data to just the peak current density at the step potential (i.e. index 1 in stim_times and stim_levels). Our list will only have length 1 because we are only interested in data from this point in the protocol, but more complex protocols may return longer lists.
Step5: The final key part of defining the experiment is the experimental conditions, which includes extra/intracellular ion concentrations and temperature reported in the data source. Here, the dictionary keys refer to variables in the [membrane] field of the MMT ion channel definition file.
Step6: We then add the experiment to the IonChannelModel defined previously. We can test it runs using the sample method with default parameters to debug any problems at this stage.
Step7: The plot_sim_results function makes it easy to plot the output of simulations.
Step8: Clearly the default parameters in the MMT file are not quite right, but we are able to run the simulation and compare to the results.
Step9: Setting up parameter inference for the defined model
Step10: We can now define additional requirements for the ABC-SMC algorithm. We need a distance function to measure how well our model can approximate experimental data.
Step11: We also need to assign a database file for the pyabc implementation of the ABC-SMC algorithm to store information about the ABC particles at intermediate steps as it runs. A temporary location with sufficient storage is a good choice as these files can become quite large for long ABC runs. This can be defined by setting the $TMPDIR environment variable as described in the installation instructions.
Step12: Running the ABC algorithm
Step13: ABCSMC from the pyabc library is the main class used for the algorithm. It initialises with a number of options which are well described in the pyabc documentation. Note we initialize some of the passed objects at this stage and do not pass in pre-initialised variables, particulary for the distance function.
Step14: The algorithm is initialised and run as specified in pyabc documentation. These lines are not set to run as the algorithm can take several hours to days to finish for large models. Following steps will use a previous run example.
Step15: First we can check the convergence of the epsilon value over iterations of the ABC algorithm.
Step16: We can check the posterior distribution of parameters for this model using the plot_parameters_kde function. This can highlight any parameters which were unidentifiable given the available experimental data.
Step17: We can generate some samples of model output using the posterior distribution of parameters to observe the effect on model output. We first create a sampling dataset then use the plot_sim_results function.
Step18: In this example, we see low variation of the model output around the experimental data across experiments. However, are all parameters well identified? (Consider the KDE posterior parameter distribution plot).
Step19: Or if we are interested in the voltage at which the peak current occurs.
Step20: That concludes the main portion of this introduction. Further functionality is included below. For further examples of using the library, see the additional notebooks included for multiple HL-1 cardiac myocyte ion channels in the docs/examples folder.
Step21: The calculate_parameter_sensitivity function carries out the calculations, and the output can be analysed using the plot_parameter_sensitivity and plot_regression_fit functions.
Step22: See Sobie et al, 2009 for an interpretation of the beta values and goodness-of-fit plots. In summary, a high beta value indicates the model has high sensitivity to changes in that parameter for a particular experiment protocol. However, this is conditional on a reasonable goodness-of-fit indicating the multivariable regression model is valid within this small pertubation space.
|
2,804
|
<ASSISTANT_TASK:>
Python Code:
# Author: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm.resolution_matrix import make_inverse_resolution_matrix
from mne.minimum_norm.spatial_resolution import resolution_metrics
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd_emeg = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
# read forward solution with EEG and MEG
forward_emeg = mne.read_forward_solution(fname_fwd_emeg)
# forward operator with fixed source orientations
forward_emeg = mne.convert_forward_solution(forward_emeg, surf_ori=True,
force_fixed=True)
# create a forward solution with MEG only
forward_meg = mne.pick_types_forward(forward_emeg, meg=True, eeg=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution for MEG and EEGMEG
inv_emeg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_emeg, noise_cov=noise_cov, loose=0.,
depth=None)
inv_meg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_meg, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
rm_emeg = make_inverse_resolution_matrix(forward_emeg, inv_emeg,
method='MNE', lambda2=lambda2)
ple_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
function='psf', metric='peak_err')
sd_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
function='psf', metric='sd_ext')
del rm_emeg
rm_meg = make_inverse_resolution_matrix(forward_meg, inv_meg,
method='MNE', lambda2=lambda2)
ple_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
function='psf', metric='peak_err')
sd_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
function='psf', metric='sd_ext')
del rm_meg
brain_ple_emeg = ple_psf_emeg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=1,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_emeg.add_text(0.1, 0.9, 'PLE PSF EMEG', 'title', font_size=16)
brain_ple_meg = ple_psf_meg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=2,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_meg.add_text(0.1, 0.9, 'PLE PSF MEG', 'title', font_size=16)
diff_ple = ple_psf_emeg - ple_psf_meg
brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=3,
clim=dict(kind='value', pos_lims=(0., .5, 1.)),
smoothing_steps=20)
brain_ple_diff.add_text(0.1, 0.9, 'PLE EMEG-MEG', 'title', font_size=16)
brain_sd_emeg = sd_psf_emeg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=4,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_emeg.add_text(0.1, 0.9, 'SD PSF EMEG', 'title', font_size=16)
brain_sd_meg = sd_psf_meg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=5,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_meg.add_text(0.1, 0.9, 'SD PSF MEG', 'title', font_size=16)
diff_sd = sd_psf_emeg - sd_psf_meg
brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=6,
clim=dict(kind='value', pos_lims=(0., .5, 1.)),
smoothing_steps=20)
brain_sd_diff.add_text(0.1, 0.9, 'SD EMEG-MEG', 'title', font_size=16)
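# Illustrative sketch (an addition, not part of the original example): summarise the two
# difference maps numerically as well as visually; diff_ple and diff_sd are source
# estimates, so their per-vertex values are available via .data.
import numpy as np
print('Mean PLE difference (EMEG - MEG): %.3f' % np.mean(diff_ple.data))
print('Mean SD difference (EMEG - MEG): %.3f' % np.mean(diff_sd.data))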
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: EEGMEG
Step2: MEG
Step3: Visualization
Step4: For MEG only
Step5: Subtract the two distributions and plot this difference
Step6: These plots show that with respect to peak localization error, adding EEG to
Step7: For MEG only
Step8: Subtract the two distributions and plot this difference
|
2,805
|
<ASSISTANT_TASK:>
Python Code:
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import precision_score, recall_score, f1_score
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,matplotlib
filepath = os.path.join('data', 'creditcard.csv')
df = pd.read_csv(filepath)
print('dimension: ', df.shape)
df.head()
df['hour'] = np.ceil(df['Time'].values / 3600) % 24
fraud_over_hour = df.pivot_table(values='Amount', index='hour', columns='Class', aggfunc='count')
fraud_over_hour
plt.rcParams['font.size'] = 12
plt.rcParams['figure.figsize'] = 8, 6
plt.plot(fraud_over_hour[1])
plt.title('Fraudulent Transaction over Hour')
plt.ylabel('Fraudulent Count')
plt.xlabel('Hour')
plt.show()
# prepare the dataset for modeling;
# extract the features and labels, perform a quick train/test split
label = df['Class']
pca_cols = [col for col in df.columns if col.startswith('V')]
input_cols = ['hour', 'Amount'] + pca_cols
df = df[input_cols]
df_train, df_test, y_train, y_test = train_test_split(
df, label, stratify=label, test_size=0.35, random_state=1)
print('training data dimension:', df_train.shape)
df_train.head()
# we'll be using linear models later, hence
# we standardize our features to ensure they are
# all at the same scale
standardize = StandardScaler()
X_train = standardize.fit_transform(df_train)
X_test = standardize.transform(df_test)
label_distribution = np.bincount(label) / label.size
print('labels distribution:', label_distribution)
print('Fraud is {}% of our data'.format(label_distribution[1] * 100))
fig = plt.figure(figsize=(15, 8))
ax1 = fig.add_subplot(1, 2, 1)
ax1.set_xlim([-0.05, 1.05])
ax1.set_ylim([-0.05, 1.05])
ax1.set_xlabel('Recall')
ax1.set_ylabel('Precision')
ax1.set_title('PR Curve')
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_xlim([-0.05, 1.05])
ax2.set_ylim([-0.05, 1.05])
ax2.set_xlabel('False Positive Rate')
ax2.set_ylabel('True Positive Rate')
ax2.set_title('ROC Curve')
f1_scores = []
recall_scores = []
precision_scores = []
pos_weights = [1, 10, 25, 50, 100, 10000]
for pos_weight in pos_weights:
lr_model = LogisticRegression(class_weight={0: 1, 1: pos_weight})
lr_model.fit(X_train, y_train)
# plot the precision-recall curve and ROC curve
pred_prob = lr_model.predict_proba(X_test)[:, 1]
precision, recall, _ = precision_recall_curve(y_test, pred_prob)
fpr, tpr, _ = roc_curve(y_test, pred_prob)
ax1.plot(recall, precision, label=pos_weight)
ax2.plot(fpr, tpr, label=pos_weight)
# track the precision, recall and f1 score
pred = lr_model.predict(X_test)
f1_test = f1_score(y_test, pred)
recall_test = recall_score(y_test, pred)
precision_test = precision_score(y_test, pred)
f1_scores.append(f1_test)
recall_scores.append(recall_test)
precision_scores.append(precision_test)
ax1.legend(loc='lower left')
ax2.legend(loc='lower right')
plt.show()
def score_barplot(precision_scores, recall_scores, f1_scores, pos_weights, figsize=(8, 6)):
"""Visualize precision/recall/f1 score at different class weighting values."""
width = 0.3
ind = np.arange(len(precision_scores))
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
b1 = ax.bar(ind, precision_scores, width, color='lightskyblue')
b2 = ax.bar(ind + width, recall_scores, width, color='lightcoral')
b3 = ax.bar(ind + (2 * width), f1_scores, width, color='gold')
ax.set_xticks(ind + width)
ax.set_xticklabels(pos_weights)
ax.set_ylabel('score')
ax.set_xlabel('positive weights')
ax.set_ylim(0, 1.3)
ax.legend(handles=[b1, b2, b3], labels=['precision', 'recall', 'f1'])
plt.tight_layout()
plt.show()
score_barplot(precision_scores, recall_scores, f1_scores, pos_weights)
# this code chunk shows the same idea applies when using tree-based models
f1_scores = []
recall_scores = []
precision_scores = []
pos_weights = [1, 10, 100, 10000]
for pos_weight in pos_weights:
rf_model = RandomForestClassifier(n_estimators=50, max_depth=6, n_jobs=-1,
class_weight={0: 1, 1: pos_weight})
rf_model.fit(df_train, y_train)
# track the precision, recall and f1 score
pred = rf_model.predict(df_test)
f1_test = f1_score(y_test, pred)
recall_test = recall_score(y_test, pred)
precision_test = precision_score(y_test, pred)
f1_scores.append(f1_test)
recall_scores.append(recall_test)
precision_scores.append(precision_test)
score_barplot(precision_scores, recall_scores, f1_scores, pos_weights)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Evaluating Imbalanced Datasets
Step2: A brief description of the dataset based on the data overview section from the download source.
Step3: Class Weighting
Step5: A good classifier would have a PR (Precision/Recall) curve closer to the upper-right corner and a ROC curve closer to the upper-left corner. Based on the plot above, we can see that while both curves use the same underlying data, i.e. the real class labels and the predicted probability, the two charts can tell different stories, with some weights seeming to perform better based on the precision/recall chart.
Step6: Judging from the plot above, we can see that when the weight's value is set at 10, we seem to have struck a good balance between precision and recall (this setting has the highest f1 score; a short sketch of how f1 relates to precision and recall follows this list, and we'll have a deeper discussion of the f1 score in the next section), where our model can detect 80% of the fraudulent transactions while not annoying a bunch of customers with false positives. Another observation is that if we were to set the class weighting value to 10,000 we would be able to increase our recall score at the expense of more mis-classified legit cases (as depicted by the low precision score).
|
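A quick illustrative sketch (an addition; it assumes the pos_weights, precision_scores and recall_scores lists from the logistic-regression cell are still in scope): the f1 score is the harmonic mean of precision and recall, which is why it rewards settings that balance the two.
for w, p, r in zip(pos_weights, precision_scores, recall_scores):
    f1 = 2 * p * r / (p + r) if (p + r) > 0 else 0.0
    print('weight {}: precision={:.3f}, recall={:.3f}, f1={:.3f}'.format(w, p, r, f1))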
2,806
|
<ASSISTANT_TASK:>
Python Code:
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
"""
Filters YOLO boxes by thresholding on object and class confidence.
Arguments:
box_confidence -- tensor of shape (19, 19, 5, 1)
boxes -- tensor of shape (19, 19, 5, 4)
box_class_probs -- tensor of shape (19, 19, 5, 80)
threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
Returns:
scores -- tensor of shape (None,), containing the class probability score for selected boxes
boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes
Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
For example, the actual output size of scores would be (10,) if there are 10 boxes.
"""
# Step 1: Compute box scores
### START CODE HERE ### (≈ 1 line)
box_scores = box_confidence * box_class_probs
### END CODE HERE ###
# Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score
### START CODE HERE ### (≈ 2 lines)
box_classes = K.argmax(box_scores, axis=-1)
box_class_scores = K.max(box_scores, axis=-1)
### END CODE HERE ###
# Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
### START CODE HERE ### (≈ 1 line)
filtering_mask = box_class_scores >= threshold
### END CODE HERE ###
# Step 4: Apply the mask to scores, boxes and classes
### START CODE HERE ### (≈ 3 lines)
scores = tf.boolean_mask(box_class_scores, filtering_mask)
boxes = tf.boolean_mask(boxes, filtering_mask)
classes = tf.boolean_mask(box_classes, filtering_mask)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_a:
box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.shape))
print("boxes.shape = " + str(boxes.shape))
print("classes.shape = " + str(classes.shape))
# GRADED FUNCTION: iou
def iou(box1, box2):
"""
Implement the intersection over union (IoU) between box1 and box2
Arguments:
box1 -- first box, list object with coordinates (x1, y1, x2, y2)
box2 -- second box, list object with coordinates (x1, y1, x2, y2)
"""
# Calculate the (y1, x1, y2, x2) coordinates of the intersection of box1 and box2. Calculate its Area.
### START CODE HERE ### (≈ 5 lines)
xi1 = max(box1[0], box2[0])
yi1 = max(box1[1], box2[1])
xi2 = min(box1[2], box2[2])
yi2 = min(box1[3], box2[3])
inter_area = max(xi2-xi1, 0) * max(yi2-yi1, 0)
### END CODE HERE ###
# Calculate the Union area by using Formula: Union(A,B) = A + B - Inter(A,B)
### START CODE HERE ### (≈ 3 lines)
box1_area = max(box1[2]-box1[0], 0) * max(box1[3]-box1[1], 0)
box2_area = max(box2[2]-box2[0], 0) * max(box2[3]-box2[1], 0)
union_area = max(box1_area + box2_area - inter_area, 0)
### END CODE HERE ###
# compute the IoU
### START CODE HERE ### (≈ 1 line)
iou = 0 if union_area <= 0 else inter_area / union_area
### END CODE HERE ###
return iou
box1 = (2, 1, 4, 3)
box2 = (1, 2, 3, 4)
print("iou = " + str(iou(box1, box2)))
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
"""
Applies Non-max suppression (NMS) to set of boxes
Arguments:
scores -- tensor of shape (None,), output of yolo_filter_boxes()
boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
classes -- tensor of shape (None,), output of yolo_filter_boxes()
max_boxes -- integer, maximum number of predicted boxes you'd like
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (, None), predicted score for each box
boxes -- tensor of shape (4, None), predicted box coordinates
classes -- tensor of shape (, None), predicted class for each box
Note: The "None" dimension of the output tensors has obviously to be less than max_boxes. Note also that this
function will transpose the shapes of scores, boxes, classes. This is made for convenience.
"""
max_boxes_tensor = K.variable(max_boxes, dtype='int32') # tensor to be used in tf.image.non_max_suppression()
K.get_session().run(tf.variables_initializer([max_boxes_tensor])) # initialize variable max_boxes_tensor
# Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
### START CODE HERE ### (≈ 1 line)
nms_indices = tf.image.non_max_suppression(boxes, scores, max_output_size=max_boxes, iou_threshold=iou_threshold)
### END CODE HERE ###
# Use K.gather() to select only nms_indices from scores, boxes and classes
### START CODE HERE ### (≈ 3 lines)
scores = K.gather(scores, nms_indices)
boxes = K.gather(boxes, nms_indices)
classes = K.gather(classes, nms_indices)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_b:
scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
"""
Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.
Arguments:
yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
box_confidence: tensor of shape (None, 19, 19, 5, 1)
box_xy: tensor of shape (None, 19, 19, 5, 2)
box_wh: tensor of shape (None, 19, 19, 5, 2)
box_class_probs: tensor of shape (None, 19, 19, 5, 80)
image_shape -- tensor of shape (2,) containing the input shape, in this notebook we use (608., 608.) (has to be float32 dtype)
max_boxes -- integer, maximum number of predicted boxes you'd like
score_threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
iou_threshold -- real value, "intersection over union" threshold used for NMS filtering
Returns:
scores -- tensor of shape (None, ), predicted score for each box
boxes -- tensor of shape (None, 4), predicted box coordinates
classes -- tensor of shape (None,), predicted class for each box
"""
### START CODE HERE ###
# Retrieve outputs of the YOLO model (≈1 line)
box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
# Convert boxes to be ready for filtering functions
boxes = yolo_boxes_to_corners(box_xy, box_wh)
# Use one of the functions you've implemented to perform Score-filtering with a threshold of score_threshold (≈1 line)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = score_threshold)
# Scale boxes back to original image shape.
boxes = scale_boxes(boxes, image_shape)
# Use one of the functions you've implemented to perform Non-max suppression with a threshold of iou_threshold (≈1 line)
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, iou_threshold=iou_threshold)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_b:
yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
scores, boxes, classes = yolo_eval(yolo_outputs)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.eval().shape))
print("boxes.shape = " + str(boxes.eval().shape))
print("classes.shape = " + str(classes.eval().shape))
sess = K.get_session()
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
yolo_model = load_model("model_data/yolo.h5")
yolo_model.summary()
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
def predict(sess, image_file):
Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the preditions.
Arguments:
sess -- your tensorflow/Keras session containing the YOLO graph
image_file -- name of an image stored in the "images" folder.
Returns:
out_scores -- tensor of shape (None, ), scores of the predicted boxes
out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
out_classes -- tensor of shape (None, ), class index of the predicted boxes
Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
# Preprocess your image
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
# Run the session with the correct tensors and choose the correct placeholders in the feed_dict.
# You'll need to use feed_dict={yolo_model.input: ... , K.learning_phase(): 0})
### START CODE HERE ### (≈ 1 line)
out_scores, out_boxes, out_classes = sess.run(feed_dict={yolo_model.input:image_data, K.learning_phase():0}, fetches=[scores, boxes, classes])
### END CODE HERE ###
# Print predictions info
print('Found {} boxes for {}'.format(len(out_boxes), image_file))
# Generate colors for drawing bounding boxes.
colors = generate_colors(class_names)
# Draw bounding boxes on the image file
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
# Save the predicted bounding box on the image
image.save(os.path.join("out", image_file), quality=90)
# Display the results in the notebook
output_image = scipy.misc.imread(os.path.join("out", image_file))
imshow(output_image)
return out_scores, out_boxes, out_classes
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Important Note
Step4: Expected Output
Step6: Expected Output
Step8: Expected Output
Step9: Expected Output
Step10: 3.1 - Defining classes, anchors and image shape.
Step11: 3.2 - Loading a pretrained model
Step12: This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
Step13: Note
Step14: You added yolo_outputs to your graph. This set of 4 tensors is ready to be used as input by your yolo_eval function.
Step16: 3.5 - Run the graph on an image
Step17: Run the following cell on the "test.jpg" image to verify that your function is correct.
|
2,807
|
<ASSISTANT_TASK:>
Python Code:
print('Hello)
1 / 0
open('doesnotexistfile.txt')
print(locals()['__builtins__'])
import sys
def divide(a,b):
try:
return a / b
except:
print(sys.exc_info()[0])
divide (1,2)
divide (2,0) # This will be captured by the 'except' clause
# print custom error message
def divide(a,b):
try:
return a / b
except:
print('Error occured',sys.exc_info()[0])
divide (1,2)
divide (2,0) # This will be captured by the 'except' clause
def divide(a,b):
try:
return a / b
except (ZeroDivisionError):
print('Number cannot be divided by zero or non-integer')
except:
print('Error Occured',sys.exc_info()[0])
divide (1,2)
divide (2,0) # This will be captured by the 'except - zero division error' clause
divide (2,'a') # This will be captured by the generic 'except' clause
def divide(a,b):
try:
return a / b
except (ZeroDivisionError, TypeError): # use a tuple to capture multiple errors
print('Number cannot be divided by zero or non-integer')
except:
print('Error Occured',sys.exc_info()[0])
divide (1,2)
divide (2,0) # This will be captured by the 'except - zero division error' clause
divide (2,'a') # This will be captured by the generic 'except' clause
import sys
try:
f = open('myfile.txt')
s = f.readline()
i = int(s.strip())
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to an integer.")
except:
print("Unexpected error:", sys.exc_info()[0])
raise
for arg in sys.argv[1:]:
try:
f = open(arg, 'r')
except OSError:
print('cannot open', arg)
else:
print(arg, 'has', len(f.readlines()), 'lines')
f.close()
try:
raise Exception('1002','Custom Exception Occured')
except Exception as inst:
print(type(inst))
print(inst)
print(inst.args)
errno, errdesc = inst.args
print('Error Number:',errno)
print('Error Description:',errdesc)
def func_will_fail():
return 1 / 0
try:
func_will_fail()
except ZeroDivisionError as err:
print('Handling Error - ',err)
raise NameError('Error Occured')
try:
raise NameError('Error Captured')
except NameError:
print('Captured Exception')
raise
class CustomError(Exception):
pass
raise CustomError()
raise CustomError('Unexpected Error Occured')
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
pass
class ValueTooSmallError(Error):
"""Raised when the input value is too small"""
pass
class ValueTooLargeError(Error):
"""Raised when the input value is too large"""
pass
# our main program
# user guesses a number until he/she gets it right
# you need to guess this number
number = 10
while True:
try:
i_num = int(input("Enter a number: "))
if i_num < number:
raise ValueTooSmallError
elif i_num > number:
raise ValueTooLargeError
break
except ValueTooSmallError:
print("This value is too small, try again!")
print()
except ValueTooLargeError:
print("This value is too large, try again!")
print()
print("Congratulations! You guessed it correctly.")
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""
Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class TransitionError(Error):
"""
Raised when an operation attempts a state transition that's not
allowed.
Attributes:
previous -- state at beginning of transition
next -- attempted new state
message -- explanation of why the specific transition is not allowed
"""
def __init__(self, previous, next, message):
self.previous = previous
self.next = next
self.message = message
try:
raise KeyboardInterrupt
finally:
print('Bye')
def divide(a,b):
try:
result = a / b
except ZeroDivisionError:
print('Number cannot be divided by zero')
else:
print('Result',result)
finally:
print('Executed Finally Clause')
divide(2,1)
divide(2,0)
divide('1','2')
for line in open("myfile.txt"):
print(line, end="")
with open("test.txt") as f:
for line in f:
print(line, end="")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exceptions
Step2: Built-in Exceptions
Step3: Following are some of the built-in exceptions.
Step4: Catching Specific Exceptions
Step5: The last except clause may omit the exception name(s), to serve as a wildcard. Use this with extreme caution, since it is easy to mask a real programming error in this way! It can also be used to print an error message and then re-raise the exception (allowing a caller to handle the exception as well)
Step6: The try … except statement has an optional else clause, which, when present, must follow all except clauses. It is useful for code that must be executed if the try clause does not raise an exception. For example
Step7: The use of the else clause is better than adding additional code to the try clause because it avoids accidentally catching an exception that wasn’t raised by the code being protected by the try … except statement.
Step8: Exception handlers don’t just handle exceptions if they occur immediately in the try clause, but also if they occur inside functions that are called (even indirectly) in the try clause. For example
Step9: Raise Exceptions
Step10: If you need to determine whether an exception was raised but don’t intend to handle it, a simpler form of the raise statement allows you to re-raise the exception
Step11: User Exceptions
Step15: Here, we have created a user-defined exception called CustomError which is derived from the Exception class. This new exception can be raised, like other exceptions, using the raise statement with an optional error message.
Step19: Here, we have defined a base class called Error.
Step20: Clean up Actions
Step21: Please note that the TypeError raised by dividing two strings is not handled by the except clause and therefore re-raised after the finally clause has been executed.
Step22: The problem with this code is that it leaves the file open for an indeterminate amount of time after this part of the code has finished executing. This is not a best practice.
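A minimal sketch of the wildcard except clause from Step5, following the standard Python tutorial pattern (the file name here is only illustrative): the bare except reports whatever slipped past the named handlers and then re-raises it so a caller can still deal with it.
import sys
try:
    f = open('myfile.txt')
    s = f.readline()
    i = int(s.strip())
except OSError as err:
    print("OS error:", err)
except ValueError:
    print("Could not convert data to an integer.")
except:
    print("Unexpected error:", sys.exc_info()[0])
    raise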
|
2,808
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from neus import pyramid
from neus import partition
# instantiate a window object
center = [1.0]
width = [0.5]
win = pyramid.Pyramid(center, width)
# plot the support of the pyramid object
x = np.linspace(0.0, 2.0, 100)
out = [win([i]) for i in x]
plt.plot(x, out)
# create an empty partition object
sys = partition.Partition()
# create a list of winodw objects
width = 0.75
centers = [x for x in np.arange(-3, 3)]
# now add the windows to partition
for center in centers:
win = pyramid.Pyramid([center], [width])
sys.append(win)
print sys
# element access
print sys[0]
# slicing
print sys[2:4]
# list concatenation
print sys[2:4] + [sys[0]]
# choose a point with nonzero support
cv = [-2.5]
# return normalized vector of the support
print sys(cv)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Window objects
Step2: Partition objects
Step3: Partition objects act like callable lists, so some of the expected list operations also work with partitions
Step4: The partition's call routine returns the normalized vector of supports
|
2,809
|
<ASSISTANT_TASK:>
Python Code:
import ibis
import os
hdfs_port = os.environ.get('IBIS_WEBHDFS_PORT', 50070)
hdfs = ibis.hdfs_connect(host='quickstart.cloudera', port=hdfs_port)
con = ibis.impala.connect(host='quickstart.cloudera', database='ibis_testing',
hdfs_client=hdfs)
print('Hello!')
table = con.table('functional_alltypes')
table.limit(5)
proj = table['bool_col', 'int_col', 'double_col']
proj.limit(5)
to_select = ['bool_col', 'int_col']
table[to_select].limit(5)
table.select(['int_col', 'double_col']).limit(5)
bigger_expr = (table.int_col * 2).name('bigger_ints')
proj2 = table['int_col', bigger_expr]
proj2.limit(5)
table2 = table.add_column(bigger_expr)
table2.limit(5)
table.select([table, bigger_expr]).limit(5)
foo_constant = ibis.literal(5).name('foo')
table.select([table.bigint_col, foo_constant]).limit(5)
region = con.table('tpch_region')
nation = con.table('tpch_nation')
customer = con.table('tpch_customer')
lineitem = con.table('tpch_lineitem')
join_expr = region.r_regionkey == nation.n_regionkey
joined = region.inner_join(nation, join_expr)
table_ref = joined[nation, region.r_name.name('region')]
table_ref.columns
table_ref.limit(5)
agged = table_ref.aggregate([table_ref.n_name.count().name('nrows')], by=['region'])
agged
joined.group_by(region.r_name).size()
table = con.table('functional_alltypes')
hour_dim = table.timestamp_col.hour().name('hour')
hourly_mean = (table.group_by(hour_dim)
.aggregate([table.double_col.mean().name('avg_double')]))
hourly_mean
mad = (table.double_col - hourly_mean.avg_double).abs().mean().name('MAD')
join_expr = hour_dim == hourly_mean.hour
result = (table.inner_join(hourly_mean, join_expr)
.group_by(table.string_col)
.aggregate([mad]))
result
table = con.table('functional_alltypes')
keys = ['string_col', (table.bigint_col > 40).ifelse('high', 'low').name('bigint_tier')]
metrics = [table.double_col.sum().name('total')]
agged = (table
.filter(table.int_col < 8)
.group_by(keys)
.aggregate(metrics))
sorted_agged = agged.sort_by(['bigint_tier', ('total', False)])
sorted_agged
agged.sort_by(ibis.desc('total'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Projections
Step2: First, the basics
Step3: You can make a list of columns you want, too, and pass that
Step4: You can also use the explicit projection or select functions
Step5: We can add new columns by using named column expressions
Step6: Adding columns is a shortcut for projection. In Ibis, adding columns always produces a new table reference
Step7: In more complicated projections involving joins, we may need to refer to all of the columns in a table at once. This is how add_column works. We just pass the whole table in the projection
Step8: To use constants in projections, we have to use a special ibis.literal function
Step9: Joins
Step10: region and nation are connected by their respective regionkey columns
Step11: If you have multiple join conditions, either compose them yourself (like filters) or pass a list to the join function (a small sketch is given after this step list)
Step12: Things like group_by work with unmaterialized joins, too, as you would hope.
Step13: Explicit join materialization
Step14: Okay, great, now how about the MAD? The only trick here is that we can form an aggregate metric from the two tables, and we then have to join it later. Ibis will not figure out how to join the tables automatically for us.
Step15: This metric is only valid if used in the context of table joined with hourly_mean, so let's do that. Writing down the join condition is simply a matter of writing
Step16: Now let's compute the MAD grouped by string_col
Step17: Sorting
Step18: For sorting in descending order, you can use the special ibis.desc function
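A small sketch for Step11, reusing the region and nation tables defined above: several join predicates can be passed to the join function as a list (the second condition here is purely illustrative and only shows the syntax).
join_predicates = [
    region.r_regionkey == nation.n_regionkey,
    region.r_name == nation.n_name,  # illustrative extra condition, not meaningful for TPC-H data
]
multi_joined = region.inner_join(nation, join_predicates)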
|
2,810
|
<ASSISTANT_TASK:>
Python Code:
# import
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.utils import shuffle
import mahotas as mh
from mahotas.features import surf
import glob
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
%matplotlib inline
cluster1 = np.random.uniform(0.5, 1.5, (2, 10))
cluster2 = np.random.uniform(3.5, 4.5, (2, 10))
x = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 7, 9])
y = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 1, 3])
plt.plot(x,y, 'ob')
plt.margins(0.2)
X = np.hstack((cluster1, cluster2)).T
X = np.vstack((x, y)).T
K = range(1, 10)
meandistortions = []
for k in K:
kmeans = KMeans(n_clusters=k)
kmeans.fit(X)
meandistortions.append(sum(np.min(cdist(X, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X.shape[0])
plt.plot(K, meandistortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Average distortion')
plt.title('Selecting k with the Elbow Method')
plt.show()
plt.figure(figsize=(12,9))
plt.subplot(3, 2, 1)
x1 = np.array([1, 2, 3, 1, 5, 6, 5, 5, 6, 7, 8, 9, 7, 9])
x2 = np.array([1, 3, 2, 2, 8, 6, 7, 6, 7, 1, 2, 1, 1, 3])
X = np.array(list(zip(x1, x2))).reshape(len(x1), 2)
#print(list(zip(x1, x2)))
plt.xlim([0, 10])
plt.ylim([0, 10])
plt.title('Instances')
plt.scatter(x1, x2)
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'b']
markers = ['o', 's', 'D', 'v', '^', 'p', '*', '+']
tests = [2, 3, 4, 5, 8]
subplot_counter = 1
for t in tests:
subplot_counter += 1
plt.subplot(3, 2, subplot_counter)
kmeans_model = KMeans(n_clusters=t).fit(X)
for i, l in enumerate(kmeans_model.labels_):
plt.plot(x1[i], x2[i], color=colors[l], marker=markers[l], ls='None')
plt.xlim([0, 10])
plt.ylim([0, 10])
plt.title('K = %s, silhouette coefficient = %.03f' % (
t, metrics.silhouette_score(X, kmeans_model.labels_, metric='euclidean')))
plt.show()
original_img = np.array(mh.imread('data/atul.jpg'), dtype=np.float64) / 255
original_dimensions = tuple(original_img.shape)
width, height, depth = tuple(original_img.shape)
image_flattened = np.reshape(original_img, (width * height, depth))
image_array_sample = shuffle(image_flattened, random_state=0)[:1000]
estimator = KMeans(n_clusters=64, random_state=0)
estimator.fit(image_array_sample)
cluster_assignments = estimator.predict(image_flattened)
compressed_palette = estimator.cluster_centers_
compressed_img = np.zeros((width, height, compressed_palette.shape[1]))
label_idx = 0
for i in range(width):
for j in range(height):
compressed_img[i][j] = compressed_palette[cluster_assignments[label_idx]]
label_idx += 1
plt.subplot(122)
plt.title('Original Image')
plt.imshow(original_img)
plt.axis('off')
plt.subplot(121)
plt.title('Compressed Image')
plt.imshow(compressed_img)
plt.axis('off')
plt.show()
all_instance_filenames = []
all_instance_targets = []
for f in glob.glob('C:/Users/atul.singh/Downloads/cat_dog_test/test1/*.jpg'):
target = 1 if 'cat' in f else 0
all_instance_filenames.append(f)
all_instance_targets.append(target)
surf_features = []
counter = 0
for f in all_instance_filenames:
#print('Reading image:', f)
image = mh.imread(f, as_grey=True)
surf_features.append(surf.surf(image)[:, 5:])
train_len = int(len(all_instance_filenames) * .60)
X_train_surf_features = np.concatenate(surf_features[:train_len])
X_test_surf_feautres = np.concatenate(surf_features[train_len:])
y_train = all_instance_targets[:train_len]
y_test = all_instance_targets[train_len:]
n_clusters = 300
print ('Clustering', len(X_train_surf_features), 'features')
estimator = MiniBatchKMeans(n_clusters=n_clusters)
estimator.fit_transform(X_train_surf_features)
X_train = []
for instance in surf_features[:train_len]:
clusters = estimator.predict(instance)
features = np.bincount(clusters)
if len(features) < n_clusters:
        features = np.append(features, np.zeros((1, n_clusters - len(features))))
X_train.append(features)
X_test = []
for instance in surf_features[train_len:]:
clusters = estimator.predict(instance)
features = np.bincount(clusters)
if len(features) < n_clusters:
        features = np.append(features, np.zeros((1, n_clusters - len(features))))
X_test.append(features)
clf = LogisticRegression(C=0.001, penalty='l2')
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(metrics.classification_report(y_test, predictions))
print('Precision: ', metrics.precision_score(y_test, predictions))
print('Recall: ', metrics.recall_score(y_test, predictions))
print('Accuracy: ', metrics.accuracy_score(y_test, predictions))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Evaluating clusters
Step2: Image Quantization
Step3: Clustering to learn features
|
2,811
|
<ASSISTANT_TASK:>
Python Code:
precipitation_df.head()
precipitation_df.tail()
precipitation_df['Country or Area'].values
precipitation_df = precipitation_df.set_index(precipitation_df["Country or Area"])
precipitation_df.drop(['Country or Area'], axis=1, inplace=True)
precipitation_df.head()
%matplotlib inline
germany = precipitation_df.loc['Germany']
ax = germany.plot(kind='bar', figsize=(8,5), title="Precipitation of %s between 1990 to 2009" % germany.name)
ax.set_ylabel("Precipitation (million cubic meters)")
ax.set_xlabel("Years");
import matplotlib.pyplot as plt, numpy as np
france = precipitation_df.loc['France']
years = france.index
index = np.arange(len(years))
plt.figure(figsize=(8,5))
bar_width = 0.5
plt.bar(index, france, bar_width, color='b')
plt.xlabel("Year")
plt.ylabel("Precipitation (million cubic meters)")
plt.title("Precipitation of %s between 1990 to 2009" % france.name, y=1.08)
plt.xticks(index + bar_width, years, rotation=90)
plt.show()
precipitation_df["SUM"] = precipitation_df.sum(axis=1)
precipitation_sorted_df = precipitation_df.sort_values(by="SUM",ascending=False)
top5_sums = pd.Series(precipitation_sorted_df["SUM"].head(5))
top5_sums
top5_bars = precipitation_sorted_df[years][0:5].transpose()
ax = top5_bars.plot(figsize=(10,8), marker='o', linestyle='-', title="Top 5 Countries with highest Precipitation")
ax.set_xlabel("Years")
ax.set_ylabel("Precipitation (million cubic meters)");
precipitation_sums = top5_sums
other_sums = precipitation_sorted_df["SUM"][5:].sum()
precipitation_sums["Other"] = other_sums
plt.axis('equal')
plt.title("Annual precipitation percentage",y=1.08)
plt.pie(
precipitation_sums,
labels=precipitation_sums.index,
colors=['blue', 'green', 'red', 'turquoise', 'magenta','yellow'],
autopct="%1.2f%%",
radius=1.25);
chile = precipitation_sorted_df.loc['Chile'][:-1]
years = chile.index
chile.plot(kind='bar', figsize=(8,5), title="Precipitation of %s between 1990 to 2009" % chile.name)
plt.xlabel("Year")
plt.ylabel("Precipitation (million cubic meters)");
index = chile[chile>0.0].index
fit = np.polyfit(index.astype('int'), chile[index].values,1)
print("slope: " + str(fit[0]))
plt.plot(index, chile[index],'.')
plt.plot(index, fit[0]*index.astype('int') + fit[1], '-', color='red')
plt.title("Precipitation Trend for Chile")
plt.xlabel("Year")
plt.ylabel("Precipitation (million cubic meters)")
plt.show()
def compute_slope(row):
    """This method computes the slope of a fitted line for given data points.
    For arguments containing less than two valid data points it returns a NaN."""
slope = np.nan
years = row[row>0.0].index
if(len(years)>=2):
fit = np.polyfit(years.astype('int'), row[years].values,1)
slope = fit[0]
return slope
precipitation_df["TREND"] = precipitation_sorted_df[years].apply(compute_slope, axis=1)
precipitation_df.head()
negative_trends = precipitation_df[precipitation_df["TREND"]<0.0]
negative_trends.index
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each row in the table lists
Step2: Having the names of the countries or areas as a column comes in handy, but it would be more useful to have the names as an index to the rows. To manipulate the DataFrame in this way, run the next cell
Step3: <a id="plot_data"></a>
Step4: Now use pandas wrappers around the matplotlib routines. Because you changed the index of the table rows, you can access the precipitation measurements of a country or area by name. For example, run the following cell to plot the annual precipitation of Germany
Step5: Another possibility is to import the matplotlib library and use it directly for plotting the precipitation values in France
Step6: Plot the countries or areas with the highest total precipitation
Step7: Next, sort the DataFrame by total precipitation and print the top five countries or areas with the highest total precipitation
Step8: Now, plot the top five countries next to one another in a line graph for a better comparison of the recordings
Step9: Finally, compare the annual precipitation for the top five countries. China has the highest annual precipitation, followed by Colombia. The line graph also shows that some values are missing, which makes comparing measurements difficult.
Step10: The pie chart shows you that nearly a quarter of the precipitation fell in China, and that more than half of the precipitation fell in the five countries with the highest annual precipitation.
Step11: A line of best fit is a straight line that is the best approximation of the given set of data (the least-squares slope formula is noted after this step list).
Step13: For Chile, the line plot shows a positive trend in annual precipitation, despite the fact that during the last couple of years the annual precipitation decreased.
Step14: After defining the compute_slope() method, use the pandas apply() method to compute the slope for each row in the DataFrame and add the computed slope value to a new column called TREND
Step15: If the value is positive, the annual precipitation trend for the country is positive. If the value is negative, the annual precipitation trend is negative. Values near zero indicate a stable condition.
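For reference on Step11: for a degree-1 fit, np.polyfit returns the ordinary least-squares slope and intercept, and the slope it reports is
$$ \mathrm{slope} = \frac{\sum_i (x_i-\bar{x})(y_i-\bar{y})}{\sum_i (x_i-\bar{x})^2}, $$
which is why the TREND column can be read directly as the average change in precipitation per year.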
|
2,812
|
<ASSISTANT_TASK:>
Python Code:
from pylab import *  # assumed: this notebook relies on pylab-style imports for array, linspace, plot, grid, ...
a = array([5,8,12,13,100,18,74])
print a
print "max:", a.max()
print "min: ", a.min()
print u"součet:", a.sum()
print u'dékla:', a.size
linspace(2,5,9)
linspace(0,5,4)
linspace(0,1,20)
arange(5)
arange(3,10)
arange(10,20,0.2)
zeros(5)
ones(20)
t=linspace(0,10,11)
print t
print t[1:5]
print t[-1]
print t[-2]
print t[-3]
print t[5:-3]
t=arange(0,100,1)
print t
print t[0:20:3]
print t[::9]
t[::3]=0
print t
t[20:]=7
print t
a=array([1.,2.,3.,4.])
b=array([9.,8.,7.,6.])
print a
print b
print a+b
print a*b
print a/b
print a
print a+10
print b
print b*3
print a
print a**2
print a**3
x=linspace(0.1,4,20)
y=exp(x)/6.
z=log(x)
s=5*sin(x)
plot(x,y, 'x-',x,z, 'x-', x,s,'x-')
grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: linspace
Step2: arange
Step3: zeros, ones
Step4: Not only slices
Step5: Elements can also be skipped within a slice
Step6: A slice can also be used for assignment
Step7: If you want to know more, have a look at the numexpr library (a small sketch follows after this step list).
Step8: Ordinary mathematical functions work as expected, of course
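A small sketch of the numexpr library mentioned in Step7 (assumes the numexpr package is installed); evaluate() compiles the expression string and applies it to the arrays found in the local scope, which is often faster than the equivalent pure-numpy expression on large arrays.
import numpy as np
import numexpr as ne
a = np.arange(1e6)
b = np.arange(1e6)
ne.evaluate("2*a + 3*b")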
|
2,813
|
<ASSISTANT_TASK:>
Python Code:
lambda0 = 6301.5
JUp = 1.0
JLow = 1.0
gUp = 2.5
gLow = 0.0
lambdaStart = 6300.8
lambdaStep = 0.01
nLambda = 150
wavelength = lambdaStart + np.arange(nLambda) * lambdaStep
lineInfo = np.asarray([lambda0, JUp, JLow, gUp, gLow, lambdaStart, lambdaStep])
s = pymilne.milne(nLambda, lineInfo)
BField = 100.0
BTheta = 0.0
BChi = 0.0
VMac = 0.0
damping = 0.04
B0 = 1.0
B1 = 5.0
mu = 1.0
VDop = 0.080
kl = 6.75
modelSingle = np.asarray([BField, BTheta, BChi, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
cont = B0+B1
f, ax = pl.subplots(ncols=2, nrows=1, figsize=(12,6))
ax[0].plot(wavelength, stokes[0,:] / cont)
ax[1].plot(wavelength, stokes[3,:] / cont)
ax[0].set_xlabel('Wavelength [$\AA$]')
ax[1].set_xlabel('Wavelength [$\AA$]')
ax[0].set_ylabel('I/I$_c$')
ax[1].set_ylabel('V/I$_c$')
ax[0].ticklabel_format(useOffset=False)
ax[1].ticklabel_format(useOffset=False)
pl.tight_layout()
geff = 0.5*(gUp+gLow) + 0.25*(gUp-gLow)*(JUp*(JUp+1)-JLow*(JLow+1))
diffI = np.gradient(stokes[0,:], lambdaStep)
alpha = -4.6686e-13*6301.5**2*geff
print "Estimated field = {0} G".format(np.sum(stokes[3,:] * diffI) / np.sum(diffI**2) / alpha)
BField = 10500.0
BTheta = 0.0
BChi = 0.0
VMac = 0.0
damping = 0.04
B0 = 1.0
B1 = 5.0
mu = 1.0
VDop = 0.080
kl = 6.75
modelSingle = np.asarray([BField, BTheta, BChi, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
cont = B0+B1
f, ax = pl.subplots(ncols=2, nrows=1, figsize=(12,6))
ax[0].plot(wavelength, stokes[0,:] / cont)
ax[1].plot(wavelength, stokes[3,:] / cont)
ax[0].set_xlabel('Wavelength [$\AA$]')
ax[1].set_xlabel('Wavelength [$\AA$]')
ax[0].set_ylabel('I/I$_c$')
ax[1].set_ylabel('V/I$_c$')
ax[0].ticklabel_format(useOffset=False)
ax[1].ticklabel_format(useOffset=False)
pl.tight_layout()
maxLoc = np.argmax(stokes[3,:])
deltaL = lambda0 - wavelength[maxLoc]
print "Estimated field = {0} G".format(deltaL / (4.6686e-13*6301.**2*gUp))
lambda0 = 6302.5
JUp = 1.0
JLow = 1.0
gUp = 2.5
gLow = 0.0
lambdaStart = 6301.8
lambdaStep = 0.03
nLambda = 50
wavelength = lambdaStart + np.arange(nLambda) * lambdaStep
lineInfo = np.asarray([lambda0, JUp, JLow, gUp, gLow, lambdaStart, lambdaStep])
s = pymilne.milne(nLambda, lineInfo)
BField = 100.0
BTheta = 0.0
BChi = 0.0
VMac = 0.0
damping = 0.04
B0 = 1.0
B1 = 5.0
mu = 1.0
VDop = 0.040
kl = 6.75
pV = np.exp(-(wavelength-lambda0-60e-3)**2 / 75e-3**2)
pQ = np.exp(-(wavelength-lambda0-120e-3)**2 / 75e-3**2)
nB = 100
B = np.linspace(0.0,3500.0,nB)
Bpar = np.zeros((2,4,nB))
Bperp = np.zeros((4,nB))
SV = np.zeros((2,4,nB))
SQ = np.zeros((4,nB))
pal = sn.color_palette()
thetas = [0, 30, 60, 85]
for j in range(4):
for i in range(nB):
modelSingle = np.asarray([B[i], thetas[j], 0.0, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
Bpar[0,j,i] = B[i] * np.cos(thetas[j]*np.pi/180.0)
Bperp[j,i] = B[i] * np.sin(thetas[j]*np.pi/180.0)
SV[0,j,i] = scipy.integrate.simps(stokes[3,:] * pV, x=wavelength) / scipy.integrate.simps(stokes[0,:] * pV, x=wavelength)
SQ[j,i] = -scipy.integrate.simps(stokes[1,:] * pQ, x=wavelength) / scipy.integrate.simps(stokes[0,:] * pQ, x=wavelength)
modelSingle = np.asarray([B[i], 180.0-thetas[j], 0.0, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
Bpar[1,j,i] = B[i] * np.cos((180.0-thetas[j])*np.pi/180.0)
SV[1,j,i] = scipy.integrate.simps(stokes[3,:] * pV, x=wavelength) / scipy.integrate.simps(stokes[0,:] * pV, x=wavelength)
pl.close('all')
f, ax = pl.subplots()
for j in range(4):
ax.plot(Bpar[0,j,:], SV[0,j,:], color=pal[j], label=r'$\theta_B=${0}'.format(thetas[j]))
ax.plot(Bpar[1,j,:], SV[1,j,:], color=pal[j])
ax.set_xlabel('B$_\parallel$')
ax.set_ylabel('S$_V$')
ax.set_xlim([-3500,3500])
pl.legend()
f, ax = pl.subplots()
for j in range(4):
ax.plot(Bperp[j,:], SQ[j,:], color=pal[j], label=r'$\theta_B=${0}'.format(thetas[j]))
ax.set_xlabel('B$_\perp$')
ax.set_ylabel('S$_Q$')
ax.set_xlim([0,3500])
pl.legend(loc='upper left')
lambda0 = 5250.21
JUp = 0.0
JLow = 1.0
gUp = 0.0
gLow = 3.0
lambdaStart = 5249.5
lambdaStep = 0.015
nLambda = 100
wavelength = lambdaStart + np.arange(nLambda) * lambdaStep
lineInfo = np.asarray([lambda0, JUp, JLow, gUp, gLow, lambdaStart, lambdaStep])
s = pymilne.milne(nLambda, lineInfo)
BField = 100.0
BTheta = 0.0
BChi = 0.0
VMac = 0.0
damping = 0.04
B0 = 1.0
B1 = 12.0
mu = 1.0
VDop = 0.032
kl = 6.1
nB = 100
B = np.linspace(0.0,3500.0,nB)
Bpar = np.zeros((4,nB))
BEstimated = np.zeros((4,nB))
pal = sn.color_palette()
thetas = [15, 30, 60, 75]
for j in range(4):
for i in range(nB):
modelSingle = np.asarray([B[i], thetas[j], 0.0, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
Ic = stokes[0,0]
Iplus = 0.5*(stokes[0,:] + stokes[3,:])
Iminus = 0.5*(stokes[0,:] - stokes[3,:])
lambdaPlus = scipy.integrate.simps((0.5*Ic - Iplus) * wavelength, x=wavelength) / scipy.integrate.simps((0.5*Ic - Iplus), x=wavelength)
lambdaMinus = scipy.integrate.simps((0.5*Ic - Iminus) * wavelength, x=wavelength) / scipy.integrate.simps((0.5*Ic - Iminus), x=wavelength)
BEstimated[j,i] = 1.071e9 / (3.0 * 5250.21**2) * (lambdaPlus-lambdaMinus)*1e3
Bpar[j,i] = B[i] * np.cos(thetas[j]*np.pi/180.0)
pl.close('all')
f, ax = pl.subplots()
for j in range(4):
ax.plot(Bpar[j,:], (BEstimated[j,:]-Bpar[j,:]) / Bpar[j,:], color=pal[j], label=r'$\theta_B=${0}'.format(thetas[j]))
ax.set_xlabel('B$_\parallel$')
ax.set_ylabel('Relative error')
ax.set_xlim([0,3500])
pl.legend()
thetaReal = np.linspace(0.0,180.0,200)
x = np.asarray([1.0,0.1,0.01])
f, ax = pl.subplots()
tanTheta = np.tan(thetaReal*np.pi/180.0)
ind = np.where(tanTheta < 0.0)
for i in range(3):
out = np.arctan2(tanTheta, np.sqrt(x[i]))
out[ind] += np.pi
ax.plot(thetaReal, out*180.0/np.pi, label='x={0}'.format(x[i]))
ax.set_xlabel(r'$\theta_m$')
ax.set_ylabel(r'$\theta_\mathrm{app}$')
pl.legend(loc='upper left')
# First line
lambda0 = 6302.5
JUp = 1.0
JLow = 1.0
gUp = 2.5
gLow = 0.0
lambdaStart = 6301.8
lambdaStep = 0.03
nLambda = 50
wavelength = lambdaStart + np.arange(nLambda) * lambdaStep
lineInfo = np.asarray([lambda0, JUp, JLow, gUp, gLow, lambdaStart, lambdaStep])
s = pymilne.milne(nLambda, lineInfo)
VMac = 0.0
damping = 0.04
B0 = 1.0
B1 = 5.0
mu = 1.0
VDop = 0.040
kl = 6.75
pV = np.exp(-(wavelength-lambda0-60e-3)**2 / 75e-3**2)
nB = 100
B = np.linspace(0.0,3500.0,nB)
SV1 = np.zeros((4,nB))
SV2 = np.zeros((4,nB))
pal = sn.color_palette()
thetas = [0, 30, 60, 85]
for j in range(4):
for i in range(nB):
modelSingle = np.asarray([B[i], thetas[j], 0.0, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
SV1[j,i] = scipy.integrate.simps(stokes[3,:] * pV, x=wavelength) / scipy.integrate.simps(stokes[0,:] * pV, x=wavelength)
# Second line
lambda0 = 6301.5
JUp = 1.0
JLow = 1.0
gUp = 1.5
gLow = 0.0
lambdaStart = 6300.8
lambdaStep = 0.03
nLambda = 50
wavelength = lambdaStart + np.arange(nLambda) * lambdaStep
lineInfo = np.asarray([lambda0, JUp, JLow, gUp, gLow, lambdaStart, lambdaStep])
s = pymilne.milne(nLambda, lineInfo)
pV = np.exp(-(wavelength-lambda0-60e-3)**2 / 75e-3**2)
thetas = [0, 30, 60, 85]
for j in range(4):
for i in range(nB):
modelSingle = np.asarray([B[i], thetas[j], 0.0, VMac, damping, B0, B1, VDop, kl])
stokes = s.synth(modelSingle,mu)
SV2[j,i] = scipy.integrate.simps(stokes[3,:] * pV, x=wavelength) / scipy.integrate.simps(stokes[0,:] * pV, x=wavelength)
f, ax = pl.subplots()
for i in range(4):
ax.plot(B, SV1[i,:] / SV2[i,:])
for i in range(4):
ax.plot(B, SV1[i,:] / SV2[i,:], color=pal[i], label=r'$\theta_B$={0}'.format(thetas[i]))
ax.set_xlabel('B [G]')
ax.set_ylabel('Line ratio')
ax.axhline(2.5/1.5, color=pal[4])
pl.legend(loc='lower left')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Strong-field approximation Stokes V
Step2: Longitudinal magnetograph (the weak-field relation used for this estimate is noted after this step list)
Step3: Center-of-gravity
Step4: Influence of filling factor on inclination of magnetic field
Step5: Line ratio technique
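For reference on Step2, the magnetograph estimate in the code above relies on the standard weak-field relation (wavelength in Angstrom, field in Gauss):
$$ V(\lambda) \approx -4.6686\times10^{-13}\,\lambda_0^2\,\bar{g}_{\mathrm{eff}}\,B_\parallel\,\frac{dI}{d\lambda}, \qquad B_\parallel = \frac{\sum_i V_i\,(dI/d\lambda)_i}{\alpha\,\sum_i (dI/d\lambda)_i^2}, \quad \alpha = -4.6686\times10^{-13}\,\lambda_0^2\,\bar{g}_{\mathrm{eff}} $$
which is exactly the least-squares expression evaluated with np.gradient and np.sum in the synthesis cell.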
|
2,814
|
<ASSISTANT_TASK:>
Python Code:
from mysolr import Solr
PDBE_SOLR_URL = "http://wwwdev.ebi.ac.uk/pdbe/search/pdb"
solr = Solr(PDBE_SOLR_URL, version=4)
UNLIMITED_ROWS = 10000000 # necessary because default in mysolr is mere 10
import logging, sys
#reload(logging) # reload is just a hack to make logging work in the notebook, it's usually unnecessary
logging.basicConfig( level=logging.INFO, stream=sys.stdout,
format='LOG|%(asctime)s|%(levelname)s %(message)s', datefmt='%d-%b-%Y %H:%M:%S' )
logging.getLogger("requests").setLevel(logging.WARNING)
def join_with_AND(selectors) :
return " AND ".join(
["%s:%s" % (k,v) for k,v in selectors]
)
def molecule_name_facet_search(selectors) :
response = solr.search(**{
"rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id", "q" : join_with_AND(selectors),
"facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1,
"facet.field" : "molecule_name",
})
num_mols = len(response.documents)
mol_name_counts = response.facets['facet_fields']['molecule_name']
logging.info("%d molecules found with %d distinct molecule_names." % (num_mols, len(mol_name_counts.keys())))
for mol_name, nmol in mol_name_counts.items() :
logging.info("%3d molecules are named as %s" % (nmol, mol_name))
molecule_name_facet_search([
( 'molecule_name' , '/.*[Cc]arbonic.*[aA]nhydrase.*/'),
])
selectors = [
( 'molecule_name' , '/.*[Cc]arbonic.*[aA]nhydrase.*/'),
('NOT molecule_name' , '(/.*Putative.*/ OR /.*Inhibitor.*/)'),
]
molecule_name_facet_search(selectors)
response = solr.search(**{
"rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id",
"q" : join_with_AND(selectors),
"facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1,
"facet.field" : "experimental_method",
"group" : "true", "group.facet" : "true",
"group.field" : "pdb_id",
})
expt_counts = response.facets['facet_fields']['experimental_method']
logging.info("There are %d experimental methods with this protein's structure has been studied." % len(expt_counts))
for expt, count in expt_counts.items() :
logging.info("%s : %d" % (expt,count))
response = solr.search(**{
"rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id",
"q" : join_with_AND(selectors),
"facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1,
"facet.field" : "deposition_year",
"group" : "true", "group.facet" : "true",
"group.field" : "pdb_id",
})
year_counts = response.facets['facet_fields']['deposition_year']
logging.info("There are %d years in which this protein's structure has been studied." % len(year_counts))
for year in sorted(year_counts.keys(), key=lambda x : int(x)) :
logging.info("%s : %d" % (year,year_counts[year]))
response = solr.search(**{
"rows" : UNLIMITED_ROWS, "fl" : "pdb_id, entity_id",
"q" : join_with_AND(selectors),
"facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1,
"facet.field" : "resolution",
"facet.range" : "resolution",
"f.resolution.facet.range.start" : "0.0",
"f.resolution.facet.range.end" : "100",
"f.resolution.facet.range.gap" : "0.5",
"f.resolution.facet.range.other" : "between",
"f.resolution.facet.range.include" : "upper",
"group" : "true", "group.facet" : "true",
"group.field" : "pdb_id",
})
import string, collections
resol_counts = response.facets['facet_ranges']['resolution']['counts']
resol_counts = collections.OrderedDict([(resol_counts[rci], resol_counts[rci+1]) for rci in range(0, len(resol_counts), 2)])
logging.info("Resolutions at which this protein has been solved is as follows:")
for resol in sorted(resol_counts.keys(), key=lambda x : string.atof(x)) :
logging.info("%3d entries in resolution bin starting %s" % (resol_counts[resol], resol))
response = solr.search(**{
"rows" : UNLIMITED_ROWS,
"fl" : "pdb_id, entity_id, deposition_year, resolution",
"q" : join_with_AND(selectors),
})
resbin_width = 0.5
def resol_bin(resol) :
import decimal
return decimal.Decimal(int(resol/resbin_width) * resbin_width)
yearbin_width = 5
def depyear_bin(year) :
return (year / yearbin_width) * yearbin_width
entry_counted = set()
counts = collections.defaultdict( lambda : collections.defaultdict( lambda: 0 ) )
for adoc in response.documents :
if adoc['pdb_id'] not in entry_counted :
res_bin = resol_bin(adoc['resolution'])
year_bin = depyear_bin(adoc['deposition_year'])
counts[year_bin][res_bin] += 1
import itertools
year_bins = sorted(counts.keys())
resol_bins = sorted(set( itertools.chain(*[v.keys() for v in counts.values()]) ))
logging.info(" " + " ".join("%.1f-%.1f" % (rb,float(rb)+resbin_width) for rb in resol_bins))
for year in year_bins :
to_print = ["%d-%d" % (year,year+yearbin_width)]
total = 0
for resol in resol_bins :
total += counts.get(year, {}).get(resol, 0)
for resol in resol_bins :
count = counts.get(year, {}).get(resol, 0)
to_print.append(count) #, #int(count*100./total),
logging.info(to_print[0] + " ".join(["%5d" % tp for tp in to_print[1:]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find your protein
Step2: Let us assume we are interested in carbonic anhydrases. We write the protein name as a regular expression allowing for case changes on start of word.
Step3: Note that there are some unintended hits - one putative and another inhibitor. Let us filter those out.
Step4: We can also sharpen our search considerably by using annotations like GO, SCOP etc. But the filters should strike a balance in removing spurious hits and keeping genuine ones.
Step5: Count entries by year of deposition
Step6: Note that we do not have to facet on one field at a time - we could have faceted on multiple fields individually in the same call - just provide the fields as a list (a small sketch is given after this step list).
Step7: Hierarchical faceting
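A small sketch for Step6, reusing solr, selectors and the helpers defined above; it assumes mysolr forwards a Python list as repeated facet.field parameters (which is how the underlying requests library encodes list values):
response = solr.search(**{
    "rows" : 0, "q" : join_with_AND(selectors),
    "facet" : "true", "facet.limit" : UNLIMITED_ROWS, "facet.mincount" : 1,
    "facet.field" : ["experimental_method", "deposition_year"],
})
print response.facets['facet_fields'].keys()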
|
2,815
|
<ASSISTANT_TASK:>
Python Code:
from fig_utils import *
import matplotlib.pyplot as plt
import time
%matplotlib inline
# Parameters
country_names = ['nigeria', 'tanzania', 'uganda', 'malawi', 'pooled']
country_paths = ['../data/output/LSMS/nigeria/',
'../data/output/LSMS/tanzania/',
'../data/output/LSMS/uganda/',
'../data/output/LSMS/malawi/',
'../data/output/LSMS/pooled/']
survey = 'lsms'
dimension = 100
k = 10
trials = 10
points = 30
alpha_low = -2
alpha_high = 5
cmap = 'Greens'
t0 = time.time()
performance_matrix = evaluate_models(country_names, country_paths, survey,
dimension, k, trials, points,
alpha_low, alpha_high, cmap)
t1 = time.time()
print 'Time elapsed: {} seconds'.format(t1-t0)
print 'Corresponding values:'
print performance_matrix
# Parameters
country_names = ['nigeria', 'tanzania', 'uganda', 'malawi', 'rwanda',
'pooled']
country_paths = ['../data/output/DHS/nigeria/',
'../data/output/DHS/tanzania/',
'../data/output/DHS/uganda/',
'../data/output/DHS/malawi/',
'../data/output/DHS/rwanda/',
'../data/output/DHS/pooled/']
survey = 'dhs'
dimension = 100
k = 10
trials = 10
points = 30
alpha_low = -2
alpha_high = 5
cmap = 'Blues'
t0 = time.time()
performance_matrix = evaluate_models(country_names, country_paths, survey,
dimension, k, trials, points,
alpha_low, alpha_high, cmap)
t1 = time.time()
print 'Time elapsed: {} seconds'.format(t1-t0)
print 'Corresponding values:'
print performance_matrix
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Out-of-country performance
Step2: Panel B
|
2,816
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np
mylist = [3, 6, 1, 0, 10, 3]
mylist
print("The first element of 'mylist' is: " + str(mylist[0]))
print("The second element of 'mylist' is: " + str(mylist[1]))
myarray = np.array(mylist) # equivalent to np.array([3, 6, 1, 0, 10, 3])
myarray
# look at what type mylist is
type(mylist)
# look at what type myarray is
type(myarray)
print("The first element of 'myarray' is: " + str(myarray[0]))
print("The second element of 'myarray' is: " + str(myarray[1]))
print("mylist:", mylist)
print("myarray:", myarray)
myarray[1:]
myarray[:-1]
myarray[1:-1]
myarray[::2]
myarray[1::2]
myarray[::-1]
mylist = [3, 6, 1, 0, 10, 22]
mylist_plus1 = []
for x in mylist:
mylist_plus1.append(x + 1)
mylist_plus1
mylist = [3, 6, 1, 0, 10, 22]
mylist_plus1 = [x + 1 for x in mylist]
mylist_plus1
myarray = np.array([3, 6, 1, 0, 10, 22])
myarray_plus1 = myarray + 1
myarray_plus1
print("Subtraction: \t" + str(myarray - 2))
print("Multiplication:\t" + str(myarray * 10))
print("Squared: \t" + str(myarray ** 2))
print("Square root: \t" + str(np.sqrt(myarray)))
print("Exponential: \t" + str(np.exp(myarray)))
list_a = [1, 2, 3, 4, 5]
list_b = [6, 7, 8, 9, 10]
list_c = [list_a[i] + list_b[i] for i in range(len(list_a))]
list_c
array_a = np.array(list_a) # equivalent to np.array([1, 2, 3, 4, 5])
array_b = np.array(list_b) # equivalent to np.array([6, 7, 8, 9, 10])
array_c = array_a + array_b
array_c
list_a + list_b
print("Subtraction: \t" + str(array_a - array_b))
print("Multiplication:\t" + str(array_a * array_b))
print("Exponent: \t" + str(array_a ** array_b))
print("Division: \t" + str(array_a / array_b))
mylist = []
mylist.append(7)
mylist.append(2)
mylist
myarray = np.empty(2) # create an array of size 2
myarray[0] = 7
myarray[1] = 2
myarray
# create an array of numbers from 0 to 3
np.arange(3)
# create an array of numbers from 1 to 5
np.arange(1, 5)
# create an array of every third number between 2 and 10
np.arange(2, 10, 3)
# create an array of numbers between 0.1 and 1.1 spaced by 0.1
np.arange(0.1, 1.1, 0.1)
mylist = [3, 6, 1, 10, 22]
total = 0
for number in mylist:
total += number
total
# you can also just do np.sum(mylist) -- it converts it to an
# array for you!
myarray = np.array(mylist)
np.sum(myarray)
def euclidean_distance(a, b):
    """Computes the Euclidean distance between a and b.

    Hint: your solution can be done in a single line of code!

    Parameters
    ----------
    a, b : numpy arrays or scalars with the same size

    Returns
    -------
    the Euclidean distance between a and b
    """
### BEGIN SOLUTION
return np.sqrt(np.sum((a - b) ** 2))
### END SOLUTION
# add your own test cases in this cell!
from nose.tools import assert_equal, assert_raises
# check euclidean distance of size 3 integer arrays
a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
assert_equal(euclidean_distance(a, b), 5.196152422706632)
# check euclidean distance of size 4 float arrays
x = np.array([3.6, 7., 203., 3.])
y = np.array([6., 20.2, 1., 2.])
assert_equal(euclidean_distance(x, y), 202.44752406487959)
# check euclidean distance of scalars
assert_equal(euclidean_distance(1, 0.5), 0.5)
# check that an error is thrown if the arrays are different sizes
a = np.array([1, 2, 3])
b = np.array([4, 5])
assert_raises(ValueError, euclidean_distance, a, b)
assert_raises(ValueError, euclidean_distance, b, a)
print("Success!")
arr = np.zeros((3, 4))
arr
arr.shape
np.zeros(3).shape
np.zeros((3,))
np.zeros(3)
arr = np.zeros((3, 4))
arr.size
arr = np.arange(32).reshape((8, 4))
arr
# set the 3rd element in the 1st row to 0
arr[0, 2] = 0
arr
arr[0, 2]
# the first row
arr[0]
# the second column
arr[:, 1]
# select a rectangular region from the array
arr[2:5, 1:3]
arr = np.arange(10)
arr2 = arr
arr2[::2] = arr2[::2] * 2
print("arr: " + str(arr))
print("arr2: " + str(arr2))
arr = np.arange(10)
arr2 = arr.copy()
arr2[::2] = arr2[::2] * 2
print("arr: " + str(arr))
print("arr2: " + str(arr2))
def border(n, m):
    """Creates an array with shape (n, m) that is all zeros
    except for the border (i.e., the first and last rows and
    columns), which should be filled with ones.

    Hint: you should be able to do this in three lines
    (including the return statement)

    Parameters
    ----------
    n, m: int
        Number of rows and number of columns

    Returns
    -------
    numpy array with shape (n, m)
    """
### BEGIN SOLUTION
arr = np.ones((n, m))
arr[1:-1, 1:-1] = 0
return arr
### END SOLUTION
# add your own test cases in this cell!
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
# check a few small examples explicitly
assert_array_equal(border(1, 1), [[1]])
assert_array_equal(border(2, 2), [[1, 1], [1, 1]])
assert_array_equal(border(3, 3), [[1, 1, 1], [1, 0, 1], [1, 1, 1]])
assert_array_equal(border(3, 4), [[1, 1, 1, 1], [1, 0, 0, 1], [1, 1, 1, 1]])
# check a few large and random examples
for i in range(10):
n, m = np.random.randint(2, 1000, 2)
result = border(n, m)
# check dtype and array shape
    assert_equal(result.dtype, np.float64)  # np.float alias was removed in newer NumPy releases
assert_equal(result.shape, (n, m))
# check the borders
assert (result[0] == 1).all()
assert (result[-1] == 1).all()
assert (result[:, 0] == 1).all()
assert (result[:, -1] == 1).all()
# check that everything else is zero
assert np.sum(result) == (2*n + 2*m - 4)
print("Success!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, we have access to all NumPy functions via the variable np (this is the convention in the Scientific Python community for referring to NumPy). We can take a look at what this variable actually is, and see that it is in fact the numpy module (remember that you will need to have run the cell above before np will be defined!)
Step2: NumPy is incredibly powerful and has many features, but this can be a bit intimidating when you're first starting to use it. If you are familiar with other scientific computing languages, the following guides may be of use
Step3: And we can access an element via its index. To get the first element, we use an index of 0
Step4: To get the second element, we use an index of 1
Step5: And so on.
Step6: <div class="alert alert-info">
Step7: We can get elements from a NumPy array in exactly the same way as we get elements from a list
Step8: Array slicing
Step9: Now, to get all elements except the first
Step10: To get all elements except the last
Step11: To get all elements except the first and the last
Step12: To get every other element of the array (beginning from the first element)
Step13: To get every other element of the array (beginning from the second element)
Step14: And to reverse the array
Step15: Array computations
Step16: Or, you could use a list comprehension
Step17: <div class="alert alert-danger">
Step18: This won't work with normal lists. For example, if you ran mylist + 1, you'd get an error like this: TypeError: can only concatenate list (not "int") to list
Step19: Working with multiple arrays
Step20: With NumPy arrays, we just have to add the arrays together
Step21: <div class="alert alert-warning">
Step22: Just as when we are working with a single array, we can add, subtract, divide, multiply, etc. several arrays together
Step23: Creating and modifying arrays
Step24: However, you cannot do this with NumPy arrays. If you tried to run the following code, for example
Step25: <div class="alert alert-info">
Step26: "Vectorized" computations
Step27: Using NumPy arrays, we can just use the np.sum function
Step29: <div class="alert alert-info">
Step30: <div class="alert alert-warning">Remember that you need to execute the cell above (with your definition of <code>euclidean_distance</code>), and then run the cell below to check your answer. If you make changes to the cell with your answer, you will need to <i>first</i> re-run that cell, and <i>then</i> re-run the test cell to check your answer again.</div>
Step31: Creating multidimensional arrays
Step32: <div class="alert alert-info">
Step33: Note that for 1-D arrays, the shape returned by the shape attribute is still a tuple, even though it only has a length of one
Step34: This also means that we can create 1-D arrays by passing a length one tuple. Thus, the following two arrays are identical
Step35: <div class="alert alert-danger">There is a warning that goes with this, however
Step36: We can also create arrays and then reshape them into any shape, provided the new array has the same size as the old array
Step37: Accessing and modifying multidimensional array elements
Step38: We can also access the element on its own, without having the equals sign and the stuff to the right of it
Step39: We frequently will want to access ranges of elements. In NumPy, the first dimension (or axis) corresponds to the rows of the array, and the second axis corresponds to the columns. For example, to look at the first row of the array
Step40: To look at columns, we use the following syntax
Step41: The colon in the first position essentially means "select from every row". So, we can interpret arr[:, 1] as asking every row for its element at index 1, i.e. the second column.
Step42: <div class="alert alert-warning">Note
Step43: Note that arr and arr2 both have the same values! This is because the line arr2 = arr doesn't actually copy the array
Step45: Exercise
|
2,817
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from simmit import smartplus as sim
from simmit import identify as iden
import os
dir = os.path.dirname(os.path.realpath('__file__'))
x = np.arange(0,182,2)
path_data = dir + '/data/'
peak_file = 'Npeaks0.dat'
y = sim.get_densities(x, path_data, peak_file, False)
fig = plt.figure()
plt.grid(True)
plt.plot(x,y, c='black')
NPhases_file = dir + '/data/Nellipsoids0.dat'
NPhases = pd.read_csv(NPhases_file, delimiter=r'\s+', index_col=False, engine='python')
NPhases[::]
umat_name = 'MIMTN' #This is the 5 character code for the Mori-Tanaka homogenization for composites with a matrix and ellipsoidal reinforcments
nstatev = 0
nphases = 2 #The number of phases
num_file = 0 #The num of the file that contains the subphases
int1 = 20
int2 = 20
psi_rve = 0.
theta_rve = 0.
phi_rve = 0.
props = np.array([nphases, num_file, int1, int2, 0])
path_data = 'data'
path_results = 'results'
Nfile_init = 'Nellipsoids0.dat'
Nfile_disc = 'Nellipsoids1.dat'
nphases_rve = 36
num_phase_disc = 1
sim.ODF_discretization(nphases_rve, num_phase_disc, 0., 180., umat_name, props, path_data, peak_file, Nfile_init, Nfile_disc, 1)
NPhases_file = dir + '/data/Nellipsoids1.dat'
NPhases = pd.read_csv(NPhases_file, delimiter=r'\s+', index_col=False, engine='python')
#We plot here the five first phases
NPhases[:5]
#Plot the concentration and the angle
c, angle = np.loadtxt(NPhases_file, usecols=(4,5), skiprows=2, unpack=True)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
# the histogram of the data
xs = np.arange(0,180,5)
rects1 = ax1.bar(xs, c, width=5, color='r', align='center')
ax2.plot(x, y, 'b-')
ax1.set_xlabel('X data')
ax1.set_ylabel('Y1 data', color='g')
ax2.set_ylabel('Y2 data', color='b')
ax1.set_ylim([0,0.025])
ax2.set_ylim([0,0.25])
plt.show()
#plt.grid(True)
#plt.plot(angle,c, c='black')
plt.show()
#Run the simulation
pathfile = 'path.txt'
nphases = 37 #The number of phases
num_file = 1 #The num of the file that contains the subphases
props = np.array([nphases, num_file, int1, int2])
outputfile = 'results_MTN.txt'
sim.solver(umat_name, props, nstatev, psi_rve, theta_rve, phi_rve, path_data, path_results, pathfile, outputfile)
fig = plt.figure()
outputfile_macro = dir + '/' + path_results + '/results_MTN_global-0.txt'
e11, e22, e33, e12, e13, e23, s11, s22, s33, s12, s13, s23 = np.loadtxt(outputfile_macro, usecols=(8,9,10,11,12,13,14,15,16,17,18,19), unpack=True)
plt.grid(True)
plt.plot(e11,s11, c='black')
for i in range(8,12):
outputfile_micro = dir + '/' + path_results + '/results_MTN_global-0-' + str(i) + '.txt'
e11, e22, e33, e12, e13, e23, s11, s22, s33, s12, s13, s23 = np.loadtxt(outputfile_micro, usecols=(8,9,10,11,12,13,14,15,16,17,18,19), unpack=True)
plt.grid(True)
plt.plot(e11,s11, c='red')
plt.xlabel('Strain')
plt.ylabel('Stress (MPa)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this Python Notebook we will show how to properly run a simulation of a composite material, providing the ODF (orientation density function) of the reinforcements.
Step2: In the previous graph we can see a multi-peak ODF (peaks are modeled using PEARSONVII functions). It actually represents quite well the microstructure of injected plates. The general Pearson VII form is noted after this step list.
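For reference on Step2, a commonly used form of the Pearson VII peak profile (stated here as general background, not taken from the simmit sources) is
$$ f(x) = I_{max}\left[1 + \left(\frac{x-x_0}{w}\right)^2\left(2^{1/m}-1\right)\right]^{-m}, $$
where x_0 is the peak position, w a width parameter and m the shape exponent (m = 1 gives a Lorentzian profile, m -> infinity tends to a Gaussian).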
|
2,818
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'mpi-esm-1-2-hr', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
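# Illustrative sketch only (hypothetical entry, not a recorded model answer): each
# property above is answered by pairing its set_id call with one or more set_value
# calls drawn from the listed choices, e.g.
# DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# DOC.set_value("irradiance")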
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
2,819
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import bqplot.pyplot as plt  # plt.figure/plt.gridheatmap/plt.scales below come from bqplot's pyplot API
from bqplot import LinearScale
from ipywidgets import *
np.random.seed(0)
data = np.random.randn(10, 10)
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data)
fig
grid_map.display_format = ".2f"
grid_map.font_style = {"font-size": "16px", "fill": "blue", "font-weight": "bold"}
axes_options = {
"column": {"visible": False},
"row": {"visible": False},
"color": {"visible": False},
}
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data, axes_options=axes_options)
fig
fig = plt.figure(padding_y=0.0)
plt.scales(scales={"x": LinearScale(), "y": LinearScale(reverse=True)})
## The data along the rows is not uniform. Hence the 5th row (from top) of the map
## is twice the height of the remaining rows.
row_data = np.arange(10)
row_data[5:] = np.arange(6, 11)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
print(row_data.shape)
print(column_data.shape)
print(data.shape)
fig = plt.figure(padding_y=0.0)
plt.scales(scales={"x": LinearScale(), "y": LinearScale(reverse=True)})
row_data = np.arange(11)
column_data = np.arange(10, 21)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
fig = plt.figure(padding_y=0.0)
plt.scales(scales={"x": LinearScale(), "y": LinearScale(reverse=True, max=15)})
row_data = np.arange(10)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data)
fig
fig = plt.figure(padding_y=0.0)
plt.scales(scales={"x": LinearScale(), "y": LinearScale(reverse=True, min=-5, max=15)})
row_data = np.arange(10)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data, row_align="end")
fig
fig = plt.figure(padding_y=0.0)
plt.scales(scales={"x": LinearScale(), "y": LinearScale(reverse=True, min=-5, max=15)})
row_data = np.arange(9)
column_data = np.arange(10, 20)
grid_map = plt.gridheatmap(data, row=row_data, column=column_data, row_align="end")
fig
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(data, opacity=0.3, stroke="white", axes_options=axes_options)
fig
data = np.random.randn(10, 10)
fig = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(
data,
interactions={"click": "select"},
selected_style={"stroke": "blue", "stroke-width": 3},
axes_options=axes_options,
)
fig
grid_map.selected
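# Hedged sketch (assuming `grid_map.selected` holds [row, column] index pairs and is
# None when nothing is selected): map each selected cell back to its value in `data`.
selected_cells = grid_map.selected or []
print([data[r, c] for r, c in selected_cells])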
import numpy as np
from IPython.display import display
np.random.seed(0)
data = np.random.randn(10, 10)
figure = plt.figure(padding_y=0.0)
grid_map = plt.gridheatmap(
data,
interactions={"click": "select"},
selected_style={"stroke": "blue", "stroke-width": 3},
)
from ipywidgets import Output
out = Output()
@out.capture()
def print_event(self, target):
print(target)
# test
print_event(1, "test output")
grid_map.on_element_click(print_event)
display(figure)
display(out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Heat map
Step2: Hide tick_labels and color axis using 'axes_options'
Step3: Non Uniform Heat map
Step4: Alignment of the data with respect to the grid
Step5: By default, for N points along any dimension, data aligns to the start of the rectangles in the grid.
Step6: By changing the row_align and column_align properties, the grid can extend in the opposite direction
Step7: For N+1 points in any direction, the grid extends infinitely in both directions
Step8: Changing opacity and stroke
Step9: Selections on the grid map
Step10: The selected trait of a GridHeatMap contains a list of lists, with each sub-list containing the row and column index of a selected cell.
Step11: Registering on_element_click event handler
|
2,820
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.sparse as sps
from porepy.numerics.ad.forward_mode import Ad_array
import porepy.numerics.ad.functions as af
x = Ad_array(2, 1)
y = x**2 + 3
print('y value is: ', y.val)
print('dy/dx is: ', y.jac)
h = af.exp(y)
print('h value is: ', h.val)
print('dh/dx is: ', h.jac)
y = Ad_array(7, 4)
h = af.exp(y)
print('h value is: ', h.val)
print('dh/dx is: ', h.jac)
x = Ad_array(np.array([1,2,3]), sps.diags([1,1,1]))
A = sps.csc_matrix(np.array([[0,2,3],[4,0,6],[7,8,0]]))
y = A*x + x**2
print('Analytic y value: ')
print(np.array([14, 26, 32]))
print('Analytic y jacobian:')
print(np.array([[2,2,3],[4,4,6],[7,8,6]]),'\n')
print('Ad y value: ')
print(y.val)
print('Ad y jacobian:')
print(y.jac.A)
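# Hedged sanity check (added illustration, not part of the original example): compare
# the AD Jacobian of f(x) = A*x + x**2 against a forward finite-difference estimate.
def f(v):
    return A * v + v**2
x0 = np.array([1.0, 2.0, 3.0])
eps = 1e-6
fd_jac = np.column_stack([(f(x0 + eps * e) - f(x0)) / eps for e in np.eye(3)])
print('Finite-difference Jacobian (should be close to y.jac):')
print(fd_jac)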
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scalar AD-variables
Step2: We can now define a function $y=x^2 + 3$
Step3: To obtain the function value and the derivative we can call .val and .jac
Step4: $y$ is also an AD variable as a function of $x$. We can use it to declare further functions, e.g., $h(x) = e^{y(x)}$. To take the exponential of an Ad_array we need to call the exponential function found in the AD module
Step5: If we knew the value and jacobian of $y$ we could alternatively skip initiating $x$ and initiate $y$ directly
Step6: Arrays of AD-variables
Step7: As for the scalar case, it is straightforward to define functions using normal Python programming. Let us declare the function
|
2,821
|
<ASSISTANT_TASK:>
Python Code:
from lammps import IPyLammps
L = IPyLammps()
# 3d Lennard-Jones melt
L.units("lj")
L.atom_style("atomic")
L.atom_modify("map array")
L.lattice("fcc", 0.8442)
L.region("box block", 0, 4, 0, 4, 0, 4)
L.create_box(1, "box")
L.create_atoms(1, "box")
L.mass(1, 1.0)
L.velocity("all create", 1.44, 87287, "loop geom")
L.pair_style("lj/cut", 2.5)
L.pair_coeff(1, 1, 1.0, 1.0, 2.5)
L.neighbor(0.3, "bin")
L.neigh_modify("delay 0 every 20 check no")
L.fix("1 all nve")
L.variable("fx atom fx")
L.run(10)
L.image(zoom=1)
L.system
L.system.natoms
L.communication
L.fixes
L.computes
L.dumps
L.groups
L.variable("a index 2")
L.variables
L.variable("t equal temp")
L.variables
import sys
if sys.version_info < (3, 0):
# In Python 2 'print' is a restricted keyword, which is why you have to use the lmp_print function instead.
x = float(L.lmp_print('"${a}"'))
else:
# In Python 3 the print function can be redefined.
# x = float(L.print('"${a}"'))
# To avoid a syntax error in Python 2 executions of this notebook, this line is packed into an eval statement
x = float(eval("L.print('\"${a}\"')"))
x
L.variables['t'].value
L.eval("v_t/2.0")
L.variable("b index a b c")
L.variables['b'].value
L.eval("v_b")
L.variables['b'].definition
L.lmp.command('variable i loop 10')
L.variable("i loop 10")
L.variables['i'].value
L.next("i")
L.variables['i'].value
L.eval("ke")
L.atoms[0]
dir(L.atoms[0])
L.atoms[0].position
L.atoms[0].id
L.atoms[0].velocity
L.atoms[0].force
L.atoms[0].type
L.variables['fx'].value
L.runs
L.runs[0]
L.runs[0].thermo
L.runs[0].thermo
dir(L.runs[0].thermo)
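# Hedged sketch (assumes the thermo namespace exposes per-output lists such as
# Step and Temp, as PyLammps normally records): plot temperature against timestep.
import matplotlib.pyplot as plt
plt.plot(L.runs[0].thermo.Step, L.runs[0].thermo.Temp, 'o-')
plt.xlabel("Timestep")
plt.ylabel("Temperature")
plt.show()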
L = IPyLammps()
# enable command history
L.enable_cmd_history = True
# 3d Lennard-Jones melt
L.units("lj")
L.atom_style("atomic")
L.atom_modify("map array")
L.lattice("fcc", 0.8442)
L.region("box block", 0, 4, 0, 4, 0, 4)
L.create_box(1, "box")
L.create_atoms(1, "box")
L.mass(1, 1.0)
L.velocity("all create", 1.44, 87287, "loop geom")
L.pair_style("lj/cut", 2.5)
L.pair_coeff(1, 1, 1.0, 1.0, 2.5)
L.neighbor(0.3, "bin")
L.neigh_modify("delay 0 every 20 check no")
L.fix("1 all nve")
L.run(10)
# write LAMMPS input script with all commands executed so far (including implicit ones)
L.write_script("in.output")
!cat in.output
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize the initial state
Step2: Queries about LAMMPS simulation
Step3: Working with LAMMPS Variables
Step4: Accessing Atom data
Step5: Accessing thermo data
Step6: Saving session to as LAMMPS input file
|
2,822
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D, axes3d
fig, ax = plt.subplots(1, 1, subplot_kw={'projection': '3d'})
X, Y, Z = axes3d.get_test_data(0.05)
ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)
plt.show()
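# Hedged variant of the cell above (same test data): draw a filled surface instead
# of a wireframe.
fig2, ax2 = plt.subplots(1, 1, subplot_kw={'projection': '3d'})
ax2.plot_surface(X, Y, Z, rstride=10, cstride=10, cmap='viridis')
plt.show()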
from mpl_toolkits.axes_grid1 import AxesGrid
fig = plt.figure()
grid = AxesGrid(fig, 111, # similar to subplot(111)
nrows_ncols = (2, 2),
axes_pad = 0.2,
share_all=True,
label_mode = "L", # similar to "label_outer"
cbar_location = "right",
cbar_mode="single",
)
extent = (-3,4,-4,3)
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
grid.cbar_axes[0].colorbar(im)
plt.show()
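# Hedged variant (same Z and extent): give each panel its own colorbar instead of a
# single shared one by switching cbar_mode to "each".
fig = plt.figure()
grid = AxesGrid(fig, 111,
                nrows_ncols=(2, 2),
                axes_pad=0.4,
                cbar_location="right",
                cbar_mode="each",
                cbar_pad=0.05)
for i in range(4):
    im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
    grid.cbar_axes[i].colorbar(im)
plt.show()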
# %load http://matplotlib.org/mpl_examples/axes_grid/demo_parasite_axes2.py
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
fig = plt.figure()
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(0, 2)
host.set_ylim(0, 2)
host.set_xlabel("Distance")
host.set_ylabel("Density")
par1.set_ylabel("Temperature")
par2.set_ylabel("Velocity")
p1, = host.plot([0, 1, 2], [0, 1, 2], label="Density")
p2, = par1.plot([0, 1, 2], [0, 3, 2], label="Temperature")
p3, = par2.plot([0, 1, 2], [50, 30, 15], label="Velocity")
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
#plt.savefig("Test")
%load http://matplotlib.org/mpl_toolkits/axes_grid/examples/demo_floating_axes.py
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
import numpy as np
import mpl_toolkits.axisartist.angle_helper as angle_helper
from matplotlib.projections import PolarAxes
from mpl_toolkits.axisartist.grid_finder import FixedLocator, MaxNLocator, \
DictFormatter
def setup_axes1(fig, rect):
    """A simple one."""
tr = Affine2D().scale(2, 1).rotate_deg(30)
grid_helper = floating_axes.GridHelperCurveLinear(tr, extremes=(0, 4, 0, 4))
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
fig.add_subplot(ax1)
aux_ax = ax1.get_aux_axes(tr)
grid_helper.grid_finder.grid_locator1._nbins = 4
grid_helper.grid_finder.grid_locator2._nbins = 4
return ax1, aux_ax
def setup_axes2(fig, rect):
    """With custom locator and formatter.
    Note that the extreme values are swapped.
    """
#tr_scale = Affine2D().scale(np.pi/180., 1.)
tr = PolarAxes.PolarTransform()
pi = np.pi
angle_ticks = [(0, r"$0$"),
(.25*pi, r"$\frac{1}{4}\pi$"),
(.5*pi, r"$\frac{1}{2}\pi$")]
grid_locator1 = FixedLocator([v for v, s in angle_ticks])
tick_formatter1 = DictFormatter(dict(angle_ticks))
grid_locator2 = MaxNLocator(2)
grid_helper = floating_axes.GridHelperCurveLinear(tr,
extremes=(.5*pi, 0, 2, 1),
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=None,
)
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
fig.add_subplot(ax1)
# create a parasite axes whose transData in RA, cz
aux_ax = ax1.get_aux_axes(tr)
aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax
ax1.patch.zorder=0.9 # but this has a side effect that the patch is
# drawn twice, and possibly over some other
# artists. So, we decrease the zorder a bit to
# prevent this.
return ax1, aux_ax
def setup_axes3(fig, rect):
    """Sometimes, things like axis_direction need to be adjusted."""
# rotate a bit for better orientation
tr_rotate = Affine2D().translate(-95, 0)
# scale degree to radians
tr_scale = Affine2D().scale(np.pi/180., 1.)
tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()
grid_locator1 = angle_helper.LocatorHMS(4)
tick_formatter1 = angle_helper.FormatterHMS()
grid_locator2 = MaxNLocator(3)
ra0, ra1 = 8.*15, 14.*15
cz0, cz1 = 0, 14000
grid_helper = floating_axes.GridHelperCurveLinear(tr,
extremes=(ra0, ra1, cz0, cz1),
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1,
tick_formatter2=None,
)
ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)
fig.add_subplot(ax1)
# adjust axis
ax1.axis["left"].set_axis_direction("bottom")
ax1.axis["right"].set_axis_direction("top")
ax1.axis["bottom"].set_visible(False)
ax1.axis["top"].set_axis_direction("bottom")
ax1.axis["top"].toggle(ticklabels=True, label=True)
ax1.axis["top"].major_ticklabels.set_axis_direction("top")
ax1.axis["top"].label.set_axis_direction("top")
ax1.axis["left"].label.set_text(r"cz [km$^{-1}$]")
ax1.axis["top"].label.set_text(r"$\alpha_{1950}$")
# create a parasite axes whose transData in RA, cz
aux_ax = ax1.get_aux_axes(tr)
aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax
ax1.patch.zorder=0.9 # but this has a side effect that the patch is
# drawn twice, and possibly over some other
# artists. So, we decrease the zorder a bit to
# prevent this.
return ax1, aux_ax
if 1:
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(8, 4))
fig.subplots_adjust(wspace=0.3, left=0.05, right=0.95)
ax1, aux_ax2 = setup_axes1(fig, 131)
aux_ax2.bar([0, 1, 2, 3], [3, 2, 1, 3])
#theta = np.random.rand(10) #*.5*np.pi
#radius = np.random.rand(10) #+1.
#aux_ax1.scatter(theta, radius)
ax2, aux_ax2 = setup_axes2(fig, 132)
theta = np.random.rand(10)*.5*np.pi
radius = np.random.rand(10)+1.
aux_ax2.scatter(theta, radius)
ax3, aux_ax3 = setup_axes3(fig, 133)
theta = (8 + np.random.rand(10)*(14-8))*15. # in degrees
radius = np.random.rand(10)*14000.
aux_ax3.scatter(theta, radius)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: axes_grid1
Step2: This next feature is commonly requested on the mailing lists. The problem is that most people who request it don't quite know how to describe it. We call it "Parasite Axes".
Step6: And finally, as a nice teaser of what else axes_grid1 can do...
|
2,823
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """Download file from <url>
    :param url: URL to file
    :param file: Local file path
    """
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """Uncompress features and labels from a zip file
    :param file: The zip file to extract the data from
    """
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data, a=0.1, b=0.9):
    """Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
# TODO: Implement Min-Max scaling for grayscale image data
x_min = np.amin(image_data)
x_max = np.amax(image_data)
return a + (image_data - x_min) * (b - a) / (x_max - x_min)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
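# Hedged sanity check (extra illustration): after Min-Max scaling the feature values
# should lie within [0.1, 0.9].
print('train_features range: [{:.3f}, {:.3f}]'.format(train_features.min(), train_features.max()))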
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
print(test_labels.shape)
print(train_labels.shape)
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
train_features[:10]
train_features.shape
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
n_input = 784
n_classes = 10
features = tf.placeholder(tf.float32, [None, n_input])
labels = tf.placeholder(tf.float32, [None, n_classes])
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([n_input, n_classes]))
biases = tf.Variable(tf.zeros([n_classes]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 1
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: [Image: Mean_Variance_Image.png]
Step6: Checkpoint
Step7: Problem 2
Step8: [Image: Learn_Rate_Tune_Image.png]
Step9: Test
|
2,824
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
2,825
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
!pip install -q imageio
!pip install -q opencv-python
!pip install -q git+https://github.com/tensorflow/docs
#@title Import the necessary modules
# TensorFlow and TF-Hub modules.
from absl import logging
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_docs.vis import embed
logging.set_verbosity(logging.ERROR)
# Some modules to help with reading the UCF101 dataset.
import random
import re
import os
import tempfile
import ssl
import cv2
import numpy as np
# Some modules to display an animation using imageio.
import imageio
from IPython import display
from urllib import request # requires python3
#@title Helper functions for the UCF101 dataset
# Utilities to fetch videos from UCF101 dataset
UCF_ROOT = "https://www.crcv.ucf.edu/THUMOS14/UCF101/UCF101/"
_VIDEO_LIST = None
_CACHE_DIR = tempfile.mkdtemp()
# As of July 2020, crcv.ucf.edu doesn't use a certificate accepted by the
# default Colab environment anymore.
unverified_context = ssl._create_unverified_context()
def list_ucf_videos():
# Lists videos available in the UCF101 dataset.
global _VIDEO_LIST
if not _VIDEO_LIST:
index = request.urlopen(UCF_ROOT, context=unverified_context).read().decode("utf-8")
videos = re.findall("(v_[\w_]+\.avi)", index)
_VIDEO_LIST = sorted(set(videos))
return list(_VIDEO_LIST)
def fetch_ucf_video(video):
Fetchs a video and cache into local filesystem.
cache_path = os.path.join(_CACHE_DIR, video)
if not os.path.exists(cache_path):
urlpath = request.urljoin(UCF_ROOT, video)
print("Fetching %s => %s" % (urlpath, cache_path))
data = request.urlopen(urlpath, context=unverified_context).read()
open(cache_path, "wb").write(data)
return cache_path
# Utilities to open video files using CV2
def crop_center_square(frame):
y, x = frame.shape[0:2]
min_dim = min(y, x)
start_x = (x // 2) - (min_dim // 2)
start_y = (y // 2) - (min_dim // 2)
return frame[start_y:start_y+min_dim,start_x:start_x+min_dim]
def load_video(path, max_frames=0, resize=(224, 224)):
cap = cv2.VideoCapture(path)
frames = []
try:
while True:
ret, frame = cap.read()
if not ret:
break
frame = crop_center_square(frame)
frame = cv2.resize(frame, resize)
frame = frame[:, :, [2, 1, 0]]
frames.append(frame)
if len(frames) == max_frames:
break
finally:
cap.release()
return np.array(frames) / 255.0
def to_gif(images):
converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
imageio.mimsave('./animation.gif', converted_images, fps=25)
return embed.embed_file('./animation.gif')
#@title Get the kinetics-400 labels
# Get the kinetics-400 action labels from the GitHub repository.
KINETICS_URL = "https://raw.githubusercontent.com/deepmind/kinetics-i3d/master/data/label_map.txt"
with request.urlopen(KINETICS_URL) as obj:
labels = [line.decode("utf-8").strip() for line in obj.readlines()]
print("Found %d labels." % len(labels))
# Get the list of videos in the dataset.
ucf_videos = list_ucf_videos()
categories = {}
for video in ucf_videos:
category = video[2:-12]
if category not in categories:
categories[category] = []
categories[category].append(video)
print("Found %d videos in %d categories." % (len(ucf_videos), len(categories)))
for category, sequences in categories.items():
summary = ", ".join(sequences[:2])
print("%-20s %4d videos (%s, ...)" % (category, len(sequences), summary))
# Get a sample cricket video.
video_path = fetch_ucf_video("v_CricketShot_g04_c02.avi")
sample_video = load_video(video_path)
sample_video.shape
i3d = hub.load("https://tfhub.dev/deepmind/i3d-kinetics-400/1").signatures['default']
def predict(sample_video):
# Add a batch axis to the sample video.
model_input = tf.constant(sample_video, dtype=tf.float32)[tf.newaxis, ...]
logits = i3d(model_input)['default'][0]
probabilities = tf.nn.softmax(logits)
print("Top 5 actions:")
for i in np.argsort(probabilities)[::-1][:5]:
print(f" {labels[i]:22}: {probabilities[i] * 100:5.2f}%")
predict(sample_video)
!curl -O https://upload.wikimedia.org/wikipedia/commons/8/86/End_of_a_jam.ogv
video_path = "End_of_a_jam.ogv"
sample_video = load_video(video_path)[:100]
sample_video.shape
to_gif(sample_video)
predict(sample_video)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Action recognition with an Inflated 3D (I3D) CNN
Step4: Using the UCF101 dataset
Step5: Run the I3D model and print the top 5 action predictions.
Step6: Now, try a new video, available at: https
|
2,826
|
<ASSISTANT_TASK:>
Python Code:
import fb_scraper.prodcons
APP_ID = ''
APP_ID_SECRET = ''
ACCESS_TOKEN = ''
mgr = fb_scraper.prodcons.Manager(
access_token=ACCESS_TOKEN,
api_key=APP_ID,
api_secret=APP_ID_SECRET
)
mgr.graph.extend_token()
mgr.start()
mgr.scrape_post('XXXXXXXXXXXXXX') # Where 'XXXXXXXXXXXXXXX' is the FULL post ID, i.e. GROUPID_POSTID
mgr.scrape_group('XXXXXXXXXXXXXX') # Where 'XXXXXXXXXXXXXXX' is the Group ID
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Producer/Consummer Manager
Step2: Extending ACCESS_TOKEN
Step3: Start scraping threads
Step4: Add scraping jobs
|
2,827
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the exercises below (not included in this excerpt).
import numpy as np
import tensorflow as tf

# Q1. Create a graph
g = ...
with g.as_default():
# Define inputs
with tf.name_scope("inputs"):
a = tf.constant(2, tf.int32, name="a")
b = tf.constant(3, tf.int32, name="b")
# Ops
with tf.name_scope("ops"):
c = tf.multiply(a, b, name="c")
d = tf.add(a, b, name="d")
e = tf.subtract(c, d, name="e")
# Q2. Start a session
sess = ...
# Q3. Fetch c, d, e
_c, _d, _e = ...
print("c =", _c)
print("d =", _d)
print("e =", _e)
# Close the session
sess.close()
tf.reset_default_graph()
# Define inputs
a = tf.Variable(tf.random_uniform([]))
b_pl = tf.placeholder(tf.float32, [None])
# Ops
c = a * b_pl
d = a + b_pl
e = tf.reduce_sum(c)
f = tf.reduce_mean(d)
g = e - f
# initialize variable(s)
init = tf.global_variables_initializer()
# Update variable
update_op = tf.assign(a, a + g)
# Q4. Create a (summary) writer to `asset`
writer = ...
#Q5. Add `a` to summary.scalar
...
#Q6. Add `c` and `d` to summary.histogram
...
#Q7. Merge all summaries.
summaries = ...
# Start a session
sess = tf.Session()
# Initialize Variable(s)
sess.run(init)
# Fetch the value of c, d, and e.
for step in range(5):
_b = np.arange(10, dtype=np.float32)
_, summaries_proto = sess.run([update_op, summaries], {b_pl:_b})
# Q8. Attach summaries_proto to TensorBoard.
...
# Close the session
sess.close()
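# --- One possible completion of the Q1-Q8 blanks above, for reference only; the blanks are
# --- deliberately left in place so the exercise can still be attempted. Assumes the TF 1.x API.
# Q1: g = tf.Graph()
# Q2: sess = tf.Session(graph=g)
# Q3: _c, _d, _e = sess.run([c, d, e])
# Q4: writer = tf.summary.FileWriter("asset", tf.get_default_graph())
# Q5: tf.summary.scalar("a", a)
# Q6: tf.summary.histogram("c", c); tf.summary.histogram("d", d)
# Q7: summaries = tf.summary.merge_all()
# Q8: writer.add_summary(summaries_proto, global_step=step)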
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q4-8. You are to implement the graph below. Complete the code.
|
2,828
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
#%config InlineBackend.figure_format = 'pdf'
import freqopttest.tst as tst
import kgof
import kgof.data as data
import kgof.density as density
import kgof.goftest as gof
import kgof.intertst as tgof
import kgof.kernel as ker
import kgof.util as util
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
# font options
font = {
#'family' : 'normal',
#'weight' : 'bold',
'size' : 18
}
plt.rc('font', **font)
plt.rc('lines', linewidth=2)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# true p
seed = 20
d = 1
# sample
n = 800
alpha = 0.05
# number of test locations to use
J = 2
mean = np.zeros(d)
variance = 1
p = density.IsotropicNormal(mean, variance)
q_mean = mean.copy()
q_variance = variance
# q_mean[0] = 1
# ds = data.DSIsotropicNormal(q_mean, q_variance)
q_means = np.array([ [0], [0]])
q_variances = np.array([0.01, 1])
ds = data.DSIsoGaussianMixture(q_means, q_variances, pmix=[0.2, 0.8])
# ds = data.DSIsoGaussianMixture(p_means, p_variances)
dat = ds.sample(n, seed=seed+2)
tr, te = dat.split_tr_te(tr_proportion=0.5, seed=2)
# Test
Xtr = tr.data()
sig2 = util.meddistance(Xtr, subsample=1000)**2
# random test locations
V0 = util.fit_gaussian_draw(Xtr, J, seed=seed+1)
me_rand = tgof.GaussMETest(p, sig2, V0, alpha=alpha, seed=seed)
me_rand_result = me_rand.perform_test(te)
me_rand_result
#kstein.compute_stat(dat)
op = {'n_test_locs': J, 'seed': seed+5, 'max_iter': 200,
'batch_proportion': 1.0, 'locs_step_size': 1.0,
'gwidth_step_size': 0.1, 'tol_fun': 1e-4}
# optimize on the training set
me_opt = tgof.GaussMETestOpt(p, n_locs=J, tr_proportion=0.5, alpha=alpha, seed=seed+1)
# Give the ME test the full data. Internally the data are divided into tr and te.
me_opt_result = me_opt.perform_test(dat, op)
me_opt_result
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test with random test locations
Step2: Test with optimized test locations
|
2,829
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline as spline
from pathlib import Path
%load_ext autoreload
%autoreload 2
hmcode_dir = Path("/home/steven/Documents/Projects/halos/HALOMOD/other-codes/HMcode")
def read_power(fname):
# Each column is the power at a different redshift.
with open(fname, 'r') as fl:
line = fl.readline().split("#####")[-1].split(' ')[1:]
redshifts = [float(x) for x in line]
data = np.genfromtxt(fname, skip_header=1)
k = data[:, 0]
return k, redshifts, data[:, 1:]
k, redshifts, data = read_power(hmcode_dir / 'power.dat')
from halomod import DMHaloModel
hm = DMHaloModel(
exclusion_model=None,
sd_bias_model=None,
transfer_model="EH_BAO",
cosmo_params={
"Tcmb0":2.725, # Line 596
'Om0': 0.3, # Line 587
'Ob0': 0.05, # Line 589
'H0': 70.0 # Line 591
},
hc_spectrum="linear",
halo_concentration_model="Bullock01",
halo_concentration_params={"K":4, "F":0.01}, # Line 376
hmf_model="SMT",
sigma_8 = 0.8, # Line 593
n = 0.96, # Line 594
Mmin=2, # Line 795
Mmax=18, # Line 796,
lnk_min = np.log(1e-3), # Line 50
lnk_max = np.log(1e2), # Line 51
dlnk = np.log(1e2/1e-3) / 129, # Line 52
dlog10m = 16 / 256,
mdef_model='SOMean',
disable_mass_conversion=True
)
fig,ax = plt.subplots(2,1,sharex=True,subplot_kw={"xscale":'log',"xlim":(1e-3,1e4)}, figsize=(13, 7))
ax[0].plot(hm.k,hm.power_auto_matter * hm.k**3/ (2*np.pi**2),label="tot")
ax[0].plot(hm.k,hm.power_1h_auto_matter * hm.k**3/ (2*np.pi**2),label='1h')
ax[0].plot(hm.k,hm.power_2h_auto_matter * hm.k**3/ (2*np.pi**2),label="2h")
ax[0].plot(k,data[:,0],label="hmcode")
ax[0].set_yscale('log')
ax[0].set_ylim((1e-16,1e5))
ax[0].legend(loc=0)
spl = spline(np.log(hm.k),np.log(hm.power_auto_matter*hm.k**3/(2*np.pi**2)))
ax[1].plot(k,np.exp(spl(np.log(k)))/data[:,0] -1)
ax[1].grid(True)
hmc_intermediate = np.genfromtxt("/home/steven/Documents/Projects/halos/HALOMOD/other-codes/HMcode/mass_data.dat")
m = hmc_intermediate[0]
def get(thing, iz=0):
things = ['rv', 'nu', 'rr', 'sig', 'sigf', 'zc', 'c', 'gnu']
indx = iz * len(things) + 1 + things.index(thing)
return hmc_intermediate[indx]
# Redshift 0
r = spline(hm.m, hm.radii)(m)
plt.plot(m, r / get('rr')-1)
plt.xscale('log')
sig = spline(hm.m, hm.sigma)(m)
plt.plot(m, sig/get('sig') -1)
# There's an "rsplit" parameter of 1e-2 in HMcode where the integral is treated differently
# Maybe it corresponds to the break?
plt.axvline(m[np.where(r > 1e-2)[0][0]])
plt.xscale('log')
hm.update(lnk_max=np.log(1e5))
sig = spline(hm.m, hm.sigma)(m)
plt.plot(m, sig/get('sig') -1)
# There's an "rsplit" parameter of 1e-2 in HMcode where the integral is treated differently
# Maybe it corresponds to the break?
plt.axvline(m[np.where(r > 1e-2)[0][0]])
plt.xscale('log')
nu = np.sqrt(spline(hm.m, hm.nu)(m))
plt.plot(m, nu/get('nu')-1)
plt.xscale('log')
hmc_growth = np.genfromtxt("/home/steven/Documents/Projects/halos/HALOMOD/other-codes/HMcode/growth_data.dat")
zz = 1/hmc_growth[:,0] -1
hm_growth_fn = hm.growth.growth_factor_fn()
hm.update(growth_params={"dlna": 0.01, "amin": 1e-12})
from scipy.integrate import quad
intg = lambda z: (1 + z)/hm.cosmo.H(z).value**3
g0 = quad(intg, 0, np.inf)[0]
growths = []
for i, z in enumerate(zz):
growths.append(quad(intg, z, np.inf)[0] * hm.cosmo.H(z).value/(g0 * hm.cosmo.H(0).value))
hm_growths = np.array([hm.growth.growth_factor(z) for z in zz])
plt.plot(zz, hm_growths/hmc_growth[:, 1] -1)
#plt.plot(zz, [hm.growth.growth_factor(z) for z in zz])
#plt.plot(zz, growths)
plt.xscale('log')
#plt.yscale('log')
plt.plot(zz, hm_growths/hmc_growth[:, 1] -1)
#plt.plot(zz, [hm.growth.growth_factor(z) for z in zz])
#plt.plot(zz, growths)
plt.xscale('log')
plt.xlim(1e-2, 10)
plt.ylim(-0.02, 0.02)
#plt.yscale('log')
nuf = 1.686 / get('sigf')
r = hm.halo_concentration.filter.mass_to_radius(hm.halo_concentration.params["F"] * m, hm.halo_concentration.mean_density0)
hm_nu = hm.halo_concentration.filter.nu(r, 1.686)
hm_nu = np.sqrt(spline(hm.m, hm_nu)(m))
plt.plot(m, hm_nu/nuf - 1)
plt.xscale('log')
zc = spline(hm.m, hm.halo_concentration.zc(hm.m))(m)
print("Maximum collapse redshift: ", zc.max())
plt.plot(m, zc/get('zc')-1)
plt.xscale('log')
plt.ylim(-0.01,0.01)
c = spline(hm.m, hm.cmz_relation)(m)
plt.plot(m, c/get('c')-1)
plt.xscale('log')
k, _, data_lin = read_power(hmcode_dir / 'power_linear.dat')
spl = spline(hm.k,hm.delta_k)
plt.plot(k, np.abs(spl(k)/data_lin[:,0] -1) )
plt.xscale('log')
plt.yscale('log')
k, _, data_2h = read_power(hmcode_dir / 'power_2halo.dat')
spl = spline(hm.k, hm.power_2h_auto_matter * hm.k**3 / (2*np.pi**2))
plt.plot(k,np.abs(spl(k)/data_2h[:, 0] -1 ) )
plt.xscale('log')
plt.yscale('log')
gnu = spline(hm.m, hm.fsigma / np.sqrt(hm.nu))(m)
plt.plot(m, gnu /get('gnu') - 1)
plt.xscale('log')
plt.ylim(-.01,.01)
plt.plot(hm.m, hm.halo_profile._halo_mass_to_radius(m)/get('rv') - 1)
plt.xscale('log')
ukm = np.genfromtxt(hmcode_dir / 'ukm.dat')
with open(hmcode_dir / '1h_integrand.dat') as fl:
kk = float(fl.readline().split('=')[-1].strip())
hm_ukm = hm.halo_profile.u(kk, m)
plt.plot(m, hm_ukm/ukm[0] - 1)
plt.xscale('log')
k, _, data_1h = read_power(hmcode_dir / 'power_1halo.dat')
spl = spline(hm.k,hm.power_1h_auto_matter * hm.k**3 / (2*np.pi**2))
plt.plot(k,np.abs(spl(k)/data_1h[:, 0]-1))
plt.xscale('log')
plt.yscale('log')
fig,ax = plt.subplots(2,1,sharex=True,subplot_kw={"xscale":'log',"xlim":(1e-3,1e4)}, figsize=(13, 7))
ax[0].plot(hm.k,hm.power_auto_matter * hm.k**3/ (2*np.pi**2),label="tot")
ax[0].plot(hm.k,hm.power_1h_auto_matter * hm.k**3/ (2*np.pi**2),label='1h')
ax[0].plot(hm.k,hm.power_2h_auto_matter * hm.k**3/ (2*np.pi**2),label="2h")
ax[0].plot(k,data[:,0],label="hmcode")
ax[0].set_yscale('log')
ax[0].set_ylim((1e-16,1e5))
ax[0].legend(loc=0)
spl = spline(np.log(hm.k),np.log(hm.power_auto_matter*hm.k**3/(2*np.pi**2)))
ax[1].plot(k,np.exp(spl(np.log(k)))/data[:,0] -1)
ax[1].grid(True)
hm.update(z=4)
fig,ax = plt.subplots(2,1,sharex=True,subplot_kw={"xscale":'log',"xlim":(1e-3,1e4)}, figsize=(13, 7))
ax[0].plot(hm.k,hm.power_auto_matter * hm.k**3/ (2*np.pi**2),label="tot")
ax[0].plot(hm.k,hm.power_1h_auto_matter * hm.k**3/ (2*np.pi**2),label='1h')
ax[0].plot(hm.k,hm.power_2h_auto_matter * hm.k**3/ (2*np.pi**2),label="2h")
ax[0].plot(k,data[:,-1],label="hmcode")
ax[0].set_yscale('log')
ax[0].set_ylim((1e-16,1e5))
ax[0].legend(loc=0)
spl = spline(np.log(hm.k),np.log(hm.power_auto_matter*hm.k**3/(2*np.pi**2)))
ax[1].plot(k,np.exp(spl(np.log(k)))/data[:,-1] -1)
ax[1].grid(True)
r = spline(hm.m, hm.radii)(m)
plt.plot(m, r / get('rr', iz=15)-1)
plt.xscale('log')
sig = spline(hm.m, hm.sigma)(m)
plt.plot(m, sig/get('sig',iz=15) -1)
plt.xscale('log')
nu = np.sqrt(spline(hm.m, hm.nu)(m))
plt.plot(m, nu/get('nu', iz=15)-1)
plt.xscale('log')
nuf = 1.686 / get('sigf', iz=15)
r = hm.halo_concentration.filter.mass_to_radius(hm.halo_concentration.params["F"] * m, hm.halo_concentration.mean_density0)
hm_nu = hm.halo_concentration.filter.nu(r, 1.686) / hm.growth_factor**2
hm_nu = np.sqrt(spline(hm.m, hm_nu)(m))
plt.plot(m, hm_nu)
plt.plot(m, nuf)
plt.xscale('log')
plt.plot(m, hm_nu/nuf - 1)
plt.xscale('log')
zc = hm.halo_concentration.zc(m, z=4)
print("Maximum collapse redshift: ", zc.max())
plt.plot(m, zc/get('zc',iz=15)-1)
plt.xscale('log')
#plt.ylim(-0.05,0.05)
c = spline(hm.m, hm.cmz_relation)(m)
plt.plot(m, c/get('c', iz=15)-1)
plt.xscale('log')
spl = spline(hm.k, hm.power_2h_auto_matter * hm.k**3 / (2*np.pi**2))
plt.plot(k,np.abs(spl(k)/data_2h[:, -1] -1 ) )
plt.xscale('log')
plt.yscale('log')
gnu = spline(hm.m, hm.fsigma / np.sqrt(hm.nu))(m)
plt.plot(m, gnu /get('gnu',iz=15) - 1)
plt.xscale('log')
plt.ylim(-.05,.05)
plt.plot(hm.m, hm.halo_profile._halo_mass_to_radius(m)/get('rv',iz=15) - 1)
plt.xscale('log')
hm_ukm = hm.halo_profile.u(kk, m)
plt.plot(m, hm_ukm/ukm[-1] - 1)
plt.xscale('log')
spl = spline(hm.k,hm.power_1h_auto_matter * hm.k**3 / (2*np.pi**2))
plt.plot(k,np.abs(spl(k)/data_1h[:, -1]-1))
plt.xscale('log')
plt.yscale('log')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run HMcode
Step2: Make halomod model
Step3: The big picture (1h+2h)
Step4: Intermediate Products
Step5: Mass to Radius
Step6: sigma
Step7: We see that $\sigma$ is quite different at low masses. HMCode uses a "rapidising" function (Line 1839) to make the integral faster -- potentially that is causing problems at low mass. On the other hand, maybe hmf is doing the wrong thing?
Step8: This fixes the issue. So even though HMCode seems to be able to get away with using a smaller k-range, hmf cannot.
Step9: Growth Function
Step10: Though there is some discrepancy for large redshifts (>100), the discrepancy seems to be in favour of hmf (when compared to pure quadrature integration) and anyway, discrepancy is well under 5% for redshifts actually used (as collapse redshift).
Step11: Sig-f
Step12: Collapse Redshift
Step13: Concentration
Step14: 2-halo
Step15: I think we can be pretty confident that our linear power spectrum is lining up, to within 0.06%
Step16: As we would hope, this is precisely the same plot as for the linear power. There are some weird things around the scale of the BAO peak, which may even come from our spline interpolation, but things are pretty close overall.
Step17: Virial Radius
Step18: Halo Profile (u)
Step19: 1-halo
Step20: Now, the way that HMCode is written is a bit confusing on the face of it, but that's because it integrates over $\nu$ instead of $m$ directly. It turns out this is actually a little easier than integrating over $m$. Here's the math for posterity (a reconstruction of the change of variables is given just after this step list)
Step21: There is a 60% difference at small scales here... !!??
Step22: High Redshift
Step23: Seems to be just the 1-halo term which hasn't evolved properly. Let's look at the bits again.
Step24: Sigma
Step25: This is obviously off by a fraction of a percent... not sure if that's worth worrying about.
Step26: Sig-f
Step27: Collapse Redshift
Step28: Concentration
Step29: 2-halo
Step30: Mass Function
Step31: The difference here seems to be due to slight differences in $\nu$ (of 0.5%) which get blown up in the exponential of the mass function
Step32: Ukm
Step33: 1-halo
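The change of variables referred to in Step 20 is not shown in this excerpt; what follows is a reconstruction of the standard halo-model substitution, not text taken from HMcode itself. With peak height $\nu = \delta_c/\sigma(m)$, the mass function can be rewritten as $n(m)\,\mathrm{d}m = \frac{\bar{\rho}}{m}\,g(\nu)\,\mathrm{d}\nu$, so the 1-halo term becomes

$$P_{1h}(k) = \int_0^\infty \left(\frac{m}{\bar{\rho}}\right)^2 |u(k|m)|^2\, n(m)\,\mathrm{d}m = \int_0^\infty \frac{m(\nu)}{\bar{\rho}}\, |u(k|m(\nu))|^2\, g(\nu)\,\mathrm{d}\nu .$$

This is also why the comparison cells in the code above use `hm.fsigma / np.sqrt(hm.nu)`: since `hm.nu` evidently stores $\nu^2$ there, that ratio is $f(\sigma)/\nu = g(\nu)$, the quantity HMcode calls `gnu`. The $k$-range sensitivity noted in Step 7 follows from $\sigma^2(R) = \frac{1}{2\pi^2}\int_0^\infty k^2 P(k) W^2(kR)\,\mathrm{d}k$, which for small $R$ (low mass) only converges once $k_{\max} \gg 1/R$.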
|
2,830
|
<ASSISTANT_TASK:>
Python Code:
# Imports for this notebook are not included in this excerpt; the module names below are
# inferred from how they are used later (qubit.Qubit, cpwtools.CPW, inductiveCoupling, ...).
import numpy as np
from copy import deepcopy
from scipy.constants import e, hbar, pi
import qubit
import cpwtools
import inductiveCoupling

ri = (40, 50, 60, 70, 80, 90, 100, 108)
ro = (40.7, 52.9, 69, 90.5, 123, 182, 305, 500)
Cq = (46.3, 47.0, 46.9, 47.0, 47.0, 47.0, 46.9, 46.9)
#Cq = (46.3, 49.5, 46.9, 49.5, 47.0, 51.7, 46.9, 54)
Cg = (1.5, 1.44, 1.47, 1.45, 1.46, 1.49, 1.42, 1.48)
Cgnd = (39.0, 30.9, 32.8, 35.6, 41.9, 54.1, 74.0, 108)
r_xy = (np.nan, 110, np.nan, 175, np.nan, 290, np.nan, 560)
qubits = []
for i in range(len(ri)):
q = qubit.Qubit('OCSQubit{:03}'.format(ri[i]))
q.C_g = Cg[i]*1e-15
q.C_q = Cq[i]*1e-15
q.C_resToGnd = Cgnd[i]*1e-15
q.cpw_length = (700-(ro[i]+15+20+1*ri[i]))*1e-6 # ro + d + 2*g + m*ri
qubits.append(q)
selectedQubits = [deepcopy(qubits[i]) for i in (5, 1, 7, 3)]
# The qubits that are actually to be used
ri = (108, 108, 108, 108)
ro = (500, 500, 500, 500)
Cq = (46.9, 46.9, 46.9, 46.9)
Cg = (1.48, 1.48, 1.48, 1.48)
Cgnd = (108, 108, 108, 108)
r_xy = (560, 560, 560, 560)
selected_indices = (0, 1, 2, 3)
qubits = []
for i in range(len(ri)):
    q = qubit.Qubit('OCSQubit{:03}'.format(ri[i]))
    q.C_g = Cg[i]*1e-15
    q.C_q = Cq[i]*1e-15
    q.C_resToGnd = Cgnd[i]*1e-15
    q.cpw_length = (700-(ro[i]+15+20+1*ri[i]))*1e-6 # ro + d + 2*g + m*ri
    qubits.append(q)
# Later cells iterate over selectedQubits and access Qubit attributes,
# so store the Qubit objects themselves rather than bare indices.
selectedQubits = [qubits[i] for i in selected_indices]
# calculate the capacitance from the anharm
[1e15*(e**2/2/a)/2e6/pi/hbar for a in (406,410,362,746)]
cpw = cpwtools.CPW(material='al', w=10., s=7.)
print cpw
quarter_curve = 2*pi*50/4
coupling_length = 200 # this coupling length varies depending on desired Qc.
tot_length = quarter_curve*(1+2+2+2+2) + 2*850 + (1335-700) + 200 + 350 + coupling_length
def L4FromQubit(q):
L4 = cpwtools.QuarterLResonator(cpw, tot_length)
seriesCap = q.C_g*q.C_q/(q.C_g+q.C_q)
L4.addCapacitiveCoupling('g', seriesCap, Z0 = 0)
L4.addCapacitiveCoupling('c_coupler', q.C_resToGnd, Z0 = 0)
return L4
f0 = 6.0e9
print('How much do the capacitors and change in res length of the qubits pull the resonance?')
print('{:>11} {:>9} {:>8} {:>8}'.format('', 'length', 'f_l', 'C_r'))
L4 = L4FromQubit(qubits[1])
l0 = L4.setLengthFromFreq(f0) - qubits[1].cpw_length
for q in qubits:
L4 = L4FromQubit(q)
L4.l = l0 + q.cpw_length
q.C_r = L4.C()
q.omega_r = L4.wl()
q.omega_q = 2*pi*(f0-1e9)
print('{:>11}: {:>7.2f}um {:>5.3f}GHz {:>6.2f}fF'.format(
q.name, 1e6*l0, L4.fl()/1e9, 1e15*L4.C()))
print
base_length = quarter_curve*(1+2+2+2+2) + (1335-700) + 2*600 + (533-392) + coupling_length
res_lengths = [base_length + 2*l for l in (728, 684, 641, 599)]
Deltas = (1, 1., 1, 1.)
for i,q in enumerate(selectedQubits):
L4 = L4FromQubit(q)
L4.l = 1e-6*res_lengths[i] + q.cpw_length
q.C_r = L4.C()
q.omega_r = L4.wl()
q.omega_q = 2*pi*(L4.fl()-Deltas[i]*1e9)
print L4.fl()/1e9, q.omega_r/2e9/pi, q.omega_q/2e9/pi
# Add in measured frequencies if we know them
# D5
f_meas = (5.80, 5.89, 6.01, 6.14)
f_q_meas = (4.58, 5.31, 4.17, 4.77)
# E5
#f_meas = (5.81, 5.91, 6.03, 6.13)
#f_q_meas = (4.76, 6.26, 4.75, 4.83)
for i,q in enumerate(selectedQubits):
q.omega_r = 2*pi*1e9*f_meas[i]
q.omega_q = 2*pi*1e9*f_q_meas[i]
qb = deepcopy(qubits[1])
g = 2*pi*30e6 # qubit-resonator coupling in Hz
print('Range of C_q on the mask:')
print "C_q = 30fF: E_c = {:.2f}MHz".format( qb.E_c(30e6)/(2*pi*hbar)*1e15 )
print "C_q = 95fF: E_c = {:.2f}MHz".format( qb.E_c(95e6)/(2*pi*hbar)*1e15 )
print
print('Ideal:')
print "Transmon: E_c = 250MHz: C_sigma = C_q + C_g = {:.2f}fF".format( e**2/2/250e6/(2*pi*hbar)*1e15 )
print "Charge Sensitive: E_c = 385MHz: C_sigma = C_q + C_g = {:.2f}fF".format( e**2/2/410e6/(2*pi*hbar)*1e15 )
# What variation in C_g should be included on mask for the C_q variation we have?
print( '{:>7} {:>9} {:>7}'.format('C_q', 'g', 'C_g') )
for C_q_ in [85e-15, 29e-15, e**2/2/250e6]:
for g_ in [2*pi*25e6, 2*pi*50e6, 2*pi*200e6]:
qb.C_q = C_q_
print "{:>5.2f}fF {:>6.2f}MHz {:>5.2f}fF".format(
1e15*C_q_, g_/2/pi/1e6, 1e15*qb.cap_g(g_))
# With caps chosen from the mask:
print "{:>11} {:>10} {:>10} {:>10} {:>10} {:>10} {:>10}".format(
'', 'C_q', 'E_c', 'E_j', 'alpha', 'g', 'C_g')
for q in qubits:
print "{:>11}: {:8.2f}fF {:7.2f}MHz {:7.2f}GHz {:7.2f}MHz {:7.2f}MHz {:8.2f}fF".format(
q.name, 1e15*q.C_q, -q.E_c()/(2*pi*hbar)/1e6, q.E_j()/2/pi/hbar/1e9, q.alpha(q.E_c(),q.E_j())/(2*pi)/1e6, g/2/pi/1e6, 1e15*q.cap_g(g))
# We choose the closest g capacitance from the mask
print "{:>11} {:>10} {:>10} {:>10} {:>10} {:>7} {:>10} {:>9} {:>10} {:>9}".format(
'', 'C_g', 'g', 'Chi_0/2pi', 'Chi/2pi', 'Q_r', 'kappa', '1/kappa', 'I_c', 'n_crit')
for q in selectedQubits:
print "{:>11}: {:>8.2f}fF {:>7.2f}MHz {:>7.2f}MHz {:>7.2f}MHz {:>7.0f} {:>7.2f}MHz {:>7.0f}ns {:>8.2f}nA {:>9.0f}".format(
q.name, 1e15*q.cap_g(q.g()), q.g()/2/pi/1e6, 1e-6*q.Chi_0()/2/pi, 1e-6*q.Chi()/2/pi, q.Q_r(),
q.omega_r/q.Q_r()*1e-6/2/pi, q.Q_r()/q.omega_r*1e9, q.I_c()*1e9, ((q.omega_q-q.omega_r)/2/q.g())**2)
delta = 380e-6; #2\Delta/e in V
Jc = 1e8*673e-9 # A/cm^2
nJJs = (2,1,1,2)
print( '{:>8} {:>7} {:>6} {:>13}'.format('', 'I_c', 'R_N', 'width') )
for i,q in enumerate(selectedQubits):
print("{}: {:>5.2f}nA {:>5.2f}k {} x {:.3f}nm".format(
q.name, q.I_c()*1e9, 1e-3*pi/4*delta/q.I_c(), nJJs[i], 1e9*q.I_c()/(1e4*Jc)/100e-9/nJJs[i] ))
print( '{:>11} {:>6} {:>17}'.format('', 'Ej/Ec', 'Charge dispersion') )
for q in selectedQubits:
print "{}: {:>6.3f} {:>15.3f}MHz".format(q.name, q.E_j()/q.E_c(), q.charge_dispersion()/2/pi/hbar/1e6)
cpw.setKineticInductanceCorrection(False)
print cpw
cpwx = cpwtools.CPWWithBridges(material='al', w=1e6*cpw.w, s=1e6*cpw.s, bridgeSpacing = 250, bridgeWidth = 3, t_oxide=0.16)
cpwx.setKineticInductanceCorrection(False)
print cpwx
d = 5
MperL = inductiveCoupling.inductiveCoupling.CalcMutual(cpw.w*1e6, cpw.w*1e6, cpw.s*1e6, cpw.s*1e6, d, 10*cpw.w*1e6)[0]
MperL = 9e-8 # experimentally measured
print( '{:>11} {:>7} {:>15}'.format('', 'M', 'coupling length') )
for q in selectedQubits:
M = 1/(np.sqrt(q.Q_r()*pi/8/cpw.z0()**2)*q.omega_r)
print "{:>11}: {:>5.2f}pH {:>13.2f}um".format(q.name, M*1e12, (M-1e-12)/MperL*1e6)
print( '{:>5} {:>8}'.format('Q_c', 'l_c') )
for q in [3000,6000,9000,15000,21000,27000,33000,99000]:
print "{:>5} {:>6.2f}um".format(q,1/(np.sqrt(q*pi/8/cpw.z0()**2)*qubits[2].omega_r)/MperL*1e6)
l_curve = 2*pi*50/4
tot_length = l_curve*(1+2+2+2+1)*2 + 4*750 + 2569 + 4*450 + 2*106
purcell = cpwtools.HalfLResonator(cpw,tot_length)
purcell.addCapacitiveCoupling('in', 40e-15)
purcell.addCapacitiveCoupling('out', 130e-15)
print( "f_max = {:.3f}GHz Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.Qc('in'), purcell.Qc('out') ) )
purcell.l = (tot_length + 503*4)*1e-6
print( "f_min = {:.3f}GHz Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.Qc('in'), purcell.Qc('out') ) )
print
print('The measured purcell filter (no crossovers) seems to be 150-200MHz below expected. This has been accounted for below.')
f0 = (qubits[1].omega_r + qubits[2].omega_r)/2/2/pi
purcell.setLengthFromFreq(f0 + 175e6) # The measured purcell filter (no crossovers) seems to be 150-200MHz below expected.
print "f = {:.2f}GHz l = {:.3f}um offset = {:.3f}um Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.l*1e6, (purcell.l*1e6-tot_length)/4, purcell.Qc('in'), purcell.Qc('out') )
print "V_out/V_in =", (purcell.Qc('in')/purcell.Qc('out'))**0.5
print "{:.2f}% power lost through input".format( 100*purcell.Ql()/purcell.Qc('in') )
print "{:.2f}% power lost through output".format( 100*purcell.Ql()/purcell.Qc('out') )
print "{:.2f}% power lost internally".format( 100*purcell.Ql()/purcell.Qint() )
print
print "The purcell filter frequency goes up by 310MHz when crossovers are added:"
purcellx = deepcopy(purcell)
purcellx.cpw = cpwx
print "f = {:.2f}GHz l = {:.3f}um Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcellx.fl(), purcellx.l*1e6, purcellx.Qc('in'), purcellx.Qc('out') )
print "Purcell Filter FWHM = {:.2f}MHz".format(2*pi*f0/purcell.Ql()/2/pi/1e6)
print "Purcell Filter Q_l = {:.2f}".format(purcell.Ql())
print
print('T1 Limits:')
print('{:>11} {:>10} {:>11}'.format('', 'no purcell', 'yes purcell'))
for q in qubits:
kappa_r = q.omega_r/q.Q_r()
Delta = q.omega_q - q.omega_r
#print "{}: T1 limit (no purcell) = {:.2f}us T1 limit (purcell) = {:.2f}us".format(
print "{:>11}: {:>8.2f}us {:>9.2f}us".format(
q.name, (Delta/q.g())**2/kappa_r * 1e6, (Delta/q.g())**2 * (q.omega_r/q.omega_q) * (2*Delta/q.omega_r*purcell.Ql())**2/kappa_r * 1e6 )
C_q = qubits[2].C_q
L_q = 1/(qubits[2].omega_q**2 * C_q)
R_s = 50
C_s = 0.1e-15
Q_s = 1/(qubits[2].omega_q * R_s * C_s)
R_p = R_s*(1 + Q_s**2)
C_p = C_s * Q_s**2/(1 + Q_s**2)
omega = 1/np.sqrt((C_q+C_p)*L_q)
Q_xy = omega*R_p*(C_q+C_p)
print("f: {:.3f}GHz --> {:.3f}GHz".format( 1e-9/np.sqrt(C_q*L_q)/2/pi, 1e-9*omega/2/pi))
print("Q = {:.2f}".format(Q_xy))
print("1/kappa = {:.2f}us".format(1e6*Q_xy/omega))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CPW
Step2: $\lambda/4$ readout resonators
Step3: Qubit parameters
Step4: Feedline with and without crossovers
Step5: Inductive Coupling
Step6: Purcell Filter
Step7: Loss from XY line
|
2,831
|
<ASSISTANT_TASK:>
Python Code:
from oedes.fvm import mesh1d
from oedes import progressbar, testing, init_notebook, models, context
init_notebook()
%matplotlib inline
import matplotlib.pylab as plt
import numpy as np
params = {'T': 2500.,
'electron.mu': 1e-6,
'electron.energy': 0.,
'electron.N0': 5e26,
'hole.mu': 1e-6,
'hole.energy': -5.,
'hole.N0': 5e26,
'electrode0.workfunction': 2.5,
'electrode0.voltage': 2.,
'electrode1.workfunction': 2.5,
'electrode1.voltage': 0.,
'cation.mu': 1e-6,
'anion.mu': 1e-6,
'npi': 2e43,
'epsilon_r': 3.
}
L = 350e-9
mesh = mesh1d(L=L, epsilon_r=3.4)
cinit = 1.25e25
model = models.BaseModel()
models.std.electronic_device(model, mesh, 'pn')
cation, anion, ic = models.std.add_ions(model, mesh, zc=1, za=-1)
model.setUp()
xinit=ic(cinit=1e24)
c=context(model,x=xinit)
c.transient(params,1,1e-10, reltol=1, abstol=1e15, relfail=20.)
o = c.output()
m = mesh
plt.plot(m.cells['center'] / L - 0.5, o['cation.c'], '.-', label='cations')
plt.plot(m.cells['center'] / L - 0.5, o['anion.c'], '.-', label='anions')
testing.store(o['cation.c'], rtol=1e-6)
testing.store(o['anion.c'], rtol=1e-6)
plt.yscale('log')
plt.legend(loc=0, frameon=False)
plt.ylabel('carrier density [$m^{-3}$]')
plt.xlabel('distance [reduced units]')
plt.xlim([-0.5, 0.5])
plt.ylim([1e23, 1e27])
testing.store(o['E'], rtol=1e-6)
plt.plot(m.faces['center'] / L - 0.5, o['E'], '.-')
plt.yscale('log')
plt.ylim([1e4, 1e10])
plt.xlim([-0.5, 0.5])
plt.xlabel('distance [reduced units]')
plt.ylabel('electric field [$Vm^{-1}$]')
plt.plot(m.cells['center'] / L - 0.5, o['hole.c'], '.-', label='holes')
plt.plot(m.cells['center'] / L - 0.5, o['electron.c'], '.-', label='electrons')
plt.plot(m.cells['center'] / L - 0.5, o['R'] * 0.5e-7, '.-', label='recombination zone')
testing.store(o['hole.c'], rtol=1e-6)
testing.store(o['electron.c'], rtol=1e-6)
testing.store(o['R'], rtol=1e-6)
plt.xlabel('distance [reduced units]')
plt.ylabel('carrier density [$m^{-3}$]')
plt.xlim([-0.5, 0.5])
plt.legend(loc=0, frameon=False)
plt.plot(m.cells['center'] / L - 0.5, o['hole.c'], '.-', label='holes')
plt.xlabel('distance [reduced units]')
plt.ylabel('carrier density [$m^{-3}$]')
plt.xlim([-0.505, -0.4])
plt.legend(loc=0, frameon=False)
testing.store(o['hole.jdrift'], rtol=1e-6)
testing.store(o['hole.jdiff'], rtol=1e-6)
testing.store(o['electron.jdrift'], rtol=1e-6)
testing.store(o['electron.jdiff'], rtol=1e-6)
plt.plot(m.faces['center'] / L - 0.5, o['hole.jdrift'] / np.amax(o['hole.jdrift']), '.-', label='$j^p_{drift}$')
plt.plot(m.faces['center'] / L - 0.5, o['hole.jdiff'] / np.amax(o['hole.jdrift']), '.-', label='$j^p_{diff}$')
plt.xlim([-0.6, 0.1])
plt.legend(loc=0, frameon=False)
plt.xlabel('distance [reduced units]')
plt.ylabel('normalized current density')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model and parameters
Step2: Results
Step3: Distribution of the electric field
Step4: Distribution of electrons and holes
Step5: Distribution of holes near to contact
Step6: Comparison between drift and diffusion parts of the current
|
2,832
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import climlab
from climlab import constants as const
import cartopy.crs as ccrs # use cartopy to make some maps
ncep_url = "http://psl.noaa.gov/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_Ts = xr.open_dataset(ncep_url + "surface_gauss/skt.sfc.mon.1981-2010.ltm.nc", decode_times=False)
lat_ncep = ncep_Ts.lat; lon_ncep = ncep_Ts.lon
Ts_ncep = ncep_Ts.skt
print( Ts_ncep.shape)
maxTs = Ts_ncep.max(dim='time')
minTs = Ts_ncep.min(dim='time')
meanTs = Ts_ncep.mean(dim='time')
fig = plt.figure( figsize=(16,6) )
ax1 = fig.add_subplot(1,2,1, projection=ccrs.Robinson())
cax1 = ax1.pcolormesh(lon_ncep, lat_ncep, meanTs, cmap=plt.cm.seismic , transform=ccrs.PlateCarree())
cbar1 = plt.colorbar(cax1)
ax1.set_title('Annual mean surface temperature ($^\circ$C)', fontsize=14 )
ax2 = fig.add_subplot(1,2,2, projection=ccrs.Robinson())
cax2 = ax2.pcolormesh(lon_ncep, lat_ncep, maxTs - minTs, transform=ccrs.PlateCarree() )
cbar2 = plt.colorbar(cax2)
ax2.set_title('Seasonal temperature range ($^\circ$C)', fontsize=14)
for ax in [ax1,ax2]:
#ax.contour( lon_cesm, lat_cesm, topo.variables['LANDFRAC'][:], [0.5], colors='k');
#ax.set_xlabel('Longitude', fontsize=14 ); ax.set_ylabel('Latitude', fontsize=14 )
ax.coastlines()
Tmax = 65; Tmin = -Tmax; delT = 10
clevels = np.arange(Tmin,Tmax+delT,delT)
fig_zonobs, ax = plt.subplots( figsize=(10,6) )
cax = ax.contourf(np.arange(12)+0.5, lat_ncep,
Ts_ncep.mean(dim='lon').transpose(), levels=clevels,
cmap=plt.cm.seismic, vmin=Tmin, vmax=Tmax)
ax.set_xlabel('Month', fontsize=16)
ax.set_ylabel('Latitude', fontsize=16 )
cbar = plt.colorbar(cax)
ax.set_title('Zonal mean surface temperature (degC)', fontsize=20)
model1 = climlab.EBM_seasonal()
model1.integrate_years(1, verbose=True)
water_depths = np.array([2., 10., 50.])
num_depths = water_depths.size
Tann = np.empty( [model1.lat.size, num_depths] )
models = []
for n in range(num_depths):
models.append(climlab.EBM_seasonal(water_depth=water_depths[n]))
models[n].integrate_years(20., verbose=False )
models[n].integrate_years(1., verbose=False)
Tann[:,n] = np.squeeze(models[n].timeave['Ts'])
lat = model1.lat
plt.plot(lat, Tann)
plt.xlim(-90,90)
plt.xlabel('Latitude')
plt.ylabel('Temperature (degC)')
plt.title('Annual mean temperature in the EBM')
plt.legend( water_depths.astype(str) )
plt.show()
num_steps_per_year = int(model1.time['num_steps_per_year'])
Tyear = np.empty((lat.size, num_steps_per_year, num_depths))
for n in range(num_depths):
for m in range(num_steps_per_year):
models[n].step_forward()
Tyear[:,m,n] = np.squeeze(models[n].Ts)
fig = plt.figure( figsize=(16,10) )
ax = fig.add_subplot(2,num_depths,2)
cax = ax.contourf(np.arange(12)+0.5, lat_ncep,
Ts_ncep.mean(dim='lon').transpose(),
levels=clevels, cmap=plt.cm.seismic,
vmin=Tmin, vmax=Tmax)
ax.set_xlabel('Month')
ax.set_ylabel('Latitude')
cbar = plt.colorbar(cax)
ax.set_title('Zonal mean surface temperature - observed (degC)', fontsize=20)
for n in range(num_depths):
ax = fig.add_subplot(2,num_depths,num_depths+n+1)
cax = ax.contourf(4*np.arange(num_steps_per_year),
lat, Tyear[:,:,n], levels=clevels,
cmap=plt.cm.seismic, vmin=Tmin, vmax=Tmax)
cbar1 = plt.colorbar(cax)
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Days of year', fontsize=14 )
ax.set_ylabel('Latitude', fontsize=14 )
def initial_figure(models):
fig, axes = plt.subplots(1,len(models), figsize=(15,4))
lines = []
for n in range(len(models)):
ax = axes[n]
c1 = 'b'
Tsline = ax.plot(lat, models[n].Ts, c1)[0]
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Latitude', fontsize=14 )
if n == 0:
ax.set_ylabel('Temperature', fontsize=14, color=c1 )
ax.set_xlim([-90,90])
ax.set_ylim([-60,60])
for tl in ax.get_yticklabels():
tl.set_color(c1)
ax.grid()
c2 = 'r'
ax2 = ax.twinx()
Qline = ax2.plot(lat, models[n].insolation, c2)[0]
if n == 2:
ax2.set_ylabel('Insolation (W m$^{-2}$)', color=c2, fontsize=14)
for tl in ax2.get_yticklabels():
tl.set_color(c2)
ax2.set_xlim([-90,90])
ax2.set_ylim([0,600])
lines.append([Tsline, Qline])
return fig, axes, lines
def animate(step, models, lines):
for n, ebm in enumerate(models):
ebm.step_forward()
# The rest of this is just updating the plot
lines[n][0].set_ydata(ebm.Ts)
lines[n][1].set_ydata(ebm.insolation)
return lines
# Plot initial data
fig, axes, lines = initial_figure(models)
# Some imports needed to make and display animations
from IPython.display import HTML
from matplotlib import animation
num_steps = int(models[0].time['num_steps_per_year'])
ani = animation.FuncAnimation(fig, animate,
frames=num_steps,
interval=80,
fargs=(models, lines),
)
HTML(ani.to_html5_video())
orb_highobl = {'ecc':0., 'obliquity':90., 'long_peri':0.}
print(orb_highobl)
model_highobl = climlab.EBM_seasonal(orb=orb_highobl)
print(model_highobl.param['orb'])
Tann_highobl = np.empty( [lat.size, num_depths] )
models_highobl = []
for n in range(num_depths):
models_highobl.append(climlab.EBM_seasonal(water_depth=water_depths[n], orb=orb_highobl))
models_highobl[n].integrate_years(40., verbose=False )
models_highobl[n].integrate_years(1.)
Tann_highobl[:,n] = np.squeeze(models_highobl[n].timeave['Ts'])
Tyear_highobl = np.empty([lat.size, num_steps_per_year, num_depths])
for n in range(num_depths):
for m in range(num_steps_per_year):
models_highobl[n].step_forward()
Tyear_highobl[:,m,n] = np.squeeze(models_highobl[n].Ts)
fig = plt.figure( figsize=(16,5) )
Tmax_highobl = 125; Tmin_highobl = -Tmax_highobl; delT_highobl = 10
clevels_highobl = np.arange(Tmin_highobl, Tmax_highobl+delT_highobl, delT_highobl)
for n in range(num_depths):
ax = fig.add_subplot(1,num_depths,n+1)
cax = ax.contourf( 4*np.arange(num_steps_per_year), lat, Tyear_highobl[:,:,n],
levels=clevels_highobl, cmap=plt.cm.seismic, vmin=Tmin_highobl, vmax=Tmax_highobl )
cbar1 = plt.colorbar(cax)
ax.set_title('water depth = %.0f m' %models[n].param['water_depth'], fontsize=20 )
ax.set_xlabel('Days of year', fontsize=14 )
ax.set_ylabel('Latitude', fontsize=14 )
lat2 = np.linspace(-90, 90, 181)
days = np.linspace(1.,50.)/50 * const.days_per_year
Q_present = climlab.solar.insolation.daily_insolation( lat2, days )
Q_highobl = climlab.solar.insolation.daily_insolation( lat2, days, orb_highobl )
Q_present_ann = np.mean( Q_present, axis=1 )
Q_highobl_ann = np.mean( Q_highobl, axis=1 )
fig, ax = plt.subplots()
ax.plot( lat2, Q_present_ann, label='Earth' )
ax.plot( lat2, Q_highobl_ann, label='90deg obliquity' )
ax.grid()
ax.legend(loc='lower center')
ax.set_xlabel('Latitude', fontsize=14 )
ax.set_ylabel('W m$^{-2}$', fontsize=14 )
ax.set_title('Annual mean insolation for two different obliquities', fontsize=16)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make two maps
Step2: Make a contour plot of the zonal mean temperature as a function of time of year
Step3: Exploring the amplitude of the seasonal cycle with an EBM
Step4: All models should have the same annual mean temperature
Step5: There is no automatic function in climlab.EBM to keep track of minimum and maximum temperatures (though we might add that in the future!)
Step6: Make a figure to compare the observed zonal mean seasonal temperature cycle to what we get from the EBM with different heat capacities
Step7: Which one looks more realistic? Depends a bit on where you look. But overall, the observed seasonal cycle matches the 10 meter case best. The effective heat capacity governing the seasonal cycle of the zonal mean temperature is closer to 10 meters of water than to either 2 or 50 meters.
Step8: The seasonal cycle for a planet with 90º obliquity
Step9: Repeat the same procedure to calculate and store temperature throughout one year, after letting the models run out to equilibrium.
Step10: And plot the seasonal temperature cycle same as we did above
Step11: Note that the temperature range is much larger than for the Earth-like case above (but same contour interval, 10 degC).
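As a supplement to Step 5, here is a minimal sketch of tracking the seasonal extremes by hand, assuming the `Tyear` array built in the code above (latitude x time step x water depth) and the `lat` and `water_depths` variables defined there:

```python
# Seasonal extremes of the zonal-mean temperature, from the stored annual cycle.
Tmax_model = Tyear.max(axis=1)       # warmest time of year at each latitude, per water depth
Tmin_model = Tyear.min(axis=1)       # coldest time of year at each latitude, per water depth
amplitude = Tmax_model - Tmin_model  # seasonal temperature range (degC)
plt.plot(lat, amplitude)
plt.xlabel('Latitude')
plt.ylabel('Seasonal range (degC)')
plt.legend(water_depths.astype(str))
```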
|
2,833
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
from synset2vec import Synset2Vec
from im2vec import Image2Vec
from tagger import ZeroshotTagger
i2v = Image2Vec()
s2v = Synset2Vec()
tagger = ZeroshotTagger()
labels = map(str.strip, open('../data/synsets_ilsvrc12_test1k.txt').readlines())
from PIL import Image
import os
import numpy as np
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
#model = models.alexnet(pretrained=True)
model = models.resnet18(pretrained=True)
#model = models.resnet50(pretrained=True)
model.eval() # set it to the test mode
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocess = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
img_fname = 'lemon_shark.jpg'
img_pil = Image.open(img_fname).convert('RGB')
img_pil
img_tensor = preprocess(img_pil)
img_tensor.unsqueeze_(0)
img_var = Variable(img_tensor)
result = model(img_var)
softmax_layer = nn.Softmax()
probs = softmax_layer(result)
probs = probs.data.cpu().numpy()
probs = probs[0]
sorted_inds = [i[0] for i in sorted(enumerate(-probs), key=lambda x:x[1])]
for i in range(5):
index = sorted_inds[i]
print('Probability %0.2f%% => %s [%s]' % (probs[index] * 100, labels[index], s2v.explain(labels[index])))
img_vec = i2v.embedding(probs)
res = tagger.predict(img_vec, topk=5)
print ([(label, s2v.explain(label), score) for (label,score) in res])
for i in range(5):
label,score = res[i]
print('Probability %0.2f%% => %s [%s]' % (score * 100, label, s2v.explain(label)))
rootpath = os.path.join(os.environ['HOME'], 'VisualSearch')
collection = 'imagenet2hop-random2k'
imset_fname = os.path.join(rootpath, collection, 'ImageSets', '%s.txt' % collection)
imset = map(str.strip, open(imset_fname).readlines())
print ('nr of test images: %d' % len(imset))
def predict_y0(model, img_fname):
img_pil = Image.open(img_fname).convert('RGB')
img_tensor = preprocess(img_pil)
img_tensor.unsqueeze_(0)
img_var = Variable(img_tensor)
result = model(img_var)
softmax_layer = nn.Softmax()
probs = softmax_layer(result)
probs = probs.data.cpu().numpy()
probs = probs[0]
return probs
import random
random.seed(42)
random.shuffle(imset)
Ns = [1, 2, 5, 10]
hitN = [0] * len(Ns)
nr_metrics = len(Ns)
for progress,imageid in enumerate(imset):
true_label = imageid.split('_',1)[0]
img_fname = os.path.join(rootpath, collection, 'ImageData', '%s.jpg' % imageid)
score_y0 = predict_y0(model, img_fname)
img_vec = i2v.embedding(score_y0)
res = tagger.predict(img_vec, topk=10)
for i in range(nr_metrics):
top = Ns[i]
pred_set = set([x[0] for x in res[:top]])
hitN[i] += (true_label in pred_set)
if (progress+1)%100 == 0:
print (progress+1), 'images tagged ->', ' '.join(['%.1f' % (100.0*float(x)/(progress+1)) for x in hitN])
hitN = [float(x)/len(imset) for x in hitN]
print ' '.join(['hit%d' % x for x in Ns])
print ' '.join(['%.1f' % (x*100) for x in hitN])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the label set $Y_0$
Step2: 2. Load a pretrained CNN model
Step3: 3. Define a preprocess function for input images
Step4: 4. Perform zero-shot tagging for a single image
Step5: Now, predict $Y_0$ using the pretrained model.
Step6: Predict $Y_1$ using hierse
Step7: 5. Perform zero-shot tagging for a set of images
|
2,834
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import quandl
mydata = quandl.get("EIA/PET_RWTC_D")
mydata.head()
mydata.plot(figsize = (12, 6))
mydata = quandl.get("EIA/PET_RWTC_D",
returns = "numpy")
mydata = quandl.get("FRED/GDP",
start_date = "2001-12-31",
end_date = "2005-12-31")
mydata.head()
mydata = quandl.get(["NSE/OIL.1", "WIKI/AAPL.4"])
mydata.head()
# EXAMPLE
quandl.ApiConfig.api_key = "2qM_u-g8oxTV6JbhUWLn"
mydata = quandl.get("FRED/GDP")
# FOR STOCKS
mydata = quandl.get('WIKI/FB',
start_date = '2015-01-01',
end_date = '2017-01-01')
mydata.head()
mydata = quandl.get('WIKI/FB.1',
start_date = '2015-01-01',
end_date = '2017-01-01')
mydata.head()
mydata = quandl.get('WIKI/FB.7',
start_date = '2015-01-01',
end_date = '2017-01-01')
mydata.head()
houses = quandl.get('ZILLOW/M11_ZRIAH')
houses.head()
houses.plot(figsize = (12, 6))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a Basic Data Call
Step2: Note that you need to know the "Quandl code" of each dataset you download. In the above example, it is "EIA/PET_RWTC_D".
Step3: Specifying Data
Step4: Usage Limits
Step5: Database Codes
Step6: Housing Price Example
|
2,835
|
<ASSISTANT_TASK:>
Python Code:
# RUN THIS BLOCK FIRST TO SET UP VARIABLES!
a = True
b = False
x = 2
y = -2
cat = "Mittens"
print a
print (not a)
print (a == b)
print (a != b)
print (x == y)
print (x > y)
print (x = 2)
print (a and b)
print (a and not b)
print (a or b)
print (not b or a)
print not (b or a)
print (not b and a)
print not (b and a)
print (x == abs(y))
print len(cat)
print cat + x
print cat + str(x)
print float(x)
print ("i" in cat)
print ("g" in cat)
print ("Mit" in cat)
# RUN THIS BLOCK FIRST TO SET UP VARIABLES!
x = 2
y = -2
cat = "Mittens"
if (x % 2) == 0:
print "x is even"
else:
print "x is odd"
if (y - 4*x) < 0:
print "Invalid!"
else:
print "Banana"
if "Mit" in cat:
print "Hey Mits!"
else:
print "Where's Mits?"
x = "C"
if x == "A" or "B":
print "yes"
else:
print "no"
x = "C"
if (x == "A") or (x == "B"):
print "yes"
else:
print "no"
# RUN THIS BLOCK FIRST!
num1 = 3.14159
num2 = 6
num3 = 100
num4 = 6 * -5 - 4 * 2 + -7 * -8 + 3
motif = "GTAAGTCGGTAACGTAAGTCGGTAAC"
if num3 > num4:
print "Yes"
else:
print "No"
import random
random.randint(num2, num3)
if len(motif) <= 25:
print "Yes"
else:
print "No"
round(num1, 2)
if num4 >= 0:
print "It's positive"
else:
print "It's negative"
a = float(raw_input("Enter value for a: "))
b = float(raw_input("Enter value for b: "))
c = float(raw_input("Enter value for c: "))
underRoot = (b**2 - 4*a*c)
if underRoot >= 0:
x1 = ( (-b) + underRoot ** 0.5 ) / float(2*a)
x2 = ( (-b) - underRoot ** 0.5 ) / float(2*a)
print "x =", x1, "or", x2
else:
print "non-real answer"
dnaSeq = raw_input("Enter a DNA sequence: ")
motif = raw_input("Enter a motif to search for: ")
if len(motif) > len(dnaSeq):
print "Motif is too long."
dnaSeq = raw_input("Enter a DNA sequence: ")
motif = raw_input("Enter a motif to search for: ")
if len(motif) > len(dnaSeq):
print "Motif is too long."
else:
if motif in dnaSeq:
print "Motif found."
else:
print "Motif not found."
password = "Mittens123"
guess = raw_input("Enter the password: ")
if guess == password:
print "Correct!"
else:
print "Incorrect password. Access denied."
password = "Mittens123"
guess = raw_input("Enter the password: ")
if guess == password:
print "Correct!"
else:
guess = raw_input("Incorrect password. Try again: ")
if guess == password:
print "Correct!"
else:
guess = raw_input("Incorrect password. Try again: ")
if guess == password:
print "Correct!"
else:
print "Incorrect password. Access denied."
import random
randNum = random.randint(0,1)
print randNum
import random
headsCount = 0
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
if random.randint(0,1) == 1:
headsCount = headsCount + 1
print headsCount, "of 10 flips were heads"
import random
headsCount = 0
# random.randint(0,3) gives a 0, 1, 2, or 3
# if it's 0, 1, or 2, call it heads
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
if random.randint(0,3) < 3:
headsCount = headsCount + 1
print headsCount, "of 10 flips were heads"
num = int(raw_input("Enter a number: "))
if (num >= 50) and (num <= 100):
print "Your number is between 50 and 100"
else:
print "Your number is not between 50 and 100"
num = int(raw_input("Enter a number: "))
if (num % 7) == 0:
print "Your number is a multiple of 7"
else:
print "Your number is not a multiple of 7"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (^^ you might find it helpful to copy these variables somewhere you can easily see them when doing the problems. e.g. a piece of paper or a text file.)
Step2: A single = means assignment, and you can't do assignment within a print statement (assignments are always done on their own line). What we want here is a comparison, not an assignment: print (x == 2)
Step3: What is "in"? This is just another Python operator that you can use in your conditionals. As you may have guessed, (x in y) evaluates to True if x is found in y. This can be used to check if a string is contained in another string (e.g. "Mit" in "Mittens").
Step4: See below for explanation!
Step5: Surprised by the last two? It's important to note that when you want to compare a variable against multiple things, you can only compare it to one thing at a time. Although it makes sense in English to say "is x equal to A or B?", in Python you must write (x == "A") or (x == "B"); writing x == "A" or "B" does not do what you expect.
Step6: (A) Check if num3 is greater than num4. Print "yes" if it is, and "no" if it isn't.
Step7: (B) Print a random integer between num2 and num3.
Step8: (C) Check if the length of the string stored in motif is less than or equal to 25. Print "yes" if it is, and "no" if it isn't.
Step9: (D) Round num1 to the nearest hundredth.
Step10: (E) Check if num4 is positive or negative.
Step11: 4. Quadratic formula
Step12: [ Check your answer ]
Step13: (B) Adding to your code from part (A)
Step14: [ Check your answer ] Try running your code above using the following input and make sure your results match the expected output.
Step15: (B) Do this giving the user 3 chances to guess. Only prompt for another guess if the previous guess was incorrect. When they guess correctly, print a confirmation message and end the program. If they don't guess correctly in 3 tries, print "Access denied!" and end the program.
Step16: 7. Coin flip simulation (2pts)
Step17: You should see that you always get either 0 or 1. We will pretend that each time we run random.randint(0,1) we are flipping a coin, where 1 represents heads and 0 represents tails.
Step18: Extra problems (0pt)
Step19: (B) Using raw_input(), prompt the user for a number. Check if that number is between 50 and 100, and print different messages depending on the outcome.
Step20: (C) Using raw_input(), prompt the user for an integer. Check if the integer is a multiple of 7, and print different messages depending on the outcome.
|
2,836
|
<ASSISTANT_TASK:>
Python Code:
import math as m
# Count the divisor pairs (d, n // d) of n whose sum is coprime with n
def CountPairs(n):
    cnt = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            div1 = i
            div2 = n // i
            pair_sum = div1 + div2          # renamed from "sum" so the builtin is not shadowed
            if m.gcd(pair_sum, n) == 1:
                cnt += 1
        i += 1
    return cnt
n = 24
print(CountPairs(n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,837
|
<ASSISTANT_TASK:>
Python Code:
row = 'NO PARKING (SANITATION BROOM SYMBOL) 7AM-7:30AM EXCEPT SUNDAY'
assert from_time(row) == '07:00AM'
assert to_time(row) == '07:30AM'
special_case1 = 'NO PARKING (SANITATION BROOM SYMBOL) 11:30AM TO 1PM THURS'
assert from_time(special_case1) == '11:30AM'
assert to_time(special_case1) == '01:00PM'
special_case2 = 'NO PARKING (SANITATION BROOM SYMBOL) MOON & STARS (SYMBOLS) TUESDAY FRIDAY MIDNIGHT-3AM'
assert from_time(special_case2) == '12:00AM'
assert to_time(special_case2) == '03:00AM'
special_case3 = 'TRUCK (SYMBOL) TRUCK LOADING ONLY MONDAY-FRIDAY NOON-2PM'
assert from_time(special_case3) == '12:00PM'
assert to_time(special_case3) == '02:00PM'
special_case4 = 'NIGHT REGULATION (MOON & STARS SYMBOLS) NO PARKING (SANITATION BROOM SYMBOL) MIDNIGHT TO-3AM WED & SAT'
assert from_time(special_case4) == '12:00AM'
assert to_time(special_case4) == '03:00AM'
special_case5 = 'NO PARKING (SANITATION BROOM SYMBOL)8AM 11AM TUES & THURS'
assert from_time(special_case5) == '08:00AM'
assert to_time(special_case5) == '11:00AM'
special_case6 = 'NO PARKING (SANITATION BROOM SYMBOL) MONDAY THURSDAY 7AMM-7:30AM'
assert from_time(special_case6) == '07:00AM'
assert to_time(special_case6) == '07:30AM'
def filter_from_time(row):
if not pd.isnull(row['SIGNDESC1']):
return from_time(row['SIGNDESC1'])
return np.nan
def filter_to_time(row):
if not pd.isnull(row['SIGNDESC1']):
return to_time(row['SIGNDESC1'])
return np.nan
data['FROM_TIME'] = data.apply(filter_from_time, axis=1)
data['TO_TIME'] = data.apply(filter_to_time, axis=1)
data[['SIGNDESC1', 'FROM_TIME', 'TO_TIME']].head(10)
rows_with_AM_PM_but_time_NaN = data[(data['FROM_TIME'].isnull() | data['TO_TIME'].isnull()) & (data['SIGNDESC1'].str.contains('[0-9]+(?:[AP]M)'))]
len(rows_with_AM_PM_but_time_NaN)
rows_with_AM_PM_but_time_NaN[['SIGNDESC1', 'FROM_TIME', 'TO_TIME']]
data.iloc[180670, data.columns.get_loc('SIGNDESC1')]
data.iloc[180670, data.columns.get_loc('FROM_TIME')] = '9AM'
data.iloc[180670, data.columns.get_loc('TO_TIME')] = '4AM'
data.iloc[212089, data.columns.get_loc('SIGNDESC1')]
data.iloc[212089, data.columns.get_loc('FROM_TIME')] = '10AM'
data.iloc[212089, data.columns.get_loc('TO_TIME')] = '11:30AM'
data.iloc[258938, data.columns.get_loc('SIGNDESC1')]
data.iloc[258938, data.columns.get_loc('FROM_TIME')] = '10AM'
data.iloc[258938, data.columns.get_loc('TO_TIME')] = '11:30AM'
data.iloc[258942, data.columns.get_loc('SIGNDESC1')]
data.iloc[258942, data.columns.get_loc('FROM_TIME')] = '10AM'
data.iloc[258942, data.columns.get_loc('TO_TIME')] = '11:30AM'
data.iloc[258944, data.columns.get_loc('SIGNDESC1')]
data.iloc[258944, data.columns.get_loc('FROM_TIME')] = '10AM'
data.iloc[258944, data.columns.get_loc('TO_TIME')] = '11:30AM'
data.iloc[283262, data.columns.get_loc('SIGNDESC1')]
data.iloc[283262, data.columns.get_loc('FROM_TIME')] = '6AM'
data.iloc[283262, data.columns.get_loc('TO_TIME')] = '7:30AM'
rows_with_AM_PM_but_time_NaN = data[(data['FROM_TIME'].isnull() | data['TO_TIME'].isnull()) & (data['SIGNDESC1'].str.contains('[0-9]+(?:[AP]M)'))]
len(rows_with_AM_PM_but_time_NaN)
data[['SIGNDESC1', 'FROM_TIME', 'TO_TIME']]
data['SIGNDESC1'].head(20)
#https://regex101.com/r/fO4zL8/3
regex_to_extract_days_idv_days = r'\b((?:(?:MON|MONDAY|TUES|TUESDAY|WED|WEDNESDAY|THURS|THURSDAY|FRI|FRIDAY|SAT|SATURDAY|SUN|SUNDAY)\s*)+)(?=\s|$)'
regex_to_extract_days_with_range = r'(MON|TUES|WED|THURS|FRI|SAT|SUN)\s(THRU|\&)\s(MON|TUES|WED|THURS|FRI|SAT|SUN)'
def extract_day(signdesc):
days = ['MON', 'TUES', 'WED', 'THURS', 'FRI', 'SAT', 'SUN']
p_idv_days = re.compile(regex_to_extract_days_idv_days)
m_idv_days = p_idv_days.search(signdesc)
p_range_days = re.compile(regex_to_extract_days_with_range)
m_range_days = p_range_days.search(signdesc)
if 'EXCEPT SUN' in signdesc:
return ', '.join(days[:6])
if 'INCLUDING SUNDAY' in signdesc:
return ', '.join(days)
if 'FRIW/' in signdesc:
return ', '.join(['FRI'])
if ('THRU' in signdesc) and m_range_days:
from_day = m_range_days.group(1)
to_day = m_range_days.group(3)
idx_frm_d = days.index(from_day)
idx_to_d = days.index(to_day)
return ', '.join([days[n] for n in range(idx_frm_d, idx_to_d + 1)])
if ('&' in signdesc) and m_range_days:
from_day = m_range_days.group(1)
to_day = m_range_days.group(3)
return ', '.join([from_day, to_day])
if m_idv_days:
days = m_idv_days.group(1)
d = []
for day in days.split(' '):
            if day in ['MONDAY', 'WEDNESDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY']:
                d.append(day[:3])
            elif day == 'TUESDAY':
                d.append(day[:4])
            elif day == 'THURSDAY':
                d.append(day[:5])
            else:
                # tokens that are already abbreviated (MON, TUES, THURS, ...) are kept as-is
                d.append(day)
return ', '.join(d)
return np.nan
def filter_days(row):
if not pd.isnull(row['SIGNDESC1']):
return extract_day(row['SIGNDESC1'])
return np.nan
assert extract_day('NO STANDING 11AM-7AM MON SAT') == "MON, SAT"
assert extract_day('NO STANDING MON FRI 7AM-9AM') == "MON, FRI"
assert extract_day('2 HOUR PARKING 9AM-5PM MON THRU SAT') == "MON, TUES, WED, THURS, FRI, SAT"
assert extract_day('1 HOUR PARKING 8AM-7PM EXCEPT SUNDAY') == "MON, TUES, WED, THURS, FRI, SAT"
assert extract_day('NO PARKING 10PM-8AM INCLUDING SUNDAY') == "MON, TUES, WED, THURS, FRI, SAT, SUN"
assert extract_day('NO PARKING (SANITATION BROOM SYMBOL) MONDAY THURSDAY 9:30AM-11AM') == "MON, THURS"
assert extract_day('NO PARKING (SANITATION BROOM SYMBOL) 11:30AM TO 1 PM FRIW/ SINGLE ARROW') == "FRI"
assert extract_day('NO PARKING (SANITATION BROOM SYMBOL) 8-9:30AM TUES & FRI') == "TUES, FRI"
assert extract_day('NO PARKING (SANITATION BROOM SYMBOL) TUESDAY FRIDAY 11AM-12:30PM') == "TUES, FRI"
data['DAYS'] = data.apply(filter_days, axis=1)
rows_with_days_but_DAYS_NAN = data[data['DAYS'].isnull() & data['SIGNDESC1'].str.contains('\sMON|\sTUES|\sWED|\sTHURS|\sFRI|\sSAT|\sSUN')]
rows_with_days_but_DAYS_NAN[['SIGNDESC1', 'DAYS']]
data.iloc[308838, data.columns.get_loc('SIGNDESC1')]
data.head()
data.to_csv('Processed_Signs.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find out if any rows have NaN
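A quick sanity check for this (assuming the FROM_TIME and TO_TIME columns created above) is to count the nulls per column:
data[['FROM_TIME', 'TO_TIME']].isnull().sum()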
Step2: Confirm that every row has from_time and to_time
Step3: Day of the week
Step4: Save to CSV
|
2,838
|
<ASSISTANT_TASK:>
Python Code:
!python -V
#!pip3 install torch torchvision
import torch
print("PyTorch version: ")
torch.__version__
print("Device Name: ")
torch.cuda.get_device_name(0)
print("CUDA Version: ")
print(torch.version.cuda)
print("cuDNN version is: ")
print(torch.backends.cudnn.version())
# NVIDIA profiling tool for the available GPU
!nvidia-smi
!cat /proc/cpuinfo
!cat /proc/meminfo
c = torch.tensor([[1,2],[1,2]])
d = torch.tensor([[[1,2],[3,4]],[[5,6],[7,8]]])
a = torch.tensor([0,1,2,3,4])
print("Type of entire array:")
type(a)
print("Datatype of tensor:")
a.type()
print(a.dtype)
print(a.device)
print(a.layout)
b = torch.FloatTensor([0,1,2,3,4])
print("Type of b: ")
b.type()
a = a.type(torch.FloatTensor)
print("Type of a: ")
a.type()
device = torch.device('cuda:0')
device
import numpy as np
datanp = np.array([1,2,3])
type(datanp)
o1 = torch.Tensor(datanp)
o2 = torch.tensor(datanp)
o3 = torch.as_tensor(datanp)
o4 = torch.from_numpy(datanp)
print(o1)
print(o2)
print(o3)
print(o4)
print(o1.dtype)
print(o2.dtype)
print(o3.dtype)
print(o4.dtype)
print(torch.tensor(datanp, dtype=torch.float32))
print(torch.as_tensor(datanp, dtype=torch.float32))
print('old:', datanp)
datanp[0] = 0
print('new:', datanp)
print(o1)
print(o2)
print(o3)
print(o4)
s1d = torch.tensor([1,9,1,1])
s2d = torch.tensor([[1,9,1,1], [4,5,6,7]])
#s3d = torch.tensor([ [[1,9,1,1],[4,5,6,7]] , [[2,10,2,2],[8,9,10,11]] ])
s3d = torch.tensor([
[
[1,9,1,1],
[4,5,6,7]
],
[
[2,10,2,2],
[8,9,10,11]
]
])
s4d = torch.tensor([[[[4,1]]]])
print("The dimension of tensors s1d, s2d, s3d,s4d are:")
print(s1d.ndimension(),s2d.ndimension(),s3d.ndimension(),s4d.ndimension())
dd = torch.tensor([
[1,2,3],
[4,5,6],
[7,8,9]])
#Each element along the first axis, is an array:
print(dd[0])
print(dd[1])
print(dd[2])
#Each element along the second axis, is a number:
print(dd[0][0])
print(dd[1][0])
print(dd[2][0])
print(dd[0][1])
print(dd[1][1])
print(dd[2][1])
print(dd[0][2])
print(dd[1][2])
print(dd[2][2])
s1d = torch.tensor([1,9,1,1])
s2d = torch.tensor([[1,9,1,1], [4,5,6,7]])
#s3d = torch.tensor([ [[1,9,1,1],[4,5,6,7]] , [[2,10,2,2],[8,9,10,11]] ])
s3d = torch.tensor([
[
[1,9,1,1],
[4,5,6,7]
],
[
[2,10,2,2],
[8,9,10,11]
]
])
s4d = torch.tensor([[[[4,1]]]])
print("The size tensor s3d i.e, interpreted as two sets of [2,4] tensors:")
s3d.size()
print("The shape of tensor s3d is:")
s3d.shape
import torch
import numpy as np
import pandas as pd
nparray = np.array([0.0,1.0,2.0,3.0,4.0])
print("\n Original Array in numpy: ",nparray)
torcharray = torch.from_numpy(nparray)
print("\n Torch Array from numpy: ",torcharray)
back_to_nparray = torcharray.numpy()
print("\n Numpy array converted back from torch: ",back_to_nparray)
pdarray = pd.Series([19.0,0.1,5.8,2.3])
print("\n Original Array in Pandas: \n",pdarray)
ptorcharray = torch.from_numpy(pdarray.values) #pdarray.values returns a numpy array
print("\n Torch Array from numpy: ",ptorcharray)
#!pip3 install torch torchvision
import torch
a = torch.tensor([[9,8,7],[6,5,4],[3,2,1]])
#[ [a[0][0],a[0][1],a[0][2]], [a[1][0],a[1][1],a[1][2]],[a[2][0],a[2][1],a[2][2]] ]
print("\nGiven array: \n",a)
print("\nIndexing: ")
print(a[0,0]) # Single bracket notation for accessing individual elements
print(a[0][0]) # Double bracket notation for accessing individual elements
print(a[0])
print(a[1])
print("\nSlicing: ")
slice1 = a[0:3,0:2]
print(slice1)
print("\n")
slice1[0][0]=0
print(slice1)
print("\nMixing Indexing and Slicing:")
print(a[0,0:2])
#!pip3 install torch torchvision
import torch
fancy = torch.tensor([78, 55, 82, 93])
print(fancy[0],fancy[1],fancy[2]) # Suppose we want to access three different elements. We could do it like this.
print([fancy[0],fancy[1],fancy[2]]) # Alternatively, we can pass a single list or array of indices to obtain the same
fancy_index = [0,1,2]
print(fancy[fancy_index]) # When using fancy indexing, the shape of the result reflects the shape of the index arrays rather than the shape of the array being indexed
print(fancy>78)
print(fancy[fancy>78])
print(fancy.gt(78))
r = torch.tensor([[0.0,1.0,2.0,3.0],
[4.0,5.0,6.0,7.0],
[8.0,9.0,10.0,11.0]])
print(r)
print("\nCombine fancy indexing and simple indices :", r[2, [2, 0, 1]])
print("\nCombine fancy indexing with slicing: ", r[1:, [2, 0, 1]])
#print("\nCombine fancy indexing with masking: ",)
t = torch.tensor([
[1,1,1,1],
[2,2,2,2],
[3,3,3,3]
], dtype=torch.float32)
print(t.size())
print(t.shape) #shape is an alias for size()
print(len(t.shape))
print(torch.tensor(t.shape).prod())
print(t.numel()) #In PyTorch, there is a dedicated function for determining the number of elements inside a tensor
print(t.reshape([1,12]))
print(t.reshape([2,6]))
print(t.reshape([3,4]))
print(t.reshape([4,3]))
print(t.reshape(6,2))
print(t.reshape(12,1))
reshapea = t.reshape([1,12])
print(reshapea)
print("\n")
reshapea[0][0]=0
print(reshapea)
print(t)
#!pip3 install torch torchvision
import torch
v = torch.Tensor([9,8,7,6,5,4])
v_col= v.view(6,1)
print("\nView 6x1: \n",v_col)
v_col1= v.view(3,-1)
print("\nView 3x2: \n",v_col1)
print(v)
v_col1[0][0]=0
print(v_col1)
print(t.reshape([1,12]))
print(t.reshape([1,12]).shape)
print(t.reshape([1,12]).squeeze())
print(t.reshape([1,12]).squeeze().shape)
print(t.reshape([1,12]).squeeze().unsqueeze(dim=0))
print(t.reshape([1,12]).squeeze().unsqueeze(dim=0).shape)
f = torch.ones(4, 3)
print(f)
print(torch.flatten(f)) #https://pytorch.org/docs/master/torch.html
print("\n")
f1 = torch.tensor([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
print("Original Array: \n",f1)
print("\n")
print(torch.flatten(f1))
print(torch.flatten(f1, start_dim=1))
t1 = torch.tensor([
[1,2],
[3,4]
])
t2 = torch.tensor([
[5,6],
[7,8]
])
# We can combine t1 and t2 row-wise (axis-0) in the following way:
print(torch.cat((t1, t2), dim=0))
# We can combine them column-wise (axis-1) like this:
print(torch.cat((t1, t2), dim=1))
print(torch.cat((t1, t2), dim=0).shape)
print(torch.cat((t1, t2), dim=1).shape)
import torch
a = torch.randn(1, 4)
b = torch.randn(1, 4)
print(a)
print(b)
print("\n")
abcat = torch.cat((a, b), 0)
print(abcat)
print(abcat.shape)
print("\n")
abstack = torch.stack((a, b),0) # The last element is the dimension to insert. Has to be between 0 and the number of dimensions of concatenated tensors (inclusive)
print(abstack)
print(abstack.shape)
tensor_one = torch.tensor([[1,2,3],[4,5,6]])
print(tensor_one.shape)
tensor_two = torch.tensor([[7,8,9],[10,11,12]])
print(tensor_two.shape)
tensor_tre = torch.tensor([[13,14,15],[16,17,18]])
print(tensor_tre.shape)
tensor_list = [tensor_one, tensor_two, tensor_tre]
stacked_tensor = torch.stack(tensor_list)
print(stacked_tensor.shape)
ele = torch.tensor([
[1,2],
[3,4]],dtype=torch.float32)
# (1) Using these symbolic operations:
print(ele + 2)
print(ele - 2)
print(ele * 2)
print(ele / 2)
print("\n")
# (2) these built-in tensor object methods:
print(ele.add(2))
print(ele.sub(2))
print(ele.mul(2))
print(ele.div(2))
t = torch.tensor([
[0,5,0],
[6,0,7],
[0,8,0]], dtype=torch.float32)
print(t.eq(0))
print(t.ge(0))
print(t.gt(0))
print(t.lt(0))
print(t.le(7))
print(t.abs())
print(t.sqrt())
print(t.neg())
print(t.neg().abs())
x = torch.ones((3,4))
y = torch.arange(4,dtype=torch.float32)
print(x)
print(y)
print("\n")
print(x-y)
red = torch.tensor([
[0,1,0],
[2,0,2],
[0,3,0]], dtype=torch.float32)
print(red.sum())
print("\nThe number of elements in original tensor",red.numel())
print("\nConcating Operations: ")
print(red.sum().numel())
print(red.sum().numel() < red.numel())
print(red.sum())
print(red.prod())
print(red.mean())
print(red.std())
ra = torch.tensor([
[1,1,1,1],
[2,2,2,2],
[3,3,3,3]], dtype=torch.float32)
print(ra.size()) # 3 x 4 rank-2 tensor
print(ra.sum(dim=0)) # Sum along the axis containing 3 elements i.e, column wise
print(ra.sum(dim=1)) # Sum along the axis containing 4 elements i.e, row wise
xa = torch.tensor([
[1,0,0,2],
[0,3,3,0],
[4,0,0,5]], dtype=torch.float32)
print(xa.max())
print(xa.argmax())
print(xa.flatten())
print(xa.max(dim=0))
print(xa.argmax(dim=0))
print(xa.max(dim=1))
print(xa.argmax(dim=1))
ta = torch.tensor([
[1,2,3],
[4,5,6],
[7,8,9]], dtype=torch.float32)
print(ta.mean())
print(ta.mean().item())
print(ta.mean(dim=0).tolist())
print(ta.mean(dim=0).numpy())
a = torch.randn(4)
print(a)
torch.clamp(a, min=0, max=1) # If less than zero make it zero, if greater than make it 1 and leave rest as is
a = torch.tensor([[2,4,3],[5,6,1]])
b = torch.tensor([[10,11,10],[10,2,8]])
torch.mul(a, b) # Each element of the tensor input is multiplied by each element of the Tensor other, must be broadcastable
torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) # This function does not broadcast.
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 3)
print(torch.mm(mat1, mat2)) # matrix multiplication This function does not broadcast. For broadcasting matrix products, use torch.matmul().
print(mat1 @ mat2) # A @ B is the matrix product
mat1 = torch.randn(3, 3)
mat2 = torch.randn(3, 3)
print(mat1 * mat2) # A * B the element-wise product
import torch
t = torch.tensor([1., 2., 4.]).requires_grad_()
u = torch.tensor([10., 20.]).requires_grad_()
a = t.pow(2).sum() + u.log().sum()
torch.autograd.grad(a, (t, u))
#!pip3 install torch torchvision
import torch
x = torch.tensor(2.0,requires_grad=True) # We create a Torch tensor with a value of 2 and we set the parameter requires_grad equals
# The requires_grad=True stores all the operations associated with the variables
y = x**2 # We have function y in terms of x i.e, y(x)=x^2 and this function call constructs a graph
y.backward() # The backward method calculates the derivative of y w.r.t x when x=2 using chain rule
x.grad # The grad method shows the value of the derivative of y w.r.t x when x=2
x = torch.tensor(2.0, requires_grad=True) # Assign x=2.0
z = x**2 + 2*x+1 # Construct the graph
z.backward() # Use chain rule to calculate the gradient
print(x.grad) # Print the gradient
print(x)
#x = torch.tensor(2.0, requires_grad=True)
z = x**2 + 2*x+1 # Reconstruct the graph
z.backward() # Use chain rule to calculate the gradient
print(x.grad)
print(x)
#x = torch.tensor(2.0, requires_grad=True)
x.grad.zero_() # Zero the gradients before reconstructing the graph
z = x**2 + 2*x+1 # Reconstruct the graph
z.backward() # Use chain rule to calculate the gradient
print(x.grad)
print(x)
#!pip3 install torch torchvision
import torch
x = torch.tensor([ 1.0, -2.0, 3.0, -4.0 ]).requires_grad_()
a = x.abs()
s = a.sum()
print(s)
print(s.grad_fn.next_functions)
print(s.grad_fn.next_functions[0][0].next_functions)
#!pip3 install torch torchvision
import torch
# Requires_grad=True turns on differential mode
a=torch.tensor(([1.0]),requires_grad=True)
b=torch.tensor(([2.0]),requires_grad=True)
c=torch.tensor(([3.0]),requires_grad=True)
d=a+b
e=d+c
e.backward()
print(a.grad,b.grad,c.grad)
print(d.grad) # Intermediate gradient value is not saved, and is empty
print(a.grad_fn) # The first node's .grad_fn is empty
print(e.grad_fn)
#!pip3 install torch torchvision
import torch
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0], requires_grad=True)
z = (x**2) * y
print(z)
z.backward(torch.FloatTensor([1, 0, 0, 0])) # do backward for first element of z
#z.backward()
print("The derivative of z_1 w.r.t to x: ",x.grad.data)
print("The derivative of z_1 w.r.t to y: ",y.grad.data)
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0], requires_grad=True)
z = (x**2) * y
z.backward(torch.FloatTensor([0, 1, 0, 0])) # do backward for second element of z
print("The derivative of z_2 w.r.t to x:, ",x.grad.data)
print("The derivative of z_2 w.r.t to y: ",y.grad.data)
# do backward for all elements of z, equal to the collection of partial derivatives z_1, z_2, z_3 and z_4
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0], requires_grad=True)
z = (x**2) * y
z.backward(torch.FloatTensor([1, 1, 1, 1]))
print(x.grad.data)
print(y.grad.data)
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0], requires_grad=True)
z = (x**2) * y
z.backward(gradient=torch.ones(z.size()))
print(x.grad.data)
print(y.grad.data)
import torch
x = torch.tensor([1.0, 2.0, 3.0, 4.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0], requires_grad=True)
z = (x**2) * y
print(z)
print(z.shape)
z.backward(torch.FloatTensor([1, 0, 0, 0])) # do backward for first element of z
print("The derivative of z_1 w.r.t to x: ",x.grad.data)
print("The derivative of z_1 w.r.t to y: ",y.grad.data)
x.grad.zero_() # Zero the gradients of x
y.grad.zero_() # Zero the gradients of y
z = (x**2) * y # Reconstruct the graph
z.backward(torch.FloatTensor([1, 1, 1, 1])) # do backward for second element of z
print("The derivative of z_2 w.r.t to x:, ",x.grad.data)
print("The derivative of z_2 w.r.t to y: ",y.grad.data)
x.grad.zero_()
y.grad.zero_()
z = (x**2) * y
z.backward(torch.FloatTensor([1, 1, 1, 1]))
print(x.grad.data)
print(y.grad.data)
x = torch.randn(2, 2, requires_grad=True) #x is a leaf created by user, thus grad_fn is none
# Each tensor has a .grad_fn attribute that references a "Function" that has created the tensor (except for tensors created by the user – their grad_fn is None).
print('x', x)
y = 2 * x # define an operation on x and construct the graph
z = y ** 3 # define one more operation to check the chain rule and continue constructing the graph
print('z shape:', z.size())
z.backward(torch.FloatTensor([[1, 1], [1, 1]]))
print('x gradient for its all elements:\n', x.grad)
x.grad.zero_()
x.grad.data.zero_() #the gradient for x will be accumulated, it needs to be cleared.
y = 2 * x
z = y ** 3
z.backward(torch.FloatTensor([[0, 1], [0, 1]]))
print('x gradient for the second column:\n', x.grad)
x.grad.zero_()
x.grad.data.zero_()
y = 2 * x
z = y ** 3
z.backward(torch.FloatTensor([[1, 1], [0, 0]]))
print('x gradient for the first row:\n', x.grad)
x = torch.randn(2, 2, requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
y = 2 * x #define an operation on x
print('y', y)
z = y ** 3 #define one more operation to check the chain rule
out = z.mean()
print('out', out)
out.backward()
print('x gradient:\n', x.grad)
x.grad.data.zero_()
y = 2 * x
z = y ** 3
out = z.mean()
#out.backward(torch.FloatTensor([[1, 1], [1, 1]]))
out.backward(torch.ones(z.size())) #Note the use of size() function of the output vector
print('x gradient second time', x.grad)
#Define two tensors u and v
u=torch.tensor(1.0,requires_grad=True)
v=torch.tensor(2.0,requires_grad=True)
f=u*v+u**2
f.backward()
print(u.grad)
print(v.grad)
x = torch.linspace(-10,10,10,requires_grad=True)
print(x)
Y=x**2
print(Y)
y=x**2
print(y)
y.backward(torch.Tensor([1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]))
print(x.grad)
import matplotlib.pyplot as plt
plt.plot(x.detach().numpy(),Y.detach().numpy(),label='function')
plt.plot(x.detach().numpy(),x.grad.detach().numpy(),label='derivative')
plt.legend()
x = torch.linspace(-10,10,10,requires_grad=True)
print(x)
Y=x**2
print(Y)
y=torch.sum(x**2)
print(y)
y.backward()
print(x.grad)
import matplotlib.pyplot as plt
plt.plot(x.detach().numpy(),Y.detach().numpy(),label='function')
plt.plot(x.detach().numpy(),x.grad.detach().numpy(),label='derivative')
plt.legend()
import torch.nn.functional as F
x=torch.linspace(-3,3,100,requires_grad=True)
Y=F.relu(x)
y=torch.sum(F.relu(x))
y.backward()
plt.plot(x.detach().numpy(),Y.detach().numpy(),label='function')
plt.plot(x.detach().numpy(),x.grad.detach().numpy(),label='derivative')
plt.legend()
#!pip3 install torch torchvision
import torch
f = torch.tensor([2.0,3.0],requires_grad=True)
print("Original Tensor: ",f)
g = f[0] * f[1]
g.backward(retain_graph=True)
#g.backward()
print("1st Backward Pass: ",f.grad)
g.backward()
print("2nd Backward Pass: ",f.grad)
import torch
a = torch.randn((1,4),requires_grad=True)
b = a**2
c = b*2
d = c.mean()
e = c.sum()
#d.backward(retain_graph=True)
d.backward(retain_graph=True) # fine....graph is retained so no need to reconstruct again
e.backward(retain_graph=True) # fine....graph is retained so no need to reconstruct again
d.backward() # also fine
e.backward() # error will occur!
x = torch.tensor([3.0],requires_grad=True)
y = x * 2 + x ** 2 + 3
y.backward(retain_graph= True )
print(x.grad) # Tensor containing: 8, [torch.FloatTensor of size 1]
y.backward(retain_graph= True )
print(x.grad) # Output16, because the automatic derivation is done twice, so the first gradient 8 and the second gradient 8 are added to get 16
y.backward() # Do another automatic derivation, this time does not retain the calculation graph
print(x.grad) # Outputs 24
#y.backward() # will do an error, the calculation graph has been discarded
f = torch.tensor([2.0,3.0], requires_grad=True)
g = f[0] + f[1]
g.backward()
print(f.grad)
g.backward()
print(f.grad)
print(f.grad.data[0])
print(f.is_leaf)
#loss1.backward(retain_graph=True) # suppose you first back-propagate loss1, then loss2 (you can also do it in reverse order)
#loss2.backward() # now the graph is freed, and next process of batch gradient descent is ready
#optimizer.step() # update the network parameters
# Original Problem
# -*- coding: utf-8 -*-
import numpy as np
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
for t in range(5):
# Forward pass: compute predicted y
h = x.dot(w1)
h_relu = np.maximum(h, 0)
y_pred = h_relu.dot(w2)
# Zero the grads is not required as we are not using torch backward() function which accumulates buffers
# Compute and print loss
loss = np.square(y_pred - y).sum()
print(t, loss)
# Backprop to compute gradients of w1 and w2 with respect to loss
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.T.dot(grad_y_pred)
grad_h_relu = grad_y_pred.dot(w2.T)
grad_h = grad_h_relu.copy()
grad_h[h < 0] = 0
grad_w1 = x.T.dot(grad_h)
# Update weights
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
import torch
# device = torch.device("cpu") # Uncomment this to run on CPU
dtype = torch.float
device = torch.device("cuda:0")
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = torch.randn(N, D_in, device=device, dtype=dtype) #64x1000
y = torch.randn(N, D_out, device=device, dtype=dtype) #64x10
# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype) #1000 x 100
w2 = torch.randn(H, D_out, device=device, dtype=dtype) #100 x 10
learning_rate = 1e-6
for t in range(5):
# Forward pass: compute predicted y
h = x.mm(w1)
h_relu = h.clamp(min=0)
y_pred = h_relu.mm(w2)
# Compute and print loss
loss = (y_pred - y).pow(2).sum().item()
print(t, loss)
# Backprop to compute gradients of w1 and w2 with respect to loss
grad_y_pred = 2.0 * (y_pred - y)
grad_w2 = h_relu.t().mm(grad_y_pred)
grad_h_relu = grad_y_pred.mm(w2.t())
grad_h = grad_h_relu.clone()
grad_h[h < 0] = 0
grad_w1 = x.t().mm(grad_h)
# Update weights using gradient descent
w1 -= learning_rate * grad_w1
w2 -= learning_rate * grad_w2
import torch
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in,requires_grad=False)
y = torch.randn(N, D_out,requires_grad=False)
w1 = torch.randn(D_in, H,requires_grad=True)
w2 = torch.randn(H, D_out,requires_grad=True)
learning_rate = 1e-6
for t in range(5):
# Do forward
y_pred = x.mm(w1).clamp(min=0).mm(w2)
# Compute Loss
loss = (y_pred - y).pow(2).sum()
print(t, loss.detach().item())
# Backprop to compute gradients (partial derivatives) of w1 and w2 with respect to loss
loss.backward()
# Update weights using gradient descent
w1.data -= learning_rate * w1.grad.data
w2.data -= learning_rate * w2.grad.data
# Zero them for next iteration, as we have already update the weights with gradient data
w1.grad.data.zero_()
w2.grad.data.zero_()
# ==================USING detach()=======================================
#!pip3 install torch torchvision
import torch
x = torch.tensor(([5.0]))
w = torch.tensor(([10.0]),requires_grad=True)
y = w*x
print(w)
c = w.detach()
c.zero_()
print(w) # Modified by c.zero_()!!
y.backward() # Error One of the variables needed for gradient computation has been modified by an inplace operation
# ==================USING .data()=======================================
#!pip3 install torch torchvision
import torch
x = torch.tensor(([5.0]))
w = torch.tensor(([10.0]),requires_grad=True)
y = w*x
print(w)
c = w.data
c.zero_()
print(w) # Modified by c.zero_()!!
y.backward() # Error is not reported as in-place changes are not tracked by autograd
import torch
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs.
# Setting requires_grad=False indicates that we do not need to compute gradients
# with respect to these Tensors during the backward pass.
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)
# Create random Tensors for weights.
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)
learning_rate = 1e-6
for t in range(5):
# Forward pass: compute predicted y using operations on Tensors; these
# are exactly the same operations we used to compute the forward pass using
# Tensors, but we do not need to keep references to intermediate values since
# we are not implementing the backward pass by hand.
y_pred = x.mm(w1).clamp(min=0).mm(w2)
# Compute and print loss using operations on Tensors.
# Now loss is a Tensor of shape (1,)
# loss.item() gets the a scalar value held in the loss.
loss = (y_pred - y).pow(2).sum()
print(t, loss.item())
# Use autograd to compute the backward pass. This call will compute the
# gradient of loss with respect to all Tensors with requires_grad=True.
# After this call w1.grad and w2.grad will be Tensors holding the gradient
# of the loss with respect to w1 and w2 respectively.
loss.backward()
# Manually update weights using gradient descent. Wrap in torch.no_grad()
# because weights have requires_grad=True, but we don't need to track this
# in autograd.
# An alternative way is to operate on weight.data and weight.grad.data.
# Recall that tensor.data gives a tensor that shares the storage with
# tensor, but doesn't track history.
# You can also use torch.optim.SGD to achieve this.
with torch.no_grad():
w1 -= learning_rate * w1.grad
w2 -= learning_rate * w2.grad
# Manually zero the gradients after updating weights
w1.grad.zero_()
w2.grad.zero_()
#!pip3 install torch torchvision
import torch
f = torch.nn.Linear(in_features = 10, out_features = 4)
for n, p in f.named_parameters():
print(n, p.size())
#for param in f.parameters():
#print(param.size())
x = torch.empty(523, 10).normal_()
y = f(x)
y.size()
#!pip3 install torch torchvision
import torch
import torch.optim
x = torch.randn((5, 5))
w = torch.randn((5, 5),requires_grad=True)
z = w.mm(x).mean() # Perform an operation
opt = torch.optim.Adam([w], lr=0.1, betas=(0.5, 0.999)) # Define the optimizer
z.backward() # Calculate gradients
print(w.data) # Print the weights
opt.step() # Update w according to Adam's gradient update rules
print(w.data) # Print updated weights after training step
opt.step()
print(w.data)
import torch
#device = torch.device("cpu") # Uncomment this to run on CPU
dtype = torch.float
device = torch.device("cuda:0")
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = torch.randn(N, D_in, device=device, dtype=dtype) #64x1000
y = torch.randn(N, D_out, device=device, dtype=dtype) #64x10
# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype) #1000 x 100
w2 = torch.randn(H, D_out, device=device, dtype=dtype) #100 x 10
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Tensors for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
torch.nn.Sigmoid())
model = model.cuda()
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_function = torch.nn.MSELoss(reduction='sum').cuda()
# Set the learning rate value
learning_rate = 1e-4
for t in range(5):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Tensor of input data to the Module and it produces
# a Tensor of output data. Basically construct the graph
y_pred=model(x)
# Compute and print loss. We pass Tensors containing the predicted and true
# values of y, and the loss function returns a Tensor containing the
# loss.
loss = loss_function(y_pred, y)
print(t, "{:.20f}".format(loss.item()))
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Tensors with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Tensor, so
# we can access its gradients like we did before.
with torch.no_grad():
for param in model.parameters():
param -= learning_rate * param.grad
# -*- coding: utf-8 -*-
import torch
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out))
loss_fn = torch.nn.MSELoss(reduction='sum')
# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algoriths. The first argument to the Adam constructor tells the
# optimizer which Tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
for t in range(5):
# Forward pass: compute predicted y by passing x to the model.
y_pred = model(x) # Compute the graph
# Compute and print loss.
loss = loss_fn(y_pred, y)
print(t, "{:.20f}".format(loss.item()))
# Before the backward pass, use the optimizer object to zero all of the
# gradients for the variables it will update (which are the learnable
# weights of the model). This is because by default, gradients are
# accumulated in buffers( i.e, not overwritten) whenever .backward()
# is called. Checkout docs of torch.autograd.backward for more details.
optimizer.zero_grad()
# Backward pass: compute gradient of the loss with respect to model
# parameters
loss.backward()
# Calling the step function on an Optimizer makes an update to its
# parameters
optimizer.step()
#!pip3 install torch torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
class net(torch.nn.Module):
def __init__(self):
super(net,self).__init__() # Can also use torch.nn.Module.__init__(self)
self.linear1=torch.nn.Linear(D_in,H)
self.linear2=torch.nn.Linear(H,D_out)
def forward(self,x):
h_relu = torch.nn.functional.relu(self.linear1(x))
#h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in).cuda()
y = torch.randn(N, D_out).cuda()
#x = x.type(torch.FloatTensor)
#y = y.type(torch.FloatTensor)
mymodel = net()
print(mymodel)
# Move to gpu if available:
if torch.cuda.is_available():
mymodel.cuda()
# create a stochastic gradient descent optimizer
optimizer = torch.optim.SGD(mymodel.parameters(), lr=0.0009, momentum=0.9)
# create a loss function
#criterion = nn.NLLLoss()
criterion = torch.nn.MSELoss(reduction='sum')
epochs = 5
for epoch in range(epochs):
optimizer.zero_grad()
model_output = mymodel(x)
loss = criterion(model_output, y)
print(epoch, "{:.20f}".format(loss.item()))
loss.backward()
optimizer.step()
# -*- coding: utf-8 -*-
import torch
class TwoLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
        """
        In the constructor we instantiate two nn.Linear modules and assign them as
        member variables.
        """
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
        """
        In the forward function we accept a Tensor of input data and we must return
        a Tensor of output data. We can use Modules defined in the constructor as
        well as arbitrary operators on Tensors.
        """
#h_relu = self.linear1(x).clamp(min=0)
h_relu = torch.nn.functional.relu(self.linear1(x))
y_pred = self.linear2(h_relu)
return y_pred
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct our model by instantiating the class defined above
model = TwoLayerNet(D_in, H, D_out)
# Construct our loss function and an Optimizer. The call to model.parameters()
# in the SGD constructor will contain the learnable parameters of the two
# nn.Linear modules which are members of the model.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(5):
# Forward pass: Compute predicted y by passing x to the model
y_pred = model(x) #Construct the graph and return the output of forward
# Compute and print loss, can directly call nn.functional.mse_loss(out, y)since it is a non-parametric function
loss = criterion(y_pred, y)
print(t, "{:.20f}".format(loss.item()))
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
sum([p.numel() for p in model.parameters()])
import torch
import torch.nn as nn
class Model(nn.Module):
    def __init__(self):   # the original snippet was missing the __init__ wrapper
        super().__init__()
        self.feature_extractor = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=3, padding=1, stride=1),
            nn.Conv2d(12, 24, kernel_size=3, padding=1, stride=1),)
    def forward(self, x):
        x = self.feature_extractor(x)
        return x
import torch.nn as nn
def init_weights(m):
print(m)
if type(m) == nn.Linear:
#m.weight.data.fill_(1.0)
nn.init.xavier_uniform_(m.weight.data)
print(m.weight.data)
net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
net.apply(init_weights)
seq_net = nn.Sequential(
nn.Linear(2,4),
nn.Tanh(),
nn.Linear(4,1)
)
print(seq_net[0])
print(seq_net[1])
print(seq_net[0].weight)
for param in net.parameters():
print(param,":",param.size())
for name, param in net.named_parameters():
print(name,": ",param.size())
for buf in net.buffers():
print(type(buf.data), buf.size())
for child in net.children():
print(child)
for name, module in net.named_children():
print(name," : ", module)
for idx, m in enumerate(net.modules()):
print(idx, ' ', m)
l = nn.Linear(2, 2)
net = nn.Sequential(l, l)
for idx, m in enumerate(net.named_modules()):
print(idx,":",m)
net.cpu()
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
x = torch.randn(1, 2).cuda()
y = torch.randn(1, 1).cuda()
class net(torch.nn.Module):
def __init__(self):
super(net,self).__init__()
self.linear1=torch.nn.Linear(2,1)
self.linear2=torch.nn.Linear(1,2)
def forward(self,x):
h_relu = torch.nn.functional.relu(self.linear1(x))
y_pred = self.linear2(h_relu)
return y_pred
mymodel = net()
if torch.cuda.is_available():
mymodel.cuda()
optimizer = torch.optim.SGD(mymodel.parameters(), lr=0.0009, momentum=0.9)
# Before training
print("predict (before training)", mymodel(x).data[0])
loss_col = []
epochs = 5
for epoch in range(epochs):
optimizer.zero_grad()
model_output = mymodel(x)
loss = torch.nn.functional.mse_loss(model_output, y) # direct use of mse_loss from nn.functional
loss_col.append(loss)
#print(epoch, "{:.20f}".format(loss.item()))
loss.backward()
optimizer.step()
print("progress:", epoch, loss.data.item())
plt.plot([l.item() for l in loss_col])   # convert the stored CUDA tensors to plain floats before plotting
plt.show()
#print(list(mymodel.parameters()))
# After training
hour_var = torch.Tensor([1.0,4.0]).cuda()
y_pred = mymodel(hour_var)
print("predict (after training)", y_pred.data)
print(mymodel.linear1.weight.grad.size())
print(mymodel.linear1.weight.data[0])
print(mymodel.linear1.weight.data.norm()) # norm of the weight
print(mymodel.linear1.weight.grad.data.norm()) # norm of the gradients
# Print model's state_dict
print("Model's state_dict:")
for param_tensor in mymodel.state_dict():
print("\n",param_tensor, "\t", mymodel.state_dict()[param_tensor].size())
# Print optimizer's state_dict
print("\nOptimizer's state_dict:")
for var_name in optimizer.state_dict():
print("\n",var_name, "\t", optimizer.state_dict()[var_name])
# Save only model parameters
torch.save(mymodel.state_dict(), "/content/rahul.pth")
# Load only model parameters
# To re-read the parameters of the model, first we need to redefine the model once, then re-read the parameters
mymodel_n = net().cuda()
mymodel_n.load_state_dict(torch.load("/content/rahul.pth"))
print(mymodel_n)
# print(list(mymodel_n.parameters()))
# model.eval()
hour_var = torch.Tensor([1.0,4.0]).cuda()
y_pred_n = mymodel_n(hour_var)
print("predict (after training)", y_pred_n.data)
# Save the entire model
torch.save(mymodel,"/content/rahule.pt")
# Load the entire model
mymodel_nf = torch.load("/content/rahule.pt")
# model.eval()
hour_var = torch.Tensor([1.0,4.0]).cuda()
y_pred_nf = mymodel_nf(hour_var)
print("predict (after training)", y_pred_nf.data)
# Checkpoint save
torch.save({
'epoch': epoch,
'model_state_dict': mymodel.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
            # ...  (any other entries to checkpoint; elided in the original snippet)
}, PATH)
# Checkpoint load
model = TheModelClass(*args, **kwargs)
optimizer = TheOptimizerClass(*args, **kwargs)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.eval()
# - or -
model.train()
torch.save({
'modelA_state_dict': modelA.state_dict(),
'modelB_state_dict': modelB.state_dict(),
'optimizerA_state_dict': optimizerA.state_dict(),
'optimizerB_state_dict': optimizerB.state_dict(),
            # ...  (any other entries to checkpoint; elided in the original snippet)
}, PATH)
modelA = TheModelAClass(*args, **kwargs)
modelB = TheModelBClass(*args, **kwargs)
optimizerA = TheOptimizerAClass(*args, **kwargs)
optimizerB = TheOptimizerBClass(*args, **kwargs)
checkpoint = torch.load(PATH)
modelA.load_state_dict(checkpoint['modelA_state_dict'])
modelB.load_state_dict(checkpoint['modelB_state_dict'])
optimizerA.load_state_dict(checkpoint['optimizerA_state_dict'])
optimizerB.load_state_dict(checkpoint['optimizerB_state_dict'])
modelA.eval()
modelB.eval()
# - or -
modelA.train()
modelB.train()
torch.save(modelA.state_dict(), PATH)
modelB = TheModelBClass(*args, **kwargs)
modelB.load_state_dict(torch.load(PATH), strict=False)
# Save
torch.save(model.state_dict(), PATH)
# Load
device = torch.device('cpu')
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH, map_location=device))
torch.save(model.state_dict(), PATH)
device = torch.device("cuda")
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH, map_location="cuda:0")) # Choose whatever GPU device number you want
model.to(device) # Model to GPU
# Make sure to call the code; input = input.to(device) on any input tensors that you feed to the model
# Save
torch.save(model.state_dict(), PATH)
# Load
device = torch.device("cuda")
model = TheModelClass(*args, **kwargs)
model.load_state_dict(torch.load(PATH))
model.to(device) # Model to GPU
# Make sure to call the code; input = input.to(device) on any input tensors that you feed to the model
#!pip3 install torch torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
x = torch.randn(1, 2).cuda()
y = torch.randn(1, 1).cuda()
class net(torch.nn.Module):
def __init__(self):
super(net,self).__init__()
self.linear1=torch.nn.Linear(2,1)
self.linear2=torch.nn.Linear(1,2)
def forward(self,x):
h_relu = torch.nn.functional.relu(self.linear1(x))
y_pred = self.linear2(h_relu)
return y_pred
def forwardcall(module,input,output):
print("\nInside forward of ", module," : ",input," : ", output)
def backwardcall(module, grad_input, grad_output):
print("Inside backward of ", module," : ",grad_input," : ", grad_output)
mymodel = net()
print(mymodel)
hook1 = mymodel.linear1.register_forward_hook(forwardcall)
hook2 = mymodel.linear2.register_backward_hook(backwardcall)
if torch.cuda.is_available():
mymodel.cuda()
optimizer = torch.optim.SGD(mymodel.parameters(), lr=0.0009, momentum=0.9)
loss_col = []
epochs = 5
for epoch in range(epochs):
optimizer.zero_grad()
model_output = mymodel(x)
loss = torch.nn.functional.mse_loss(model_output, y)
loss_col.append(loss)
#print(epoch, "{:.20f}".format(loss.item()))
loss.backward()
optimizer.step()
hook1.remove() # removes the hook
hook2.remove() # removes the hook
# You should be able to check the training state of the model:
if mymodel.training == True:
print("Model is in training mode")
if mymodel.training == False:
print("Model is in Evaluation mode")
linear = nn.Linear(2, 2)
print(linear.weight)
linear.to(torch.double)
print(linear.weight)
gpu1 = torch.device("cuda:0")
print(linear.to(gpu1, dtype=torch.half, non_blocking=True))
print(linear.weight)
cpu = torch.device("cpu")
linear.to(cpu)
print(linear.weight)
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
x = torch.randn(1, 2).cuda()
y = torch.randn(1, 1).cuda()
class net(torch.nn.Module):
def __init__(self):
super(net,self).__init__()
self.linear1=torch.nn.Linear(2,1)
self.linear2=torch.nn.Linear(1,2)
def forward(self,x):
h_relu = torch.nn.functional.relu(self.linear1(x))
y_pred = self.linear2(h_relu)
return y_pred
mymodel = net()
print(mymodel)
if torch.cuda.is_available():
mymodel.cuda()
#optimizer = torch.optim.SGD(mymodel.parameters(), lr=0.0009, momentum=0.9)
optimizer = torch.optim.SGD(mymodel.parameters(), lr=0.01)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
# Train flag can be updated with boolean
# to disable dropout and batch norm learning
mymodel.train(True)
# execute train step
loss_col = []
epochs = 50
for epoch in range(epochs):
optimizer.zero_grad()
model_output = mymodel(x)
loss = torch.nn.functional.mse_loss(model_output, y)
loss_col.append(loss)
print("\nIteration:",epoch,"Loss:","{:.20f}".format(loss.item()))
print("lr: ", "{:.20f}".format(optimizer.param_groups[0]['lr']))
  loss.backward()
  optimizer.step()
  scheduler.step()   # since PyTorch 1.1 the LR scheduler step should come after the optimizer step
mymodel.train(False)
# run inference step
# CPU seed
torch.manual_seed(42)
# GPU seed
torch.cuda.manual_seed_all(42)
from torch.utils.data import Dataset
class c2fdata(Dataset):
def __init__(self):
self.celsius = torch.tensor([(float)(c) for c in range(-273,1000)])
self.fahrenheit = torch.tensor([c*1.8+32.0 for c in self.celsius])
def __getitem__(self,index):
return self.celsius[index],self.fahrenheit[index]
def __len__(self):
return self.celsius.shape[0]
mydataset = c2fdata()
print(mydataset[0])
print(len(mydataset))
for i in range(len(mydataset)):
print(mydataset[i])
if i == 3:
break
mydataloader = torch.utils.data.DataLoader(mydataset,batch_size=6,num_workers=2)
for i, batch in enumerate(mydataloader):
print(i, batch)
mydataloader = torch.utils.data.DataLoader(mydataset,batch_size=6, shuffle=True, num_workers=2, drop_last=True)
for i, batch in enumerate(mydataloader):
print(i, batch)
import math
class RangeDataset(torch.utils.data.Dataset):
def __init__(self, start, end, step=1):
self.start = start
self.end = end
self.step = step
def __getitem__(self, index):
value = self.start + index * self.step
assert value < self.end
return value
def __len__(self):
return math.ceil((self.end - self.start) / self.step)
dataset = RangeDataset(0, 10)
data_loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2, drop_last=True)
for i, batch in enumerate(data_loader):
print(i, batch)
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
class DiabetesDataset(Dataset):
    """ Diabetes dataset read from a local CSV file. """
# Initialize your data, download, etc.
def __init__(self):
xy = np.loadtxt('./data/diabetes.csv.gz',
delimiter=',', dtype=np.float32)
self.len = xy.shape[0]
self.x_data = torch.from_numpy(xy[:, 0:-1])
self.y_data = torch.from_numpy(xy[:, [-1]])
def __getitem__(self, index):
return self.x_data[index], self.y_data[index]
def __len__(self):
return self.len
dataset = DiabetesDataset()
train_loader = DataLoader(dataset=dataset,batch_size=32, shuffle=True,num_workers=2)
for epoch in range(2):
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels = data
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
# Run your training process
print(epoch, i, "inputs", inputs.data, "labels", labels.data)
scale = Rescale(256)
crop = RandomCrop(128)
composed = transforms.Compose([Rescale(256),RandomCrop(224)])
import torch
from torchvision import transforms, datasets
data_transform = transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
mydataset = datasets.ImageFolder(root='root',transform=data_transform)
dataset_loader = torch.utils.data.DataLoader(mydataset,batch_size=4, shuffle=True,num_workers=4)
#!pip3 install torch torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
class mymnist(nn.Module):
def __init__(self):
super(mymnist, self).__init__()
self.fc1 = nn.Linear(28 * 28, 200)
self.fc2 = nn.Linear(200, 200)
self.fc3 = nn.Linear(200, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x,dim=1)
net1 = mymnist().cuda()
print(net1)
# create a stochastic gradient descent optimizer
optimizer = torch.optim.SGD(net1.parameters(), lr=0.01, momentum=0.9)
# create a loss function
criterion = nn.NLLLoss().cuda()
#optimizer = torch.optim.Adam(net1.parameters(), lr=0.01)
#criterion = nn.CrossEntropyLoss()
#!pip3 install torch torchvision
import torch
import torchvision
import torchvision.datasets as dataset
import torchvision.transforms as transforms
BATCH_SIZE = 64
# torchvision.datasets.MNIST outputs a set of PIL images
# We transform them to tensors
transform = transforms.ToTensor()
# Load the training dataset by setting the parameters train to True and convert it to a tensor by placing a transform object in the argument transform.
trainset = torchvision.datasets.MNIST('/tmp', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=2)
# Load the testing dataset by setting the parameters train False and convert it to a tensor by placing a transform object in the argument transform
testset = torchvision.datasets.MNIST('/tmp', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=2)
# Print out the tuple (image,label)
#print(trainset[0])
# Print out the first image
#print("\n",trainset[0][0])
# Print out first label or class
print(trainset[0][1])
print(trainset[0][0].type())
print(trainset[0][1].type())
import matplotlib.pylab as plt
import numpy as np
print(trainset[3][0].shape)
plt.imshow(trainset[3][0].numpy().reshape(28,28),cmap='gray')
print(trainset[3][1].item())
plt.title('y= '+ str(trainset[3][1].item()))
#print(trainset[30])
dataiter = iter(trainloader)
images, labels = next(dataiter)  # returns one batch of images and the corresponding labels
print('Labels: ', labels)
print('Batch shape: ', images.size())
im = torchvision.utils.make_grid(images)
print(im.size())
plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
#plt.imshow(np.transpose(im.numpy(), (2, 1, 0)))
# Hyper-parameters
num_epochs = 1
learning_rate = 0.001
# Train the model
total_step = len(trainloader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(trainloader):
# Move tensors to the configured device
images = images.reshape(-1, 28*28).cuda() # Flatten the data (n, 1, 28, 28)-> (n, 784)
labels = labels.cuda()
# Forward pass
outputs = net1(images)
loss = criterion(outputs, labels)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
correct = 0
total = 0
for images, labels in testloader:
images = images.reshape(-1, 28*28).cuda() # Flatten the data (n, 1, 28, 28)-> (n, 784)
labels = labels.cuda()
outputs = net1(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(net1.state_dict(), 'model.ckpt')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What is a tensor?
Step2: Tensor Types
Step3: Every torch.Tensor has these attributes
Step4: Tensor datatypes are given in the table below. Note that torch.Tensor is an alias for the default tensor type (torch.FloatTensor).
Step5: Notice how each type in the table has a CPU and GPU version. One thing to keep in mind about tensor data types is that tensor operations between tensors must happen between tensors with the same type of data.
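A small sketch of the dtype rule (the values are arbitrary; recent PyTorch versions may silently promote mixed dtypes instead of raising):
import torch
t_int = torch.tensor([1, 2, 3])            # int64 by default
t_float = torch.tensor([1.0, 2.0, 3.0])    # float32 by default
print(t_int.float() + t_float)             # convert explicitly so both operands share a dtype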
Step6: One thing to keep in mind about using multiple devices is that tensor operations between tensors must happen between tensors that exists on the same device.
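A minimal illustration of the device rule (guarded so it also runs on CPU-only machines):
import torch
a = torch.tensor([1.0, 2.0])
if torch.cuda.is_available():
    b = a.to('cuda')            # a + b would now fail: one tensor is on the CPU, the other on the GPU
    print(a.to('cuda') + b)     # move both operands to the same device before operating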
Step7: Note PyTorch tensors can be created with the torch.Tensor constructor, which takes the tensor’s dimensions as input and returns a tensor occupying an uninitialized region of memory
Step8: All of the options (o1, o2, o3, o4) appear to have produced the same tensors except for the first one.
Step9: With torch.Tensor(), we are unable to pass a dtype to the constructor. This is an example of the torch.Tensor() constructor lacking in configuration options. This is one of the reasons to go with the torch.tensor() factory function for creating our tensors.
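For example (a small sketch, with arbitrary values):
import torch
print(torch.tensor([1, 2, 3], dtype=torch.float64))   # the factory function accepts a dtype
# torch.Tensor([1, 2, 3], dtype=torch.float64) raises a TypeError: the constructor takes no dtype argument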
Step10: The first two o1 and o2 still have the original value of 1 for index 0, while the second two o3 and o4 have the new value of 0 for index 0.
Step11: The dimension corresponds to the number of nested list sets
Step12: Axes of a tensor
Step13: Note that, with tensors, the elements of the last axis are always numbers (scalar). Every other axis will contain n-dimensional arrays.
Step14: .shape is an alias for .size()
Step15: Tensor Indexing and Slicing
Step16: Indexing gets an item but destroys the data structure, since the returned item is not the original tensor. Simply put, it does not preserve the dimensionality of the original tensor.
Step17: Mathematical operations on tensors can be performed element-wise, and flat arrays can be returned which can be used as masks.
Step18: We can
Step19: Tensor Functions
Step20: Typically, after we know a tensor’s shape, we can deduce a couple of things. First, we can deduce the tensor's rank. The rank of a tensor is equal to the length of the tensor's shape.
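A one-line check of that relationship (the shape is chosen arbitrarily):
import torch
t = torch.zeros(2, 3, 4)
print(len(t.shape))    # 3, i.e. the rank equals the length of the shape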
Step21: Since the above tensor has 12 elements, so any reshaping must account for exactly 12 elements.
Step22: PyTorch has another function called view() that does the same thing as the reshape() function. We can use -1 in the arguments if we don't know the actual size of the tensor.
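For instance (a small sketch with made-up sizes):
import torch
v = torch.arange(12)
print(v.view(3, -1).shape)   # torch.Size([3, 4]); -1 lets PyTorch infer the missing dimension
print(v.view(-1, 6).shape)   # torch.Size([2, 6])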
Step23: We can also change the shape of our tensors is by squeezing and unsqueezing them.
Step24: Flatten a tensor
Step25: Most times we deal with batches of color images and we don’t want to flatten the whole tensor. We only want to flatten the image tensors within the batch tensor leaving the batch dimension intact
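A sketch with a made-up batch of 8 RGB images of size 28x28:
import torch
batch = torch.zeros(8, 3, 28, 28)
print(batch.flatten(start_dim=1).shape)   # torch.Size([8, 2352]); the batch axis is left intact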
Step26: torch.cat() concatenate tensors along an existing dimension and torch.stack() stack tensors along a new dimension.
Step27: Our initial three tensors were all of shape 2x3. So the default of torch.stack is that it’s going to insert a new dimension in front of the 2x3, so we’re going to end up with a 3x2x3 tensor.
Step28: Comparison operations are also element-wise. For a given comparison operations between tensors, a new tensor of the same shape is returned with each element containing either a 0 or a 1.
Step29: There are some other ways to refer to element-wise operations, and all of these mean the same thing
Step30: Broadcasting of Tensors
Step31: Reduction operations
Step32: Reduction by Axis
Step33: Argmax tensor reduction operation
Step34: If we don’t specify an axis to the argmax() method, it returns the index location of the max value from the flattened tensor, which in this case is indeed 11.
Step35: Accessing elements inside tensors
Step36: Additional operators
Step37: Homework
Step38: Example 1
Step39: Note that, to get access to the value derivative of Y with respect to X we use X.grad not Y.grad
Step40: Gradients are accumulated with every cycle which allow us to get the correct gradient for all the computations with a given variable.
Step41: After the backward call is called, the derivative is automatically calculated.
Step42: Since the backward() function accumulates gradients, and you don’t want to mix up gradients between minibatches, you have to zero them out at the start of a new minibatch. This is exactly like how a general (additive) accumulator variable is initialized to 0 in code (see the short sketch after this step list).
Step43:
Step44: Multi-Dimensional Tensor Inputs
Step45: The above code can also be written as
Step46: For varying dimensional matrices, we can clearly see the gradients of z are computed w.r.t to each dimension of x, because the operations are all element-wise.
Step47: Tensor Gradients
Step48: We need to calculate the derivative of y equals x squared. We generate values of x from -10 to 10. Note that we have to use the .detach() method before we can cast the tensor as a numpy array required for the matplotlib function. The .detach() function will prevent future computations on the tensor from being tracked. Another way to prevent history tracking is by wrapping your code with torch.no_grad().
Step49: Homework
Step50: Relu Function
Step51: Retain graph
Step52: Suppose that we have the computation graph shown above. The variables d and e are the outputs, and a is the input. The underlying computation is
Step53: When we do d.backward(), that is fine. After this computation, the part of graph that calculate d will be freed by default to save memory. So if we do e.backward(), the error message will pop up. In order to do e.backward(), we have to set the parameter retain_graph to True in d.backward(), i.e.,
Step54: As long as you use retain_graph=True in your backward method, you can do backward any time you want.
Step55: If backward is called again in the last statement, an error is reported, because PyTorch discards the computation graph by default after the backward pass; retaining it has to be requested manually via retain_graph.
Step56: The addition operation doesn’t need buffers
Step57: Usually after a backpropagation you process the next batch so you don’t need the gradients of the previous batch anymore.
Step58: Wrap-up
Step59: Original Problem using pytorch
Step60: Using pytorch built in functions.
Step61: We can also use loss.data (loss.data.item()) instead of loss.detach().item() however it detaches the tensor from the computation graph and might lead to wrong results.
Step62: Thus, .data can be unsafe in some cases. Any changes on w.data wouldn't be tracked by autograd, and the computed gradients would be incorrect if w is needed in a backward pass. A safer alternative is to use w.detach(), which also returns a Tensor that shares data with requires_grad=False, but will have its in-place changes reported by autograd if w is needed in backward
Step63: Using the context manager no_grad() we can avoid storing the computations done producing the output of our network in the computation graph.
Step64: There are many methods available for each module to access its children —
Step65: Thus torch.nn.Sequential expects a list of the layers that we want in the neural network. in our case our list has two things, we want a linear layer (y=wx+b) whose input is a vector of some length along with a non-linear activation function relu followed another linear layer with sigmoid activation function - this is a two layer neuron model
Step66: Up to this point we have updated the weights of our models by manually mutating the Tensors holding learnable parameters (with torch.no_grad() or .data to avoid tracking history in autograd). This is not a huge burden for simple optimization algorithms like stochastic gradient descent, but in practice we often train neural networks using more sophisticated optimizers like AdaGrad, RMSProp, Adam, etc.
Step67: Using Class
Step68: During the forward pass, the inputs must be passed to the graph being constructed and any output must be returned for the loss to be calculated.
Step69: The above step creates an instance of the network architecture. We then setup an optimizer and a loss criteria. In PyTorch, the optimizer knows how to optimize any attribute of type Parameter.
Step70: We finally run the main training loop. The optimizer.step() does a single updation of all the parameters using the new gradients calculated for every backward call.
Step73: The special method forward() will automatically get called when the layer is computed, and it is passed the data from the previous layer.
Step74: Notice that we never explicitly call forward pass defined within the class. We always call the Module instance afterwards instead since it takes care of running the registered hooks while the former silently ignores them.
Step75: Initializing parameters of the model
Step76: Accessing Parameters and Modules
Step77: parameters(recurse=True)
Step78: named_parameters(prefix='', recurse=True)
Step79: buffers(recurse=True)
Step80: named_buffers(prefix='', recurse=True)
Step81: named_children()
Step82: modules()
Step83: named_modules(memo=None, prefix='')
Step84: Move to cpu
Step85: Move to GPU
Step86: Sometimes it gets difficult to plot our loss with respect to every parameter. As a result, we will store the loss in a list for each iteration.
Step87: Save and Load Models
Step88: Notice that the load_state_dict() function takes a dictionary object, NOT a path to a saved object. This means that you must deserialize the saved state_dict before you pass it to the load_state_dict() function. For example, you CANNOT load using model.load_state_dict(PATH)
Step89: Checkpoint saving
Step90: Multiple Models in One File
Step91: Using Parameters from a Different Model
Step92: Saving & Loading Model Across Devices
Step93: Save on CPU, Load on GPU
Step94: Save on GPU, Load on GPU
Step95: Forward and Backward Hooks
Step96: The current implementation will not have above behavior for complex Module that perform many operations.
Step97: Moves and/or cast the parameters and buffers
Step98: Scheduler
Step99: Debugging
Step100: Inside __init__ we would usually configure some paths or change the set of samples ultimately returned. In __len__, we specify the upper bound for the index with which __getitem__ may be called, and in __getitem__ we return the actual sample, which could be an image or an audio snippet.
Step101: However, it would be much more convenient if the dataset implemented the iterator protocol itself, so we could simply loop over samples with for sample in dataset.
Step103: Here, we set batch_size to 4, so returned tensors will contain exactly four values.
Step104: Transforms
Step105: Torch.Vision
Step106: The DataLoader actually has some reasonably sophisticated logic to determine how to collate individual samples returned from our dataset’s getitem method into a batch, as returned by the DataLoader during iteration.
Step107: Gradient Descent and Optimizer
Step108: Data Loading
Step109: Data Access
Step110: Data Visualization
Step111: Using previously defined trainloader iterator to iterate through the dataset.
Step112: We often want to display a grid of images to show samples for the training or testing images. torchvision.utils.make_grid makes a grid to be displayed as known as Image grid. It takes input a 4D mini-batch Tensor of shape (B x C x H x W) or a list of images all of the same size.
Step113: Training
Step114: Testing
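As a quick illustration of the gradient accumulation described in Steps 40-42, here is a minimal sketch (assuming only that torch is imported) showing why gradients must be zeroed between backward passes:
import torch
x = torch.tensor(2.0, requires_grad=True)
y = x ** 2
y.backward()
print(x.grad)    # tensor(4.) since dy/dx = 2x at x = 2
z = x ** 2
z.backward()
print(x.grad)    # tensor(8.) - the new gradient is accumulated on top of the old one
x.grad.zero_()   # reset before the next minibatch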
|
2,839
|
<ASSISTANT_TASK:>
Python Code:
from shapely.geometry import Point, Polygon
# Create Point objects
p1 = Point(24.952242, 60.1696017)
p2 = Point(24.976567, 60.1612500)
# Create a Polygon
coords = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)]
poly = Polygon(coords)
# Let's check what we have
print(p1)
print(p2)
print(poly)
# Check if p1 is within the polygon using the within function
p1_within = p1.within(poly)
# Check if p2 is within the polygon
p2_within = p2.within(poly)
# Print the results
print("Is p1 within the polygon?: ", p1_within)
print("Is p2 within the polygon?: ", p2_within)
print(p1)
print(poly.centroid)
# Does polygon contain point 1
print("Does polygon contain p1?: ", poly.contains(p1))
# What about the other point?
print("Does polygon contain p2?: ", poly.contains(p2))
from shapely.geometry import LineString, MultiLineString
# Create two lines
line_a = LineString([(0, 0), (1, 1)])
line_b = LineString([(1, 1), (0, 2)])
line_a.intersects(line_b)
line_a.touches(line_b)
# Create a MultiLineString
multi_line = MultiLineString([line_a, line_b])
multi_line
# Check if line_a touches itself
print("Touches?: ", line_a.touches(line_a))
# However, it does intersect
print("Intersects?: ", line_a.intersects(line_a))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's check if those points are within the polygon
Step2: Okay, so we can see that the first point seems to be inside that polygon and the other one is not.
Step3: It is also possible to do PIP the other way around, i.e. to check if the polygon contains a point
Step4: Thus, both ways give the same result.
Step5: Let's see if they intersect
Step6: Do they also touch each other?
Step7: Indeed, they do and we can see this by plotting the features together
Step8: Thus, the line_b continues from the same node ( (1,1) ) where line_a ends.
|
2,840
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5,
(self.input_nodes, self.hidden_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.output_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, features, targets):
''' Train the network on batch of features and targets.
Arguments
---------
features: 2D array, each row is one data record, each column is a feature
targets: 1D array of target values
'''
n_records = features.shape[0]
delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)
delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)
for X, y in zip(features, targets):
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(X, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
# TODO: Calculate the hidden layer's contribution to the error
error = y - final_outputs
output_error_term = error
hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)
hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs)
# Weight step (input to hidden)
delta_weights_i_h += hidden_error_term * X[:, None]
# Weight step (hidden to output)
delta_weights_h_o += output_error_term * hidden_outputs[:, None]
# TODO: Update the weights - Replace these values with your calculations.
# update hidden-to-output weights with gradient descent step
self.weights_hidden_to_output += self.lr * delta_weights_h_o / len(targets)
# update input-to-hidden weights with gradient descent step
self.weights_input_to_hidden += self.lr * delta_weights_i_h / len(targets)
def run(self, features):
''' Run a forward pass through the network with input features
Arguments
---------
features: 1D array of feature values
'''
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(features, self.weights_input_to_hidden)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output)
final_outputs = final_inputs
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import unittest
inputs = np.array([[0.5, -0.2, 0.1]])
targets = np.array([[0.4]])
test_w_i_h = np.array([[0.1, -0.2],
[0.4, 0.5],
[-0.3, 0.2]])
test_w_h_o = np.array([[0.3],
[-0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328],
[-0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, -0.20185996],
[0.39775194, 0.50074398],
[-0.29887597, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
import sys
### Set the hyperparameters here ###
iterations = 3000
learning_rate = 0.6
hidden_nodes = 17
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for ii in range(iterations):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
network.train(X, y)
# Printing out the training progress
train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
sys.stdout.flush()
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5, ymin=0)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features).T*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.loc[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
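# A small follow-up sketch: quantify the fit on the held-out test period with the MSE helper
# defined above (in scaled units), rather than judging by the plot alone.
test_loss = MSE(network.run(test_features).T, test_targets['cnt'].values)
print("Test loss: {:.3f}".format(test_loss))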
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Unit tests
Step9: Training the network
Step10: Check out your predictions
|
2,841
|
<ASSISTANT_TASK:>
Python Code:
!pip install Theano
!pip install lasagne
import numpy as np
def sum_squares(N):
    return sum(x ** 2 for x in range(N))  # sum of squares of numbers from 0 to N
%%time
sum_squares(10**8)
import theano
import theano.tensor as T
#the future parameter of the function
N = T.scalar("a dimension",dtype='int32')
#the recipe for computing the sum of squares
result = (T.arange(N)**2).sum()
#compile the "sum of squares of numbers from 0 to N" function
sum_function = theano.function(inputs = [N],outputs=result)
%%time
sum_function(10**8)
#inputs
example_input_integer = T.scalar("example input - a single number",dtype='float32')
example_input_tensor = T.tensor4("example input - a four-dimensional tensor")
#don't worry, we won't need the tensor
input_vector = T.vector("input - a vector of integers", dtype='int32')
#transformations
#element-wise multiplication
double_the_vector = input_vector*2
#element-wise cosine
elementwise_cosine = T.cos(input_vector)
#difference between the square of each element and the element itself
vector_squares = input_vector**2 - input_vector
double_the_vector
#now on your own:
#create 2 vectors of float32 numbers
my_vector = T.vector("first vector", dtype='float32')
my_vector2 = T.vector("second vector", dtype='float32')
#write a transformation that computes
#(vector 1)*(vector 2) / (sin(vector 1) + 1)
my_transformation = my_vector * my_vector2 / (T.sin(my_vector) + 1)
#то, что получилась не чиселка - это нормально
inputs = [my_vector, my_vector2]  # what the function depends on
outputs = my_transformation  # what the function computes (can be several outputs as a list, or a single transformation)
# можно скомпилировать написанные нами преобразования как функцию
my_function = theano.function(
inputs,outputs,
allow_input_downcast=True #автоматически прводить типы (необязательно)
)
#можно вызвать вот-так:
print ("using python lists:")
print (my_function([1,2,3],[4,5,6]))
print()
#or like this.
#by the way, the float type here is cast to the type of the second vector
print ("using numpy arrays:")
print (my_function(np.arange(10),
np.linspace(5,6,10,dtype='float')))
#a small dictionary of values for the inputs
my_function_inputs = {
my_vector:[1,2,3],
my_vector2:[4,5,6]
}
#evaluate without compilation
#if we didn't mix anything up,
#we should get exactly the same result as before
print (my_transformation.eval(my_function_inputs))
#transformations can also be evaluated on the fly
print ("sum of 2 vectors", (my_vector + my_vector2).eval(my_function_inputs))
#!IMPORTANT! if a transformation depends only on some of the variables,
#the rest don't need to be supplied
print ("shape of the first vector", my_vector.shape.eval({
my_vector:[1,2,3]
}))
# Task 1 - write and compile a theano function that computes the mean squared error of two input vectors
# It should return a single number - the error itself. Nothing needs to be updated
vector_1 = T.vector("first input vector", dtype='float64')
vector_2 = T.vector("second input vector", dtype='float64')
mse_formula = T.mean((vector_1 - vector_2) ** 2)
compute_mse = theano.function([vector_1, vector_2], mse_formula, allow_input_downcast=True)
#тесты
from sklearn.metrics import mean_squared_error
for n in [1,5,10,10**3]:
elems = [np.arange(n),np.arange(n,0,-1), np.zeros(n),
np.ones(n),np.random.random(n),np.random.randint(100,size=n)]
for el in elems:
for el_2 in elems:
true_mse = np.array(mean_squared_error(el,el_2))
my_mse = compute_mse(el,el_2)
if not np.allclose(true_mse,my_mse):
print ('Wrong result:')
print ('mse(%s,%s)'%(el,el_2))
print ("should be: %f, but your function returned %f"%(true_mse,my_mse))
raise ValueError("Что-то не так")
print ("All tests passed")
#let's create a shared variable
shared_vector_1 = theano.shared(np.ones(10,dtype='float64'))
#get the (numerical) value of the variable
print ("initial value",shared_vector_1.get_value())
#set a new value
shared_vector_1.set_value( np.arange(5) )
#check the value
print ("new value", shared_vector_1.get_value())
#Note that this used to be a vector of 10 elements and is now a vector of 5.
#As long as the graph remains computable after this, it works.
#write a recipe (transformation) that computes the (element-wise) product of shared_vector_1 and input_scalar
#compile it into a function of input_scalar
input_scalar = T.scalar('coefficient',dtype='float32')
scalar_times_shared = shared_vector_1 * input_scalar
shared_times_n = theano.function([input_scalar], scalar_times_shared)
print ("shared:", shared_vector_1.get_value())
print ("shared_times_n(5)",shared_times_n(5))
print ("shared_times_n(-0.5)",shared_times_n(-0.5))
#поменяем значение shared_vector_1
shared_vector_1.set_value([-1,0,1])
print ("shared:", shared_vector_1.get_value())
print ("shared_times_n(5)",shared_times_n(5))
print ("shared_times_n(-0.5)",shared_times_n(-0.5))
my_scalar = T.scalar(name='input',dtype='float64')
scalar_squared = T.sum(my_scalar**2)
#derivative of scalar_squared with respect to my_scalar
derivative = T.grad(scalar_squared,my_scalar)
fun = theano.function([my_scalar],scalar_squared)
grad = theano.function([my_scalar],derivative)
import matplotlib.pyplot as plt
%matplotlib inline
x = np.linspace(-3,3)
x_squared = list(map(fun,x))
x_squared_der = list(map(grad,x))
plt.plot(x, x_squared,label="x^2")
plt.plot(x, x_squared_der, label="derivative")
plt.legend()
my_vector = T.vector('float64')
#compute the derivatives of this function with respect to my_scalar and my_vector
#warning! Don't try to find a physical meaning in this function
weird_psychotic_function = ((my_vector+my_scalar)**(1+T.var(my_vector)) +1./T.arcsinh(my_scalar)).mean()/(my_scalar**2 +1) + 0.01*T.sin(2*my_scalar**1.5)*(T.sum(my_vector)* my_scalar**2)*T.exp((my_scalar-4)**2)/(1+T.exp((my_scalar-4)**2))*(1.-(T.exp(-(my_scalar-4)**2))/(1+T.exp(-(my_scalar-4)**2)))**2
der_by_scalar, der_by_vector = T.grad(weird_psychotic_function, [my_scalar, my_vector])  # gradient of the function above w.r.t. the scalar and the vector (a list may be given)
compute_weird_function = theano.function([my_scalar,my_vector],weird_psychotic_function)
compute_der_by_scalar = theano.function([my_scalar,my_vector],der_by_scalar)
#plot of the function and your derivative
vector_0 = [1,2,3]
scalar_space = np.linspace(0,7)
y = [compute_weird_function(x,vector_0) for x in scalar_space]
plt.plot(scalar_space,y,label='function')
y_der_by_scalar = [compute_der_by_scalar(x,vector_0) for x in scalar_space]
plt.plot(scalar_space,y_der_by_scalar,label='derivative')
plt.grid();plt.legend()
#multiply the shared vector by a number and save the new value back into that shared vector
inputs = [input_scalar]
outputs = [scalar_times_shared] #return the vector multiplied by the number
my_updates = {
shared_vector_1:scalar_times_shared #and write this same result into shared_vector_1
}
compute_and_save = theano.function(inputs, outputs, updates=my_updates)
shared_vector_1.set_value(np.arange(5))
#initial value of shared_vector_1
print ("initial shared value:" ,shared_vector_1.get_value())
# now evaluate the function (the value of shared_vector_1 changes as a side effect)
print ("compute_and_save(2) returns",compute_and_save(2))
#check what is now in shared_vector_1
print ("new shared value:" ,shared_vector_1.get_value())
from sklearn.datasets import load_digits
mnist = load_digits(2)
X,y = mnist.data, mnist.target
print ("y [форма - %s]:"%(str(y.shape)),y[:10])
print ("X [форма - %s]:"%(str(X.shape)))
print (X[:3])
# variables and inputs (one reference solution; the learning rate of 0.01 is a choice, not prescribed)
shared_weights = theano.shared(np.zeros(X.shape[1]), name="weights")
input_X = T.matrix("X")
input_y = T.vector("y")
predicted_y = T.nnet.sigmoid(T.dot(input_X, shared_weights))  # logistic-regression prediction on input_X (class probability)
loss = -T.mean(input_y * T.log(predicted_y) + (1 - input_y) * T.log(1 - predicted_y))  # logistic loss (a number - mean over the sample)
grad = T.grad(loss, shared_weights)  # gradient of the loss w.r.t. the model weights
updates = {
shared_weights: shared_weights - 0.01 * grad  # new weight values after a gradient-descent step
}
train_function = theano.function([input_X, input_y], loss, updates=updates, allow_input_downcast=True)  # given X and y, returns the loss and updates the weights
predict_function = theano.function([input_X], predicted_y, allow_input_downcast=True)  # given X, computes the prediction for y
from sklearn.cross_validation import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y)
from sklearn.metrics import roc_auc_score
for i in range(5):
loss_i = train_function(X_train,y_train)
print ("loss at iter %i:%.4f"%(i,loss_i))
print ("train auc:",roc_auc_score(y_train,predict_function(X_train)))
print ("test auc:",roc_auc_score(y_test,predict_function(X_test)))
print ("resulting weights:")
plt.imshow(shared_weights.get_value().reshape(8,-1))
plt.colorbar()
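# A small optional check on top of ROC AUC (a sketch): threshold the predicted probabilities
# at 0.5 and report plain accuracy with scikit-learn.
from sklearn.metrics import accuracy_score
print("test accuracy:", accuracy_score(y_test, predict_function(X_test) > 0.5))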
from mnist import load_dataset
X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
print (X_train.shape,y_train.shape)
plt.imshow(X_train[0,0])
import lasagne
input_X = T.tensor4("X")
#размерность входа (None означает "может изменяться")
input_shape = [None,1,28,28]
target_y = T.vector("target Y integer",dtype='int32')
#входной слой (вспомогательный)
input_layer = lasagne.layers.InputLayer(shape = input_shape,input_var=input_X)
#полносвязный слой, который принимает на вход input layer и имеет 100 нейронов.
# нелинейная функция - сигмоида как в логистической регрессии
# слоям тоже можно давать имена, но это необязательно
dense_1 = lasagne.layers.DenseLayer(input_layer,num_units=50,
nonlinearity = lasagne.nonlinearities.sigmoid,
name = "hidden_dense_layer")
#ВЫХОДНОЙ полносвязный слой, который принимает на вход dense_1 и имеет 10 нейронов -по нейрону на цифру
#нелинейность - softmax - чтобы вероятности всех цифр давали в сумме 1
dense_output = lasagne.layers.DenseLayer(dense_1,num_units = 10,
nonlinearity = lasagne.nonlinearities.softmax,
name='output')
#предсказание нейронки (theano-преобразование)
y_predicted = lasagne.layers.get_output(dense_output)
#все веса нейронки (shared-переменные)
all_weights = lasagne.layers.get_all_params(dense_output)
print (all_weights)
#функция ошибки - средняя кроссэнтропия
loss = lasagne.objectives.categorical_crossentropy(y_predicted,target_y).mean()
accuracy = lasagne.objectives.categorical_accuracy(y_predicted,target_y).mean()
#сразу посчитать словарь обновлённых значений с шагом по градиенту, как раньше
updates_sgd = lasagne.updates.rmsprop(loss, all_weights,learning_rate=0.01)
#функция, которая обучает сеть на 1 шаг и возвращащет значение функции потерь и точности
train_fun = theano.function([input_X,target_y],[loss,accuracy],updates= updates_sgd)
#функция, которая считает точность
accuracy_fun = theano.function([input_X,target_y],accuracy)
# helper function that returns a list of mini-batches for training the network
# inputs:
# X - a tensor of images of size (many, 1, 28, 28), for example - X_train
# y - a vector of numbers - the answers for every image in X; for example - Y_train
# batch_size - a single number - the desired group size
# what needs to be done:
# 1) shuffle the data
#    - it is important to shuffle X and y the same way so that each image keeps its answer
# 2) split the data into groups so that every group holds batch_size images and answers
#    - if the number of images is not divisible by batch_size, one group may be returned with a different size
# 3) return a list (or an iterator) of pairs:
#    - (group of images, answers from y for that group)
# One straightforward implementation (a generator via yield, as the hint below suggests):
def iterate_minibatches(X, y, batchsize):
    indices = np.random.permutation(len(X))
    for start in range(0, len(X), batchsize):
        batch_idx = indices[start:start + batchsize]
        yield X[batch_idx], y[batch_idx]
# Stuck and not sure what is being asked?
# you can look for a similar function in this example
# https://github.com/Lasagne/Lasagne/blob/master/examples/mnist.py
import time
num_epochs = 100 #number of passes over the data
batch_size = 50 #mini-batch size
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train,batch_size):
inputs, targets = batch
train_err_batch, train_acc_batch= train_fun(inputs, targets)
train_err += train_err_batch
train_acc += train_acc_batch
train_batches += 1
# And a full pass over the validation data:
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, batch_size):
inputs, targets = batch
val_acc += accuracy_fun(inputs, targets)
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss (in-iteration):\t\t{:.6f}".format(train_err / train_batches))
print(" train accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500):
inputs, targets = batch
acc = accuracy_fun(inputs, targets)
test_acc += acc
test_batches += 1
print("Final results:")
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
if test_acc / test_batches * 100 > 99:
print ("Achievement unlocked: level 80 wizard")
else:
print ("More magic needed!")
from mnist import load_dataset
X_train,y_train,X_val,y_val,X_test,y_test = load_dataset()
print (X_train.shape,y_train.shape)
import lasagne
input_X = T.tensor4("X")
#размерность входа (None означает "может изменяться")
input_shape = [None,1,28,28]
target_y = T.vector("target Y integer",dtype='int32')
#входной слой (вспомогательный)
input_layer = lasagne.layers.InputLayer(shape = input_shape,input_var=input_X)
<моя архитектура>
#ВЫХОДНОЙ полносвязный слой, который принимает на вход dense_1 и имеет 10 нейронов -по нейрону на цифру
#нелинейность - softmax - чтобы вероятности всех цифр давали в сумме 1
dense_output = lasagne.layers.DenseLayer(<предвыходной_слой>,num_units = 10,
nonlinearity = lasagne.nonlinearities.softmax,
name='output')
#предсказание нейронки (theano-преобразование)
y_predicted = lasagne.layers.get_output(dense_output)
#все веса нейронки (shared-переменные)
all_weights = lasagne.layers.get_all_params(dense_output)
print (all_weights)
#функция ошибки - средняя кроссэнтропия
loss = lasagne.objectives.categorical_crossentropy(y_predicted,target_y).mean()
#<возможно добавить регуляризатор>
accuracy = lasagne.objectives.categorical_accuracy(y_predicted,target_y).mean()
#сразу посчитать словарь обновлённых значений с шагом по градиенту, как раньше
updates_sgd = <поиграться с методами>
#функция, которая обучает сеть на 1 шаг и возвращащет значение функции потерь и точности
train_fun = theano.function([input_X,target_y],[loss,accuracy],updates= updates_sgd)
#функция, которая считает точность
accuracy_fun = theano.function([input_X,target_y],accuracy)
#итерации обучения
num_epochs = 100 #number of passes over the data (value is a placeholder choice)
batch_size = 50 #number of images per mini-batch (value is a placeholder choice)
for epoch in range(num_epochs):
# In each epoch, we do a full pass over the training data:
train_err = 0
train_acc = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train,batch_size):
inputs, targets = batch
train_err_batch, train_acc_batch= train_fun(inputs, targets)
train_err += train_err_batch
train_acc += train_acc_batch
train_batches += 1
# And a full pass over the validation data:
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, batch_size):
inputs, targets = batch
val_acc += accuracy_fun(inputs, targets)
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss (in-iteration):\t\t{:.6f}".format(train_err / train_batches))
print(" train accuracy:\t\t{:.2f} %".format(
train_acc / train_batches * 100))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500):
inputs, targets = batch
acc = accuracy_fun(inputs, targets)
test_acc += acc
test_batches += 1
print("Final results:")
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: theano teaser
Step2: How does it work?
Step3: Compilation
Step4: A debugging hint
Step5: For debugging, it helps to scale the problem down. If you planned to feed the input a vector of 10^9 examples, feed 10~100 instead.
Step6: Shared variables
Step7: Now try it yourself
Step8: T.grad, the best part
Step9: now try it yourself
Step10: The final touch - Updates
Step11: Logistic regression
Step12: lasagne
Step13: This is how the network architecture is defined
Step14: after that you could simply
Step15: That's it, let's go train it
Step16: The training process
Step17: The neural network of your dreams
|
2,842
|
<ASSISTANT_TASK:>
Python Code:
import os
import csv
import cv2
import matplotlib.pyplot as plt
import random
import pprint
import numpy as np
from numpy import expand_dims
%tensorflow_version 1.x
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from keras import backend as K
from keras.models import Model, Sequential
from keras.models import load_model
from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from google.colab.patches import cv2_imshow
import sklearn
from sklearn.model_selection import train_test_split
import pandas as pd
print("Tensorflow Version:",tf.__version__)
print("Tensorflow Keras Version:",tf.keras.__version__)
print("Eager mode: ", tf.executing_eagerly())
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
# Raise SystemError('GPU device not found')
print('GPU device not found')
else:
print('Found GPU at: {}'.format(device_name))
# GPU count and name
!nvidia-smi -L
# Download the dataset
!curl -O https://selbystorage.s3-us-west-2.amazonaws.com/research/office_3/office_3.tar.gz
data_set = 'office_3'
tar_file = data_set + '.tar.gz'
# Unzip the .tgz file
# -x for extract
# -v for verbose
# -z for gnuzip
# -f for file (should come at last just before file name)
# -C to extract the zipped contents to a different directory
!tar -xvzf $tar_file
# Define path to csv file
csv_path = data_set + '/interpolated.csv'
# Load the CSV file into a pandas dataframe
df = pd.read_csv(csv_path, sep=",")
# Print the dimensions
print("Dataset Dimensions:")
print(df.shape)
# Print the first 5 lines of the dataframe for review
print("\nDataset Summary:")
df.head(5)
# Remove 'index' and 'frame_id' columns
df.drop(['index','frame_id'],axis=1,inplace=True)
# Verify new dataframe dimensions
print("Dataset Dimensions:")
print(df.shape)
# Print the first 5 lines of the new dataframe for review
print("\nDataset Summary:")
df.head(5)
# Detect Missing Values
print("Any Missing Values?: {}".format(df.isnull().values.any()))
# Total Sum
print("\nTotal Number of Missing Values: {}".format(df.isnull().sum().sum()))
# Sum Per Column
print("\nTotal Number of Missing Values per Column:")
print(df.isnull().sum())
# Determine if any throttle values are zeroes
print("Any 0 throttle values?: {}".format(df['speed'].eq(0).any()))
# Determine number of 0 throttle values:
print("\nNumber of 0 throttle values: {}".format(df['speed'].eq(0).sum()))
# Remove rows with 0 throttle values
if df['speed'].eq(0).any():
df = df.query('speed != 0')
# Reset the index
df.reset_index(inplace=True,drop=True)
# Verify new dataframe dimensions
print("\nNew Dataset Dimensions:")
print(df.shape)
df.head(5)
# Steering Command Statistics
print("\nSteering Command Statistics:")
print(df['angle'].describe())
print("\nThrottle Command Statistics:")
# Throttle Command Statistics
print(df['speed'].describe())
#@title Select the number of histogram bins
num_bins = 25 #@param {type:"slider", min:5, max:50, step:1}
hist, bins = np.histogram(df['angle'], num_bins)
center = (bins[:-1]+ bins[1:]) * 0.5
plt.bar(center, hist, width=0.05)
#plt.plot((np.min(df['angle']), np.max(df['angle'])), (samples_per_bin, samples_per_bin))
# Normalize the histogram (150-300 for RBG)
#@title Normalize the Histogram { run: "auto" }
hist = False #@param {type:"boolean"}
remove_list = []
samples_per_bin = 150
if hist:
for j in range(num_bins):
list_ = []
for i in range(len(df['angle'])):
if df.loc[i,'angle'] >= bins[j] and df.loc[i,'angle'] <= bins[j+1]:
list_.append(i)
random.shuffle(list_)
list_ = list_[samples_per_bin:]
remove_list.extend(list_)
print('removed:', len(remove_list))
df.drop(df.index[remove_list], inplace=True)
df.reset_index(inplace=True)
df.drop(['index'],axis=1,inplace=True)
print('remaining:', len(df))
hist, _ = np.histogram(df['angle'], (num_bins))
plt.bar(center, hist, width=0.05)
plt.plot((np.min(df['angle']), np.max(df['angle'])), (samples_per_bin, samples_per_bin))
# View a Single Image
index = random.randint(0,df.shape[0]-1)
img_name = data_set + '/' + df.loc[index,'filename']
angle = df.loc[index,'angle']
center_image = cv2.imread(img_name)
center_image_mod = cv2.resize(center_image, (320,180))
center_image_mod = cv2.cvtColor(center_image_mod,cv2.COLOR_RGB2BGR)
# Crop the image
height_min = 100
height_max = 170
width_min = 120
width_max = 200
bottom_crop = center_image_mod.shape[0] - height_max
right_crop = center_image_mod.shape[1] - width_max
crop_img = center_image_mod[height_min:height_max, width_min:width_max]
plt.subplot(2,1,1)
plt.imshow(center_image_mod)
plt.grid(False)
plt.xlabel('angle: {:.2}'.format(angle))
plt.show()
plt.subplot(2,1,2)
plt.imshow(crop_img)
plt.grid(False)
plt.xlabel('angle: {:.2}'.format(angle))
plt.show()
# Number of Images to Display
num_images = 4
# Display the images
i = 0
for i in range (i,num_images):
index = random.randint(0,df.shape[0]-1)
image_path = df.loc[index,'filename']
angle = df.loc[index,'angle']
img_name = data_set + '/' + image_path
image = cv2.imread(img_name)
image = cv2.resize(image, (320,180))
image = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
plt.subplot(num_images/2,num_images/2,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap=plt.cm.binary)
plt.xlabel('angle: {:.3}'.format(angle))
i += 1
# Create image data augmentation generator and choose augmentation types
datagen = ImageDataGenerator(
#rotation_range=20,
zoom_range=0.15,
#width_shift_range=0.1,
#height_shift_range=0.2,
#shear_range=10,
brightness_range=[0.5,1.0],
#horizontal_flip=True,
#vertical_flip=True,
#channel_shift_range=100.0,
fill_mode="reflect")
# load the image
index = random.randint(0,df.shape[0]-1)
img_name = data_set + '/' + df.loc[index,'filename']
original_image = cv2.imread(img_name)
original_image = cv2.cvtColor(original_image,cv2.COLOR_RGB2BGR)
original_image = cv2.resize(original_image, (320,180))
label = df.loc[index,'angle']
# convert to numpy array
data = img_to_array(original_image)
# expand dimension to one sample
test = expand_dims(data, 0)
# prepare iterator
it = datagen.flow(test, batch_size=1)
# generate batch of images
batch = it.next()
# convert to unsigned integers for viewing
image_aug = batch[0].astype('uint8')
print("Augmenting a Single Image: \n")
plt.subplot(2,1,1)
plt.imshow(original_image)
plt.grid(False)
plt.xlabel('angle: {:.2}'.format(label))
plt.show()
plt.subplot(2,1,2)
plt.imshow(image_aug)
plt.grid(False)
plt.xlabel('angle: {:.2}'.format(label))
plt.show()
print("Multiple Augmentations: \n")
# generate samples and plot
for i in range(0,num_images):
# define subplot
plt.subplot(num_images/2,num_images/2,i+1)
# generate batch of images
batch = it.next()
# convert to unsigned integers for viewing
image = batch[0].astype('uint8')
# plot raw pixel data
plt.imshow(image)
# show the figure
plt.show()
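# One more augmentation worth illustrating (a sketch): for steering data a horizontal flip
# must also negate the label, which the training generator below applies at random.
flipped_image = np.fliplr(original_image)
plt.imshow(flipped_image)
plt.grid(False)
plt.xlabel('angle: {:.2}'.format(-1 * label))
plt.show()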
def generator(samples, batch_size=32, aug=0):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset + batch_size]
#print(batch_samples)
images = []
angles = []
for batch_sample in batch_samples:
if batch_sample[5] != "filename":
name = data_set + '/' + batch_sample[3]
center_image = cv2.imread(name)
center_image = cv2.cvtColor(center_image,cv2.COLOR_RGB2BGR)
center_image = cv2.resize(
center_image,
(320, 180)) #resize from 720x1280 to 180x320
angle = float(batch_sample[4])
if not aug:
images.append(center_image)
angles.append(angle)
else:
data = img_to_array(center_image)
sample = expand_dims(data, 0)
it = datagen.flow(sample, batch_size=1)
batch = it.next()
image_aug = batch[0].astype('uint8')
if random.random() < .5:
image_aug = np.fliplr(image_aug)
angle = -1 * angle
images.append(image_aug)
angles.append(angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
samples = []
samples = df.values.tolist()
sklearn.utils.shuffle(samples)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print("Number of traing samples: ", len(train_samples))
print("Number of validation samples: ", len(validation_samples))
batch_size_value = 32
img_aug = 1
train_generator = generator(train_samples, batch_size=batch_size_value, aug=img_aug)
validation_generator = generator(
validation_samples, batch_size=batch_size_value, aug=0)
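# A quick sanity check before a long training run (a sketch, not part of the original pipeline):
# pull a single batch from the training generator and confirm the array shapes.
sample_X, sample_y = next(train_generator)
print("batch images:", sample_X.shape, "batch angles:", sample_y.shape)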
# Initialize the model
model = Sequential()
# trim image to only see section with road
# (top_crop, bottom_crop), (left_crop, right_crop)
model.add(Cropping2D(cropping=((height_min,bottom_crop), (width_min,right_crop)), input_shape=(180,320,3)))
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# Nvidia model
model.add(Convolution2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2)))
model.add(Convolution2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2)))
model.add(Convolution2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2)))
model.add(SpatialDropout2D(.5))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1)))
model.add(Flatten())
model.add(Dense(1164))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(1))
model.compile(loss='mse', optimizer=Adam(lr=0.001), metrics=['mse','mae','mape','cosine'])
# Print model sumamry
model.summary()
# checkpoint
model_path = './model'
!if [ -d $model_path ]; then echo 'Directory Exists'; else mkdir $model_path; fi
filepath = model_path + "/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
# The patience parameter is the amount of epochs to check for improvement
early_stop = EarlyStopping(monitor='val_loss', patience=10)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
# Clear any logs from previous runs
!rm -rf ./Graph/
# Launch Tensorboard
!pip install -U tensorboardcolab
from tensorboardcolab import *
tbc = TensorBoardColab()
# Configure the Tensorboard Callback
tbCallBack = TensorBoard(log_dir='./Graph',
histogram_freq=1,
write_graph=True,
write_grads=True,
write_images=True,
batch_size=batch_size_value,
update_freq='epoch')
load = True #@param {type:"boolean"}
if load:
# Returns a compiled model identical to the previous one
!curl -O https://selbystorage.s3-us-west-2.amazonaws.com/research/office_3/model_intensity.h5
!mv model_intensity.h5 model/
model_path_full = model_path + '/' + 'model_intensity.h5'
model = load_model(model_path_full)
print("Loaded previous model: {} \n".format(model_path_full))
else:
print("No previous model loaded \n")
# Define step sizes
STEP_SIZE_TRAIN = len(train_samples) / batch_size_value
STEP_SIZE_VALID = len(validation_samples) / batch_size_value
# Define number of epochs
n_epoch = 50
# Define callbacks
# callbacks_list = [TensorBoardColabCallback(tbc)]
# callbacks_list = [TensorBoardColabCallback(tbc), early_stop]
# callbacks_list = [TensorBoardColabCallback(tbc), early_stop, checkpoint]
callbacks_list = [TensorBoardColabCallback(tbc), early_stop, checkpoint, reduce_lr]
# Fit the model
history_object = model.fit_generator(
generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=validation_generator,
validation_steps=STEP_SIZE_VALID,
callbacks=callbacks_list,
use_multiprocessing=True,
epochs=n_epoch)
# Save model
model_path_full = model_path + '/'
model.save(model_path_full + 'model.h5')
with open(model_path_full + 'model.json', 'w') as output_json:
output_json.write(model.to_json())
# Plot the training and validation loss for each epoch
print('Generating loss chart...')
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig(model_path + '/model.png')
# Done
print('Done.')
scores = model.evaluate_generator(validation_generator, STEP_SIZE_VALID, use_multiprocessing=True)
metrics_names = model.metrics_names
for i in range(len(model.metrics_names)):
print("Metric: {} - {}".format(metrics_names[i],scores[i]))
# Define image loading function
def load_images(dataframe):
# initialize images array
images = []
for i in dataframe.index.values:
name = data_set + '/' + dataframe.loc[i,'filename']
center_image = cv2.imread(name)
center_image = cv2.resize(center_image, (320,180))
images.append(center_image)
return np.array(images)
# Load images
test_size = 200
df_test = df.sample(frac=1).reset_index(drop=True)
df_test = df_test.head(test_size)
test_images = load_images(df_test)
batch_size = 32
preds = model.predict(test_images, batch_size=batch_size, verbose=1)
#print("Preds: {} \n".format(preds))
testY = df_test.iloc[:,4].values
#print("Labels: {} \n".format(testY))
df_testY = pd.Series(testY)
df_preds = pd.Series(preds.flatten())
# Replace 0 angle values
if df_testY.eq(0).any():
df_testY.replace(0, 0.0001,inplace=True)
# Calculate the difference
diff = preds.flatten() - df_testY
percentDiff = (diff / df_testY) * 100  # divide by the zero-free copy to avoid division by zero
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
# Compute the mean and standard deviation of the difference
print(diff.describe())
# Plot a histogram of the prediction errors
num_bins = 25
hist, bins = np.histogram(diff, num_bins)
center = (bins[:-1]+ bins[1:]) * 0.5
plt.bar(center, hist, width=0.05)
plt.title('Historgram of Predicted Error')
plt.xlabel('Steering Angle')
plt.ylabel('Number of predictions')
plt.xlim(-2.0, 2.0)
plt.plot(np.min(diff), np.max(diff))
# Plot a Scatter Plot of the Error
plt.scatter(testY, preds)
plt.xlabel('True Values ')
plt.ylabel('Predictions ')
plt.axis('equal')
plt.axis('square')
plt.xlim([-1.75,1.75])
plt.ylim([-1.75,1.75])
plt.plot([-1.75, 1.75], [-1.75, 1.75], color='k', linestyle='-', linewidth=.1)
# Plot the image with the actual and predicted steering angle
index = random.randint(0,df_test.shape[0]-1)
img_name = data_set + '/' + df_test.loc[index,'filename']
center_image = cv2.imread(img_name)
center_image = cv2.cvtColor(center_image,cv2.COLOR_RGB2BGR)
center_image_mod = cv2.resize(center_image, (320,180))
plt.imshow(center_image_mod)
plt.grid(False)
plt.xlabel('Actual: {:.2f} Predicted: {:.2f}'.format(df_test.loc[index,'angle'],float(preds[index])))
plt.show()
model.summary()
# Creating a mapping of layer name ot layer details
# We will create a dictionary layers_info which maps a layer name to its charcteristics
layers_info = {}
for i in model.layers:
layers_info[i.name] = i.get_config()
# Here the layer_weights dictionary will map every layer_name to its corresponding weights
layer_weights = {}
for i in model.layers:
layer_weights[i.name] = i.get_weights()
pprint.pprint(layers_info['conv_5'])
# Visualize the first filter of each convolution layer
layers = model.layers
layer_ids = [2,3,4,6,7]
#plot the filters
fig,ax = plt.subplots(nrows=1,ncols=5)
for i in range(5):
ax[i].imshow(layers[layer_ids[i]].get_weights()[0][:,:,:,0][:,:,0],cmap='gray')
ax[i].set_title('Conv'+str(i+1))
ax[i].set_xticks([])
ax[i].set_yticks([])
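# An optional sketch using the Keras functional API: build a truncated model that outputs the
# activations of the first convolutional layer for one sample image, complementing the raw
# filter weights shown above (layer name 'conv_1' matches the names given in the model).
activation_model = Model(inputs=model.input, outputs=model.get_layer('conv_1').output)
sample = np.expand_dims(test_images[0], axis=0)
activations = activation_model.predict(sample)
print("conv_1 activation shape:", activations.shape)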
!pip install -I scipy==1.2.*
!pip install git+https://github.com/raghakot/keras-vis.git -U
# import specific functions from keras-vis package
from vis.utils import utils
from vis.visualization import visualize_saliency, visualize_cam, overlay
# View a Single Image
index = random.randint(0,df.shape[0]-1)
img_name = data_set + '/' + df.loc[index,'filename']
sample_image = cv2.imread(img_name)
sample_image = cv2.cvtColor(sample_image,cv2.COLOR_RGB2BGR)
sample_image_mod = cv2.resize(sample_image, (320,180))
plt.imshow(sample_image_mod)
layer_idx = utils.find_layer_idx(model, 'conv_5')
grads = visualize_saliency(model,
layer_idx,
filter_indices=None,
seed_input=sample_image_mod,
grad_modifier='absolute',
backprop_modifier='guided')
plt.imshow(grads, alpha = 0.6)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Confirm TensorFlow can see the GPU
Step2: Load the Dataset
Step3: Parse the CSV File
Step4: Clean and Pre-process the Dataset
Step5: Detect Missing Data
Step6: Remove Zero Throttle Values
Step7: View Label Statistics
Step8: View Histogram of Steering Commands
Step9: View a Sample Image
Step10: View Multiple Images
Step11: Split the Dataset
Step12: View Image Augmentation Examples
Step13: Define a Data Generator
Step14: Split the Dataset
Step15: Define Training and Validation Data Generators
Step16: Compile and Train the Model
Step17: Setup Checkpoints
Step18: Setup Early Stopping to Prevent Overfitting
Step19: Reduce Learning Rate When a Metric has Stopped Improving
Step20: Setup Tensorboard
Step21: Load Existing Model
Step22: Train the Model
Step23: Save the Model
Step24: Evaluate the Model
Step25: Print Performance Metrics
Step26: Compute Prediction Statistics
Step27: Plot a Prediction
Step28: Visualize the Network
Step29: Access Individual Layers
Step30: Visualize the Filters
Step31: Visualize the Saliency Map
|
2,843
|
<ASSISTANT_TASK:>
Python Code:
!pip3 install tensorflow_hub
%%bash
pip install --upgrade tensorflow
# Import helpful libraries and setup our project, bucket, and region
import os
import tensorflow as tf
import tensorflow_hub as hub
# PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
# BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
# REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
PROJECT = "qwiklabs-gcp-cbc8684b07fc2dbd" # REPLACE WITH YOUR PROJECT ID
BUCKET = "qwiklabs-gcp-cbc8684b07fc2dbd-bucket" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-east1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.13"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/hybrid_recommendation/preproc; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical set of preprocessed files if you didn't do preprocessing notebook
gsutil -m cp -R gs://cloud-training-demos/courses/machine_learning/deepdive/10_recommendation/hybrid_recommendation gs://${BUCKET}
fi
from tensorflow.python.lib.io import file_io
# Get number of content ids from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/content_id_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_content_ids = int([x for x in ifp][0])
print("number_of_content_ids = {}".format(number_of_content_ids))
# Get number of categories from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/category_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_categories = int([x for x in ifp][0])
print("number_of_categories = {}".format(number_of_categories))
# Get number of authors from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/author_vocab_count.txt*".format(BUCKET))[0], mode = 'r') as ifp:
number_of_authors = int([x for x in ifp][0])
print("number_of_authors = {}".format(number_of_authors))
# Get mean months since epoch from text file in Google Cloud Storage
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocab_counts/months_since_epoch_mean.txt*".format(BUCKET))[0], mode = 'r') as ifp:
mean_months_since_epoch = float([x for x in ifp][0])
print("mean_months_since_epoch = {}".format(mean_months_since_epoch))
# Determine CSV and label columns
NON_FACTOR_COLUMNS = "next_content_id,visitor_id,content_id,category,title,author,months_since_epoch".split(',')
FACTOR_COLUMNS = ["user_factor_{}".format(i) for i in range(10)] + ["item_factor_{}".format(i) for i in range(10)]
CSV_COLUMNS = NON_FACTOR_COLUMNS + FACTOR_COLUMNS
LABEL_COLUMN = "next_content_id"
# Set default values for each CSV column
NON_FACTOR_DEFAULTS = [["Unknown"],["Unknown"],["Unknown"],["Unknown"],["Unknown"],["Unknown"],[mean_months_since_epoch]]
FACTOR_DEFAULTS = [[0.0] for i in range(10)] + [[0.0] for i in range(10)] # user and item
DEFAULTS = NON_FACTOR_DEFAULTS + FACTOR_DEFAULTS
# Create input function for train and eval
def read_dataset(filename, mode, batch_size = 512):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(records = value_column, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename = filename)
# Create dataset from file list
dataset = tf.data.TextLineDataset(filenames = file_list).map(map_func = decode_csv)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(count = num_epochs).batch(batch_size = batch_size)
return dataset.make_one_shot_iterator().get_next()
return _input_fn
# Create feature columns to be used in model
def create_feature_columns(args):
# Create content_id feature column
content_id_column = tf.feature_column.categorical_column_with_hash_bucket(
key = "content_id",
hash_bucket_size = number_of_content_ids)
# Embed content id into a lower dimensional representation
embedded_content_column = tf.feature_column.embedding_column(
categorical_column = content_id_column,
dimension = args["content_id_embedding_dimensions"])
# Create category feature column
categorical_category_column = tf.feature_column.categorical_column_with_vocabulary_file(
key = "category",
vocabulary_file = tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/category_vocab.txt*".format(args["bucket"]))[0],
num_oov_buckets = 1)
# Convert categorical category column into indicator column so that it can be used in a DNN
indicator_category_column = tf.feature_column.indicator_column(categorical_column = categorical_category_column)
# Create title feature column using TF Hub
embedded_title_column = hub.text_embedding_column(
key = "title",
module_spec = "https://tfhub.dev/google/nnlm-de-dim50-with-normalization/1",
trainable = False)
# Create author feature column
author_column = tf.feature_column.categorical_column_with_hash_bucket(
key = "author",
hash_bucket_size = number_of_authors + 1)
# Embed author into a lower dimensional representation
embedded_author_column = tf.feature_column.embedding_column(
categorical_column = author_column,
dimension = args["author_embedding_dimensions"])
# Create months since epoch boundaries list for our binning
months_since_epoch_boundaries = list(range(400, 700, 20))
# Create months_since_epoch feature column using raw data
months_since_epoch_column = tf.feature_column.numeric_column(
key = "months_since_epoch")
# Create bucketized months_since_epoch feature column using our boundaries
months_since_epoch_bucketized = tf.feature_column.bucketized_column(
source_column = months_since_epoch_column,
boundaries = months_since_epoch_boundaries)
# Cross our categorical category column and bucketized months since epoch column
crossed_months_since_category_column = tf.feature_column.crossed_column(
keys = [categorical_category_column, months_since_epoch_bucketized],
hash_bucket_size = len(months_since_epoch_boundaries) * (number_of_categories + 1))
# Convert crossed categorical category and bucketized months since epoch column into indicator column so that it can be used in a DNN
indicator_crossed_months_since_category_column = tf.feature_column.indicator_column(
categorical_column = crossed_months_since_category_column)
# Create user and item factor feature columns from our trained WALS model
user_factors = [tf.feature_column.numeric_column(key = "user_factor_" + str(i)) for i in range(10)]
item_factors = [tf.feature_column.numeric_column(key = "item_factor_" + str(i)) for i in range(10)]
# Create list of feature columns
feature_columns = [embedded_content_column,
embedded_author_column,
indicator_category_column,
embedded_title_column,
indicator_crossed_months_since_category_column] + user_factors + item_factors
return feature_columns
# Create custom model function for our custom estimator
def model_fn(features, labels, mode, params):
# Create neural network input layer using our feature columns defined above
net = tf.feature_column.input_layer(features = features, feature_columns = params["feature_columns"])
# Create hidden layers by looping through hidden unit list
for units in params["hidden_units"]:
net = tf.layers.dense(inputs = net, units = units, activation = tf.nn.relu)
# Compute logits (1 per class) using the output of our last hidden layer
logits = tf.layers.dense(inputs = net, units = params["n_classes"], activation = None)
# Find the predicted class indices based on the highest logit (which will result in the highest probability)
predicted_classes = tf.argmax(input = logits, axis = 1)
# Read in the content id vocabulary so we can tie the predicted class indices to their respective content ids
with file_io.FileIO(tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/content_id_vocab.txt*".format(BUCKET))[0], mode = "r") as ifp:
content_id_names = tf.constant(value = [x.rstrip() for x in ifp])
# Gather predicted class names based predicted class indices
predicted_class_names = tf.gather(params = content_id_names, indices = predicted_classes)
# If the mode is prediction
if mode == tf.estimator.ModeKeys.PREDICT:
# Create predictions dict
predictions_dict = {
"class_ids": tf.expand_dims(input = predicted_classes, axis = -1),
"class_names" : tf.expand_dims(input = predicted_class_names, axis = -1),
"probabilities": tf.nn.softmax(logits = logits),
"logits": logits
}
# Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
return tf.estimator.EstimatorSpec( # return early since we're done with what we need for prediction mode
mode = mode,
predictions = predictions_dict,
loss = None,
train_op = None,
eval_metric_ops = None,
export_outputs = export_outputs)
# Continue on with training and evaluation modes
# Create lookup table using our content id vocabulary
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file = tf.gfile.Glob(filename = "gs://{}/hybrid_recommendation/preproc/vocabs/content_id_vocab.txt*".format(BUCKET))[0])
# Look up labels from vocabulary table
labels = table.lookup(keys = labels)
# Compute loss using sparse softmax cross entropy since this is classification and our labels (content id indices) and probabilities are mutually exclusive
loss = tf.losses.sparse_softmax_cross_entropy(labels = labels, logits = logits)
# If the mode is evaluation
if mode == tf.estimator.ModeKeys.EVAL:
# Compute evaluation metrics of total accuracy and the accuracy of the top k classes
accuracy = tf.metrics.accuracy(labels = labels, predictions = predicted_classes, name = "acc_op")
top_k_accuracy = tf.metrics.mean(values = tf.nn.in_top_k(predictions = logits, targets = labels, k = params["top_k"]))
map_at_k = tf.metrics.average_precision_at_k(labels = labels, predictions = logits, k = params["top_k"])
# Put eval metrics into a dictionary
eval_metric_ops = {
"accuracy": accuracy,
"top_k_accuracy": top_k_accuracy,
"map_at_k": map_at_k}
# Create scalar summaries to see in TensorBoard
tf.summary.scalar(name = "accuracy", tensor = accuracy[1])
tf.summary.scalar(name = "top_k_accuracy", tensor = top_k_accuracy[1])
tf.summary.scalar(name = "map_at_k", tensor = map_at_k[1])
return tf.estimator.EstimatorSpec( # return early since we're done with what we need for evaluation mode
mode = mode,
predictions = None,
loss = loss,
train_op = None,
eval_metric_ops = eval_metric_ops,
export_outputs = None)
# Continue on with training mode
# If the mode is training
assert mode == tf.estimator.ModeKeys.TRAIN
# Create a custom optimizer
optimizer = tf.train.AdagradOptimizer(learning_rate = params["learning_rate"])
# Create train op
train_op = optimizer.minimize(loss = loss, global_step = tf.train.get_global_step())
return tf.estimator.EstimatorSpec( # final return since we're done with what we need for training mode
mode = mode,
predictions = None,
loss = loss,
train_op = train_op,
eval_metric_ops = None,
export_outputs = None)
# Create serving input function
def serving_input_fn():
feature_placeholders = {
colname : tf.placeholder(dtype = tf.string, shape = [None]) \
for colname in NON_FACTOR_COLUMNS[1:-1]
}
feature_placeholders["months_since_epoch"] = tf.placeholder(dtype = tf.float32, shape = [None])
for colname in FACTOR_COLUMNS:
feature_placeholders[colname] = tf.placeholder(dtype = tf.float32, shape = [None])
features = {
key: tf.expand_dims(tensor, -1) \
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
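# A hedged illustration (not part of the original notebook) of what a single
# prediction instance for this serving signature could look like. Every value
# below is a made-up placeholder, not real data.
example_instance = {
    "visitor_id": "1000000000000000000-1",
    "content_id": "299913368",
    "category": "News",
    "title": "Example article title",
    "author": "Some Author",
    "months_since_epoch": 574.0,
}
example_instance.update({"user_factor_{}".format(i): 0.0 for i in range(10)})
example_instance.update({"item_factor_{}".format(i): 0.0 for i in range(10)})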
# Create train and evaluate loop to combine all of the pieces together.
tf.logging.set_verbosity(tf.logging.INFO)
def train_and_evaluate(args):
estimator = tf.estimator.Estimator(
model_fn = model_fn,
model_dir = args["output_dir"],
params = {
"feature_columns": create_feature_columns(args),
"hidden_units": args["hidden_units"],
"n_classes": number_of_content_ids,
"learning_rate": args["learning_rate"],
"top_k": args["top_k"],
"bucket": args["bucket"]
}
)
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset(filename = args["train_data_paths"], mode = tf.estimator.ModeKeys.TRAIN, batch_size = args["batch_size"]),
max_steps = args["train_steps"])
exporter = tf.estimator.LatestExporter(name = "exporter", serving_input_receiver_fn = serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset(filename = args["eval_data_paths"], mode = tf.estimator.ModeKeys.EVAL, batch_size = args["batch_size"]),
steps = None,
start_delay_secs = args["start_delay_secs"],
throttle_secs = args["throttle_secs"],
exporters = exporter)
tf.estimator.train_and_evaluate(estimator = estimator, train_spec = train_spec, eval_spec = eval_spec)
# Call train and evaluate loop
import shutil
outdir = "hybrid_recommendation_trained"
shutil.rmtree(path = outdir, ignore_errors = True) # start fresh each time
arguments = {
"bucket": BUCKET,
"train_data_paths": "gs://{}/hybrid_recommendation/preproc/features/train.csv*".format(BUCKET),
"eval_data_paths": "gs://{}/hybrid_recommendation/preproc/features/eval.csv*".format(BUCKET),
"output_dir": outdir,
"batch_size": 128,
"learning_rate": 0.1,
"hidden_units": [256, 128, 64],
"content_id_embedding_dimensions": 10,
"author_embedding_dimensions": 10,
"top_k": 10,
"train_steps": 1000,
"start_delay_secs": 30,
"throttle_secs": 30
}
train_and_evaluate(arguments)
%%writefile requirements.txt
tensorflow_hub
%%bash
echo "bucket=${BUCKET}"
OUTDIR=${PWD}/hybrid_recommendation_trained
rm -rf ${OUTDIR}
export PYTHONPATH=${PYTHONPATH}:${PWD}/hybrid_recommendations_module
python -m trainer.task \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=60
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/small_trained_model
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=30
%%writefile hyperparam.yaml
trainingInput:
hyperparameters:
goal: MAXIMIZE
maxTrials: 5
maxParallelTrials: 1
hyperparameterMetricTag: accuracy
params:
- parameterName: batch_size
type: INTEGER
minValue: 8
maxValue: 64
scaleType: UNIT_LINEAR_SCALE
- parameterName: learning_rate
type: DOUBLE
minValue: 0.01
maxValue: 0.1
scaleType: UNIT_LINEAR_SCALE
- parameterName: hidden_units
type: CATEGORICAL
categoricalValues: ["1024 512 256", "1024 512 128", "1024 256 128", "512 256 128", "1024 512 64", "1024 256 64", "512 256 64", "1024 128 64", "512 128 64", "256 128 64", "1024 512 32", "1024 256 32", "512 256 32", "1024 128 32", "512 128 32", "256 128 32", "1024 64 32", "512 64 32", "256 64 32", "128 64 32"]
- parameterName: content_id_embedding_dimensions
type: INTEGER
minValue: 5
maxValue: 250
scaleType: UNIT_LOG_SCALE
- parameterName: author_embedding_dimensions
type: INTEGER
minValue: 5
maxValue: 30
scaleType: UNIT_LINEAR_SCALE
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/hypertuning
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
--config=hyperparam.yaml \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=1000 \
--start_delay_secs=30 \
--throttle_secs=30
%%bash
OUTDIR=gs://${BUCKET}/hybrid_recommendation/big_trained_model
JOBNAME=hybrid_recommendation_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=$(pwd)/hybrid_recommendations_module/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version=$TFVERSION \
-- \
--bucket=${BUCKET} \
--train_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/train.csv* \
--eval_data_paths=gs://${BUCKET}/hybrid_recommendation/preproc/features/eval.csv* \
--output_dir=${OUTDIR} \
--batch_size=128 \
--learning_rate=0.1 \
--hidden_units="256 128 64" \
--content_id_embedding_dimensions=10 \
--author_embedding_dimensions=10 \
--top_k=10 \
--train_steps=10000 \
--start_delay_secs=30 \
--throttle_secs=30
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now reset the notebook's session kernel! Since we're no longer using Cloud Dataflow, we'll be using the python3 kernel from here on out so don't forget to change the kernel if it's still python2.
Step2: Create hybrid recommendation system model using TensorFlow
Step3: Create input function for training and evaluation to read from our preprocessed CSV files.
Step4: Next, we will create our feature columns using our read in features.
Step5: Now we'll create our model function
Step6: Now create a serving input function
Step7: Now that all of the pieces are assembled let's create and run our train and evaluate loop
Step8: Run train_and_evaluate!
Step9: Run on module locally
Step10: Run on Google Cloud AI Platform
Step11: Let's add some hyperparameter tuning!
Step12: Now that we know the best hyperparameters, run a big training job!
|
2,844
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
pd.set_option('display.max_columns', 500)
df = sns.load_dataset('titanic')
# Write the code to look at the head of the dataframe
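# One possible answer (a hedged sketch, not the only way): show the first rows.
df.head()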
# Create a histogram to examine age distribution of the passengers.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(df['age'], bins = 10, range = (df['age'].min(),df['age'].max()))
plt.title('Age distribution')
plt.xlabel('Age')
plt.ylabel('Count of Passengers')
plt.show()
# What is a factorplot? Check the documentation! Which data are we using? What is the count a count of?
g = sns.factorplot("alive", col="deck", col_wrap=4,
data=df[df.deck.notnull()], kind="count", size=4, aspect=.8)
# Try your own variation of the factorplot above.
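# A hedged sketch of one possible variation; the hue, palette and col_wrap
# choices below are arbitrary illustrations, not the required answer.
g = sns.factorplot("alive", col="deck", col_wrap=2, hue="sex",
                   data=df[df.deck.notnull()], kind="count",
                   size=4, aspect=.8, palette="muted")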
# Draw a nested barplot to show survival for class and sex
g = sns.factorplot(x="CHANGE TO THE CORRECT FEATURE",
y="CHANGE TO THE CORRECT FEATURE",
hue="CHANGE TO THE CORRECT FEATURE",
data=df,
size=6, kind="bar", palette="muted")
g.despine(left=True)
g.set_ylabels("survival probability")
g = sns.factorplot(x="CHANGE TO THE CORRECT FEATURE",
y="CHANGE TO THE CORRECT FEATURE",
col="CHANGE TO THE CORRECT FEATURE",
data=df,
saturation=.5, kind="bar", ci=None,aspect=.6)
(g.set_axis_labels("", "Survival Rate").set_xticklabels(["Men", "Women", "Children"]).set_titles
("{col_name} {col_var}").set(ylim=(0, 1)).despine(left=True))
# With factorplot, make a violin plot that shows the age of the passengers at each embarkation point
# based on their class. Use the hue parameter to show the sex of the passengers
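# One possible solution sketch; mapping class to x and embark_town to col is
# one interpretation of the prompt, not the only valid layout.
g = sns.factorplot(x="class", y="age", col="embark_town", hue="sex",
                   data=df, kind="violin", size=4, aspect=.8)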
df.age = df.age.fillna(df.age.mean())
g = sns.pairplot(data=df[['survived', 'pclass', 'age', 'sibsp', 'parch', 'fare']], hue='survived', dropna=True)
# Pairplot of the crash data
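# A hedged sketch, assuming "crash data" refers to seaborn's car_crashes toy dataset.
crashes = sns.load_dataset('car_crashes')
g = sns.pairplot(data=crashes)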
g = sns.jointplot("fare", "age", df)
# Jointplot, titanic data
# Jointplot, crash data
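# A hedged sketch, again assuming seaborn's car_crashes toy dataset; the
# speeding/total column pairing is just one illustrative choice.
crashes = sns.load_dataset('car_crashes')
g = sns.jointplot("speeding", "total", crashes)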
# boxplot of the age distribution on each deck by class
# boxplot of the age distribution on each deck by class using FacetGrid
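# A hedged sketch of the two bonus plots; the hue/col choices are one
# reasonable reading of the prompts above.
plt.figure(figsize=(12, 6))
sns.boxplot(x="deck", y="age", hue="class", data=df[df.deck.notnull()])
# FacetGrid variant: one panel of deck-vs-age boxplots per class.
g = sns.FacetGrid(df[df.deck.notnull()], col="class", size=4)
g.map(sns.boxplot, "deck", "age")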
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Like scikit-learn, Seaborn has "toy" datasets available to import for exploration. This includes the Titanic data we have previously looked at. Let's load the Seaborn Titanic dataset and take a look.
Step2: As you can see, the data has been cleaned up a bit.
Step3: Factorplot
Step4: What other options can you set with a factorplot in Seaborn? Using the code above as a starting point, write code that creates a factorplot with the same data, but in a different configuration. For example: make 2 plots per column, change the colors, add a legend, change the size, etc.
Step5: As you saw in the factorplot documentation, you can specify several different types of plots in the parameters. Let's use factorplot to create a nested barplot showing passenger survival based on their class and sex. Fill in the missing pieces of the code below.
Step6: Take a look at the code below. Let's again plot passenger survival based on their class and who they were (man, woman, child) but using a different plot for each class, like what we did above for the deck information.
Step7: Factorplot has 6 different kinds of plots, we explored two of them above. Using the documentation, try out one of the remaining plot types. A suggestion is provided below. You can follow it, and/or create your own visualization.
Step8: Pairplot
Step9: The Titanic data gives an idea of what we can see with a pairplot, but it might not be the most illustrative example. Using the information provided so far, make a pairplot using the seaborn car crashes data.
Step10: Jointplot
Step11: Using either the Titanic or crash data, create some jointplots.
Step12: Bonus
|
2,845
|
<ASSISTANT_TASK:>
Python Code:
import WizardTree as wt
story = wt.CrawlComment(comment_id = "cy8z5uv")
wt.Export(story, "Output/WizardTree.json")
import WizardTree as wt
story = wt.Import("Output/WizardTree.json")
print "Title: " + story['title']
print "Author: " + story['author']
print "Url: " + story['url']
print "Contains %d events, with %d possible endings" % (
len(story['events']),
len([1 for event in story['events'].values() if len(event['actions']) == 0]))
%matplotlib inline
#Imports
import WizardTree as wt
import networkx as nx
import matplotlib.pyplot as plt
#Build graph
G = nx.DiGraph()
for event_id in story['events']:
event = story['events'][event_id]
for action_id in event['actions']:
action = story['actions'][action_id]
G.add_edge(event_id, action['consequence'])
#Plot
shells = {}
pos = {}
for node in nx.dfs_preorder_nodes(G):
depth = len(nx.ancestors(G, node))
if depth in shells:
shells[depth].append(node)
else:
shells[depth] = [node]
pos[node] = [len(shells[depth]), -depth]
plt.figure(figsize=(8,20))
nx.draw(G, pos=pos, with_labels=False)
plt.show()
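#A hedged extra (not in the original): report how deep the deepest event is,
#reusing the ancestor counts that were used to position the nodes above.
max_depth = max(len(nx.ancestors(G, node)) for node in G.nodes())
print "Deepest event is %d steps from the start" % max_depth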
#Imports
from html import HTML
#Definitions
def Truncate(text):
return text[:60] + (text[60:] and '..')
def ParseEvent(story, body, event_id, index):
event = story['events'][event_id]
div = body.div(klass = 'event')
div.a('', name = event_id)
links = div.div(klass = 'link-container')
if event['parent'] != None:
links.a('Go to Previous',
href = '#'+event['parent'],
klass = 'left-link')
else:
links.a('Go to Index', href = '#index', klass = 'left-link')
links.a('Go to Original', href = event['url'], klass = 'right-link')
div.text(event['description'], escape=False)
for action_id in event['actions']:
ParseAction(story, body, action_id, div.ul(), index)
def ParseAction(story, body, action_id, ul, index):
action = story['actions'][action_id]
href = '#'+action['consequence']
indli = index.li()
indli.a(action['description'], href = href, klass='action', style = 'display: block;', escape=False)
li = ul.li(klass = 'action')
a = li.a(href = href,
style = "display: block;",
title = '/u/'+ action['actor'])
a.span(action['description'], escape=False)
ParseEvent(story, body, action['consequence'], index)
def ParseStory(story):
#Styling
css = """
body {
font-family: arial, sans-serif;
width: 40em;
margin-left:auto;
margin-right:auto;
}
p {
text-align: justify;
-moz-hyphens: auto;
hyphens: auto;
}
.event {
margin-bottom: 1000ex;
}
.event > p {
font-family: Georgia, serif;
font-size: larger;
line-height: 1.5em;
}
.action p {
margin-top: 0.25em;
margin-bottom: 0.25em;
}
.action {
font-family: consolas, monospace;
margin: 0 0 2ex 0;
}
.link-container {
font-family: consolas, monospace;
display: flex;
}
.left-link{
width: 40ex;
}
.right-link{
flex-grow: 1;
text-align: right;
}
"""
#Build HTML file
doc = HTML()
doc.text("<!DOCTYPE html>", escape=False)
html = doc.html()
header = html.head()
header.meta(charset = "UTF-8")
header.title(story['title'])
header.style(css, escape = False)
body = html.body()
index = HTML().ul()
#Parse tree
ParseEvent(story, body, story['start'], index)
#Add index
body.a('', name = 'index')
body.h1('Index')
body.div(index, escape = False)
#Add credits
body.a('', name = 'credits')
body.h1('Credits')
body.p("A special thanks to " + story['author'] + " for creating the story")
body.p("Also thanks to all the commenters appearing in the story:")
users_ul = body.ul()
for actor in story['actors']: users_ul.li("/u/"+actor)
body.p("Powered by python, coded by /u/XkF21WNJ.")
return doc
doc = ParseStory(story)
#Export to file
with open("Output/WizardTree.html", 'w') as f:
f.write(unicode(doc).encode('utf-8'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load story from JSON file
Step2: For the following examples, the variable story is assumed to contain the story
Step3: Visualize story structure
Step5: HTML conversion
|
2,846
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from IPython.display import clear_output
from matplotlib import pyplot as plt
# Load dataset.
dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
y_train = dftrain.pop('survived')
y_eval = dfeval.pop('survived')
import tensorflow as tf
tf.random.set_seed(123)
dftrain.head()
dftrain.describe()
dftrain.shape[0], dfeval.shape[0]
dftrain.age.hist(bins=20)
plt.show()
dftrain.sex.value_counts().plot(kind='barh')
plt.show()
dftrain['class'].value_counts().plot(kind='barh')
plt.show()
dftrain['embark_town'].value_counts().plot(kind='barh')
plt.show()
pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive')
plt.show()
CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck',
'embark_town', 'alone']
NUMERIC_COLUMNS = ['age', 'fare']
def one_hot_cat_column(feature_name, vocab):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(feature_name,
vocab))
feature_columns = []
for feature_name in CATEGORICAL_COLUMNS:
# Need to one-hot encode categorical features.
vocabulary = dftrain[feature_name].unique()
feature_columns.append(one_hot_cat_column(feature_name, vocabulary))
for feature_name in NUMERIC_COLUMNS:
feature_columns.append(tf.feature_column.numeric_column(feature_name,
dtype=tf.float32))
example = dict(dftrain.head(1))
class_fc = tf.feature_column.indicator_column(tf.feature_column.categorical_column_with_vocabulary_list('class', ('First', 'Second', 'Third')))
print('Feature value: "{}"'.format(example['class'].iloc[0]))
print('One-hot encoded: ', tf.keras.layers.DenseFeatures([class_fc])(example).numpy())
tf.keras.layers.DenseFeatures(feature_columns)(example).numpy()
# Use entire batch since this is such a small dataset.
NUM_EXAMPLES = len(y_train)
def make_input_fn(X, y, n_epochs=None, shuffle=True):
def input_fn():
dataset = tf.data.Dataset.from_tensor_slices((dict(X), y))
if shuffle:
dataset = dataset.shuffle(NUM_EXAMPLES)
# For training, cycle thru dataset as many times as need (n_epochs=None).
dataset = dataset.repeat(n_epochs)
# In memory training doesn't use batching.
dataset = dataset.batch(NUM_EXAMPLES)
return dataset
return input_fn
# Training and evaluation input functions.
train_input_fn = make_input_fn(dftrain, y_train)
eval_input_fn = make_input_fn(dfeval, y_eval, shuffle=False, n_epochs=1)
linear_est = tf.estimator.LinearClassifier(feature_columns)
# Train model.
linear_est.train(train_input_fn, max_steps=100)
# Evaluation.
result = linear_est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
# Since data fits into memory, use entire dataset per layer. It will be faster.
# Above one batch is defined as the entire dataset.
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
pred_dicts = list(est.predict(eval_input_fn))
probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts])
probs.plot(kind='hist', bins=20, title='predicted probabilities')
plt.show()
from sklearn.metrics import roc_curve
fpr, tpr, _ = roc_curve(y_eval, probs)
plt.plot(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.xlim(0,)
plt.ylim(0,)
plt.show()
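# A small hedged addition (not in the original): the area under the ROC curve can
# also be computed directly with scikit-learn as a single summary number.
from sklearn.metrics import roc_auc_score
print('AUC: {:.4f}'.format(roc_auc_score(y_eval, probs)))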
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Boosted trees using Estimators
Step2: The dataset consists of a training set and an evaluation set.
Step3: The training and evaluation sets contain 627 and 264 examples, respectively.
Step4: The majority of passengers are in their 20s and 30s.
Step5: There are roughly twice as many male passengers as female passengers.
Step6: Most passengers travelled in the "Third" cabin class.
Step7: Most passengers embarked from Southampton.
Step8: Females have a much higher chance of surviving than males, which is clearly a predictive feature for the model.
Step9: Create feature columns and input functions
Step10: You can view the transformation that a feature column produces. For example, here is the output when using indicator_column on a single example.
Step11: You can also view all of the feature column transformations together.
Step12: Next, you need to create the input functions. These specify how data will be read into the model for both training and inference. Use the from_tensor_slices method in the tf.data API to read data directly from Pandas; this is suitable for smaller, in-memory datasets. For larger datasets, the tf.data API supports a variety of file formats (including csv) so you can process datasets that do not fit in memory.
Step13: Train and evaluate the model
Step14: Next, let's train a boosted trees model. Boosted trees support regression (BoostedTreesRegressor) and classification (BoostedTreesClassifier). Since the goal is to predict the survived/not-survived class, use BoostedTreesClassifier.
Step15: You can use this trained model to make predictions for a passenger from the evaluation set. TensorFlow models are optimized to make predictions on a batch, or collection, of examples at once. Earlier, eval_input_fn was defined using the entire evaluation set.
Step16: Finally, look at the receiver operating characteristic (ROC) of the results, which gives a clearer picture of the tradeoff between the true positive rate and the false positive rate.
|
2,847
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import random
from collections import defaultdict
import mdtraj as md
import numpy as np
import scipy.cluster.hierarchy
stride = 5
subsampled = md.load('ala2.h5', stride=stride)
print(subsampled)
distances = np.empty((subsampled.n_frames, subsampled.n_frames))
for i in range(subsampled.n_frames):
distances[i] = md.rmsd(subsampled, subsampled, i)
n_clusters = 3
linkage = scipy.cluster.hierarchy.ward(distances)
labels = scipy.cluster.hierarchy.fcluster(linkage, t=n_clusters, criterion='maxclust')
labels
mapping = defaultdict(lambda : [])
for i, label in enumerate(labels):
mapping[label].append(i)
mapping
n_leaders_per_cluster = 2
leaders = md.Trajectory(xyz=np.empty((0, subsampled.n_atoms, 3)),
topology=subsampled.topology)
leader_labels = []
for label, indices in mapping.items():
leaders = leaders.join(subsampled[np.random.choice(indices, n_leaders_per_cluster)])
leader_labels.extend([label] * n_leaders_per_cluster)
print(leaders)
print(leader_labels)
labels = []
for frame in md.iterload('ala2.h5', chunk=1):
labels.append(leader_labels[np.argmin(md.rmsd(leaders, frame, 0))])
labels = np.array(labels)
print(labels)
print(labels.shape)
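# A small hedged addition (not in the original): tally how many frames of the
# full trajectory landed in each leader's cluster label.
from collections import Counter
print(Counter(labels))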
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute the pairwise RMSD between all of the frames. This requires
Step2: Now that we have the distances, we can use our favorite clustering
Step3: Now, we need to extract n_leaders random samples from each of the clusters.
Step4: Now we can iterate through the mapping and select n_leaders random
Step5: Now our leaders trajectory contains a set of representative conformations
|
2,848
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Set up code checking
import os
if not os.path.exists("../input/fifa.csv"):
os.symlink("../input/data-for-datavis/fifa.csv", "../input/fifa.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.data_viz_to_coder.ex1 import *
print("Setup Complete")
# Fill in the line below
one = ____
# Check your answer
step_1.check()
#%%RM_IF(PROD)%%
one = 1
step_1.assert_check_passed()
step_1.hint()
step_1.solution()
# Path of the file to read
fifa_filepath = "../input/fifa.csv"
# Read the file into a variable fifa_data
fifa_data = pd.read_csv(fifa_filepath, index_col="Date", parse_dates=True)
# Check your answer
step_2.check()
#%%RM_IF(PROD)%%
fifa_data = pd.read_csv(fifa_filepath, index_col="Date", parse_dates=True)
step_2.assert_check_passed()
# Uncomment the line below to receive a hint
#_COMMENT_IF(PROD)_
step_2.hint()
# Uncomment the line below to see the solution
#_COMMENT_IF(PROD)_
step_2.solution()
# Set the width and height of the figure
plt.figure(figsize=(16,6))
# Line chart showing how FIFA rankings evolved over time
sns.lineplot(data=fifa_data)
# Check your answer
step_3.a.check()
#_COMMENT_IF(PROD)_
step_3.b.hint()
# Check your answer (Run this code cell to receive credit!)
step_3.b.solution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The code you just ran sets up the system to give you feedback on your work. You'll learn more about the feedback system in the next step.
Step 1
Step2: In this exercise, you were responsible for filling in the line of code that sets the value of variable one. Don't edit the code that checks your answer. You'll need to run the lines of code like step_1.check() and step_2.check() just as they are provided.
Step3: Step 2
Step4: Next, recall the difference between comments and executable code
Step5: Next, remove the pound sign before step_2.hint() so that the code cell above appears as follows
Step6: Some questions won't require you to write any code. Instead, you'll interpret visualizations.
Step7: Once you have an answer, check the <font color='#33cc99'>Solution</font> to get credit for completing the problem and to ensure your interpretation is right.
|
2,849
|
<ASSISTANT_TASK:>
Python Code:
from civis.ml import ModelPipeline
from civis import APIClient
client = APIClient()
# dynamically get database name
creds = client.credentials.list()
dbs = [db for db in find(creds, type='Database')
if 'redshift' in db.name.lower()]
db_name = dbs[0].name
model = ModelPipeline('random_forest_classifier',
dependent_variable='upgrade',
primary_key='brandable_user_id',
model_name='Brandable "upgrade" CivisML model',
excluded_columns=['residential_zip'],
cross_validation_parameters={"max_depth": [2, 3, 5],
"n_estimators": [50, 100, 500]})
from civis.io import read_civis
df = read_civis(table='sample_project.brandable_training_set',
database=db_name,
use_pandas=True)
print('Data has dimensions: {}'.format(df.shape))
df.head()
train = model.train(df)
train.result() # success!
train.estimator
train.estimator.best_params_
train.metrics['roc_auc']
train.metrics.keys() # lots of other metrics here too
train.table.head() # out-of-sample scores
from sklearn.svm import SVC
# we need to call `predict_proba` for our predictions
# so we set `probability=True`
est = SVC(probability=True,
kernel='rbf')
model_custom = ModelPipeline(model=est,
dependent_variable='upgrade',
primary_key='brandable_user_id',
model_name='Brandable "upgrade" CivisML custom model',
excluded_columns=['residential_zip'])
model_custom.model
train_custom = model_custom.train(table_name='sample_project.brandable_training_set',
database_name=db_name)
train_custom.result() # wait for result
train_custom.metrics['roc_auc']
predict = model.predict(table_name='sample_project.brandable_all_users',
database_name=db_name,
output_table='sample_project.brandable_user_scores')
predict.table.head()
old_model = ModelPipeline.from_existing(train.job_id)
old_model.model_name # same as before!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the first example, we'll use a random forest model from scikit-learn. In addition, we'll grid search over hyperparameters for the maximum depth of the tree and the number of trees to optimally predict which users choose to upgrade to a premium service from the free version. Our data are in an AWS Redshift database on Civis Platform.
Step2: Training
Step3: We can find out which hyperparameter combination was optimal and easily extract out-of-sample scoring metrics, as well as metadata.
Step4: Fitting Custom Models
Step5: This time we'll use our scratch.brandable_training table in Redshift instead of a dataframe.
Step6: Prediction
Step7: Recreating CivisML models
|
2,850
|
<ASSISTANT_TASK:>
Python Code:
import os
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT ID
BUCKET = "cloud-training-demos-ml" # REPLACE WITH YOUR BUCKET NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = "1.15"
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
import tensorflow as tf
print(tf.__version__)
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
sql = """
WITH CTE_visitor_page_content AS (
SELECT
# Schema: https://support.google.com/analytics/answer/3437719?hl=en
# For a completely unique visit-session ID, you combine combination of fullVisitorId and visitNumber:
CONCAT(fullVisitorID,'-',CAST(visitNumber AS STRING)) AS visitorId,
(SELECT MAX(IF(index=10, value, NULL)) FROM UNNEST(hits.customDimensions)) AS latestContentId,
(LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId ORDER BY hits.time ASC) - hits.time) AS session_duration
FROM
`cloud-training-demos.GA360_test.ga_sessions_sample`,
UNNEST(hits) AS hits
WHERE
# only include hits on pages
hits.type = "PAGE"
GROUP BY
fullVisitorId,
visitNumber,
latestContentId,
hits.time )
-- Aggregate web stats
SELECT
visitorId,
latestContentId as contentId,
SUM(session_duration) AS session_duration
FROM
CTE_visitor_page_content
WHERE
latestContentId IS NOT NULL
GROUP BY
visitorId,
latestContentId
HAVING
session_duration > 0
"""
df = bq.query(sql).to_dataframe()
df.head()
stats = df.describe()
stats
df[["session_duration"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
# The rating is the session_duration scaled to be in the range 0-1. This will help with training.
median = stats.loc["50%", "session_duration"]
df["rating"] = 0.3 * df["session_duration"] / median
df.loc[df["rating"] > 1, "rating"] = 1
df[["rating"]].plot(kind="hist", logy=True, bins=100, figsize=[8,5])
del df["session_duration"]
%%bash
rm -rf data
mkdir data
# Write object to a comma-separated values (csv) file.
# TODO 1: Your code here
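# A hedged sketch of TODO 1 (one possible completion): write the ratings
# dataframe out with no index and no header, matching how it is read back in below.
df.to_csv(path_or_buf="data/collab_raw.csv", index=False, header=False)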
!head data/collab_raw.csv
import pandas as pd
import numpy as np
def create_mapping(values, filename):
with open(filename, 'w') as ofp:
value_to_id = {value:idx for idx, value in enumerate(values.unique())}
for value, idx in value_to_id.items():
ofp.write("{},{}\n".format(value, idx))
return value_to_id
df = pd.read_csv(filepath_or_buffer = "data/collab_raw.csv",
header = None,
names = ["visitorId", "contentId", "rating"],
dtype = {"visitorId": str, "contentId": str, "rating": np.float})
df.to_csv(path_or_buf = "data/collab_raw.csv", index = False, header = False)
user_mapping = create_mapping(df["visitorId"], "data/users.csv")
item_mapping = create_mapping(df["contentId"], "data/items.csv")
!head -3 data/*.csv
df["userId"] = df["visitorId"].map(user_mapping.get)
df["itemId"] = df["contentId"].map(item_mapping.get)
mapped_df = df[["userId", "itemId", "rating"]]
mapped_df.to_csv(path_or_buf = "data/collab_mapped.csv", index = False, header = False)
mapped_df.head()
import pandas as pd
import numpy as np
mapped_df = pd.read_csv(filepath_or_buffer = "data/collab_mapped.csv", header = None, names = ["userId", "itemId", "rating"])
mapped_df.head()
NITEMS = np.max(mapped_df["itemId"]) + 1
NUSERS = np.max(mapped_df["userId"]) + 1
mapped_df["rating"] = np.round(mapped_df["rating"].values, 2)
print("{} items, {} users, {} interactions".format( NITEMS, NUSERS, len(mapped_df) ))
grouped_by_items = mapped_df.groupby("itemId")
iter = 0
for item, grouped in grouped_by_items:
print(item, grouped["userId"].values, grouped["rating"].values)
iter = iter + 1
if iter > 5:
break
import tensorflow as tf
grouped_by_items = mapped_df.groupby("itemId")
with tf.python_io.TFRecordWriter("data/users_for_item") as ofp:
for item, grouped in grouped_by_items:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [item])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["userId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
grouped_by_users = mapped_df.groupby("userId")
with tf.python_io.TFRecordWriter("data/items_for_user") as ofp:
for user, grouped in grouped_by_users:
example = tf.train.Example(features = tf.train.Features(feature = {
"key": tf.train.Feature(int64_list = tf.train.Int64List(value = [user])),
"indices": tf.train.Feature(int64_list = tf.train.Int64List(value = grouped["itemId"].values)),
"values": tf.train.Feature(float_list = tf.train.FloatList(value = grouped["rating"].values))
}))
ofp.write(example.SerializeToString())
!ls -lrt data
import os
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.contrib.factorization import WALSMatrixFactorization
def read_dataset(mode, args):
def decode_example(protos, vocab_size):
# TODO 2: Your code here
return
def remap_keys(sparse_tensor):
# Current indices of your SparseTensor that you need to fix
bad_indices = sparse_tensor.indices # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Current values of your SparseTensor that you need to fix
bad_values = sparse_tensor.values # shape = (current_batch_size * (number_of_items/users[i] + 1),)
# Since batch is ordered, the last value for a batch index is the user
# Find where the batch index chages to extract the user rows
# 1 where user, else 0
user_mask = tf.concat(values = [bad_indices[1:,0] - bad_indices[:-1,0], tf.constant(value = [1], dtype = tf.int64)], axis = 0) # shape = (current_batch_size * (number_of_items/users[i] + 1), 2)
# Mask out the user rows from the values
good_values = tf.boolean_mask(tensor = bad_values, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
item_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 0)) # shape = (current_batch_size * number_of_items/users[i],)
user_indices = tf.boolean_mask(tensor = bad_indices, mask = tf.equal(x = user_mask, y = 1))[:, 1] # shape = (current_batch_size,)
good_user_indices = tf.gather(params = user_indices, indices = item_indices[:,0]) # shape = (current_batch_size * number_of_items/users[i],)
# User and item indices are rank 1, need to make rank 1 to concat
good_user_indices_expanded = tf.expand_dims(input = good_user_indices, axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_item_indices_expanded = tf.expand_dims(input = item_indices[:, 1], axis = -1) # shape = (current_batch_size * number_of_items/users[i], 1)
good_indices = tf.concat(values = [good_user_indices_expanded, good_item_indices_expanded], axis = 1) # shape = (current_batch_size * number_of_items/users[i], 2)
remapped_sparse_tensor = tf.SparseTensor(indices = good_indices, values = good_values, dense_shape = sparse_tensor.dense_shape)
return remapped_sparse_tensor
def parse_tfrecords(filename, vocab_size):
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
else:
num_epochs = 1 # end-of-input after this
files = tf.gfile.Glob(filename = os.path.join(args["input_path"], filename))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(map_func = lambda x: decode_example(x, vocab_size))
dataset = dataset.repeat(count = num_epochs)
dataset = dataset.batch(batch_size = args["batch_size"])
dataset = dataset.map(map_func = lambda x: remap_keys(x))
return dataset.make_one_shot_iterator().get_next()
def _input_fn():
features = {
WALSMatrixFactorization.INPUT_ROWS: parse_tfrecords("items_for_user", args["nitems"]),
WALSMatrixFactorization.INPUT_COLS: parse_tfrecords("users_for_item", args["nusers"]),
WALSMatrixFactorization.PROJECT_ROW: tf.constant(True)
}
return features, None
return _input_fn
def try_out():
with tf.Session() as sess:
fn = read_dataset(
mode = tf.estimator.ModeKeys.EVAL,
args = {"input_path": "data", "batch_size": 4, "nitems": NITEMS, "nusers": NUSERS})
feats, _ = fn()
print(feats["input_rows"].eval())
print(feats["input_rows"].eval())
try_out()
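# A hedged sketch (not necessarily the intended solution) of what the TODO 2
# decode_example above might look like, based on the TFRecord schema written
# earlier ("key", "indices", "values").
def decode_example_sketch(protos, vocab_size):
    features = {
        "key": tf.FixedLenFeature(shape=[1], dtype=tf.int64),
        "indices": tf.VarLenFeature(dtype=tf.int64),
        "values": tf.VarLenFeature(dtype=tf.float32)}
    parsed_features = tf.parse_single_example(serialized=protos, features=features)
    # Merge indices/values into a sparse vector of length vocab_size
    values = tf.sparse_merge(sp_ids=parsed_features["indices"],
                             sp_values=parsed_features["values"],
                             vocab_size=vocab_size)
    # Append the key as an extra entry so remap_keys can recover it after batching
    key = parsed_features["key"]
    return tf.SparseTensor(
        indices=tf.concat(values=[values.indices, [key]], axis=0),
        values=tf.concat(values=[values.values, [0.0]], axis=0),
        dense_shape=values.dense_shape)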
def find_top_k(user, item_factors, k):
all_items = tf.matmul(a = tf.expand_dims(input = user, axis = 0), b = tf.transpose(a = item_factors))
topk = tf.nn.top_k(input = all_items, k = k)
return tf.cast(x = topk.indices, dtype = tf.int64)
def batch_predict(args):
import numpy as np
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
# This is how you would get the row factors for out-of-vocab user data
# row_factors = list(estimator.get_projections(input_fn=read_dataset(tf.estimator.ModeKeys.EVAL, args)))
# user_factors = tf.convert_to_tensor(np.array(row_factors))
# But for in-vocab data, the row factors are already in the checkpoint
user_factors = tf.convert_to_tensor(value = estimator.get_row_factors()[0]) # (nusers, nembeds)
# In either case, you have to assume catalog doesn"t change, so col_factors are read in
item_factors = tf.convert_to_tensor(value = estimator.get_col_factors()[0])# (nitems, nembeds)
# For each user, find the top K items
topk = tf.squeeze(input = tf.map_fn(fn = lambda user: find_top_k(user, item_factors, args["topk"]), elems = user_factors, dtype = tf.int64))
with file_io.FileIO(os.path.join(args["output_dir"], "batch_pred.txt"), mode = 'w') as f:
for best_items_for_user in topk.eval():
f.write(",".join(str(x) for x in best_items_for_user) + '\n')
def train_and_evaluate(args):
train_steps = int(0.5 + (1.0 * args["num_epochs"] * args["nusers"]) / args["batch_size"])
steps_in_epoch = int(0.5 + args["nusers"] / args["batch_size"])
print("Will train for {} steps, evaluating once every {} steps".format(train_steps, steps_in_epoch))
def experiment_fn(output_dir):
return tf.contrib.learn.Experiment(
tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"]),
train_input_fn = read_dataset(tf.estimator.ModeKeys.TRAIN, args),
eval_input_fn = read_dataset(tf.estimator.ModeKeys.EVAL, args),
train_steps = train_steps,
eval_steps = 1,
min_eval_frequency = steps_in_epoch
)
from tensorflow.contrib.learn.python.learn import learn_runner
learn_runner.run(experiment_fn = experiment_fn, output_dir = args["output_dir"])
batch_predict(args)
import shutil
shutil.rmtree(path = "wals_trained", ignore_errors=True)
train_and_evaluate({
"output_dir": "wals_trained",
"input_path": "data/",
"num_epochs": 0.05,
"nitems": NITEMS,
"nusers": NUSERS,
"batch_size": 512,
"n_embeds": 10,
"topk": 3
})
!ls wals_trained
!head wals_trained/batch_pred.txt
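# A hedged follow-on sketch (not in the original): translate the predicted
# itemIds in batch_pred.txt back to contentIds using the mapping file written earlier.
item_lookup = pd.read_csv("data/items.csv", header=None, names=["contentId", "itemId"])
itemid_to_content = dict(zip(item_lookup["itemId"], item_lookup["contentId"]))
with open("wals_trained/batch_pred.txt") as f:
    top_items_first_user = [itemid_to_content[int(i)] for i in f.readline().strip().split(",")]
print(top_items_first_user)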
os.environ["NITEMS"] = str(NITEMS)
os.environ["NUSERS"] = str(NUSERS)
%%bash
rm -rf wals.tar.gz wals_trained
gcloud ai-platform local train \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
-- \
--output_dir=${PWD}/wals_trained \
--input_path=${PWD}/data \
--num_epochs=0.01 --nitems=${NITEMS} --nusers=${NUSERS} \
--job-dir=./tmp
%%bash
gsutil -m cp data/* gs://${BUCKET}/wals/data
%%bash
OUTDIR=gs://${BUCKET}/wals/model_trained
JOBNAME=wals_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=walsmodel.task \
--package-path=${PWD}/walsmodel \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC_GPU \
--runtime-version=$TFVERSION \
-- \
--output_dir=$OUTDIR \
--input_path=gs://${BUCKET}/wals/data \
--num_epochs=10 --nitems=${NITEMS} --nusers=${NUSERS}
def get_factors(args):
with tf.Session() as sess:
estimator = tf.contrib.factorization.WALSMatrixFactorization(
num_rows = args["nusers"],
num_cols = args["nitems"],
embedding_dimension = args["n_embeds"],
model_dir = args["output_dir"])
row_factors = estimator.get_row_factors()[0]
col_factors = estimator.get_col_factors()[0]
return row_factors, col_factors
args = {
"output_dir": "gs://{}/wals/model_trained".format(BUCKET),
"nitems": NITEMS,
"nusers": NUSERS,
"n_embeds": 10
}
user_embeddings, item_embeddings = get_factors(args)
print(user_embeddings[:3])
print(item_embeddings[:3])
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
pca.fit(user_embeddings)
user_embeddings_pca = # TODO 3: Your code here
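# A hedged hint for TODO 3 (one possible completion): project the user embeddings
# onto the three principal components, e.g.
# user_embeddings_pca = pca.transform(user_embeddings)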
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(111, projection = "3d")
xs, ys, zs = user_embeddings_pca[::150].T
ax.scatter(xs, ys, zs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Create raw dataset
Step3: Create dataset for WALS
Step4: Creating rows and columns datasets
Step5: To summarize, you created the following data files from collab_raw.csv
Step6: This code is helpful in developing the input function. You don't need it in production.
Step7: Run as a Python module
Step8: Run on Cloud
Step9: This will take 10 minutes to complete. Rerun the above command until the job gets submitted.
Step10: You can visualize the embedding vectors using dimensional reduction techniques such as PCA.
|
2,851
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
sys.path.append(os.getcwd()+'/../')
# our lib
from lib.resnet50 import ResNet50
from lib.imagenet_utils import preprocess_input, decode_predictions
#keras
from keras.preprocessing import image
from keras.models import Model
# sklearn
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import permutation_test_score
# other
import numpy as np
import glob
import pandas as pd
import ntpath
# plotting
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
def preprocess_img(img_path):
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return(x,img)
def perf_measure(y_actual, y_hat):
TP = 0
FP = 0
TN = 0
FN = 0
for i in range(len(y_hat)):
if y_actual[i]==y_hat[i]==1:
TP += 1
for i in range(len(y_hat)):
if (y_hat[i]==1) and (y_actual[i]!=y_hat[i]):
FP += 1
for i in range(len(y_hat)):
if y_actual[i]==y_hat[i]==0:
TN += 1
for i in range(len(y_hat)):
if (y_hat[i]==0) and (y_actual[i]!=y_hat[i]):
FN += 1
return(TP, FP, TN, FN)
# instantiate the model
base_model = ResNet50(include_top=False, weights='imagenet') #this will pull the weights from the folder
# cut the model to lower levels only
model = Model(input=base_model.input, output=base_model.get_layer('avg_pool').output)
#img_paths = glob.glob('../img/baiyi/*')
#
img_paths = glob.glob('../original_img/*')
img_paths[0:3]
# create dataframe with all image features
img_feature_df = pd.DataFrame()
for i,img_path in enumerate(img_paths):
x,img = preprocess_img(img_path) # preprocess
model_output = model.predict(x)[0,0,0,:]
img_feature_df.loc[i,'img_path']=img_path
img_feature_df.loc[i,'nn_features']=str(list(model_output))
img_feature_df['img_name'] = img_feature_df['img_path'].apply(lambda x: ntpath.basename(x))
img_feature_df.head()
img_feature_df.to_csv('../data_nn_features/img_features_all.csv')
# get target and non-target lists
def create_image_class_dataframe(target_img_folder):
# all the image folders
non_target_img_folders = ['../original_img/']
target_img_paths=glob.glob(target_img_folder+'*')
target_img_paths_stemless = [ntpath.basename(t) for t in target_img_paths]
non_target_img_paths =[]
for non_target_folder in non_target_img_folders:
for img_path in glob.glob(non_target_folder+'*'):
if ntpath.basename(img_path) not in target_img_paths_stemless: # remove targets from non-target list
non_target_img_paths.append(img_path)
# create data frame with image name and label
img_paths = np.append(target_img_paths,non_target_img_paths)
labels = np.append(np.ones(len(target_img_paths)),np.zeros(len(non_target_img_paths)))
df = pd.DataFrame(data=np.vstack((img_paths,labels)).T,columns=['img_path','label'])
df['img_name'] = df['img_path'].apply(lambda x: ntpath.basename(x)) # add image name
df['label'] = df['label'].apply(lambda x: float(x)) # add label
# load up features per image
img_feature_df = pd.read_csv('../data_nn_features/img_features_all.csv',index_col=0)
img_feature_df.head()
# create feature matrix out of loaded up features.
for i,row in df.iterrows():
features = img_feature_df.loc[img_feature_df.img_name==row['img_name'],'nn_features'].as_matrix()[0].replace(']','').replace('[','').split(',')
features = [np.float(f) for f in features]
lab = row['img_name']
if i==0:
X = features
labs = lab
else:
X = np.vstack((X,features))
labs = np.append(labs,lab)
xcolumns = ['x'+str(i) for i in np.arange(X.shape[1])]
X_df = pd.DataFrame(np.hstack((labs[:,np.newaxis],X)),columns=['img_name']+xcolumns)
# merge together
df = df.merge(X_df,on='img_name')
# make sure there is only one instance per image in dataframe
lens = np.array([])
for img_name in df.img_name.unique():
lens = np.append(lens,len(df.loc[df.img_name==img_name]))
assert len(np.unique(lens)[:])==1
return(df)
# remove some non-targets to make dataset smaller #
# i_class0 = np.where(df.label==0.0)[0]
# i_class0_remove = np.random.choice(i_class0,int(np.round(len(i_class0)/1.1)))
# df_smaller = df.drop(i_class0_remove)
#df_smaller.to_csv('test.csv')
# image folder
target_img_folder ='../data_img_classes/class_horiztonal_striped/'
df = create_image_class_dataframe(target_img_folder)
df.head()
print('target class')
plt.figure(figsize=(12,3))
for i in range(5):
img_path= df['img_path'][i]
img = image.load_img(img_path, target_size=(224, 224))
plt.subplot(1,5,i+1)
plt.imshow(img)
plt.grid(b=False)
xcolumns = ['x'+str(i) for i in np.arange(2048)]  # the avg_pool features are 2048-dimensional
X = df.loc[:,xcolumns].as_matrix().astype('float')
y= df.loc[:,'label'].as_matrix().astype('float')
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X,y,stratify=y,test_size=.33)
print(' training shape {0} \n testing shape {1}').format(X_train.shape,X_test.shape)
print('\n target/non-target \n (train) {0}\{1} \n (test) {2}\{3}').format(y_train.sum(),(1-y_train).sum(),y_test.sum(),(1-y_test).sum())
# classifiers
C = 1.0
clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_svm = sklearn.svm.SVC(C=C,kernel='linear')
clf_LR.fit(X_train, y_train)
clf_svm.fit(X_train, y_train)
coef = clf_LR.coef_[0,:]
plt.figure(figsize=(12,3))
sns.set_style('white')
plt.scatter(np.arange(len(coef)),coef)
plt.xlabel('nnet feature')
plt.ylabel('LogReg coefficient')
sns.despine()
y_pred = clf_LR.predict(X_test)
(TP,FP,TN,FN) =perf_measure(y_test,y_pred)
print('TruePos:{0}\nFalsePos:{1}\nTrueNeg:{2}\nFalseNeg:{3}').format(TP,FP,TN,FN)
y_pred = clf_svm.predict(X_test)
(TP,FP,TN,FN) =perf_measure(y_test,y_pred)
print('TruePos:{0}\nFalsePos:{1}\nTrueNeg:{2}\nFalseNeg:{3}').format(TP,FP,TN,FN)
# from sklearn.model_selection import StratifiedKFold
# skf = StratifiedKFold(n_splits=5,shuffle=True)
# for train, test in skf.split(X, y):
# #print("%s %s" % (train, test))
# C=1.0
# clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
# clf_LR.fit(X[train], y[train])
# y_pred = clf_LR.predict(X[test])
# (TP,FP,TN,FN) =perf_measure(y[test],y_pred)
# print('\nTruePos:{0}\nFalsePos:{1}\nTrueNeg:{2}\nFalseNeg:{3}').format(TP,FP,TN,FN)
from sklearn.model_selection import StratifiedKFold
clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
skf = StratifiedKFold(n_splits=5, shuffle=True)
score, permutation_scores, pvalue = permutation_test_score(
clf_LR, X, y, scoring="accuracy", cv=skf, n_permutations=100)
#
plt.hist(permutation_scores)
plt.axvline(score)
sns.despine()
plt.xlabel('accuracy')
print(pvalue)
# image folder
target_img_folder ='../data_img_classes/class_red_pink/'
df = create_image_class_dataframe(target_img_folder)
df.head()
print('target class')
plt.figure(figsize=(12,3))
for i in range(5):
img_path= df['img_path'][i+1]
img = image.load_img(img_path, target_size=(224, 224))
plt.subplot(1,5,i+1)
plt.imshow(img)
plt.grid(b=False)
# split data
xcolumns = ['x'+str(i) for i in np.arange(2048)]  # the avg_pool features are 2048-dimensional
X = df.loc[:,xcolumns].as_matrix().astype('float')
y= df.loc[:,'label'].as_matrix().astype('float')
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X,y,stratify=y,test_size=.33)
print(' training shape {0} \n testing shape {1}').format(X_train.shape,X_test.shape)
print('\n target/non-target \n (train) {0}\{1} \n (test) {2}\{3}').format(y_train.sum(),(1-y_train).sum(),y_test.sum(),(1-y_test).sum())
# Train
C = 1.0
clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_LR.fit(X_train, y_train)
# test
y_pred = clf_LR.predict(X_test)
(TP,FP,TN,FN) =perf_measure(y_test,y_pred)
print('TruePos:{0}\nFalsePos:{1}\nTrueNeg:{2}\nFalseNeg:{3}').format(TP,FP,TN,FN)
from sklearn.model_selection import StratifiedKFold
C = 1.0
clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
skf = StratifiedKFold(n_splits=5,shuffle=True)
score, permutation_scores, pvalue = permutation_test_score(
clf_LR, X, y, scoring="accuracy", cv=skf, n_permutations=100)
plt.hist(permutation_scores)
plt.axvline(score)
sns.despine()
plt.xlabel('accuracy')
plt.title('permutation test on test set classification')
print(pvalue)
C = 1.0
clf_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_LR.fit(X, y)
coef = clf_LR.coef_[0,:]
plt.figure(figsize=(12,3))
sns.set_style('white')
plt.scatter(np.arange(len(coef)),coef)
plt.xlabel('nnet feature')
plt.ylabel('LogReg coefficient')
sns.despine()
np.savetxt('../data_nn_features/class_weights_LR_redpink.txt',coef)
##coef[0:100]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extract NN Features
Step2: Predicting Own Labels from Selected Images
Step3: Horizontal Striped Data
Step4: Neither the SVM nor the logistic regression is doing well
Step5: The accuracy achieved is above chance (as determined by permutation testing)
Step6: Split Set Assessment
Step7: Classification performance is much better on this dataset
Step8: Re-train on whole dataset
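The classification steps above report TP/FP/TN/FN counts through a perf_measure helper; a minimal sketch of one plausible implementation, assuming it simply counts the four confusion-matrix cells for binary 0/1 labels:
import numpy as np
def perf_measure(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    TP = int(np.sum((y_pred == 1) & (y_true == 1)))
    FP = int(np.sum((y_pred == 1) & (y_true == 0)))
    TN = int(np.sum((y_pred == 0) & (y_true == 0)))
    FN = int(np.sum((y_pred == 0) & (y_true == 1)))
    return (TP, FP, TN, FN)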
|
2,852
|
<ASSISTANT_TASK:>
Python Code:
%%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import talib
from talib.abstract import *
from talib import MA_Type
import pinkfish as pf
# Format price data
pd.options.display.float_format = '{:0.3f}'.format
%matplotlib inline
# Set size of inline plots
'''note: rcParams can't be in same cell as import matplotlib
or %matplotlib inline
%matplotlib notebook: will lead to interactive plots embedded within
the notebook, you can zoom and resize the figure
%matplotlib inline: only draw static images in the notebook
'''
plt.rcParams["figure.figsize"] = (10, 7)
symbol = 'SPY'
start = datetime.datetime(2018, 1, 1)
end = datetime.datetime.now()
ts = pf.fetch_timeseries(symbol)
ts.tail()
ts = pf.select_tradeperiod(ts, start, end)
ts.head()
print('There are {} TA-Lib functions!'.format(len(talib.get_functions())))
for group, funcs in talib.get_function_groups().items():
print(group)
print('-----------------------------------------')
for func in funcs:
f = Function(func)
print('{} - {}'.format(func, f.info['display_name']))
print()
print(SMA)
print(SMA.info)
print(SMA.input_names)
print(SMA.parameters)
print(SMA.output_names)
sma = SMA(ts)
sma.tail()
sma200 = SMA(ts, timeperiod=200)
sma200.tail()
sma200 = SMA(ts, timeperiod=200, price='open')
sma200.tail()
ts['sma200'] = sma200
ts.tail()
print(CCI)
print(CCI.input_names)
print(CCI.parameters)
cci = CCI(ts)
ts['cci'] = cci
ts.tail()
print(BBANDS)
print(BBANDS.input_names)
print(BBANDS.parameters)
attributes = [attr for attr in dir(MA_Type)
if not attr.startswith('__')]
attributes
MA_Type.__dict__
print(MA_Type[MA_Type.DEMA])
#upper, middle, lower = BBANDS(ts, timeperiod=20, matype=MA_Type.EMA)
#(for some reason, the abstract API doesn't work for BBANDS, so use the function API)
upper, middle, lower = talib.BBANDS(ts.close, timeperiod=20, matype=MA_Type.EMA)
ts['upper'] = upper; ts['middle'] = middle; ts['lower'] = lower
ts.tail()
print(MOM)
mom10 = MOM(ts, timeperiod=10)
mom10.tail()
ts['mom10'] = mom10
ts.head(50)
m1 = ts['close'].pct_change(periods=10)
ts['m1'] = m1
ts.head(50)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some global data
Step2: Fetch symbol data from cache, if available.
Step3: Select timeseries between start and end.
Step4: Get info about TA-Lib
Step5: Here is a complete listing of the functions by group
Step6: Get info about a specific TA-Lib function
Step7: More information is available through the 'info' property. We observe here that the default price used is 'close'. This can be changed by setting 'price' in the function call, e.g. price='open'.
Step8: If we just want to see the inputs, we can print the input_names property.
Step9: If we just want to see the parameters, we can print the parameters property.
Step10: If we just want to see the outputs, we can print the output_names property.
Step11: Create a technical indicator using talib
Step12: Create technical indicator
Step13: Create technical indicator
Step14: Add a technical indicator to a pinkfish timeseries
Step15: Try another one
Step16: Now for something a little more difficult
Step17: Print the available moving average types
Step18: Set timeperiod=20 and matype=MA_Type.EMA
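A compact recap of the two call styles used above, assuming ts is the pinkfish timeseries built earlier; the abstract API takes the whole frame (with an optional price override), while the function API takes a series explicitly:
from talib.abstract import SMA
from talib import MA_Type
import talib
print(SMA.info)  # inspect defaults; the default input price is 'close'
sma200_open = SMA(ts, timeperiod=200, price='open')  # abstract API with a price override
upper, middle, lower = talib.BBANDS(ts.close, timeperiod=20, matype=MA_Type.EMA)  # function API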
|
2,853
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float,
'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str,
'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int,
'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int}
sales = pd.read_csv('kc_house_data.csv', dtype=dtype_dict)
train_data = pd.read_csv('kc_house_train_data.csv', dtype=dtype_dict)
test_data = pd.read_csv('kc_house_test_data.csv', dtype=dtype_dict)
sales.head()
# Let's compute the mean of the House Prices in King County in 2 different ways.
prices = sales['price'] # extract the price column of the sales DataFrame -- this is now a pandas Series
# recall that the arithmetic average (the mean) is the sum of the prices divided by the total number of houses:
sum_prices = prices.sum()
num_houses = len(prices) # len() returns the number of prices in the Series
avg_price_1 = sum_prices/num_houses
avg_price_2 = prices.mean() # if you just want the average, use the .mean() method
print "average price via method 1: " + str(avg_price_1)
print "average price via method 2: " + str(avg_price_2)
# if we want to multiply every price by 0.5 it's a simple as:
half_prices = 0.5*prices
# Let's compute the sum of squares of price. We can multiply two Series of the same length elementwise with *
prices_squared = prices*prices
sum_prices_squared = prices_squared.sum() # price_squared is an SArray of the squares and we want to add them up.
print "the sum of price squared is: " + str(sum_prices_squared)
def simple_linear_regression(input_feature, output):
n = len(input_feature)
x = input_feature
y = output
# compute the mean of input_feature and output
x_mean = x.mean()
y_mean = y.mean()
# compute the product of the output and the input_feature and its mean
sum_xy = (y * x).sum()
xy_by_n = (y.sum() * x.sum())/n
# compute the squared value of the input_feature and its mean
x_square = (x**2).sum()
xx_by_n = (x.sum() * x.sum())/n
# use the formula for the slope
slope = (sum_xy - xy_by_n) / (x_square - xx_by_n)
# use the formula for the intercept
intercept = y_mean - (slope * x_mean)
return (intercept, slope)
test_feature = np.array(range(5))
test_output = np.array(1 + 1*test_feature)
(test_intercept, test_slope) = simple_linear_regression(test_feature, test_output)
print "Intercept: " + str(test_intercept)
print "Slope: " + str(test_slope)
sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'].values, train_data['price'].values)
print "Intercept: " + str(sqft_intercept)
print "Slope: " + str(sqft_slope)
def get_regression_predictions(input_feature, intercept, slope):
# calculate the predicted values:
predicted_values = intercept + (slope * input_feature)
return predicted_values
my_house_sqft = 2650
estimated_price = get_regression_predictions(my_house_sqft, sqft_intercept, sqft_slope)
print "The estimated price for a house with %d squarefeet is $%.2f" % (my_house_sqft, estimated_price)
def get_residual_sum_of_squares(input_feature, output, intercept, slope):
# First get the predictions
predicted_values = intercept + (slope * input_feature)
# then compute the residuals (since we are squaring it doesn't matter which order you subtract)
residuals = output - predicted_values
# square the residuals and add them up
RSS = (residuals * residuals).sum()
return(RSS)
print get_residual_sum_of_squares(test_feature, test_output, test_intercept, test_slope) # should be 0.0
rss_prices_on_sqft = get_residual_sum_of_squares(train_data['sqft_living'], train_data['price'], sqft_intercept, sqft_slope)
print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)
def inverse_regression_predictions(output, intercept, slope):
# solve output = intercept + slope*input_feature for input_feature. Use this equation to compute the inverse predictions:
estimated_feature = (output - intercept)/slope
return estimated_feature
my_house_price = 800000
estimated_squarefeet = inverse_regression_predictions(my_house_price, sqft_intercept, sqft_slope)
print "The estimated squarefeet for a house worth $%.2f is %d" % (my_house_price, estimated_squarefeet)
# Estimate the slope and intercept for predicting 'price' based on 'bedrooms'
sqft_intercept, sqft_slope = simple_linear_regression(train_data['bedrooms'].values, train_data['price'].values)
print "Intercept: " + str(sqft_intercept)
print "Slope: " + str(sqft_slope)
# Compute RSS when using bedrooms on TEST data:
sqft_intercept, sqft_slope = simple_linear_regression(train_data['bedrooms'].values,
train_data['price'].values)
rss_prices_on_bedrooms = get_residual_sum_of_squares(test_data['bedrooms'].values,
test_data['price'].values,
sqft_intercept, sqft_slope)
print 'The RSS of predicting Prices based on Bedrooms is : ' + str(rss_prices_on_bedrooms)
# Compute RSS when using squarfeet on TEST data:
sqft_intercept, sqft_slope = simple_linear_regression(train_data['sqft_living'].values,
train_data['price'].values)
rss_prices_on_sqft = get_residual_sum_of_squares(test_data['sqft_living'].values,
test_data['price'].values,
sqft_intercept, sqft_slope)
print 'The RSS of predicting Prices based on Square Feet is : ' + str(rss_prices_on_sqft)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Useful pandas summary functions
Step2: As we see we get the same answer both ways
Step3: Aside
Step4: We can test that our function works by passing it something where we know the answer. In particular we can generate a feature and then put the output exactly on a line
Step5: Now that we know it works let's build a regression model for predicting price based on sqft_living. Remember that we train on train_data!
Step6: Predicting Values
Step7: Now that we can calculate a prediction given the slope and intercept let's make a prediction. Use (or alter) the following to find out the estimated price for a house with 2650 squarefeet according to the squarefeet model we estimated above.
Step8: Residual Sum of Squares
Step9: Let's test our get_residual_sum_of_squares function by applying it to the test model where the data lie exactly on a line. Since they lie exactly on a line the residual sum of squares should be zero!
Step10: Now use your function to calculate the RSS on training data from the squarefeet model calculated above.
Step11: Predict the squarefeet given price
Step12: Now that we have a function to compute the squarefeet given the price from our simple regression model let's see how big we might expect a house that costs $800,000 to be.
Step13: New Model
Step14: Test your Linear Regression Algorithm
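An optional cross-check for Step14, assuming train_data and simple_linear_regression from the code above are in scope: numpy's polyfit should recover essentially the same line as the closed-form formulas.
import numpy as np
x_check = train_data['sqft_living'].values
y_check = train_data['price'].values
check_intercept, check_slope = simple_linear_regression(x_check, y_check)
np_slope, np_intercept = np.polyfit(x_check, y_check, 1)  # coefficients come highest degree first
print "closed form:", check_intercept, check_slope
print "np.polyfit :", np_intercept, np_slope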
|
2,854
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from parcels import Field, FieldSet, ParticleSet, JITParticle, plotTrajectoriesFile, AdvectionRK4
import numpy as np
xdim, ydim = (10, 20)
Uflow = Field('U', np.ones((ydim, xdim), dtype=np.float32),
lon=np.linspace(0., 1e3, xdim, dtype=np.float32),
lat=np.linspace(0., 1e3, ydim, dtype=np.float32))
Vflow = Field('V', np.zeros((ydim, xdim), dtype=np.float32), grid=Uflow.grid)
fieldset_flow = FieldSet(Uflow, Vflow)
pset = ParticleSet(fieldset_flow, pclass=JITParticle, lon=[0], lat=[900])
output_file = pset.ParticleFile(name='SummedFieldParticle_flow.nc', outputdt=1)
pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file)
output_file.export() # export the trajectory data to a netcdf file
plotTrajectoriesFile('SummedFieldParticle_flow.nc');
gf = 10 # factor by which the resolution of this grid is higher than of the original one.
Ustokes = Field('U', np.zeros((ydim*gf, xdim*gf), dtype=np.float32),
lon=np.linspace(0., 1e3, xdim*gf, dtype=np.float32),
lat=np.linspace(0., 1e3, ydim*gf, dtype=np.float32))
Vstokes = Field('V', -0.2*np.ones((ydim*gf, xdim*gf), dtype=np.float32), grid=Ustokes.grid)
fieldset_stokes=FieldSet(Ustokes, Vstokes)
pset = ParticleSet(fieldset_stokes, pclass=JITParticle, lon=[0], lat=[900])
output_file = pset.ParticleFile(name='SummedFieldParticle_stokes.nc', outputdt=1)
pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file)
output_file.export() # export the trajectory data to a netcdf file
plotTrajectoriesFile('SummedFieldParticle_stokes.nc');
fieldset_sum = FieldSet(U=fieldset_flow.U+fieldset_stokes.U, V=fieldset_flow.V+fieldset_stokes.V)
pset = ParticleSet(fieldset_sum, pclass=JITParticle, lon=[0], lat=[900])
output_file = pset.ParticleFile(name='SummedFieldParticle_sum.nc', outputdt=1)
pset.execute(AdvectionRK4, runtime=10, dt=1, output_file=output_file)
output_file.export() # export the trajectory data to a netcdf file
plotTrajectoriesFile('SummedFieldParticle_sum.nc');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, let's first define a zonal and meridional velocity field on a 1kmx1km grid with a flat mesh. The zonal velocity is uniform and 1 m/s, and the meridional velocity is zero everywhere.
Step2: We then run a particle and plot its trajectory
Step3: The trajectory plot shows a particle moving eastward on the 1 m/s flow, as expected
Step4: We run a particle in this FieldSet and also plot its trajectory
Step5: Now comes the trick of the SummedFields. We can simply define a new FieldSet with a summation of different Fields, as in U=fieldset_flow.U+fieldset_stokes.U.
Step6: And if we then run the particle again and plot its trajectory, we see that it moves southeastward!
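As a rough arithmetic check of Step6, using the values assumed above (a 1 m/s eastward flow, a 0.2 m/s southward Stokes drift, and a 10 s runtime):
u_flow, v_stokes, runtime = 1.0, -0.2, 10.0
dx = u_flow * runtime    # about 10 m eastward
dy = v_stokes * runtime  # about 2 m southward
print(dx, dy)  # a net southeastward displacement, matching the plotted trajectory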
|
2,855
|
<ASSISTANT_TASK:>
Python Code:
import torch
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(torch.nn.Module):
def __init__(self):
super(Model,self).__init__()
self.layer = torch.nn.Linear(1,1)
def forward(self, x):
return self.layer(x)
model = Model()
Optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for epoch in range(1000):
x_tr = torch.from_numpy(x[:2,:1]).type(torch.FloatTensor)
y_tr = torch.from_numpy(y[:2,:1]).type(torch.FloatTensor)
y_pr = model(x_tr)
loss = torch.pow(torch.abs(y_tr - y_pr),2)
Optimizer.zero_grad()
torch.sum(loss).backward()
Optimizer.step()
print(model(torch.from_numpy(x).type(torch.FloatTensor)).detach().numpy())
import torch
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(torch.nn.Module):
def __init__(self):
super(Model,self).__init__()
self.layer = torch.nn.Linear(1,1)
def forward(self, x):
return self.layer(x)
model = Model()
Optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
print('w=', list(model.parameters())[0].detach().numpy())
print('b=', list(model.parameters())[1].detach().numpy())
print()
for epoch in range(1000):
x_tr = torch.from_numpy(x[:2,:1]).type(torch.FloatTensor)
y_tr = torch.from_numpy(y[:2,:1]).type(torch.FloatTensor)
y_pr = model(x_tr)
loss = torch.pow(torch.abs(y_tr - y_pr),2)
if epoch < 3:
print(f'Epoch:{epoch}')
print('y_pr:', y_pr.detach().numpy())
print('y_tr:', y[:2,:1])
print('loss:', loss.detach().numpy())
print()
Optimizer.zero_grad()
torch.sum(loss).backward()
Optimizer.step()
print(model(torch.from_numpy(x).type(torch.FloatTensor)).detach().numpy())
import torch
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(torch.nn.Module):
def __init__(self):
super(Model,self).__init__()
self.layer = torch.nn.Linear(1,1)
self.Optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
def forward(self, x):
x = torch.from_numpy(x).type(torch.FloatTensor)
return self.layer(x)
def fit(self, x, y, epochs):
for epoch in range(epochs):
y_tr = torch.from_numpy(y).type(torch.FloatTensor)
y_pr = model(x)
loss = torch.pow(torch.abs(y_tr - y_pr),2)
self.Optimizer.zero_grad()
torch.sum(loss).backward()
self.Optimizer.step()
model = Model()
model.fit(x[:2], y[:2], epochs=1000)
print(model(x))
import torch
import numpy as np
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
print('Using PyTorch version:', torch.__version__, ' Device:', device)
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(torch.nn.Module):
def __init__(self):
super(Model,self).__init__()
self.layer = torch.nn.Linear(1,1)
self.Optimizer = torch.optim.SGD(self.parameters(), lr=0.01)
def forward(self, x):
x = torch.from_numpy(x).type(torch.FloatTensor).to(device)
return self.layer(x)
def fit(self, x, y, epochs):
for epoch in range(epochs):
y_tr = torch.from_numpy(y).type(torch.FloatTensor).to(device)
y_pr = model(x)
loss = torch.pow(torch.abs(y_tr - y_pr),2)
self.Optimizer.zero_grad()
torch.sum(loss).backward()
self.Optimizer.step()
model = Model().to(device)
model.fit(x[:2], y[:2], epochs=1000)
print(model(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Detail version with monitoring variables
Step2: Compatible version
Step3: GPU Version
Step4: Check whether the model is using the GPU or the CPU (see the sketch below)
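A minimal sketch for Step4, assuming model is the instance built above; these are standard PyTorch attributes:
print(torch.cuda.is_available())         # True if a CUDA device can be used at all
print(next(model.parameters()).device)   # device that actually holds the weights, e.g. cpu or cuda:0
print(next(model.parameters()).is_cuda)  # True only when the weights live on the GPU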
|
2,856
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'auth_read':'user', # Credentials used for writing data.
'auth_write':'service', # Credentials used for writing data.
'partner':'', # Partner ID to run user audit on.
'recipe_slug':'', # Name of Google BigQuery dataset to create.
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'dataset':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}}
}
},
{
'google_api':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for writing data.'}},
'api':'doubleclickbidmanager',
'version':'v1.1',
'function':'queries.listqueries',
'alias':'list',
'results':{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'table':'DV_Reports'
}
}
}
},
{
'google_api':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for writing data.'}},
'api':'displayvideo',
'version':'v1',
'function':'partners.list',
'kwargs':{
'fields':'partners.displayName,partners.partnerId,nextPageToken'
},
'results':{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'table':'DV_Partners'
}
}
}
},
{
'google_api':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'user','description':'Credentials used for writing data.'}},
'api':'displayvideo',
'version':'v1',
'function':'advertisers.list',
'kwargs':{
'partnerId':{'field':{'name':'partner','kind':'integer','order':2,'default':'','description':'Partner ID to run user audit on.'}},
'fields':'advertisers.displayName,advertisers.advertiserId,nextPageToken'
},
'results':{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'table':'DV_Advertisers'
}
}
}
},
{
'google_api':{
'auth':'service',
'api':'displayvideo',
'version':'v1',
'function':'users.list',
'kwargs':{
},
'results':{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'table':'DV_Users'
}
}
}
},
{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'from':{
'query':"SELECT U.userId, U.name, U.email, U.displayName, REGEXP_EXTRACT(U.email, r'@(.+)') AS Domain, IF (ENDS_WITH(U.email, '.gserviceaccount.com'), 'Service', 'User') AS Authentication, IF((Select COUNT(advertiserId) from UNNEST(U.assignedUserRoles)) = 0, 'Partner', 'Advertiser') AS Scope, STRUCT( AUR.partnerId, P.displayName AS partnerName, AUR.userRole, AUR.advertiserId, A.displayName AS advertiserName, AUR.assignedUserRoleId ) AS assignedUserRoles, FROM `{dataset}.DV_Users` AS U, UNNEST(assignedUserRoles) AS AUR LEFT JOIN `{dataset}.DV_Partners` AS P ON AUR.partnerId=P.partnerId LEFT JOIN `{dataset}.DV_Advertisers` AS A ON AUR.advertiserId=A.advertiserId ",
'parameters':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}}
},
'legacy':False
},
'to':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'view':'Barnacle_User_Roles'
}
}
},
{
'bigquery':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'from':{
'query':"SELECT R.*, P.displayName AS partnerName, A.displayName AS advertiserName, FROM ( SELECT queryId, (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_PARTNER' LIMIT 1) AS partnerId, (SELECT CAST(value AS INT64) FROM UNNEST(R.params.filters) WHERE type = 'FILTER_ADVERTISER' LIMIT 1) AS advertiserId, R.schedule.frequency, R.params.metrics, R.params.type, R.metadata.dataRange, R.metadata.sendNotification, DATE(TIMESTAMP_MILLIS(R.metadata.latestReportRunTimeMS)) AS latestReportRunTime, FROM `{dataset}.DV_Reports` AS R) AS R LEFT JOIN `{dataset}.DV_Partners` AS P ON R.partnerId=P.partnerId LEFT JOIN `{dataset}.DV_Advertisers` AS A ON R.advertiserId=A.advertiserId ",
'parameters':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}}
},
'legacy':False
},
'to':{
'dataset':{'field':{'name':'recipe_slug','kind':'string','order':4,'default':'','description':'Name of Google BigQuery dataset to create.'}},
'view':'Barnacle_Reports'
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter DV360 User Audit Recipe Parameters
Step3: 4. Execute DV360 User Audit
|
2,857
|
<ASSISTANT_TASK:>
Python Code:
# Author: Hicham Janati <hicham.janati@inria.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
# necessary for 3d plot even if not used
from mpl_toolkits.mplot3d import Axes3D # noqa
from matplotlib.collections import PolyCollection
# parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a1 = ot.datasets.make_1D_gauss(n, m=20, s=5) # m= mean, s= std
a2 = ot.datasets.make_1D_gauss(n, m=60, s=8)
# make unbalanced dists
a2 *= 3.
# creating matrix A containing all distributions
A = np.vstack((a1, a2)).T
n_distributions = A.shape[1]
# loss matrix + normalization
M = ot.utils.dist0(n)
M /= M.max()
# plot the distributions
pl.figure(1, figsize=(6.4, 3))
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.tight_layout()
# non weighted barycenter computation
weight = 0.5 # 0<=weight<=1
weights = np.array([1 - weight, weight])
# l2bary
bary_l2 = A.dot(weights)
# wasserstein
reg = 1e-3
alpha = 1.
bary_wass = ot.unbalanced.barycenter_unbalanced(A, M, reg, alpha, weights)
pl.figure(2)
pl.clf()
pl.subplot(2, 1, 1)
for i in range(n_distributions):
pl.plot(x, A[:, i])
pl.title('Distributions')
pl.subplot(2, 1, 2)
pl.plot(x, bary_l2, 'r', label='l2')
pl.plot(x, bary_wass, 'g', label='Wasserstein')
pl.legend()
pl.title('Barycenters')
pl.tight_layout()
# barycenter interpolation
n_weight = 11
weight_list = np.linspace(0, 1, n_weight)
B_l2 = np.zeros((n, n_weight))
B_wass = np.copy(B_l2)
for i in range(0, n_weight):
weight = weight_list[i]
weights = np.array([1 - weight, weight])
B_l2[:, i] = A.dot(weights)
B_wass[:, i] = ot.unbalanced.barycenter_unbalanced(A, M, reg, alpha, weights)
# plot interpolation
pl.figure(3)
cmap = pl.cm.get_cmap('viridis')
verts = []
zs = weight_list
for i, z in enumerate(zs):
ys = B_l2[:, i]
verts.append(list(zip(x, ys)))
ax = pl.gcf().gca(projection='3d')
poly = PolyCollection(verts, facecolors=[cmap(a) for a in weight_list])
poly.set_alpha(0.7)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('x')
ax.set_xlim3d(0, n)
ax.set_ylabel(r'$\alpha$')
ax.set_ylim3d(0, 1)
ax.set_zlabel('')
ax.set_zlim3d(0, B_l2.max() * 1.01)
pl.title('Barycenter interpolation with l2')
pl.tight_layout()
pl.figure(4)
cmap = pl.cm.get_cmap('viridis')
verts = []
zs = weight_list
for i, z in enumerate(zs):
ys = B_wass[:, i]
verts.append(list(zip(x, ys)))
ax = pl.gcf().gca(projection='3d')
poly = PolyCollection(verts, facecolors=[cmap(a) for a in weight_list])
poly.set_alpha(0.7)
ax.add_collection3d(poly, zs=zs, zdir='y')
ax.set_xlabel('x')
ax.set_xlim3d(0, n)
ax.set_ylabel(r'$\alpha$')
ax.set_ylim3d(0, 1)
ax.set_zlabel('')
ax.set_zlim3d(0, B_l2.max() * 1.01)
pl.title('Barycenter interpolation with Wasserstein')
pl.tight_layout()
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate data
Step2: Plot data
Step3: Barycenter computation
Step4: Barycentric interpolation
|
2,858
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
source_int = []
for text in source_text.split('\n'):
source_int.append([source_vocab_to_int[word] for word in text.split()])
target_int = []
for text in target_text.split('\n'):
target_int.append([target_vocab_to_int[word] for word in text.split()])
for text in target_int:
text.append(target_vocab_to_int['<EOS>'])
return source_int, target_int
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
# TODO: Implement Function
input_ = tf.placeholder(tf.int32, (None, None), name='input')
target_ = tf.placeholder(tf.int32, (None, None), name='target')
lr_ = tf.placeholder(tf.float32, name='learning_rate')
keep_prob_ = tf.placeholder(tf.float32, name='keep_prob')
target_seq_len_ = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
max_target_len_ = tf.reduce_max(target_seq_len_, name='max_target_len')
source_seq_len_ = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
return input_, target_, lr_, keep_prob_, target_seq_len_, max_target_len_, source_seq_len_
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([target_vocab_to_int['<GO>'] * tf.ones((batch_size, 1), tf.int32), ending], axis=1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_encoding_input(process_decoder_input)
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
# TODO: Implement Function
encoding_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=0))
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return cell
cells = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
encoder_output, encoder_state = tf.nn.dynamic_rnn(
cells, encoding_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
return encoder_output, encoder_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
# TODO: Implement Function
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=target_sequence_length,
time_major=False)
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, training_helper, encoder_state, output_layer)
final_output, final_state = tf.contrib.seq2seq.dynamic_decode(
training_decoder, impute_finished=True, maximum_iterations=max_summary_length)
return final_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
# TODO: Implement Function
start_tokens = start_of_sequence_id * tf.ones((batch_size), tf.int32)
start_tokens = tf.identity(start_tokens, name='start_tokens')
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
dec_embeddings, start_tokens, end_of_sequence_id)
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, inference_helper, encoder_state, output_layer)
final_output, final_state = tf.contrib.seq2seq.dynamic_decode(
inference_decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
return final_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
# TODO: Implement Function
# Decoder embedding
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# Construct the decoding cell
def make_cell(rnn_size, keep_prob):
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=0))
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
return cell
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size, keep_prob) for i in range(num_layers)])
output_layer = Dense(target_vocab_size,
kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
with tf.variable_scope('decoder'):
training_decoder_output = decoding_layer_train(encoder_state,
dec_cell,
dec_embed_input,
target_sequence_length,
max_target_sequence_length,
output_layer,
keep_prob)
with tf.variable_scope('decoder', reuse=True):
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
vocab_size = target_vocab_size
inference_decoder_output = decoding_layer_infer(encoder_state,
dec_cell,
dec_embeddings,
start_of_sequence_id,
end_of_sequence_id,
max_target_sequence_length,
vocab_size,
output_layer,
batch_size,
keep_prob)
return training_decoder_output, inference_decoder_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
# TODO: Implement Function
_, encoder_state = encoding_layer(input_data,
rnn_size,
num_layers,
keep_prob,
source_sequence_length,
source_vocab_size,
enc_embedding_size)
dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
training_decoder_output, inference_decoder_output = decoding_layer(dec_input,
encoder_state,
target_sequence_length,
max_target_sentence_length,
rnn_size,
num_layers,
target_vocab_to_int,
target_vocab_size,
batch_size,
keep_prob,
dec_embedding_size)
return training_decoder_output, inference_decoder_output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 2
# Batch Size
batch_size = 32
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 256
decoding_embedding_size = 256
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.8
display_step = 100
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
def pad_sentence_batch(sentence_batch, pad_int):
Pad sentences with <PAD> so that each sentence of a batch has the same length
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
Batch targets, sources, and the lengths of their sentences together
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
DON'T MODIFY ANYTHING IN THIS CELL
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
sentence = sentence.lower()
unknow = vocab_to_int['<UNK>']
sentence = [vocab_to_int.get(word, unknow) for word in sentence.split()]
return sentence
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoder Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step40: Batch and pad the source and target sequences
Step43: Train
Step45: Save Parameters
Step47: Checkpoint
Step50: Sentence to Sequence
Step52: Translate
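A toy illustration of the Process Decoder Input step above (drop the last token of each target sequence and prepend the <GO> id); the ids here are made up for illustration:
import numpy as np
go_id = 1  # assumed id for <GO>
targets = np.array([[11, 12, 13, 3],   # 3 standing in for <EOS>
                    [21, 22, 3, 0]])   # 0 standing in for <PAD>
dec_input = np.concatenate([np.full((targets.shape[0], 1), go_id), targets[:, :-1]], axis=1)
print(dec_input)  # [[ 1 11 12 13] [ 1 21 22 3]]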
|
2,859
|
<ASSISTANT_TASK:>
Python Code:
# Imports
from sklearn import metrics
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# Training Data
training_raw = pd.read_table("../data/training_data.dat")
df_training = pd.DataFrame(training_raw)
# test Data
test_raw = pd.read_table("../data/test_data.dat")
df_test = pd.DataFrame(test_raw)
# target names
target_categories = ['Unclassified','Art','Aviation','Boating','Camping /Walking /Climbing','Collecting']
# Extract target results from panda
target = df_training["CategoryID"].values
# Create classifier class
model_dtc = DecisionTreeClassifier()
# features
feature_names_integers = ['Barcode','UnitRRP']
# Extra features from panda (without description)
training_data_integers = df_training[feature_names_integers].values
training_data_integers[:3]
# train model
model_dtc.fit(training_data_integers, target)
# Extract test data and test the model
test_data_integers = df_test[feature_names_integers].values
test_target = df_test["CategoryID"].values
expected = test_target
predicted_dtc = model_dtc.predict(test_data_integers)
print(metrics.classification_report(expected, predicted_dtc, target_names=target_categories))
print(metrics.confusion_matrix(expected, predicted_dtc))
metrics.accuracy_score(expected, predicted_dtc, normalize=True, sample_weight=None)
model_dtc = DecisionTreeClassifier()
model_dtc.fit(training_data_integers, target)
predicted_dtc = model_dtc.predict(test_data_integers)
metrics.accuracy_score(expected, predicted_dtc, normalize=True, sample_weight=None)
model_dtc = DecisionTreeClassifier()
model_dtc.fit(training_data_integers, target)
predicted_dtc = model_dtc.predict(test_data_integers)
metrics.accuracy_score(expected, predicted_dtc, normalize=True, sample_weight=None)
model_dtc = DecisionTreeClassifier(random_state=511)
model_dtc.fit(training_data_integers, target)
predicted_dtc = model_dtc.predict(test_data_integers)
metrics.accuracy_score(expected, predicted_dtc, normalize=True, sample_weight=None)
model_dtc = DecisionTreeClassifier(random_state=511)
model_dtc.fit(training_data_integers, target)
predicted_dtc = model_dtc.predict(test_data_integers)
metrics.accuracy_score(expected, predicted_dtc, normalize=True, sample_weight=None)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: and again.
Step2: one more time
Step3: We see that the results are not the same. This is because the Decision Tree Classifier chooses a feature at random in order to try to avoid overfitting. As we are about to start trying to improve the results by trying different strategies of preparing and loading data, having varying results will be unhelpful (see the sketch below).
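A quick way to confirm Step3, assuming training_data_integers, target and test_data_integers from the code above are in scope: two fits that share a random_state give identical predictions.
import numpy as np
clf_a = DecisionTreeClassifier(random_state=511).fit(training_data_integers, target)
clf_b = DecisionTreeClassifier(random_state=511).fit(training_data_integers, target)
print(np.array_equal(clf_a.predict(test_data_integers), clf_b.predict(test_data_integers)))  # True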
|
2,860
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_excel("H-2B_Disclosure_Data_FY15_Q4.xlsx")
df.head()
#df.info()
df['CASE_NUMBER'].count()
df['NBR_WORKERS_REQUESTED'].sum()
df.groupby('FULL_TIME_POSITION')['NBR_WORKERS_REQUESTED'].sum()
df['NBR_WORKERS_CERTIFIED'].sum()
df.groupby('FULL_TIME_POSITION')['NBR_WORKERS_CERTIFIED'].sum()
df.groupby('BASIC_UNIT_OF_PAY')['PREVAILING_WAGE'].mean()
df.groupby('BASIC_UNIT_OF_PAY')['BASIC_UNIT_OF_PAY'].count()
worst_wage = df[df['BASIC_UNIT_OF_PAY'] == 'Hour'].sort_values(by='PREVAILING_WAGE', ascending=True).head()
worst_wage[['BASIC_UNIT_OF_PAY', 'PREVAILING_WAGE', 'EMPLOYER_NAME', 'JOB_TITLE', 'WORKSITE_CITY', 'NBR_WORKERS_REQUESTED', 'NBR_WORKERS_CERTIFIED']]
lowest_wages_accepted = df[df['NBR_WORKERS_CERTIFIED'] != 0].sort_values(by='PREVAILING_WAGE', ascending=True).head()
lowest_wages_accepted[['BASIC_UNIT_OF_PAY', 'PREVAILING_WAGE', 'EMPLOYER_NAME', 'JOB_TITLE', 'WORKSITE_CITY', 'NBR_WORKERS_REQUESTED', 'NBR_WORKERS_CERTIFIED']]
df.groupby('BASIC_UNIT_OF_PAY')['BASIC_UNIT_OF_PAY'].count()
#df.groupby('BASIC_UNIT_OF_PAY')['PREVAILING_WAGE'].describe()
#df.groupby('PREVAILING_WAGE').count()
df.groupby('EMPLOYER_COUNTRY')['EMPLOYER_COUNTRY'].count()
#x = df.groupby('JOB_TITLE')['JOB_TITLE'].value_counts()
df['JOB_TITLE'].value_counts().head(10)
plt.style.use('ggplot')
df['JOB_TITLE'].value_counts(ascending=True).tail(10).plot(kind='barh')
plt.savefig("Top_Jobs.svg")
##Is there an efficient way for Pandas to clean the data? Merge "Landscape Laborer" with "LANDSCAPE LABORER" etc.?
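# One possible (untested) way to merge case variants such as "Landscape Laborer"
# and "LANDSCAPE LABORER": normalize the strings before counting.
df['JOB_TITLE'].str.title().value_counts().head(10)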
#x = df['EMPLOYER_STATE'].value_counts(ascending=False).head(10) * df['NBR_WORKERS_REQUESTED'].sum()
df['EMPLOYER_STATE'].value_counts(ascending=False).head(10).plot(kind='bar')
plt.savefig("semand_in_states.svg")
#x = df['EMPLOYER_STATE'].value_counts(ascending=False).head(10) * df['NBR_WORKERS_REQUESTED'].sum()
df['EMPLOYER_STATE'].value_counts(ascending=True).head(10).plot(kind='bar')
plt.savefig("demand_in_states.svg")
Workers_in_state_count = df.groupby('EMPLOYER_STATE')['NBR_WORKERS_REQUESTED'].sum()
Workers_in_state_count.sort_values(ascending=True).tail(10).plot(kind='barh', legend='NBR_WORKERS_REQUESTED')
plt.savefig("workers_requestet_in_states.svg")
#changing df['NAIC_CODE'] from non_null object into int
#This .fillna(0.0) is magic. I found it here:
#http://stackoverflow.com/questions/21291259/convert-floats-to-ints-in-pandas
#df['NAIC_CODE'] = df['NAIC_CODE'].fillna(0.0).astype(int)
#But it turns out, it only works for my one fill. Not on the other. Why?
NAIC_CODEs = pd.read_excel("6-digit_2012_Code.xls")
NAIC_CODEs.info()
#Changing the NAIC_Codesfrom non-null object into float64
#NAIC_CODEs['NAICS12'] = df['NAIC_CODE'].fillna(0.0).astype(int)
NAIC_CODEs.head()
#And now reimporting the original file.
df = pd.read_excel("H-2B_Disclosure_Data_FY15_Q4.xlsx")
#now in the NAIC_CODE is a Float64 in the cells we want to merge.
df_merged = df.merge(NAIC_CODEs, how = 'left', left_on = 'NAIC_CODE', right_on ='NAICS2012')
#df_merged.info()
df_merged['Industry'].value_counts().head(10)
workers_by_industry = df_merged.groupby('Industry')['NBR_WORKERS_REQUESTED'].sum()
workers_by_industry.sort_values(ascending=True).tail(10).plot(kind='barh', legend='NBR_WORKERS_REQUESTED')
plt.savefig("workers_by_industry.svg")
df['EMPLOYER_NAME'].value_counts().head(5)
company_workers_demand = df.groupby('EMPLOYER_NAME')['NBR_WORKERS_REQUESTED'].sum()
company_workers_demand.sort_values(ascending=True).tail(10).plot(kind='barh')
plt.savefig("company_workers_demand.svg")
company_workers_demand = df.groupby('EMPLOYER_NAME')['NBR_WORKERS_CERTIFIED'].sum()
company_workers_demand.sort_values(ascending=True).tail(10).plot(kind='barh')
plt.savefig("company_workers_demand.svg")
SILVER_BAY_SEAFOODS = df[df['EMPLOYER_NAME'] == 'SILVER BAY SEAFOODS, LLC']
SILVER_BAY_SEAFOODS[['JOB_TITLE', 'PREVAILING_WAGE', 'HOURLY_WORK_SCHEDULE_AM', 'HOURLY_WORK_SCHEDULE_PM', 'OVERTIME_RATE_FROM', 'OVERTIME_RATE_TO', 'NATURE_OF_TEMPORARY_NEED', 'NBR_WORKERS_REQUESTED', 'NBR_WORKERS_CERTIFIED']]
SOCCER_CAMPS = df[df['EMPLOYER_NAME'] == 'UK International Soccer Camps']
SOCCER_CAMPS[['JOB_TITLE', 'PREVAILING_WAGE', 'HOURLY_WORK_SCHEDULE_AM', 'HOURLY_WORK_SCHEDULE_PM', 'OVERTIME_RATE_FROM', 'OVERTIME_RATE_TO', 'NATURE_OF_TEMPORARY_NEED', 'NBR_WORKERS_REQUESTED', 'NBR_WORKERS_CERTIFIED']]
# Leftover from a different (German traffic-accident) analysis; df_unfaelle is not defined in this notebook:
# df_unfaelle.groupby('Jahr')[(df_unfaelle['Lichtverhältnis UAP'] == 622)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. How many requests did the Office of Foreign Labor Certification (OFLC) receive in 2015?
Step2: 2. How many jobs did those requests cover in total? And how many were full-time positions?
Step3: 3. How many jobs did the ETA National Processing Center actually certify?
Step4: 4. What was the average pay?
Step5: The majority of the jobs are paid hourly, at an average rate of $12.65 an hour.
Step6: This table displays the lowest-paid jobs for which no workers were certified.
Step7: And this table shows that landscape laborers are the ones that are earning the least.
Step8: 6. What was the most common unit of pay (daily, weekly, monthly)?
Step9: 7. Work out the total pay amount paid to H-2B laborers.
Step10: Approx. Count * Mean per pay unit (Year, Week, Month, Hour(8)): about 33 Million, plus Bi-Weekly (180'000), for roughly 40 million $.
Step11: 9. Most common job title. Graph this.
Step12: 10. Which US states have the largest need for unskilled workers? Make a graph of this.
Step13: 11. Which industries had the largest need?
Step14: Importing the NAIC_Codes from here.
Step15: 12. Which companies had the largest need? Compare acceptance/denials of each company.
Step16: BONUS: Looking into Silver Bay Seafoods and UK International Soccer Camps.
|
2,861
|
<ASSISTANT_TASK:>
Python Code:
import tarfile
# File name formats
filename_format="M06A_{year:04d}{month:02d}{day:02d}.tar.gz".format
xz_filename_format="xz/M06A_{year:04d}{month:02d}{day:02d}.tar.xz".format
csv_format = "M06A/{year:04d}{month:02d}{day:02d}/{hour:02d}/TDCS_M06A_{year:04d}{month:02d}{day:02d}_{hour:02d}0000.csv".format
# Try opening the file we just downloaded
data_config ={"year":2016, "month":12, "day":18}
tar = tarfile.open(filename_format(**data_config), 'r')
# If you did not download it, you can try the xz file instead
#data_config ={"year":2016, "month":11, "day":18}
#tar = tarfile.open(xz_filename_format(**data_config), 'r')
# List the archive contents
tar.list()
# Open the 10 o'clock data inside the archive
csv = tar.extractfile(csv_format(hour=10, **data_config))
# A file-like object
csv
# Print the first ten lines to take a look
for i in range(10):
print(csv.readline().decode())
import pandas
# Set the column names
M06A_fields = ['VehicleType',
'DetectionTime_O','GantryID_O',
'DetectionTime_D','GantryID_D ',
'TripLength', 'TripEnd', 'TripInformation']
# Open the 10 o'clock data again
csv = tar.extractfile(csv_format(hour=10, **data_config))
# Read in the data
data = pandas.read_csv(csv, names=M06A_fields)
data
# First check whether there are any abnormal records
data[data.TripEnd == 'N'].shape
# Drop the abnormal records first
data = data[data.TripEnd == 'Y']
# Then simply delete the TripEnd column
del data['TripEnd']
# Alternatively: data.drop('TripEnd', axis=1, inplace=True)
# First 5 records
# or data.iloc[:5]
data.head(5)
# The 12th record
data.iloc[11]
# For our purposes, only TripInformation and VehicleType really matter
# Focus on these two columns first
data = data[['VehicleType', "TripInformation"]]
data.head(5)
# Take a look at the small-truck records
data.query('VehicleType==32')
# Or look at the passenger-car records
data[data.VehicleType==31]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use pandas to process the data
Step2: Set the csv column names according to the manual
Step3: Take a look at the contents
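A hedged sketch of how the TripInformation field could be expanded into individual gantry passings. It assumes each entry has the form '<timestamp>+<gantry id>' and that entries are separated by '; '; check the M06A manual and swap the split if the order differs.
def parse_trip(trip_information):
    # Expand one TripInformation string into a list of (timestamp, gantry) tuples.
    points = []
    for item in trip_information.split('; '):
        timestamp, gantry = item.split('+')
        points.append((timestamp, gantry))
    return points
# Example: gantry sequence of the first small-truck trip
# first_trip = data.query('VehicleType==32')['TripInformation'].iloc[0]
# print(parse_trip(first_trip)[:3])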
|
2,862
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.feature_extraction.text import CountVectorizer
# Create a list of text documents:
text = ["The quick brown fox jumped over the lazy dog."]
# Create the transform:
vectorizer = CountVectorizer()
# Tokenize and build vocabulary:
vectorizer.fit(text)
# Summarize:
print("vectorizer.vocabulary: {}".format(vectorizer.vocabulary_))
# Encode the document:
vector = vectorizer.transform(text)
# Summarize the encoded vector:
print("vector.shape: {}".format(vector.shape))
print("type(vector): {}".format(type(vector)))
print("vector.toarray(): {}".format(vector.toarray()))
print(vectorizer.vocabulary_)
print("vector.shape: {}".format(vector.shape))
print("type(vector): {}".format(type(vector)))
print("vector.toarray(): {}".format(vector.toarray()))
# Encode another sample document:
text2 = ["the puppy"]
vector = vectorizer.transform(text2)
print(vector.toarray())
from sklearn.feature_extraction.text import TfidfVectorizer
# List of text documents:
text = ["The quick brown fox jumped over the lazy dog.",
"The dog.",
"The fox"]
# Create the transform:
vectorizer = TfidfVectorizer()
# Tokenize and build vocabulary:
vectorizer.fit(text)
# Summarize:
print("vectorizer.vocabulary_: {}".format(vectorizer.vocabulary_))
print("vectorizer.idf_: {}".format(vectorizer.idf_))
# Encode document:
vector = vectorizer.transform([text[0]])
# Summarize encoded vector:
print("vector.shape: {}".format(vector.shape))
print("vector.toarray(): {}".format(vector.toarray()))
from sklearn.feature_extraction.text import HashingVectorizer
# Make a list of text documents:
text = ["The quick brown fox jumped over the lazy dog."]
# Create the transform:
vectorizer = HashingVectorizer(n_features=20)
# Encode document:
vector = vectorizer.transform(text)
print("vector: \n{}".format(vector))
# Summarize encoded vector:
print("vector.shape: {}".format(vector.shape))
print("vector.toarray(): {}".format(vector.toarray()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Above, you can see that we access the vocabulary to see what exactly was tokenized by calling the vocabulary_ attribute on the vectorizer.
Step2: We can see that all words were made lowercase by default and that the punctuation was ignored.
Step3: Importantly, the same vectorizer can be used on documents that contain words not included in the vocabulary.
Step4: Running this example prints the array version of the encoded sparse vector showing one occurrence of the one word in the vocabulary and the other word in the vocabulary ignored completely.
Step5: A vocabulary of 8 words is learned from the documents and each word is assigned a unique integer index in the output vector.
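A small sketch of where the idf_ values come from, assuming scikit-learn's default smoothed formula idf(t) = ln((1 + n) / (1 + df(t))) + 1, where n is the number of documents and df(t) the document frequency of term t.
import numpy as np
n_docs = 3
doc_freq = {'the': 3, 'dog': 2, 'fox': 2, 'quick': 1}  # counted by hand from the three texts
for term, df_t in doc_freq.items():
    idf = np.log((1 + n_docs) / (1 + df_t)) + 1
    print("{:>6}: idf = {:.4f}".format(term, idf))
# 'the' appears in every document, so its idf is 1.0 -- it carries little information.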
|
2,863
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: we use non-random initializations for the cluster centers
# to make autograding feasible; normally cluster centers would be
# randomly initialized.
data = np.load('data/X.npz')
X = data['X']
centers = data['centers']
print ('X: \n' + str(X))
print ('\ncenters: \n' + str(centers))
k_means??
def distance(a, b):
Returns the Euclidean distance between two points,
a and b, in R^2.
Parameters
----------
a, b : numpy arrays of shape (2,)
The (x,y) coordinates for two points, a and b,
in R^2. E.g., a[0] is the x coordinate,
and a[1] is the y coordinate.
Returns
-------
distance : float
The Euclidean distance between a and b
### BEGIN SOLUTION
return np.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)
### END SOLUTION
# add your own test cases here!
Check distances computes the correct values
from numpy.testing import assert_allclose
assert_allclose(distance(np.array([0.0, 0.0]), np.array([0.0, 1.0])), 1.0)
assert_allclose(distance(np.array([3.0, 3.0]), np.array([4.3, 5.0])), 2.3853720883753127)
assert_allclose(distance(np.array([130.0, -25.0]), np.array([0.4, 15.0])), 135.63244449614552)
print("Success!")
def update_assignments(num_clusters, X, centers):
Returns the cluster assignment (number) for each data point
in X, computed as the closest cluster center.
Parameters
----------
num_clusters : int
The number of disjoint clusters (i.e., k) in
the X
X : numpy array of shape (m, 2)
An array of m data points in R^2.
centers : numpy array of shape (num_clusters, 2)
The coordinates for the centers of each cluster
Returns
-------
cluster_assignments : numpy array of shape (m,)
An array containing the cluster label assignments
for each data point in X. Each cluster label is an integer
between 0 and (num_clusters - 1).
### BEGIN SOLUTION
cluster_assignments = []
for x in X:
cluster_assignments.append(np.array([distance(x, c) for c in centers]).argmin())
return np.array(cluster_assignments)
### END SOLUTION
# add your own test cases here!
Check update_assignments computes the correct values
from nose.tools import assert_equal
from numpy.testing import assert_array_equal
# load the data
data = np.load('data/X.npz')
X = data['X']
# validate update_assignments using different values
actual = update_assignments(2, X, np.array([[3, 2], [1, 4]]))
expected = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0])
# is the output of the correct shape?
assert_equal(actual.shape[0], X.shape[0])
# are the cluster labels correct?
assert_array_equal(expected, actual)
# validate update_assignments using different values
actual = update_assignments(3, X[:X.shape[0]//2], np.array([X[0], X[1], X[2]]))
expected = np.array([0, 1, 2, 2, 0, 2, 1, 2, 2, 2, 0, 0, 0, 0, 0])
# is the output of the correct shape?
assert_equal(actual.shape[0], X.shape[0] // 2)
# are the cluster labels correct?
assert_array_equal(expected, actual)
# check that it uses distance
old_distance = distance
del distance
try:
update_assignments(2, X, np.array([[3, 2], [1, 4]]))
except NameError:
pass
else:
raise AssertionError("update_assignments does not call distance")
finally:
distance = old_distance
del old_distance
print("Success!")
def update_parameters(num_clusters, X, cluster_assignment):
Recalculates cluster centers running update_assignments.
Parameters
----------
num_clusters : int
The number of disjoint clusters (i.e., k) in
the X
X : numpy array of shape (m, 2)
An array of m data points in R^2
cluster_assignment : numpy array of shape (m,)
The array of cluster labels assigned to each data
point as returned from update_assignments
Returns
-------
updated_centers : numpy array of shape (num_clusters, 2)
An array containing the new positions for each of
the cluster centers
### BEGIN SOLUTION
updated_centers = []
for i in np.unique(cluster_assignment):
cluster_idx = np.argwhere(cluster_assignment == i).ravel()
updated_centers.append(np.mean(X[cluster_idx,:], axis=0))
return np.asarray(updated_centers)
### END SOLUTION
# add your own test cases here!
Check update_parameters computes the correct values
from nose.tools import assert_equal
from numpy.testing import assert_allclose
# load the data
data = np.load('data/X.npz')
X = data['X']
# validate update_assignments using different values
cluster_assignment1 = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0])
actual = update_parameters(2, X, cluster_assignment1)
expected = np.array([[ 3.24286584, 2.71362623], [ 2.80577245, 4.07633606]])
assert_allclose(expected, actual)
cluster_assignment2 = np.array([0, 1, 2, 2, 0, 2, 1, 2, 2, 2, 0, 0, 0, 0, 0])
actual = update_parameters(3, X[:X.shape[0]//2], cluster_assignment2)
expected = np.array([[ 3.4914304 , 2.79181724], [ 3.03095255, 2.02958778], [ 2.86686881, 1.76070598]])
assert_allclose(expected, actual, rtol=1e-6)
print("Success!")
# load the edata
data = np.load('data/X.npz')
X = data['X']
centers = data['centers']
# run k-means
cluster_assignments, updated_centers = k_means(3, X, centers, update_assignments, update_parameters, n_iter=4)
def assign_new_object(new_object, updated_centers):
Returns the cluster label (number) for new_object using k-means
clustering.
Parameters
----------
new_object : numpy array of shape (2,)
The (x,y) coordinates of a new object to be classified
updated_centers : numpy array of shape (num_clusters,2)
An array containing the updated (x,y) coordinates for
each cluster center
Returns
-------
label : int
The cluster label assignment for new_object. This is a
number between 0 and and (num_clusters - 1).
### BEGIN SOLUTION
return np.array([distance(new_object, c) for c in updated_centers]).argmin()
### END SOLUTION
# add your own test cases here!
Check assign_new_object computes the correct values
from nose.tools import assert_equal
# validate update_assignments using different values
centers1 = np.array([[ 3.17014624, 2.42738134], [ 2.90932354, 4.26426491]])
assert_equal(assign_new_object(np.array([0, 1]), centers1), 0)
assert_equal(assign_new_object(np.array([1, 0]), centers1), 0)
assert_equal(assign_new_object(np.array([3, 2]), centers1), 0)
assert_equal(assign_new_object(np.array([2, 4]), centers1), 1)
centers2 = np.array([[ 3.170146, 2.427381], [ 3.109456, 1.902395], [ 2.964183, 1.827484]])
assert_equal(assign_new_object(np.array([0, 1]), centers2), 2)
assert_equal(assign_new_object(np.array([1, 0]), centers2), 2)
assert_equal(assign_new_object(np.array([3, 2]), centers2), 1)
assert_equal(assign_new_object(np.array([2, 4]), centers2), 0)
# check that it uses distance
old_distance = distance
del distance
try:
update_assignments(2, X, np.array([[3, 2], [1, 4]]))
except NameError:
pass
else:
raise AssertionError("assign_new_object does not call distance")
finally:
distance = old_distance
del old_distance
print("Success!")
# load the edata
data = np.load('data/X.npz')
X = data['X']
centers = data['centers']
# run k-means
cluster_assignments, updated_centers = k_means(3, X, centers, update_assignments, update_parameters, n_iter=4)
new_object = np.array([3.3, 3.5]) # image coordinates
label = assign_new_object(new_object, updated_centers)
print ('The new object was assigned to cluster: '+ str(label))
plot_final(X, cluster_assignments, updated_centers, new_object, assign_new_object)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Also, take a look at the imported functions k_means
Step4: This is the function you will run in Part C once you have completed the helper functions in parts A and B.
Step7: Now, we will write a function to update the cluster that each point is assigned to by computing the distance to the center of each cluster. Complete the `update_assignments` function to do this using your `distance` function.
Step10: Part B (1.5 points)
Step11: Part C
Step14: If the functions you completed above are working properly, you should see a figure containing a subplot of the output from steps (1) and (2) for four iterations of the algorithm. This plot should give you a sense of how the algorithm progresses over time. The data points are each assigned to one of three colors corresponding to their current cluster label. The cluster centers are plotted as stars.
Step15: Part E (1.5 points)
Step16: Once you've implemented assign_new_object, give it a spin on the image of the Shih-Tzu
Step17: Finally, we can visualize this result against the true assignments using the helper function plot_final
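The k_means driver itself is only inspected with k_means?? in the code above; a minimal sketch of what such a driver typically looks like, built on this notebook's helper functions, is shown below (the real helper also plots each iteration, which is omitted here).
def k_means_sketch(num_clusters, X, init_centers,
                   update_assignments, update_parameters, n_iter=4):
    centers = init_centers
    cluster_assignments = None
    for _ in range(n_iter):
        # step (1): assign each point to its closest center
        cluster_assignments = update_assignments(num_clusters, X, centers)
        # step (2): move each center to the mean of its assigned points
        centers = update_parameters(num_clusters, X, cluster_assignments)
    return cluster_assignments, centers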
|
2,864
|
<ASSISTANT_TASK:>
Python Code:
import csv
data = {}
with open("songdata.csv") as file:
for author,_,_,lyric in csv.reader(file):
data[author] = data.get(author,{})
for word in set(lyric.lower().split()):
data[author][word] = data[author].get(word,0) + 1
data["ABBA"]
import re
# https://stackoverflow.com/questions/875968/how-to-remove-symbols-from-a-string-with-python
def strip_simbols(string):
return re.sub(r'[^\w]', '', string)
import csv
data = {}
with open("songdata.csv") as file:
for author,_,_,lyric in csv.reader(file):
data[author] = data.get(author,{})
for word in set(lyric.lower().split()):
word = strip_simbols(word)
data[author][word] = data[author].get(word,0) + 1
data["ABBA"]
for author in data:
print(author)
import utilidades_diccionarios
from IPython.display import display, Markdown, Latex
import random
artists = list(data.keys())
artist_a = random.choice(artists)
artist_b = random.choice(artists)
diference = utilidades_diccionarios.calcular_distancia(data[artist_a],data[artist_b],2)
display(Markdown( f'Diferencia entre *{artist_a}* y _{artist_b}_ es: **{diference}** '))
artist_a = random.choice(artists)
artist_b = random.choice(artists)
diference = utilidades_diccionarios.calcular_distancia(data[artist_a],data[artist_b],2)
display(Markdown( f'Diferencia entre *{artist_a}* y _{artist_b}_ es: **{diference}** '))
artist_a = random.choice(artists)
artist_b = random.choice(artists)
diference = utilidades_diccionarios.calcular_distancia(data[artist_a],data[artist_b],2)
display(Markdown( f'Diferencia entre *{artist_a}* y _{artist_b}_ es: **{diference}** '))
artist_a = random.choice(artists)
artist_b = random.choice(artists)
diference = utilidades_diccionarios.calcular_distancia(data[artist_a],data[artist_b],2)
display(Markdown( f'Diferencia entre *{artist_a}* y _{artist_b}_ es: **{diference}** '))
artist_a = random.choice(artists)
artist_b = random.choice(artists)
diference = utilidades_diccionarios.calcular_distancia(data[artist_a],data[artist_b],2)
display(Markdown( f'Diferencia entre *{artist_a}* y _{artist_b}_ es: **{diference}** '))
import csv
data = {}
authors = ["ABBA"]
with open("songdata.csv") as file:
for author,_,_,lyric in csv.reader(file):
if author not in authors:
continue
prev = ""
for word in lyric.lower().split():
new_word = word
while new_word and not new_word[-1].isalpha():
new_word = new_word[:-1]
data[prev] = data.get(prev,[])
data[prev].append(new_word)
prev = new_word
if not word[-1].isalpha():
data[prev] = data.get(prev,[])
data[prev].append(word[-1])
prev = word[-1]
data[prev] = data.get(prev,[])
data[prev].append("")
cancion = []
prev = ""
for _ in range(100):
next_word = random.choice(data[prev])
cancion.append(next_word)
prev = next_word
display(Markdown("_" + " ".join(cancion) + "_") )
import csv
data = {}
authors = ["ABBA"]
with open("songdata.csv") as file:
prev = ("","")
for author,_,_,lyric in csv.reader(file):
if author not in authors:
continue
for word in lyric.lower().split():
new_word = word
if not word[0].isalpha():
data[prev] = data.get(prev,[])
data[prev].append(word[0])
prev = (prev[-1],word[0])
while new_word and not new_word[0].isalpha():
new_word = new_word[1:]
while new_word and not new_word[-1].isalpha():
new_word = new_word[:-1]
data[prev] = data.get(prev,[])
data[prev].append(new_word)
prev = (prev[-1],new_word)
if not word[-1].isalpha():
data[prev] = data.get(prev,[])
data[prev].append(word[-1])
prev = (prev[-1],word[-1])
data[prev] = data.get(prev,[])
data[prev].append("")
prev = (prev[-1],"")
cancion = []
prev = ("","")
for _ in range(100):
next_word = random.choice(data[prev])
cancion.append(next_word)
prev = (prev[-1],next_word)
display(Markdown("_" + " ".join(cancion) + "_") )
import csv
data = {}
authors = ["ABBA","Stevie Wonder","Hank Williams"]
with open("songdata.csv") as file:
prev = ("","")
for author,_,_,lyric in csv.reader(file):
if author not in authors:
continue
for word in lyric.lower().split():
new_word = word
if not word[0].isalpha():
data[prev] = data.get(prev,[])
data[prev].append(word[0])
prev = (prev[-1],word[0])
while new_word and not new_word[0].isalpha():
new_word = new_word[1:]
while new_word and not new_word[-1].isalpha():
new_word = new_word[:-1]
data[prev] = data.get(prev,[])
data[prev].append(new_word)
prev = (prev[-1],new_word)
if not word[-1].isalpha():
data[prev] = data.get(prev,[])
data[prev].append(word[-1])
prev = (prev[-1],word[-1])
data[prev] = data.get(prev,[])
data[prev].append("")
prev = (prev[-1],"")
cancion = []
prev = ("","")
for _ in range(100):
next_word = random.choice(data[prev])
cancion.append(next_word)
prev = (prev[-1],next_word)
display(Markdown("_" + " ".join(cancion) + "_") )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Something we did not do during class, but that really should be done, is to remove the words that give us no information. Another thing we should do is strip out all the symbols.
Step2: I import the dictionary utilities from the first part of the assignment, from which we will use the calcular_distancia function as a metric. Metrics are always defined according to what we intend to do; there are more or less accepted metrics for certain tasks, but that does not stop us from defining our own (with some justification, of course).
Step3: I test how similar some randomly chosen artists are. I will need Markdown so it looks nicer, so I can import the function to display it directly in the output.
Step4: Extra
Step5: What I did is build a dictionary with the words of the songs, associating each word with a list of all the words that appear after it. Since the list keeps repetitions, picking a word at random is more likely to pick one that appears more often. So, just like with predictive text, I can put together an ABBA song like this
Step6: Even for something quick, it makes some kind of sense. If we want it to make more sense, we should look at more than just the previous word, for example the previous two, and that should improve it
Step7: We can also combine songs from several artists to make something even crazier.
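A hedged sketch generalizing the idea from Steps 5 and 6 to an arbitrary context length n (n=1 reproduces the single-word version, n=2 the two-word version); it is illustrative only and not part of the original notebook.
import random
from collections import defaultdict
def build_chain(words, n=2):
    # Map each length-n context to the list of words observed after it (with repetitions).
    chain = defaultdict(list)
    context = ("",) * n
    for word in words:
        chain[context].append(word)
        context = context[1:] + (word,)
    chain[context].append("")          # allow the chain to terminate
    return chain
def generate(chain, n=2, length=100):
    context, out = ("",) * n, []
    for _ in range(length):
        word = random.choice(chain.get(context, [""]))
        out.append(word)
        context = context[1:] + (word,)
    return " ".join(out)
# usage: generate(build_chain("some lyrics here ...".lower().split()), n=2)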
|
2,865
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -U tensorflow-addons
import tensorflow_addons as tfa
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# normalize data
x_train, x_test = x_train / 255.0, x_test / 255.0
# build the model using the Sequential API
model = Sequential()
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
# initialize TimeStopping callback
time_stopping_callback = tfa.callbacks.TimeStopping(seconds=5, verbose=1)
# train the model with the TimeStopping callback;
# training will be interrupted after about 5 seconds
# even though epochs is set to 100.
model.fit(x_train, y_train,
batch_size=64,
epochs=100,
callbacks=[time_stopping_callback],
validation_data=(x_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow Addons callbacks: TimeStopping
Step2: Import and normalize the data
Step3: Build a simple MNIST model
Step4: Basic TimeStopping usage
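A short sketch, not part of the original notebook, showing that TimeStopping composes with the standard Keras callbacks, for example stopping on elapsed time or on a stalled validation loss, whichever comes first.
import tensorflow as tf
import tensorflow_addons as tfa
callbacks = [
    tfa.callbacks.TimeStopping(seconds=60, verbose=1),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3),
]
# model.fit(x_train, y_train, epochs=100, callbacks=callbacks,
#           validation_data=(x_test, y_test))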
|
2,866
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer
from sklearn.svm import SVR
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
# Making meshgrid of datapoints and compute Franke's function
n = 5
N = 1000
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
# split in training and test data
X_train, X_test, y_train, y_test = train_test_split(X,z,test_size=0.2)
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train, y_train)
# The mean squared error and R2 score
print("MSE before scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test), y_test)))
print("R2 score before scaling {:.2f}".format(svm.score(X_test,y_test)))
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("Feature min values before scaling:\n {}".format(X_train.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train.max(axis=0)))
print("Feature min values after scaling:\n {}".format(X_train_scaled.min(axis=0)))
print("Feature max values after scaling:\n {}".format(X_train_scaled.max(axis=0)))
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train_scaled, y_train)
print("MSE after scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test_scaled), y_test)))
print("R2 score for scaled data: {:.2f}".format(svm.score(X_test_scaled,y_test)))
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
svm = SVC(C=100)
svm.fit(X_train, y_train)
print("Test set accuracy: {:.2f}".format(svm.score(X_test,y_test)))
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("Feature min values before scaling:\n {}".format(X_train.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train.max(axis=0)))
print("Feature min values before scaling:\n {}".format(X_train_scaled.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train_scaled.max(axis=0)))
svm.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data with Min-Max scaling: {:.2f}".format(svm.score(X_test_scaled,y_test)))
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
svm.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data with Standar Scaler: {:.2f}".format(svm.score(X_test_scaled,y_test)))
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
# Set up training data
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Test set accuracy: {:.2f}".format(logreg.score(X_test,y_test)))
# Scale data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
import pandas as pd
# Making a data frame
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
fig, axes = plt.subplots(15,2,figsize=(10,20))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target == 1]
ax = axes.ravel()
for i in range(30):
_, bins = np.histogram(cancer.data[:,i], bins =50)
ax[i].hist(malignant[:,i], bins = bins, alpha = 0.5)
ax[i].hist(benign[:,i], bins = bins, alpha = 0.5)
ax[i].set_title(cancer.feature_names[i])
ax[i].set_yticks(())
ax[0].set_xlabel("Feature magnitude")
ax[0].set_ylabel("Frequency")
ax[0].legend(["Malignant", "Benign"], loc ="best")
fig.tight_layout()
plt.show()
import seaborn as sns
correlation_matrix = cancerpd.corr().round(1)
# use the heatmap function from seaborn to plot the correlation matrix
# annot = True to print the values inside the square
sns.heatmap(data=correlation_matrix, annot=True)
plt.show()
#print eigvalues of correlation matrix
EigValues, EigVectors = np.linalg.eig(correlation_matrix)
print(EigValues)
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
correlation_matrix = cancerpd.corr().round(1)
# Importing various packages
import numpy as np
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
W = np.vstack((x, y))
C = np.cov(W)
print(C)
import numpy as np
n = 100
# define two vectors
x = np.random.random(size=n)
y = 4+3*x+np.random.normal(size=n)
#scaling the x and y vectors
x = x - np.mean(x)
y = y - np.mean(y)
variance_x = np.sum(x@x)/n
variance_y = np.sum(y@y)/n
print(variance_x)
print(variance_y)
cov_xy = np.sum(x@y)/n
cov_xx = np.sum(x@x)/n
cov_yy = np.sum(y@y)/n
C = np.zeros((2,2))
C[0,0]= cov_xx/variance_x
C[1,1]= cov_yy/variance_y
C[0,1]= cov_xy/np.sqrt(variance_y*variance_x)
C[1,0]= C[0,1]
print(C)
import numpy as np
import pandas as pd
n = 10
x = np.random.normal(size=n)
x = x - np.mean(x)
y = 4+3*x+np.random.normal(size=n)
y = y - np.mean(y)
X = (np.vstack((x, y))).T
print(X)
Xpd = pd.DataFrame(X)
print(Xpd)
correlation_matrix = Xpd.corr()
print(correlation_matrix)
# Common imports
import numpy as np
import pandas as pd
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
# Making meshgrid of datapoints and compute Franke's function
n = 4
N = 100
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
Xpd = pd.DataFrame(X)
# subtract the mean values and set up the covariance matrix
Xpd = Xpd - Xpd.mean()
covariance_matrix = Xpd.cov()
print(covariance_matrix)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
n = 10000
mean = (-1, 2)
cov = [[4, 2], [2, 2]]
X = np.random.multivariate_normal(mean, cov, n)
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
# we center it ourselves
X_centered = X - X.mean(axis=0)
print(df.cov())
print(np.cov(X_centered.T))
# extract the relevant columns from the centered design matrix of dim n x 2
x = X_centered[:,0]
y = X_centered[:,1]
Cov = np.zeros((2,2))
Cov[0,1] = np.sum(x.T@y)/(n-1.0)
Cov[0,0] = np.sum(x.T@x)/(n-1.0)
Cov[1,1] = np.sum(y.T@y)/(n-1.0)
Cov[1,0]= Cov[0,1]
print("Centered covariance using own code")
print(Cov)
plt.plot(x, y, 'x')
plt.axis('equal')
plt.show()
# diagonalize and obtain eigenvalues, not necessarily sorted
EigValues, EigVectors = np.linalg.eig(Cov)
# sort eigenvectors and eigenvalues
#permute = EigValues.argsort()
#EigValues = EigValues[permute]
#EigVectors = EigVectors[:,permute]
print("Eigenvalues of Covariance matrix")
for i in range(2):
print(EigValues[i])
FirstEigvector = EigVectors[:,0]
SecondEigvector = EigVectors[:,1]
print("First eigenvector")
print(FirstEigvector)
print("Second eigenvector")
print(SecondEigvector)
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2Dsl = pca.fit_transform(X)
print("Eigenvector of largest eigenvalue")
print(pca.components_.T[:, 0])
import numpy as np
import pandas as pd
from IPython.display import display
np.random.seed(100)
# setting up a 10 x 5 vanilla matrix
rows = 10
cols = 5
X = np.random.randn(rows,cols)
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
display(df)
# we center it ourselves
X_centered = X - X.mean(axis=0)
# Then check the difference between pandas and our own set up
print(X_centered-df)
#Now we do an SVD
U, s, V = np.linalg.svd(X_centered)
c1 = V.T[:, 0]
c2 = V.T[:, 1]
W2 = V.T[:, :2]
X2D = X_centered.dot(W2)
print(X2D)
W2 = V.T[:, :2]
X2D = X_centered.dot(W2)
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D = pca.fit_transform(X)
print(X2D)
pca.components_.T[:, 0]
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Train set accuracy from Logistic Regression: {:.2f}".format(logreg.score(X_train,y_train)))
# We scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Then perform again a log reg fit
logreg.fit(X_train_scaled, y_train)
print("Train set accuracy scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train)))
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D_train = pca.fit_transform(X_train_scaled)
# and finally compute the log reg fit and the score on the training data
logreg.fit(X2D_train,y_train)
print("Train set accuracy scaled and PCA data: {:.2f}".format(logreg.score(X2D_train,y_train)))
pca = PCA()
pca.fit(X)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X)
from sklearn.decomposition import KernelPCA
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple preprocessing examples, breast cancer data and classification, Support Vector Machines
Step2: More on Cancer Data, now with Logistic Regression
Step3: Why should we think of reducing the dimensionality
Step4: In the above example we note two things. In the first plot we display
Step5: and then
Step6: Diagonalizing this matrix we can in turn say something about which directions in the data carry most of the variance.
Step7: Correlation Matrix
Step8: We see that the matrix elements along the diagonal are one, as they should be, since each variable is perfectly correlated with itself.
Step9: We expand this model to the Franke function discussed above.
Step10: We note here that the covariance is zero for the first row and column, since the first column of the design matrix is constant.
Step11: Now we are going to implement the PCA algorithm. We will break it down into various substeps.
Step12: Alternatively, we could use the functions we discussed
Step13: Note that the way we define the covariance matrix here has a factor $n-1$ instead of $n$. This is included in the cov() function by numpy and pandas.
Step14: Depending on the number of points $n$, we will get results that are close to the covariance values defined above.
Step15: This code does not contain all the above elements, but it shows how we can use Scikit-Learn to extract the eigenvector which corresponds to the largest eigenvalue. Try to address the questions we pose before the above code. Try also to change the values of the covariance matrix by making one of the diagonal elements much larger than the other. What do you observe then?
Step16: PCA assumes that the dataset is centered around the origin. Scikit-Learn's PCA classes take care of centering the data for you.
Step18: After fitting the PCA transformer to the dataset, you can access the principal components using the components_ attribute.
Step19: Another very useful piece of information is the explained variance ratio of each principal component, available via the explained_variance_ratio_ attribute.
Step20: We see that our training data after the PCA decomposition has a performance similar to the non-scaled data.
Step21: You could then set n_components=d and run PCA again. However, there is a much better option: set n_components to a float between 0.0 and 1.0, indicating the ratio of variance you wish to preserve.
Step22: Incremental PCA
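A minimal sketch of the Incremental PCA idea mentioned above: fit the decomposition in mini-batches with scikit-learn's IncrementalPCA instead of loading all data at once (shown here on the scaled cancer data from this notebook).
import numpy as np
from sklearn.decomposition import IncrementalPCA
n_batches = 10
inc_pca = IncrementalPCA(n_components=2)
for X_batch in np.array_split(X_train_scaled, n_batches):
    inc_pca.partial_fit(X_batch)      # update the decomposition one batch at a time
X_reduced = inc_pca.transform(X_train_scaled)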
|
2,867
|
<ASSISTANT_TASK:>
Python Code:
# imports needed by the cells below (samlib is the notebook's local helper module)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import samlib
target = pd.read_csv('../data/train_target.csv')
target.describe()
target = target / 1000
logtarget = np.log1p(target)
def read():
Read training and test data and return a dataframe with ['Dataset','Id'] multi-index
raw_train = pd.read_csv('../data/train_prepared_light.csv')
raw_test = pd.read_csv('../data/test_prepared_light.csv')
df = pd.concat([raw_train, raw_test], keys=['train', 'test'])
df.index.names = 'Dataset', 'Id'
return df
df = read()
pp = samlib.Pipeline(df.copy())
assert pp == df # the pipeline output equals df
def select_categorical_features(df):
return df.loc[:, df.dtypes == object]
pp.append(select_categorical_features)
pp().shape
pp().describe()
pp().isnull().sum()
plt.figure(figsize=(12, 10))
pp().describe().loc['unique'].sort_values(ascending=False).plot(kind='barh')
plt.title('Number of categories per feature')
nulls = pp().isnull().sum()
plt.figure(figsize=(12, 10))
ax = nulls[nulls > 0].sort_values(ascending=False).plot(kind='barh')
plt.title('Number of nulls per feature')
def replace_bad_nulls(df):
nulls = df.isnull().sum()
bad_nulls_colz = nulls[nulls > 0].sort_values()['BsmtFinType1':].index
return samlib.fillna(df, lambda x: 'none', bad_nulls_colz)
pp.append(replace_bad_nulls)
def fill_nulls_with_mode(df):
return samlib.fillna(df, lambda dg: dg.mode().loc[0])
pp.append(fill_nulls_with_mode)
pp()['LotShape'].head()
def ordered_categories(series):
dg = pd.DataFrame(series).copy()
dg.loc['train','LogSalePrice'] = logtarget.values
return dg.groupby(dg.columns[0]).median().sort_values('LogSalePrice', ascending=False)
ordered_categories(pp()['LotShape'])
def categorize(feature):
feature = feature.copy()
return pd.Categorical(feature, ordered_categories(feature).index)
categorize(pp()['LotShape'])
def objects_to_categories(df):
return df.apply(categorize)
objects_to_categories(pp())['LotShape'].head()
pp.append(objects_to_categories)
pp()['LotShape'].head()
def order_columns_by_uniques(df):
colz_ordered_by_unique = df.describe().loc['unique'].sort_values(ascending=False).index
return df.reindex_axis(colz_ordered_by_unique, axis=1)
pp.append(order_columns_by_uniques)
pp().head()
df = pp()
df.shape
train = pp().loc['train']
def plot_price_dist(y='LotShape', data=train, logtarget=logtarget, **kwargs):
Plot the price distribution for each category
dg = data[[y]].copy()
dg['LogSalePrice'] = logtarget
# Order categories by mean value
sns.violinplot(x="LogSalePrice", y=y, data=dg, scale='width', **kwargs)
#sns.swarmplot(x="LogSalePrice", y=y, data=dg, color="w", alpha=.5);
def plot_value_counts(y=None, data=df, **kwargs):
if y is None:
if data.shape[1] == 1:
y = data.columns[0]
else:
raise ValueError('Must pass y or a dataframe with a single column')
return sns.countplot(y=y, data=data, **kwargs)
train.shape
samlib.featureplots(train, nrows=21, ncols=4, figsize=(2, 8), plotfuncs=(plot_price_dist, plot_value_counts), axis=1)
looks_good = 'Neighborhood'
looks_bad = 'Condition1'
# medians
med_price = logtarget.median().values
df = pp()
sharps = df.apply(lambda col: ordered_categories(col).std().values).iloc[0] / med_price
fig, ax = plt.subplots(1,1, figsize=(12, 12))
sharps.sort_values().plot(kind='barh', ax=ax)
plt.title('Std of median logprices for each category (high is better)')
from scipy.stats import entropy
df = pp()
unicounts = df.apply(lambda ser: entropy(ser.value_counts()))
fig, ax = plt.subplots(1,1, figsize=(12, 12))
unicounts.sort_values().plot(kind='barh', ax=ax)
plt.title('Entropy of value counts for each category (high is better)')
goodness = sharps * unicounts
fig, ax = plt.subplots(1,1, figsize=(12, 12))
goodness.sort_values().plot(kind='barh', ax=ax)
plt.title('Goodness of category (high is better)')
topcolz = goodness.sort_values(ascending=False)[:12].index
topcolz
samlib.featureplots(train[topcolz], nrows=6, ncols=4, figsize=(2, 8), plotfuncs=(plot_price_dist, plot_value_counts), axis=1)
df = pp()
df.columns = goodness.sort_values(ascending=False).index
df.to_csv('cleaned_categorical_vars_with_colz_sorted_by_goodness.csv', index=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The sale price is in the hundreds of thousands, so let's divide the price by 1000 to get more manageable numbers.
Step3: Merge the training and test datasets for data preparation
Step4: Categorical features
Step5: Number of categories per feature
Step6: Number of nulls per feature
Step7: But that's a lot of numbers to digest. Better get started plotting! To help with plotting, but also to improve linear regression models, we're going to standardize our data. But before that we must deal with the NaN values.
Step8: Replace true nulls with mode
Step9: Change dtypes to category
Step10: Order columns by uniques
Step11: Explore categories
Step13: Plot sale price distribution for each category
Step14: Use samlib.featureplots to plot the distribution and value count of each category.
Step15: Let's create an indicator to detect the bad features, that is, categorical features whose categories do not help separate the sale price.
Step16: The medians of the logprice within each category should have maximum variance (the less the variance, the less we can distinguish the price across categories).
Step17: The entropy of the value counts should be large
Step18: Good features have a high variability and more uniform counts.
Step19: Let's plot our top features and check that they indeed appear helpful (good variability in the medians and high entropy in the counts).
Step20: Sort columns by goodness and save
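A compact, self-contained restatement of the goodness indicator described in Steps 15-18, shown on a toy frame; it is illustrative only, and the notebook's sharps * unicounts computation remains the real thing.
import numpy as np
import pandas as pd
from scipy.stats import entropy
toy = pd.DataFrame({'Quality': ['good', 'good', 'fair', 'poor', 'poor', 'poor'],
                    'LogSalePrice': [5.3, 5.1, 4.8, 4.2, 4.3, 4.1]})
# spread of per-category median prices, normalized by the overall median
spread = toy.groupby('Quality')['LogSalePrice'].median().std() / toy['LogSalePrice'].median()
# how evenly the categories are populated
balance = entropy(toy['Quality'].value_counts())
print('goodness =', spread * balance)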
|
2,868
|
<ASSISTANT_TASK:>
Python Code:
# This is an inline comment: Python3
print('hello world')
# Python2
print 'hello world'
1 * 1.0
a = 3
type(a)
b = 3 > 5
print(b), type(b)
L = ['red', 'blue', 'green', 'black', 'white']
print(L)
L[1], L[3:], L[3:15]
L[1] = 'yellow'
print(L)
T = ('red', 'black')
T[1] = 'yellow'
print(L)
G = L
print(G,L)
# But now:
L[1] = 'blue'
print(G,L)
# Let us now create an independent copy:
G = L[:]
print('Independent: ', G, L)
L[1] = 'yellow'
print('Independent: ', G, L)
import urllib; from IPython.core.display import HTML
HTML(urllib.urlopen('http://bit.ly/1K5apRH').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Types
Step2: Let us now turn to containers
Step3: Lists are mutable objects, i.e. they can be changed.
Step4: What is an example of an immutable object?
Step5: Let us now turn to the distinction between independent copies and references to objects.
Step6: Miscellaneous
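One point worth adding to the copy-versus-reference discussion: a slice only copies the top level of a list. A small sketch (not in the original notebook) using copy.deepcopy for nested lists:
import copy
nested = [['red', 'blue'], ['green']]
shallow = nested[:]
deep = copy.deepcopy(nested)
nested[0][0] = 'yellow'
print(shallow[0][0])   # 'yellow' -- the inner list is still shared
print(deep[0][0])      # 'red'    -- fully independent copy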
|
2,869
|
<ASSISTANT_TASK:>
Python Code:
from poppy.creatures import Poppy4dofArmMini
mini_dof = Poppy4dofArmMini()
for m in mini_dof.motors:
m.compliant = False
m.goto_position(0,1)
for m in mini_dof.motors:
m.pid=(4,1,0.1)
mini_dof.m3.goto_behavior= 'minjerk'
mini_dof.m3.goto_behavior
from pypot.primitive import Primitive
import time
%pylab inline
class Graph_primitive(Primitive):
def __init__(self,robot,motors_name):
self.robot = robot
Primitive.__init__(self, robot)
self.fake_motors={}
for name in motors_name:
self.fake_motors[name] = getattr(self.robot, name)
self.position={}
self.load={}
self.speed={}
def setup(self):
for m in self.fake_motors.keys():
self.position[m] = []
self.speed[m] = []
self.load[m] = []
self.python_time=[]
self.pypot_time=[]
def run(self):
t0 = time.time()
while not self.should_stop():
for m in self.fake_motors.keys():
self.position[m].append(self.fake_motors[m].present_position)
self.load[m].append(self.fake_motors[m].present_load)
self.speed[m].append(self.fake_motors[m].present_speed)
self.python_time.append(time.time()-t0)
self.pypot_time.append(self.elapsed_time)
time.sleep(0.02)
graph = Graph_primitive(mini_dof,['m3',])
graph.start()
mini_dof.m3.goto_position(90,1,wait=True)
mini_dof.m3.goto_position(0,1)
graph.stop()
figure(1)
plot(graph.pypot_time,graph.position['m3'])
xlabel('elapsed time seconds')
ylabel('position')
title ('Record position function of elapsed time')
graph.start()
mini_dof.m3.goto_position(90,0.1,wait=True)
mini_dof.m3.goto_position(0,1,wait=True)
graph.stop()
figure(1)
plot(graph.pypot_time,graph.position['m3'])
xlabel('elapsed time seconds')
ylabel('position')
title ('Record position function of elapsed time')
mini_dof.m3.goto_behavior = 'dummy'
graph.start()
mini_dof.m3.goto_position(90,1,wait=True)
mini_dof.m3.goto_position(0,1)
graph.stop()
figure(1)
plot(graph.pypot_time,graph.position['m3'])
xlabel('elapsed time seconds')
ylabel('position')
title ('Record position function of elapsed time')
graph.start()
for i in range(3):
mini_dof.m3.goto_position(90,0.5,wait=True)
mini_dof.m3.goto_position(-90,0.5,wait=True)
mini_dof.m3.goto_position(0,1,wait=True)
graph.stop()
figure(1)
plot(graph.pypot_time,graph.position['m3'])
xlabel('elapsed time seconds')
ylabel('position')
title ('Record position function of elapsed time')
def wait_position(motor) :
while abs(motor.present_position-motor.goal_position) > 2: # the precision in degree is 2° , you can change but usually XL-320 motors have not a precision more than 1°
time.sleep(0.02)
graph.start()
for i in range(3):
mini_dof.m3.goto_position(90,0.5)
wait_position(mini_dof.m3)
mini_dof.m3.goto_position(-90,0.5,wait=True)
wait_position(mini_dof.m3)
mini_dof.m3.goto_position(0,1,wait=True)
graph.stop()
figure(1)
plot(graph.pypot_time,graph.position['m3'])
xlabel('elapsed time seconds')
ylabel('position')
title ('Record position function of elapsed time')
for m in mini_dof.motors:
m.moving_speed = 100 # degrees per seconde
def wait_position(robot) :
while abs(
robot.m1.present_position-robot.m1.goal_position) > 5 or abs(
robot.m2.present_position-robot.m2.goal_position) > 5 or abs(
robot.m3.present_position-robot.m3.goal_position) >5 or abs(
robot.m4.present_position-robot.m4.goal_position) >5 : # the precision in degree is 2° , you can change but usually XL-320 motors have not a precision more than 1°
time.sleep(0.02)
for i in range(10):
mini_dof.m1.goal_position = 90
mini_dof.m2.goal_position = 10
mini_dof.m3.goal_position = 80
mini_dof.m4.goal_position = -40
wait_position(mini_dof)
for m in mini_dof.motors:
m.goal_position = 0
wait_position(mini_dof)
mini_dof.m1.goal_position = -30
mini_dof.m2.goal_position = 10
mini_dof.m3.goal_position = 80
mini_dof.m4.goal_position = -40
wait_position(mini_dof)
for m in mini_dof.motors:
m.goal_position = 0
wait_position(mini_dof)
for m in mini_dof.motors:
m.moving_speed = 0 # degrees per seconde
mini_dof.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: With the goto_position method, you can set an angle and a time. But you cannot be absolutely sure that the position will effectively be reached, because the priority is on the time.
Step2: We need to adjust the PID of the motors, because otherwise you can have more than 3 degrees of difference between the present_position and the goal_position.
Step3: Minjerk behavior
Step4: Make a primitive to record data from motors
Step5: Do a simple movement
Step6: Display the graph
Step7: But with
Step8: The position is definitely not reached.
Step9: Do a simple movement
Step10: Display the graph
Step11: You can see that the 90 degrees are not reached in one second, so the movement takes longer than the requested duration.
Step12: This is awful! The positions are really far from the desired positions, because pypot computes a theoretical speed which seems to always be below the real one.
Step13: Positions are now respected (+/- a few degrees).
|
2,870
|
<ASSISTANT_TASK:>
Python Code:
gPlayers = [ 'X', 'O' ]
gStart = tuple( tuple(' ' for col in range(3)) for row in range(3) )
gStart
def to_list(State):
return [list(row) for row in State]
def to_tuple(State):
return tuple(tuple(row) for row in State)
def empty(Board):
return [ (row, col) for row in range(3)
for col in range(3)
if Board[row][col] == ' '
]
empty(gStart)
def toString(State):
result = '+-+-+-+\n'
for Row in State:
result += '|'
for cell in Row:
result += cell + '|'
result += '\n+-+-+-+\n'
return result
print(toString(gStart))
def next_states(State, player):
Empty = empty(State)
Result = []
for row, col in Empty:
NextState = to_list(State)
NextState[row][col] = player
NextState = to_tuple(NextState)
Result.append(NextState)
return Result
for S in next_states(gStart, 'X'):
print(toString(S))
gAllLines = [ [ (row, col) for col in range(3) ] for row in range(3) ] \
+ [ [ (row, col) for row in range(3) ] for col in range(3) ] \
+ [ [ (0, 0), (1, 1), (2, 2) ] ] \
+ [ [ (0, 2), (1, 1), (2, 0) ] ]
gAllLines
def utility(State):
for Line in gAllLines:
Marks = { State[row][col] for row, col in Line }
if len(Marks) == 1 and Marks != { ' ' }:
if Marks == { 'X' }:
return 1
else:
return -1
for row in range(3):
for col in range(3):
if State[row][col] == ' ':
return None # the board is not filled
# at this point, the board has been filled, but there is no winner, hence it's a draw
return 0
print(utility(gStart))
def finished(State):
return utility(State) != None
finished(gStart)
def get_move(State):
State = to_list(State)
while True:
try:
row, col = input('Enter move here: ').split(',')
row, col = int(row), int(col)
if State[row][col] == ' ':
State[row][col] = 'O'
return to_tuple(State)
print("Don't cheat! Please try again.")
except:
print('Illegal input.')
print('row and col are numbers from the set {0,1,2}.')
def final_msg(State):
if finished(State):
if utility(State) == -1:
print('You have won!')
elif utility(State) == 1:
print('The computer has won!')
else:
print("It's a draw.")
return True
return False
import ipycanvas as cnv
g_size = 150
def create_canvas():
n = 3
canvas = cnv.Canvas(size=(g_size * n, g_size * n + 50))
display(canvas)
return canvas
def draw(State, canvas, value):
canvas.clear()
n = len(State)
canvas.font = '90px sans-serif'
canvas.text_align = 'center'
canvas.text_baseline = 'middle'
for row in range(n):
for col in range(n):
x = col * g_size
y = row * g_size
canvas.line_width = 3.0
canvas.stroke_rect(x, y, g_size, g_size)
symbol = State[row][col]
if symbol != ' ':
x += g_size // 2
y += g_size // 2
if symbol == 'X':
canvas.fill_style ='red'
else:
canvas.fill_style ='blue'
canvas.fill_text(symbol, x, y)
canvas.font = '12px sans-serif'
canvas.fill_style = 'green'
for row in range(n):
for col in range(n):
x = col * g_size + 16
y = row * g_size + 141
canvas.fill_text(f'({row}, {col})', x, y)
canvas.font = '20px sans-serif'
canvas.fill_style = 'black'
x = 1.5 * g_size
y = 3.2 * g_size
canvas.fill_text(str(value), x, y)
State = (('O', 'X', 'O'),
('X', 'O', 'X'),
('O', 'X', 'O')
)
draw(State, create_canvas(), 1)
gStart
import sys
sys.getsizeof(gStart) + sum([sys.getsizeof(row) for row in gStart]) + 9 * sys.getsizeof('_')
sys.getsizeof(1073741823)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the following naive implementation, states are represented as tuples of tuples of strings. The game starts with an empty board. An empty field on the board is represented by a blank space ' ', while an X is represented by the string 'X' and an O is represented as the string 'O'. The start state Start defined below represents an empty board.
Step2: The function to_list transforms a tuple of tuples into a list of lists. As tuples are immutable, this function is needed in order to create new states from given ones.
Step3: Conversely, the function to_tuple transforms a list of lists into a tuple of tuples.
Step4: Given a state represented by the tuple of tuples Board, the function empty(Board) returns the list of all empty positions on the board Board, i.e. it returns the list of all pairs (row, col) such that Board[row][col] == ' ' holds. These pairs are the coordinates of the fields on the Board that are not yet marked by either an 'X' or an 'O'.
Step5: In the state gStart, all positions are empty.
Step6: The function toString(State) represents the given state as a string.
Step7: Given a State and the player whose turn it is, the function next_states(State, player) computes the list of all states that can be reached from State.
Step8: The variable gAllLines collects the coordinates of all the fields of the three rows, the three columns, and the two diagonals. This variable is needed to check whether the game has already terminated.
Step9: The function utility takes one argument, a State, and returns 1 if 'X' has won, -1 if 'O' has won, 0 for a draw, and None if the game is not yet finished.
Step10: finished(State) is True if and only if the game is over.
Step11: The function get_move asks the user to input a move in the format r,c where r is the row and the c is the column where the next symbol is to be placed. Rows and columns are counted starting from 0. It is assumed that the user plays as 'O', i.e. the user takes the second move.
Step12: This function informs the player, who is assumed to be 'O', about the result of the game once the game is finished.
Step13: Drawing the Board
Step14: This function creates the canvas for the start state. It draws an empty board which is later used for the game.
Step15: This function takes three arguments
Step16: Checking the Memory Footprint
Step17: The memory needed to store a single state is the memory needed to store a list containing three objects plus the memory needed to store those objects.
Step18: To compare, a single positive integer less than $1,073,741,824$ needs 28 bytes.
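Building on the memory-footprint comparison, a hedged sketch of a much cheaper representation: encode the nine cells as one base-3 integer, so a whole board fits in a single small int (the 28 bytes mentioned above). This is an illustration, not part of the original notebook.
gCode = {' ': 0, 'X': 1, 'O': 2}
def encode(State):
    # pack the board into one integer, most-significant digit first
    value = 0
    for row in State:
        for cell in row:
            value = value * 3 + gCode[cell]
    return value
def decode(value):
    # unpack least-significant digit first, then restore the original order
    cells = []
    for _ in range(9):
        value, digit = divmod(value, 3)
        cells.append(' XO'[digit])
    cells.reverse()
    return tuple(tuple(cells[3 * r:3 * r + 3]) for r in range(3))
assert decode(encode(gStart)) == gStart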
|
2,871
|
<ASSISTANT_TASK:>
Python Code:
import toytree
# get a random tree with 10 tips
tree1 = toytree.rtree.unittree(ntips=10, seed=123)
# draw tree with admixture from node 2 to 3
tree1.draw(ts='s', admixture_edges=(2, 3));
# draw tree with admixture from tip r2 to ancestor of r4,r5
tree1.draw(ts='s', admixture_edges=('r2', ['r4', 'r5']));
# draw tree with admixture from tip r2 to ancestor of r4,r5
tree1.draw(
ts='c',
tip_labels=True,
admixture_edges=[
('r2', 'r4', 0.5, {'stroke': 'blue', 'stroke-opacity': 0.3}, "0.32")
],
);
# draw admixture at 0.5 height of the shared edge (default option)
tree1.draw(ts='c', tip_labels=True, admixture_edges=(2, 3, 0.5));
# draw admixture at 0.5 height of each edge separately
tree1.draw(ts='c', tip_labels=True, admixture_edges=(2, 3, (0.5, 0.5)));
# draw admixture at 0.2 and 0.8 heights, respectively
tree1.draw(ts='c', tip_labels=True, admixture_edges=(2, 3, (0.2, 0.8)));
# draw admixture between edges that do not overlap in time
tree1.draw(ts='c', tip_labels=True, admixture_edges=(1, 12, 0.5));
# draw admixture at 0.5 height of the shared edge (default option)
style = {'stroke': 'orange', 'stroke-width': 10, 'stroke-opacity': 0.4}
tree1.draw(ts='c', tip_labels=True, admixture_edges=(2, 3, 0.5, style));
# draw admixture at 0.5 height of the shared edge (default option)
tree1.draw(ts='c', tip_labels=True, admixture_edges=(2, 3, 0.5, {}, "admixture"));
# the example SNAQ network-1
hnewick = "(C,D,((O,(E,#H7:::0.196):0.314):0.664,(B,((A1,A2))#H7:::0.804):10.0):10.0);"
# parse tree and admixture dict
tree, admix = toytree.utils.parse_network(hnewick)
# the admix dictionary has key,val pairs where val is an admix drawing tuple
admix
# draw tree with tuple arg from admix dictionary
tree.draw(ts='s', admixture_edges=admix.values());
tree.root(wildcard="A").draw(ts='s', admixture_edges=admix.values());
# draw tree with admixture from tip r2 to ancestor of r4,r5
tree1.draw(
ts='c',
tip_labels=True,
admixture_edges=[
(0, 2), (3, 4), (6, 7)
]
);
import numpy as np
# generate list of 100 admixture tuples w/ random timing
admix = []
for i in range(100):
src = 2
dest = 3
time = np.random.normal(0.5, 0.2)
style = {'stroke': 'blue', 'stroke-opacity': 0.01}
tup = (src, dest, time, style)
admix.append(tup)
# draw tree with admixture from tip r2 to ancestor of r4,r5
tree1.draw(
ts='c',
tip_labels=True,
admixture_edges=admix
);
hnewick = "(C,D,((O,(E,#H7:::0.196):0.314):0.664,(B,((A1,A2))#H7:::0.804):10.0):10.0);"
net, admix = toytree.utils.parse_network(hnewick, disconnect=False)
net.draw(ts='s', node_labels="name");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quick example
Step2: Styling admixture edges
Step3: Admixture timing
Step4: Admixture edge style
Step5: Label
Step6: Parsing SNAQ newick format
Step7: Works with re-rooting
Step8: Multiple admixture arguments
Step9: Parsing networks extended
|
2,872
|
<ASSISTANT_TASK:>
Python Code:
import torch
x = torch.Tensor(5, 3)
print(x)
import torch
from torch.autograd import Variable
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Variables for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_fn = torch.nn.MSELoss(size_average=False)
learning_rate = 1e-4
for t in range(500):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Variable of input data to the Module and it produces
# a Variable of output data.
y_pred = model(x)
# Compute and print loss. We pass Variables containing the predicted and true
# values of y, and the loss function returns a Variable containing the
# loss.
loss = loss_fn(y_pred, y)
if t%50 == 0:
print(t, loss.data[0])
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Variables with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Variable, so
# we can access its data and gradients like we did before.
for param in model.parameters():
param.data -= learning_rate * param.grad.data
!pip install pytorch-transformers
import torch
from pytorch_transformers import *
# PyTorch-Transformers has a unified API
# for 6 transformer architectures and 27 pretrained weights.
# Model | Tokenizer | Pretrained weights shortcut
MODELS = [(BertModel, BertTokenizer, 'bert-base-uncased'),
(OpenAIGPTModel, OpenAIGPTTokenizer, 'openai-gpt'),
(GPT2Model, GPT2Tokenizer, 'gpt2'),
(TransfoXLModel, TransfoXLTokenizer, 'transfo-xl-wt103'),
(XLNetModel, XLNetTokenizer, 'xlnet-base-cased'),
(XLMModel, XLMTokenizer, 'xlm-mlm-enfr-1024')]
import torch
from pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM
# OPTIONAL: if you want to have more information on what's happening under the hood, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Tokenize input
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = tokenizer.tokenize(text)
# Mask a token that we will try to predict back with `BertForMaskedLM`
masked_index = 8
tokenized_text[masked_index] = '[MASK]'
assert tokenized_text == ['[CLS]', 'who', 'was', 'jim', 'henson', '?', '[SEP]', 'jim', '[MASK]', 'was', 'a', 'puppet', '##eer', '[SEP]']
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
tokenized_text
?BertModel.from_pretrained
model = BertModel.from_pretrained(u"/Users/datalab/bigdata/bert-base-uncased.bin")
# Load pre-trained model (weights)
model = BertModel.from_pretrained('/Users/datalab/bigdata/bert-base-uncased-pytorch_model.bin')
# Set the model in evaluation mode to desactivate the DropOut modules
# This is IMPORTANT to have reproductible results during evaluation!
model.eval()
# If you have a GPU, put everything on cuda
# tokens_tensor = tokens_tensor.to('cuda')
# segments_tensors = segments_tensors.to('cuda')
# model.to('cuda')
# Predict hidden states features for each layer
with torch.no_grad():
# See the models docstrings for the detail of the inputs
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
# PyTorch-Transformers models always output tuples.
# See the models docstrings for the detail of all the outputs
# In our case, the first element is the hidden state of the last layer of the Bert model
encoded_layers = outputs[0]
# We have encoded our input sequence in a FloatTensor of shape (batch size, sequence length, model hidden dimension)
assert tuple(encoded_layers.shape) == (1, len(indexed_tokens), model.config.hidden_size)
# Load pre-trained model (weights)
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
model.eval()
# If you have a GPU, put everything on cuda
tokens_tensor = tokens_tensor.to('cuda')
segments_tensors = segments_tensors.to('cuda')
model.to('cuda')
# Predict all tokens
with torch.no_grad():
outputs = model(tokens_tensor, token_type_ids=segments_tensors)
predictions = outputs[0]
# confirm we were able to predict 'henson'
predicted_index = torch.argmax(predictions[0, masked_index]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
assert predicted_token == 'henson'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tutorial
Step2: nn module
Step3: https
Step4: Quickstart
|
2,873
|
<ASSISTANT_TASK:>
Python Code:
# Import numpy
import numpy as np
# Define T and g
T = 40
y0 = 50
g = 0.01
# Compute yT using the direct approach and print
yT = (1+g)**T*y0
print('Direct approach: ',yT)
# Initialize a 1-dimensional array called y that has T+1 zeros
y = np.zeros(T+1)
# Set the initial value of y to equal y0
y[0] = y0
# Use a for loop to update the values of y one at a time
for t in np.arange(T):
y[t+1] = (1+g)*y[t]
# Print the final value in the array y
print('Iterative approach:',y[-1])
# Import matplotlib.pyplot
import matplotlib.pyplot as plt
# Magic command for the Jupyter Notebook
%matplotlib inline
# Import numpy as np
import numpy as np
# Create an array of x values from -6 to 6
x = np.arange(-6,6,0.001)
# Create a variable y equal to the sin of x
y = np.sin(x)
# Use the plot function to plot the
plt.plot(x,y)
# Add a title and axis labels
plt.title('sin(x)')
plt.xlabel('x')
plt.ylabel('y')
# Use the help function to see the documentation for plot
help(plt.plot)
# Create an array of x values from -2 to 2
x = np.arange(-2,2,0.001)
# Create a variable y equal to x squared
y = x**2
# Use the plot function to plot the line
plt.plot(x,y,linewidth=3,alpha = 0.6)
# Add a title and axis labels
plt.title('$f(x) = x^2$')
plt.xlabel('x')
plt.ylabel('y')
# Add grid
plt.grid()
# Create an array of x values from 0.05 to 5
x = np.arange(0.05,5,0.011)
# Create y variables
y1 = np.log(x)
y2 = 1/x
# Use the plot function to plot the lines
plt.plot(x,y1,'b-',linewidth=3,alpha = 0.6,label='$log(x)$')
plt.plot(x,y2,'m--',linewidth=3,alpha = 0.6,label='$1/x$')
# Add a title and axis labels
plt.title('Two functions')
plt.xlabel('x')
plt.ylabel('y')
# Set axis limits
plt.xlim([0,5])
plt.ylim([-2,4])
# legend
plt.legend(loc='lower right',ncol=2)
# Add grid
plt.grid()
# Set betas
beta0 = 1
beta1 = -0.5
# Create x values
x = np.arange(-5,5,0.01)
# create epsilon values from the standard normal distribution
epsilon = np.random.normal(size=len(x))
# create y
y = beta0 + beta1*x+epsilon
# plot
plt.plot(x,y,'o',alpha = 0.5)
# Add a title and axis labels
plt.title('Data')
plt.xlabel('x')
plt.ylabel('y')
# Set axis limits
plt.xlim([-5,5])
# Add grid
plt.grid()
# Create an array of x values from -2 to 2
x = np.arange(-2,2,0.001)
# Create y variables
y1 = x
y2 = x**2
y3 = x**3
# Use the plot function to plot the lines
plt.plot(x,y1,'b-',lw=3,label='$x$')
plt.plot(x,y2,'g--',lw=3,label='$x^2$')
plt.plot(x,y3,'m-.',lw=3,label='$x^3$')
# Add a title and axis labels
plt.title('Three functions')
plt.xlabel('x')
plt.ylabel('y')
# Add grid
plt.grid()
# legend
plt.legend(loc='lower right',ncol=3)
# Create data
x = np.arange(-6,6,0.001)
y = np.sin(x)
# Create a new figure
fig = plt.figure(figsize=(12,4))
# Create axis
ax1 = fig.add_subplot(1,1,1)
# Plot
ax1.plot(x,y,lw=3,alpha = 0.6)
# Add grid
ax1.grid()
# Create data
x = np.arange(-6,6,0.001)
y1 = np.sin(x)
y2 = np.cos(x)
# Create a new figure
fig = plt.figure(figsize=(12,4))
# Create axis 1 and plot with title
ax1 = fig.add_subplot(1,2,1)
ax1.plot(x,y1,lw=3,alpha = 0.6)
ax1.grid()
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('sin')
# Create axis 2 and plot with title
ax2 = fig.add_subplot(1,2,2)
ax2.plot(x,y2,lw=3,alpha = 0.6)
ax2.grid()
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('cos')
# Create data
x = np.arange(-2,2,0.001)
y1 = x
y2 = x**2
y3 = x**3
y4 = x**4
# Create a new figure
fig = plt.figure()
# Create axis 1 and plot with title
ax1 = fig.add_subplot(2,2,1)
ax1.plot(x,y1,lw=3,alpha = 0.6)
ax1.grid()
ax1.set_xlabel('x')
ax1.set_ylabel('y')
ax1.set_title('$x$')
# Create axis 2 and plot with title
ax2 = fig.add_subplot(2,2,2)
ax2.plot(x,y2,lw=3,alpha = 0.6)
ax2.grid()
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax2.set_title('$x^2$')
# Create axis 3 and plot with title
ax3 = fig.add_subplot(2,2,3)
ax3.plot(x,y3,lw=3,alpha = 0.6)
ax3.grid()
ax3.set_xlabel('x')
ax3.set_ylabel('y')
ax3.set_title('$x^3$')
# Create axis 4 and plot with title
ax4 = fig.add_subplot(2,2,4)
ax4.plot(x,y4,lw=3,alpha = 0.6)
ax4.grid()
ax4.set_xlabel('x')
ax4.set_ylabel('y')
ax4.set_title('$x^4$')
# Adjust margins
plt.tight_layout()
# Create data
x = np.arange(-6,6,0.001)
y = np.sin(x)
# Create a new figure, axis, and plot
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
ax1.plot(x,y,lw=3,alpha = 0.6)
ax1.grid()
# Save
plt.savefig('fig_econ129_class04_sine.png',dpi=120)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: matplotlib
Step2: Next, we want to make sure that the plots that we create are displayed in this notebook. To achieve this we have to issue a command to be interpreted by Jupyter -- called a magic command. A magic command is preceded by a % character. Magics are not Python and will raise errors if used outside of the Jupyter notebook
Step3: A quick matplotlib example
Step4: The plot function
Step5: Example
Step6: Example
Step7: Example
Step8: Example
Step9: Figures, axes, and subplots
Step10: In the previous example, the figure() function creates a new figure and add_subplot() puts a new axis on the figure. The command fig.add_subplot(1,1,1) means divide the figure fig into a 1 by 1 grid and assign the first cell of that grid to the variable ax1 (a small sketch of this grid-index convention follows this step list).
Step11: Example
Step12: Exporting figures to image files
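A minimal illustrative sketch of the add_subplot(rows, cols, index) convention from Step10 (not part of the original notebook; the plotted data is arbitrary):
import matplotlib.pyplot as plt
fig = plt.figure()
# indices count left-to-right, top-to-bottom: cell 5 of a 2x3 grid is the middle of the bottom row
ax = fig.add_subplot(2, 3, 5)
ax.plot([0, 1], [0, 1])
ax.set_title('cell 5 of a 2x3 grid')
plt.show()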
|
2,874
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sys
# local api/python3 path - adjust path for your system
japipath = 'C:\\j64\\j64-807\\addons\\api\\python3'
if japipath not in sys.path:
sys.path.append(japipath)
sys.path
import jbase as j
print(j.__doc__)
# start J - only one instance currently allowed
try:
j.init()
except:
print('j running')
j.dor("i. 2 3 4") # run sentence and print output result
rc = j.do(('+a.')) # run and return error code
print(rc)
j.getr() # get last output result
j.do('abc=: i.2 3') # define abc
q= j.get('abc') # get q as numpy array from J array
print (q)
j.set('ghi',23+q) # set J array from numpy array
j.dor('ghi')  # print array (note the typo in the addon's j.__doc__)
j.do("cows =. 'don''t have a cow man'")
j.get('cows')
ido = "I do what I do because I am what I am!"
j.set("ido", ido)
j.dor("ido")
# decomment to run REPL
# j.j()
# boolean numpy array
p = np.array([True, False, True, True]).reshape(2,2)
p
j.set("p", p)
j.dor("p")
# numpy
a = np.arange(15).reshape(3, 5)
print(a)
# J
j.do("a =. 3 5 $ i. 15")
j.dor("a")
# numpy
a = np.array([2,3,4])
print(a)
# J
j.do("a =. 2 3 4")
j.dor("a")
# numpy
b = np.array([(1.5,2,3), (4,5,6)])
print(b)
# J
j.do("b =. 1.5 2 3 ,: 4 5 6")
j.dor("b")
# numpy
c = np.array( [ [1,2], [3,4] ], dtype=complex )
print(c)
# J
j.do("c =. 0 j.~ 1 2 ,: 3 4")
j.dor("c") # does not show as complex
j.dor("datatype c") # c is complex
# numpy - make complex numbers with nonzero real and imaginary parts
c + (0+4.7j)
# J - also for J
j.dor("c + 0j4.7")
# numpy
np.zeros( (3,4) )
# J
j.dor("3 4 $ 0")
# numpy - allocates array with whatever is in memory
np.empty( (2,3) )
# J - uses fill - safer but slower than numpy's trust memory method
j.dor("2 3 $ 0.0001")
# numpy
a = np.array( [20,30,40,50] )
b = np.arange( 4 )
c = a - b
print(c)
# J
j.do("a =. 20 30 40 50")
j.do("b =. i. 4")
j.do("c =. a - b")
j.dor("c")
# numpy - uses previously defined (b)
b ** 2
# J
j.dor("b ^ 2")
# numpy - uses previously defined (a)
10 * np.sin(a)
# J
j.dor("10 * 1 o. a")
# numpy - booleans are True and False
a < 35
# J - booleans are 1 and 0
j.dor("a < 35")
# numpy
a = np.array( [[1,1], [0,1]] )
b = np.array( [[2,0], [3,4]] )
# elementwise product
a * b
# J
j.do("a =. 1 1 ,: 0 1")
j.do("b =. 2 0 ,: 3 4")
j.dor("a * b")
# numpy - matrix product
np.dot(a, b)
# J - matrix product
j.dor("a +/ . * b")
# numpy - uniform pseudo random - seeds are different in Python and J processes - results will differ
a = np.random.random( (2,3) )
print(a)
# J - uniform pseudo random
j.dor("?. 2 3 $ 0")
# numpy - sum all array elements - implicit ravel
a = np.arange(100).reshape(20,5)
a.sum()
# j - sum all array elements - explicit ravel
j.dor("+/ , 20 5 $ i.100")
# numpy
b = np.arange(12).reshape(3,4)
print(b)
# sum of each column
print(b.sum(axis=0))
# min of each row
print(b.min(axis=1))
# cumulative sum along each row
print(b.cumsum(axis=1))
# transpose
print(b.T)
# J
j.do("b =. 3 4 $ i. 12")
j.dor("b")
# sum of each column
j.dor("+/ b")
# min of each row
j.dor('<./"1 b')
# cumulative sum along each row
j.dor('+/\\"0 1 b') # must escape \ character to pass +/\"0 1 properly to J
# transpose
j.dor("|: b")
# numpy
a = np.arange(10) ** 3
print(a[2])
print(a[2:5])
print(a[ : :-1]) # reversal
# J
j.do("a =. (i. 10) ^ 3")
j.dor("2 { a")
j.dor("(2 + i. 3) { a")
j.dor("|. a")
from numpy import pi
x = np.linspace(0, 2*pi, 100, dtype=np.float64) # useful to evaluate function at lots of points
f = np.sin(x)
f
j.set("f", f)
j.get("f")
r = np.random.random((2000,3000))
r = np.asarray(r, dtype=np.float64)
r
j.set("r", r)
j.get("r")
r.shape
j.get("r").shape
j.dor("r=. ,r")
j.get("r").shape
r.sum()
b = np.ones((5,300,4), dtype=np.int64)
j.set("b", b)
b2 = j.get("b")
print(b.sum())
print(b2.sum())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure the J Python3 addon
Step2: Character data is passed as bytes.
Step3: j.j() enters a simple REPL
Step4: J accepts a subset of NumPy datatypes
Step5: As you can see a round trip of numpy booleans generates digital noise.
Step6: Basic Operations
Step7: Array Processing
Step8: Indexing and Slicing
Step9: Passing Larger Arrays
|
2,875
|
<ASSISTANT_TASK:>
Python Code:
from py2cytoscape.data.cynetwork import CyNetwork
from py2cytoscape.data.cyrest_client import CyRestClient
from py2cytoscape.data.style import StyleUtil
import py2cytoscape.util.cytoscapejs as cyjs
import py2cytoscape.cytoscapejs as renderer
import networkx as nx
import pandas as pd
import json
# !!!!!!!!!!!!!!!!! Step 0: Start Cytoscape 3 with cyREST App !!!!!!!!!!!!!!!!!!!!!!!!!!
# Step 1: Create py2cytoscape client
cy = CyRestClient()
# Reset
cy.session.delete()
# Step 2: Load network from somewhere
yeast_net = cy.network.create_from('../tests/data/galFiltered.sif')
# Step 3: Load table as pandas' DataFrame
table_data = pd.read_csv('sample_data_table.csv', index_col=0)
table_data.head()
all_suid = cy.network.get_all()
net1 = cy.network.create(all_suid[0])
print(net1.get_first_view())
# Step 4: Merge them in Cytoscape
yeast_net.update_node_table(df=table_data, network_key_col='name')
# Step 5: Apply layout
cy.layout.apply(name='degree-circle', network=yeast_net)
# Step 6: Create Visual Style as code (or by hand if you prefer)
my_yeast_style = cy.style.create('GAL Style')
basic_settings = {
# You can set default values as key-value pairs.
'NODE_FILL_COLOR': '#6AACB8',
'NODE_SIZE': 55,
'NODE_BORDER_WIDTH': 0,
'NODE_LABEL_COLOR': '#555555',
'EDGE_WIDTH': 2,
'EDGE_TRANSPARENCY': 100,
'EDGE_STROKE_UNSELECTED_PAINT': '#333333',
'NETWORK_BACKGROUND_PAINT': '#FFFFEA'
}
my_yeast_style.update_defaults(basic_settings)
# Create some mappings
my_yeast_style.create_passthrough_mapping(column='label', vp='NODE_LABEL', col_type='String')
degrees = yeast_net.get_node_column('degree.layout')
color_gradient = StyleUtil.create_2_color_gradient(min=degrees.min(), max=degrees.max(), colors=('white', '#6AACB8'))
degree_to_size = StyleUtil.create_slope(min=degrees.min(), max=degrees.max(), values=(10, 100))
my_yeast_style.create_continuous_mapping(column='degree.layout', vp='NODE_FILL_COLOR', col_type='Integer', points=color_gradient)
my_yeast_style.create_continuous_mapping(column='degree.layout', vp='NODE_SIZE', col_type='Integer', points=degree_to_size)
my_yeast_style.create_continuous_mapping(column='degree.layout', vp='NODE_WIDTH', col_type='Integer', points=degree_to_size)
my_yeast_style.create_continuous_mapping(column='degree.layout', vp='NODE_HEIGHT', col_type='Integer', points=degree_to_size)
my_yeast_style.create_continuous_mapping(column='degree.layout', vp='NODE_LABEL_FONT_SIZE', col_type='Integer', points=degree_to_size)
cy.style.apply(my_yeast_style, yeast_net)
# Step 7: (Optional) Embed as interactive Cytoscape.js widget
yeast_net_view = yeast_net.get_first_view()
style_for_widget = cy.style.get(my_yeast_style.get_name(), data_format='cytoscapejs')
renderer.render(yeast_net_view, style=style_for_widget['style'], background='radial-gradient(#FFFFFF 15%, #DDDDDD 105%)')
# HTTP Client for Python
import requests
# Standard JSON library
import json
# Basic Setup
PORT_NUMBER = 1234
BASE = 'http://localhost:' + str(PORT_NUMBER) + '/v1/'
# Header for posting data to the server as JSON
HEADERS = {'Content-Type': 'application/json'}
# Define dictionary of empty network
empty_network = {
'data': {
'name': 'I\'m empty!'
},
'elements': {
'nodes':[],
'edges':[]
}
}
res = requests.post(BASE + 'networks?collection=My%20Collection', data=json.dumps(empty_network), headers=HEADERS)
new_network_id = res.json()['networkSUID']
print('New network created with raw REST API. Its SUID is ' + str(new_network_id))
network = cy.network.create(name='My Network', collection='My network collection')
print('New network created with py2cytoscape. Its SUID is ' + str(network.get_id()))
# Create an instance of cyREST client. Default IP is 'localhost', and port number is 1234.
# cy = CyRestClient() - This default constructor creates connection to http://localhost:1234/v1
cy = CyRestClient(ip='127.0.0.1', port=1234)
# Cleanup: Delete all existing networks and tables in current Cytoscape session
cy.session.delete()
# Empty network
empty1 = cy.network.create()
# With name
empty2 = cy.network.create(name='Created in Jupyter Notebook')
# With name and collection name
empty3 = cy.network.create(name='Also created in Jupyter', collection='New network collection')
# Load a single local file
net_from_local2 = cy.network.create_from('../tests/data/galFiltered.json')
net_from_local1 = cy.network.create_from('sample_yeast_network.xgmml', collection='My Collection')
net_from_local2 = cy.network.create_from('../tests/data/galFiltered.gml', collection='My Collection')
# Load from multiple locations
network_locations = [
'sample_yeast_network.xgmml', # Local file
'http://chianti.ucsd.edu/cytoscape-data/galFiltered.sif', # Static file on a web server
'http://www.ebi.ac.uk/Tools/webservices/psicquic/intact/webservices/current/search/query/brca1?format=xml25' # or a web service
]
# This requrns Series
networks = cy.network.create_from(network_locations)
pd.DataFrame(networks, columns=['CyNetwork'])
# Cytoscape.js JSON
n1 = cy.network.create(data=cyjs.get_empty_network(), name='Created from Cytoscape.js JSON')
# Pandas DataFrame
# Example 1: From a simple text table
df_from_sif = pd.read_csv('../tests/data/galFiltered.sif', names=['source', 'interaction', 'target'], sep=' ')
df_from_sif.head()
# By default, it uses 'source' for source node column, 'target' for target node column, and 'interaction' for interaction
yeast1 = cy.network.create_from_dataframe(df_from_sif, name='Yeast network created from pandas DataFrame')
# Example 2: from more complicated table
df_from_mitab = pd.read_csv('intact_pubid_22094256.txt', sep='\t')
df_from_mitab.head()
source = df_from_mitab.columns[0]
target = df_from_mitab.columns[1]
interaction = 'Interaction identifier(s)'
title='A Systematic Screen for CDK4/6 Substrates Links FOXM1 Phosphorylation to Senescence Suppression in Cancer Cells.'
human1 = cy.network.create_from_dataframe(df_from_mitab, source_col=source, target_col=target, interaction_col=interaction, name=title)
# Import edge attributes and node attributes at the same time (TBD)
# NetworkX
nx_graph = nx.scale_free_graph(100)
nx.set_node_attributes(nx_graph, 'Degree', nx.degree(nx_graph))
nx.set_node_attributes(nx_graph, 'Betweenness_Centrality', nx.betweenness_centrality(nx_graph))
scale_free100 = cy.network.create_from_networkx(nx_graph, collection='Generated by NetworkX')
# TODO: igraph
# TODO: Numpy adj. martix
# TODO: GraphX
# As Cytoscape.js (dict)
yeast1_json = yeast1.to_json()
# print(json.dumps(yeast1_json, indent=4))
# As NetworkX graph object
sf100 = scale_free100.to_networkx()
num_nodes = sf100.number_of_nodes()
num_edges = sf100.number_of_edges()
print('Number of Nodes: ' + str(num_nodes))
print('Number of Edges: ' + str(num_edges))
# As a simple, SIF-like DataFrame
yeast1_df = yeast1.to_dataframe()
yeast1_df.head()
network_suid = yeast1.get_id()
print('This object references to Cytoscape network with SUID ' + str(network_suid) + '\n')
print('And its name is: ' + str(yeast1.get_network_value(column='name')) + '\n')
nodes = yeast1.get_nodes()
edges = yeast1.get_edges()
print('* This network has ' + str(len(nodes)) + ' nodes and ' + str(len(edges)) + ' edges\n')
# Get a row in the node table as pandas Series object
node0 = nodes[0]
row = yeast1.get_node_value(id=node0)
print(row)
# Or, pick one cell in the table
cell = yeast1.get_node_value(id=node0, column='name')
print('\nThis node has name: ' + str(cell))
# Create a new CyNetwork object from existing network
network_ref1 = cy.network.create(suid=yeast1.get_id())
# And they are considered as same objects.
print(network_ref1 == yeast1)
print(network_ref1.get_network_value(column='name'))
# Get table from Cytoscape
node_table = scale_free100.get_node_table()
edge_table = scale_free100.get_edge_table()
network_table = scale_free100.get_network_table()
node_table.head()
network_table.transpose().head()
names = scale_free100.get_node_column('Degree')
print(names.head())
# Node Column information. "name" is the unique Index
scale_free100.get_node_columns()
# Add new nodes: Simply send the list of node names. NAMES SHOULD BE UNIQUE!
new_node_names = ['a', 'b', 'c']
# Return value contains dictionary from name to SUID.
new_nodes = scale_free100.add_nodes(new_node_names)
# Add new edges
# Send a list of tuples: (source node SUID, target node SUID, interaction type
new_edges = []
new_edges.append((new_nodes['a'], new_nodes['b'], 'type1'))
new_edges.append((new_nodes['a'], new_nodes['c'], 'type2'))
new_edges.append((new_nodes['b'], new_nodes['c'], 'type3'))
new_edge_ids = scale_free100.add_edges(new_edges)
new_edge_ids
# Delete node
scale_free100.delete_node(new_nodes['a'])
# Delete edge
scale_free100.delete_edge(new_edge_ids.index[0])
# Small utility function to convert ID sets
import requests
def uniprot_id_mapping_service(query=None, from_id=None, to_id=None):
# Uniprot ID Mapping service
url = 'http://www.uniprot.org/mapping/'
payload = {
'from': from_id,
'to': to_id,
'format':'tab',
'query': query
}
res = requests.get(url, params=payload)
df = pd.read_csv(res.url, sep='\t')
res.close()
return df
# Get node table from Cytoscape
yeast_node_table = yeast1.get_node_table()
# From KEGG ID to UniprotKB ID
query1 = ' '.join(yeast_node_table['name'].map(lambda gene_id: 'sce:' + gene_id).values)
id_map_kegg2uniprot = uniprot_id_mapping_service(query1, from_id='KEGG_ID', to_id='ID')
id_map_kegg2uniprot.columns = ['kegg', 'uniprot']
# From UniprotKB to SGD
query2 = ' '.join(id_map_kegg2uniprot['uniprot'].values)
id_map_uniprot2sgd = uniprot_id_mapping_service(query2, from_id='ID', to_id='SGD_ID')
id_map_uniprot2sgd.columns = ['uniprot', 'sgd']
# From UniprotKB to Entrez Gene ID
query3 = ' '.join(id_map_kegg2uniprot['uniprot'].values)
id_map_uniprot2ncbi = uniprot_id_mapping_service(query3, from_id='ID', to_id='P_ENTREZGENEID')
id_map_uniprot2ncbi.columns = ['uniprot', 'entrez']
# Merge them
merged = pd.merge(id_map_kegg2uniprot, id_map_uniprot2sgd, on='uniprot')
merged = pd.merge(merged, id_map_uniprot2ncbi, on='uniprot')
# Add key column by removing prefix
merged['name'] = merged['kegg'].map(lambda kegg_id : kegg_id[4:])
merged.head()
update_url = BASE + 'networks/' + str(yeast1.get_id()) + '/tables/defaultnode'
print(update_url)
ut = {
'key': 'name',
'dataKey': 'name',
'data': [
{
'name': 'YBR112C',
'foo': 'aaaaaaaa'
}
]
}
requests.put(update_url, json=ut, headers=HEADERS)
# Now update existing node table with the data frame above.
yeast1.update_node_table(merged, network_key_col='name', data_key_col='name')
# Check the table is actually updated
yeast1.get_node_table().head()
# Delete columns
yeast1.delete_node_table_column('kegg')
# Create columns
yeast1.create_node_column(name='New Empty Double Column', data_type='Double', is_immutable=False, is_list=False)
# Default is String, mutable column.
yeast1.create_node_column(name='Empty String Col')
yeast1.get_node_table().head()
# Get all existing Visual Styles
import json
styles = cy.style.get_all()
print(json.dumps(styles, indent=4))
# Create a new style
style1 = cy.style.create('sample_style1')
# Get a reference to the existing style
default_style = cy.style.create('default')
print(style1.get_name())
print(default_style.get_name())
# Get all available Visual Properties
print(len(cy.style.vps.get_all()))
# Get Visual Properties for each data type
node_vps = cy.style.vps.get_node_visual_props()
edge_vps = cy.style.vps.get_edge_visual_props()
network_vps = cy.style.vps.get_network_visual_props()
print(pd.Series(edge_vps).head())
# Prepare key-value pair for Style defaults
new_defaults = {
# Node defaults
'NODE_FILL_COLOR': '#eeeeff',
'NODE_SIZE': 20,
'NODE_BORDER_WIDTH': 0,
'NODE_TRANSPARENCY': 120,
'NODE_LABEL_COLOR': 'white',
# Edge defaults
'EDGE_WIDTH': 3,
'EDGE_STROKE_UNSELECTED_PAINT': '#aaaaaa',
'EDGE_LINE_TYPE': 'LONG_DASH',
'EDGE_TRANSPARENCY': 120,
# Network defaults
'NETWORK_BACKGROUND_PAINT': 'black'
}
# Update
style1.update_defaults(new_defaults)
# Apply the new style
cy.style.apply(style1, yeast1)
# Passthrough mapping
style1.create_passthrough_mapping(column='name', col_type='String', vp='NODE_LABEL')
# Discrete mapping: Simply prepare key-value pairs and send it
kv_pair = {
'pp': 'pink',
'pd': 'green'
}
style1.create_discrete_mapping(column='interaction',
col_type='String', vp='EDGE_STROKE_UNSELECTED_PAINT', mappings=kv_pair)
# Continuous mapping
points = [
{
'value': '1.0',
'lesser':'white',
'equal':'white',
'greater': 'white'
},
{
'value': '20.0',
'lesser':'green',
'equal':'green',
'greater': 'green'
}
]
minimal_style = cy.style.create('Minimal')
minimal_style.create_continuous_mapping(column='Degree', col_type='Double', vp='NODE_FILL_COLOR', points=points)
# Or, use utility for simple mapping
simple_slope = StyleUtil.create_slope(min=1, max=20, values=(10, 60))
minimal_style.create_continuous_mapping(column='Degree', col_type='Double', vp='NODE_SIZE', points=simple_slope)
# Apply the new style
cy.style.apply(minimal_style, scale_free100)
# Get list of available layout algorithms
layouts = cy.layout.get_all()
print(json.dumps(layouts, indent=4))
# Apply layout
cy.layout.apply(name='circular', network=yeast1)
yeast1.get_views()
yeast_view1 = yeast1.get_first_view()
node_views = yeast_view1['elements']['nodes']
df3 = pd.DataFrame(node_views)
df3.head()
from py2cytoscape.cytoscapejs import viewer as cyjs
cy.layout.apply(network=scale_free100)
view1 = scale_free100.get_first_view()
view2 = yeast1.get_first_view()
# print(view1)
cyjs.render(view2, 'default2', background='#efefef')
# Use Cytoscape.js style JSON
cyjs_style = cy.style.get(minimal_style.get_name(), data_format='cytoscapejs')
cyjs.render(view1, style=cyjs_style['style'], background='white')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Long Description
Step2: With py2cytoscape
Step3: Status
Step4: Creating empty networks
Step5: Load networks from files, URLs or web services
Step6: Create networks from various types of data
Step7: Get Network from Cytoscape
Step8: Working with CyNetwork API
Step9: Get references from existing networks
Step10: Tables as DataFrame
Step11: Edit Network Topology
Step12: Update Table
Step13: Create / Delete Table Data
Step14: Visual Styles
Step15: Set default values
Step16: Visual Mappings
Step17: Layouts
Step18: Embed Interactive Widget
|
2,876
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn import linear_model, datasets
# Load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Create cross-validated logistic regression
clf = linear_model.LogisticRegressionCV(Cs=100)
# Train model
clf.fit(X, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris Dataset
Step2: Use Cross-Validation To Find The Best Value Of C
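A short follow-up sketch (assuming the clf fitted in the code above): the C selected by cross-validation can be inspected directly.
print(clf.C_)          # regularization strength chosen for each class
print(clf.score(X, y)) # accuracy of the refitted model on the training data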
|
2,877
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
print np.linspace(0,9,10), np.exp(-np.linspace(0,9,10))
# This line configures matplotlib to show figures embedded in the notebook,
# instead of opening a new window for each figure. More about that later.
# If you are using an old version of IPython, try using '%pylab inline' instead.
%matplotlib inline
from scipy.optimize import minimize
from scipy.special import expit
import matplotlib.pyplot as plt
import timeit
fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2
minimize(fun,(2,1),method="Nelder-Mead")
def fun_jacf(x):
np.asarray(x)
return np.array([2*(x[0] - 1),2*(x[1] - 2.5)])
minimize(fun,(2,1),method="BFGS",jac=fun_jacf)
def cost(v,w,u,t):
v = np.array(v) # Don't know why but np.asarray(v) doesn't work here.
w = np.array(w)
u = np.array(u)
fvec = np.array(trigf(t*w + u) ) # This is a vector!!!
yt = 1 + np.sum ( t * v * fvec ) # For a given t, this calculates the value of y(t), given the parameters, v, w, u.
return ( np.sum (v*fvec + t * v* fvec * ( 1 - fvec ) * w ) + yt ) ** 2
# return np.sum(np.array( v*np.array( trigf( np.array( t*w ) + u ) ) ) + np.array( t*np.array( v*np.array( trigf(np.array( t*w ) + u)) ) ) * ( 1 - np.array( trigf( np.array( t*w )+u) ) ) * w + ( 1 + np.array( t*np.array( v*np.array( trigf( np.array(t*w)+u ) ) ) ) ) ) # trigf() should return an array with the same length of the input.
def trigf(x):
#return 1/(1+np.exp(-x)) #
return expit(x)
test11 = np.ones(30)
cost(np.array([1,1,1]),[1,1,1],[1,1,1],1)
def costTotal(v,w,u,t):
t = np.array(t)
costt = 0
for temp in t:
costt = costt + cost(v,w,u,temp)
return costt
test11 = np.ones(30)
tlintest = np.linspace(0,1,2)
print costTotal(np.ones(10),np.ones(10),2*np.ones(10),tlintest)
print costTotal(np.ones(10),np.ones(10),np.ones(10),tlintest)
tlin = np.linspace(0,5,11)
print tlin
## No need to define such a function! Use np.split(x,3) instead.
np.zeros(30)
# This is only an example of 2dimensional neural network.
costTotalF = lambda x: costTotal(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],tlin)
initGuess = np.zeros(30)
# initGuess = np.random.rand(1,30)+2
start1 = timeit.default_timer()
minimize(costTotalF,initGuess,method="Nelder-Mead")
# minimize(costTotalF,initGuess,method="L-BFGS-B")
# minimize(costTotalF,initGuess,method="TNC")
stop1 = timeit.default_timer()
print stop1 - start1
def mhelper(v,w,u,t): ## This function should output a result ## t is a number in this function not array!!
v = np.array(v)
w = np.array(w)
u = np.array(u)
return np.sum( v*trigf( t*w + u ) + t* v* trigf(t*w + u) * ( 1 - trigf( t*w +u) ) * w ) + ( 1 + np.sum( t * v * trigf( t*w +u ) ) )
# Checked # Pass
def vhelper(v,w,u,t):
v = np.array(v)
w = np.array(w)
u = np.array(u)
return trigf(t*w+u) + t*trigf(t*w+u)*( 1-trigf(t*w+u) )*w + t*trigf(t*w+u)
def whelper(v,w,u,t):
v = np.array(v)
w = np.array(w)
u = np.array(u)
return v*t*trigf(t*w+u)*( 1- trigf(t*w+u) ) + t*v*( trigf(t*w+u)*(1-trigf(t*w+u))*t* (1-trigf(t*w+u)) )*w - t*v*trigf(t*w+u)*trigf(t*w+u)*(1-trigf(t*w+u))*t*w + t*v*trigf(t*w+u)*(1-trigf(t*w+u)) + t*v*trigf(t*w+u)*(1-trigf(t*w+u))*t
def uhelper(v,w,u,t):
v = np.array(v)
w = np.array(w)
u = np.array(u)
return v*trigf(t*w+u)*( 1 - trigf(t*w+u)) + t* v * trigf(t*w+u) * (1-trigf(t*w+u))*(1-trigf(t*w+u))*w - t*v*trigf(t*w+u)*trigf(t*w+u)*(1-trigf(t*w+u))*w + t*v*trigf(t*w+u)*(1-trigf(t*w+u))
mhelper([1,2],[2,3],[3,4],[1])
vhelper([1,2],[2,3],[3,4],[1,2])
def mhelperT(v,w,u,t):
t = np.array(t)
mhelperT = 0
for temp in t:
mhelperT = mhelperT + mhelper(v,w,u,temp)
return mhelperT
def vhelperT(v,w,u,t):
t = np.array(t)
vhelperT = 0
for temp in t:
vhelperT = vhelperT + vhelper(v,w,u,temp)
return vhelperT
def whelperT(v,w,u,t):
t = np.array(t)
whelperT = 0
for temp in t:
whelperT = whelperT + whelper(v,w,u,temp)
return whelperT
def uhelperT(v,w,u,t):
t = np.array(t)
uhelperT = 0
for temp in t:
uhelperT = uhelperT + uhelper(v,w,u,temp)
return uhelperT
def costJac(v,w,u,t):
v = np.array(v)
w = np.array(w)
u = np.array(u)
vout = 0
wout = 0
uout = 0
for temp in t:
vout = vout + 2*mhelper(v,w,u,temp)*vhelper(v,w,u,temp)
wout = wout + 2*mhelper(v,w,u,temp)*whelper(v,w,u,temp)
uout = uout + 2*mhelper(v,w,u,temp)*uhelper(v,w,u,temp)
out = np.hstack((vout,wout,uout))
return np.array(out)
print uhelperT([1,2],[2,3],[3,4],[1,2,3]),mhelperT([1,2],[2,3],[3,4],[1]),whelperT([1,2],[2,3],[3,4],[1]),vhelperT([1,2],[2,3],[3,4],[1])
costJac([1,2,3],[2,3,1],[3,4,3],[1,2])
costJacF = lambda x: costJac(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],tlin)
initGuessJ = np.zeros(30)
# initGuessJ = np.random.rand(1,30)+2
minimize(costTotalF,initGuessJ,method="Newton-CG",jac=costJacF)
# funYNN(np.ones(10),np.ones(10),np.ones(10),2)
test13=np.array([-57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 ,
-57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 ,
-57.2424592 , -57.2424592 , -0.28879104, -0.28879104,
-0.28879104, -0.28879104, -0.28879104, -0.28879104,
-0.28879104, -0.28879104, -0.28879104, -0.28879104,
-6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 ,
-6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 ,
-6.5643978 , -6.5643978 ])
for i in np.linspace(0,5,11):
print i,functionYNN(np.split(test13,3)[0],np.split(test13,3)[1],np.split(test13,3)[2],np.array([i]))[0]
temp14 = np.array([])
for i in np.linspace(0,5,11):
temp14 = np.append(temp14,functionYNN(np.split(test13,3)[0],np.split(test13,3)[1],np.split(test13,3)[2],np.array([i]))[0])
testTLin = np.linspace(0,5,11)
plt.figure(figsize=(10,6.18))
plt.plot(testTLin,functionY(testTLin),'bs')
plt.plot(testTLin,temp14,'r-')
plt.show()
temp16 = np.array([1.,0.60129567, 0.36281265 , 0.22220159 , 0.13660321,0.08295538 , 0.04904239 ,0.02817984 , 0.01636932 , 0.01048201, 0.00741816])
temp15 = np.linspace(0,5,11)
print temp15
plt.plot(temp15,temp16)
plt.plot(temp15,functionY(temp15),'bs')
plt.show()
test17 = np.array([])
for temp in np.linspace(0,5,11):
test171 = 1 + expit(10*temp)
test17 = np.append(test17,test171)
print np.array(test17)
1 + expit(10*0)
def functionYNNSt(v,w,u,t): # t is a single scalar value
t = np.array(t)
return 1 + np.sum(t * v * trigf( t*w +u ) )
def functionYNN(v,w,u,t):
t = np.array(t)
func = np.asarray([])
for temp in t:
func = np.append(func, functionYNNSt(v,w,u,temp) )
return np.array(func)
def functionY(t):
return np.exp(-t)
print functionYNN(np.array([1,2]),np.array([1,2]),np.array([1,2]),tlin)
# structArray=np.array([-1.77606225*np.exp(-01), -3.52080053*np.exp(-01), -1.77606225*np.exp(-01),
# -1.77606225*np.exp(-01), -8.65246997*np.exp(-14), 1.00000000,
# -8.65246997*np.exp(-14), -8.65246997*np.exp(-14), -1.13618293*np.exp(-14),
# -7.57778017*np.exp(-16), -1.13618293*np.exp(-14), -1.13618293*np.exp(-14)])
#structArray=np.array([-1.6001368 , -1.6001368 , -2.08065131, -2.06818762, -2.07367757,
# -2.06779168, -2.07260669, -2.08533436, -2.07112826, -2.06893266,
# -0.03859167, -0.03859167, -0.25919807, -0.66904303, -0.41571841,
# -0.76917468, -0.4483773 , -0.17544777, -1.03122022, -0.90581106,
# -3.46409689, -3.46409689, -2.83715218, -2.84817563, -2.8434598 ,
# -2.84773205, -2.84446398, -2.85001617, -2.83613622, -2.84402863])
structArray=np.array([ 0.1330613 , 1.05982273, 0.18777729, -0.60789078, -0.96393469,
-0.65270373, -1.55257864, 0.8002259 , -0.12414033, -0.21230861,
-0.88629202, 0.47527367, 0.21401419, 0.2130512 , -1.5236408 ,
1.35208616, -0.48922234, -0.85850735, 0.72135512, -1.03407686,
2.29041152, 0.91184671, -0.56987761, 0.16597395, -0.43267372,
2.1772668 , -0.1318482 , -0.80817762, 0.44533168, -0.28545885])
structArrayJ = np.array([-11.45706046, -11.45706046, -11.45706046, -11.45706046,
-11.45706046, -11.45706046, -11.45706046, -11.45706046,
-11.45706046, -11.45706046, -0.44524438, -0.44524438,
-0.44524438, -0.44524438, -0.44524438, -0.44524438,
-0.44524438, -0.44524438, -0.44524438, -0.44524438,
-4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 ,
-4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 ,
-4.7477771 , -4.7477771 ])
print("The Structure Array is \n {}".format(structArray))
# print np.split(structArray,3)[0],np.split(structArray,3)[1],np.split(structArray,3)[2]
testTLin = np.linspace(0,5,11)
print "\n \n The plot is"
plt.figure(figsize=(10,6.18))
plt.plot(testTLin,functionY(testTLin),'bs')
plt.plot(testTLin,functionYNN(structArray[0],structArray[1],structArray[2],testTLin),'g^')
plt.plot(testTLin,functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testTLin),'r^')
plt.yscale('log')
plt.show()
print functionY(testTLin), functionYNN(structArray[0],structArray[1],structArray[2],testTLin), functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testTLin)
## Test of Numpy
temp1=np.asarray([1,2,3])
temp2=np.asarray([4,5,6])
temp3=np.asarray([7,8,9])
temp1*temp2
print 3*temp1
temp1+temp2
print temp1*temp2*temp3*temp1
1/(1+np.exp(-temp1))
temp1 + temp2
[1,2] + [2,3]
1 - 3*np.array([1,2])
temp1**2
1+np.asarray([1,2,3])
def testfunction(v,w,u,t):
v = np.array(v)
w = np.array(w)
u = np.array(u)
return t*w + u
#return np.sum(v*trigf( t*w + u ))
testfunction([2,3,4],[3,4,5],[4,5,7],2)
def costS(v,w,u,x):
v = np.array(v) # Don't know why but np.asarray(v) doesn't work here.
w = np.array(w)
u = np.array(u)
fvec = np.array(trigf(x*w + u) ) # This is a vector!!!
yx = np.sum ( x * v * fvec ) # For a given x, this calculates the value of y(t), given the parameters, v, w, u.
dySLASHdt = np.sum (v*fvec + x * v* fvec * ( 1 - fvec ) * w )
return ( dySLASHdt - yx )**2
costS(np.array([2,3,4]),[3,4,5],[4,5,7],4)
def costSTotal(v,w,u,x):
x = np.array(x)
costSt = 0
for temp in x:
costSt = costSt + costS(v,w,u,temp)
return costSt
print costSTotal([1,2,3],[2,3,2],[3,4,1],[1,2,3,4,5,2,6,1])
xlinS = np.linspace(0,1,10)
print xlinS
# This is only an example of 2dimensional neural network.
costSTotalF = lambda x: costSTotal(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],xlinS)
# initGuessS = np.zeros(30)
initGuessS = np.random.rand(1,30)+2
# minimize(costTotalF,([1,0,3,0,1,1,2,0,1,0,1,0]),method="Nelder-Mead")
minimize(costSTotalF,(initGuessS),method="L-BFGS-B")
# minimize(costTotalF,([1,0,3,0,1,1,2,0,1,0,1,0]),method="TNC")
def functionSYNN(v,w,u,x): # x is an array of input points
x = np.array(x)
func = np.asarray([])
for temp in x:
tempfunc = np.sum(temp * v * trigf( temp*w +u ) )
func = np.append(func, tempfunc)
return np.array(func)
def functionSY(x):
return x**4 - x**3 + 2*x
# structArray=np.array([-1.77606225*np.exp(-01), -3.52080053*np.exp(-01), -1.77606225*np.exp(-01),
# -1.77606225*np.exp(-01), -8.65246997*np.exp(-14), 1.00000000,
# -8.65246997*np.exp(-14), -8.65246997*np.exp(-14), -1.13618293*np.exp(-14),
# -7.57778017*np.exp(-16), -1.13618293*np.exp(-14), -1.13618293*np.exp(-14)])
#structArray=np.array([-1.6001368 , -1.6001368 , -2.08065131, -2.06818762, -2.07367757,
# -2.06779168, -2.07260669, -2.08533436, -2.07112826, -2.06893266,
# -0.03859167, -0.03859167, -0.25919807, -0.66904303, -0.41571841,
# -0.76917468, -0.4483773 , -0.17544777, -1.03122022, -0.90581106,
# -3.46409689, -3.46409689, -2.83715218, -2.84817563, -2.8434598 ,
# -2.84773205, -2.84446398, -2.85001617, -2.83613622, -2.84402863])
structArrayS=np.array([ 0.01462306, 0.13467016, 0.43137834, 0.32915392, 0.16398891,
-0.36502654, -0.1943661 , 0.16082714, -0.2923346 , -0.38280994,
2.23127245, 1.97866504, 2.95181241, 2.70643394, 2.19371603,
2.63386948, 2.20213407, 2.81089774, 2.43916804, 2.80375489,
2.32389017, 2.16118574, 2.7346048 , 2.18630694, 2.19932286,
2.52525807, 2.22125577, 2.81758156, 2.27231039, 2.6118171 ])
print("The Structure Array is \n {}".format(structArray))
# print np.split(structArray,3)[0],np.split(structArray,3)[1],np.split(structArray,3)[2]
testXLinS = np.linspace(0,1,10)
print "\n \n The plot is"
plt.figure(figsize=(10,6.18))
plt.plot(testXLinS,functionSY(testXLinS),'bs')
plt.plot(testXLinS,functionSYNN(structArrayS[0],structArrayS[1],structArrayS[2],testXLinS),'g^')
## plt.plot(testXLin,functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testXLin),'r^')
plt.show()
print functionY(testXLinS), functionYNN(structArrayS[0],structArrayS[1],structArrayS[2],testXLinS)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Balance between 'speed' (Beta-coefficient) and 'momentum' of the learning
Step2: Minimize An Expression
Step3: Here is a summary
Step4: Caution
Step5: Test cost function
Step6: Next step is to optimize this cost. To do this we need the derivative. But anyway let's try a simple minimization first.
Step7: Test total cost
Step8: Suppose the parameters are five dimensional and we have 10 data points.
Step9: Define a list divider that splits an array into three arrays.
Step10: It shows that the minimization depends greatly on the initial guess. This would not happen in a simple scenario with gradient descent, but it can when the landscape is too complicated (a small sketch of this sensitivity follows this step list).
Step11: Define the Jacobian of the cost function
Step12: Test Results
Step13: An Even Simpler Equation
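A small standalone sketch of the initial-guess sensitivity noted in Step10 (the bumpy one-dimensional function below is hypothetical and unrelated to the network cost):
import numpy as np
from scipy.optimize import minimize
bumpy = lambda x: np.sin(3 * x[0]) + 0.1 * x[0] ** 2   # several local minima
print(minimize(bumpy, [0.0], method="Nelder-Mead").x)  # converges to one local minimum
print(minimize(bumpy, [4.0], method="Nelder-Mead").x)  # a different start lands in a different minimum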
|
2,878
|
<ASSISTANT_TASK:>
Python Code:
!conda install boto3 --yes
import logging
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
from pyspark.sql.types import *
def create_struct(schema):
    """Take a JSON schema and return a pyspark StructType of equivalent structure."""
replace_definitions(schema, schema['definitions'])
assert '$ref' not in str(schema), 're-write didnt work'
struct = StructType()
for row in get_rows(schema):
struct.add(row)
return struct
def replace_definitions(schema, definitions):
    """Replace references in the JSON schema with their definitions."""
if 'properties' in schema:
for prop, meta in schema['properties'].items():
replace_definitions(meta, definitions)
elif 'items' in schema:
if '$ref' in schema['items']:
ref = schema['items']['$ref'].split('/')[-1]
schema['items'] = definitions[ref]
replace_definitions(schema['items'], definitions)
else:
replace_definitions(schema['items'], definitions)
elif '$ref' in str(schema):
err_msg = "Reference not found for schema: {}".format(str(schema))
log.error(err_msg)
raise ValueError(err_msg)
def get_rows(schema):
    """Map the fields in a JSON schema to corresponding data structures in pyspark."""
if 'properties' not in schema:
err_msg = "Invalid JSON schema: properties field is missing."
log.error(err_msg)
raise ValueError(err_msg)
for prop in sorted(schema['properties']):
meta = schema['properties'][prop]
if 'string' in meta['type']:
logging.debug("{!r} allows the type to be String AND Integer".format(prop))
yield StructField(prop, StringType(), 'null' in meta['type'])
elif 'integer' in meta['type']:
yield StructField(prop, IntegerType(), 'null' in meta['type'])
elif 'boolean' in meta['type']:
yield StructField(prop, BooleanType(), 'null' in meta['type'])
elif meta['type'] == 'array' and 'items' not in meta:
# Assuming strings in the array
yield StructField(prop, ArrayType(StringType(), False), True)
elif meta['type'] == 'array' and 'items' in meta:
struct = StructType()
for row in get_rows(meta['items']):
struct.add(row)
yield StructField(prop, ArrayType(struct), True)
elif meta['type'] == 'object':
struct = StructType()
for row in get_rows(meta):
struct.add(row)
yield StructField(prop, struct, True)
else:
err_msg = "Invalid JSON schema: {}".format(str(meta)[:100])
log.error(err_msg)
raise ValueError(err_msg)
import boto3
import botocore
import json
import tempfile
import urllib2
def fetch_schema():
    """Fetch the crash data schema from an s3 location or github location. This
    returns the corresponding JSON schema in a python dictionary."""
region = "us-west-2"
bucket = "crashstats-telemetry-crashes-prod-us-west-2"
key = "crash_report.json"
fallback_url = "https://raw.githubusercontent.com/mozilla/socorro/master/socorro/schemas/crash_report.json"
try:
log.info("Fetching latest crash data schema from s3://{}/{}".format(bucket, key))
s3 = boto3.client('s3', region_name=region)
# download schema to memory via a file like object
resp = tempfile.TemporaryFile()
s3.download_fileobj(bucket, key, resp)
resp.seek(0)
except botocore.exceptions.ClientError as e:
log.warning(("Could not fetch schema from s3://{}/{}: {}\n"
"Fetching crash data schema from {}")
.format(bucket, key, e, fallback_url))
resp = urllib2.urlopen(fallback_url)
return json.load(resp)
from datetime import datetime as dt, timedelta, date
from pyspark.sql import SQLContext
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days) + 1):
yield (end_date - timedelta(n)).strftime("%Y%m%d")
def import_day(d, schema, version):
    """Convert JSON data stored in an S3 bucket into parquet, indexed by crash_date."""
source_s3path = "s3://crashstats-telemetry-crashes-prod-us-west-2/v1/crash_report"
dest_s3path = "s3://telemetry-parquet/socorro_crash/"
num_partitions = 10
log.info("Processing {}, started at {}".format(d, dt.utcnow()))
cur_source_s3path = "{}/{}".format(source_s3path, d)
cur_dest_s3path = "{}/v{}/crash_date={}".format(dest_s3path, version, d)
df = sqlContext.read.json(cur_source_s3path, schema=schema)
df.repartition(num_partitions).write.parquet(cur_dest_s3path, mode="overwrite")
def backfill(start_date_yyyymmdd, schema, version):
    """Import data from a start date to yesterday's date.

    Example:
        backfill("20160902", crash_schema, version)
    """
start_date = dt.strptime(start_date_yyyymmdd, "%Y%m%d")
end_date = dt.utcnow() - timedelta(1) # yesterday
for d in daterange(start_date, end_date):
try:
            import_day(d, schema, version)
except Exception as e:
log.error(e)
from os import environ
# get the relevant date
yesterday = dt.strftime(dt.utcnow() - timedelta(1), "%Y%m%d")
target_date = environ.get('date', yesterday)
# fetch and generate the schema
schema_data = fetch_schema()
crash_schema = create_struct(schema_data)
version = schema_data.get('$target_version', 0) # default to v0
# process the data
import_day(target_date, crash_schema, version)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: We create the pyspark datatype for representing the crash data in spark (a toy-schema sketch follows this step list). This is a slightly modified version of peterbe/crash-report-struct-code.
Step6: First fetch from the primary source in s3 as per bug 1312006. We fall back to the github location if this is not available.
Step9: Read crash data as json, convert it to parquet
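A toy sketch of the Step4 mapping from a JSON schema to a pyspark StructType (toy_schema below is hypothetical and far smaller than the real crash schema; it assumes the create_struct helper defined in the code):
toy_schema = {
    "definitions": {},
    "properties": {
        "uuid": {"type": ["string", "null"]},
        "uptime": {"type": ["integer", "null"]},
        "is_garbage_collecting": {"type": ["boolean", "null"]},
    },
}
# yields a StructType with nullable fields: is_garbage_collecting (boolean), uptime (integer), uuid (string)
print(create_struct(toy_schema))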
|
2,879
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('../Data/kc_house_data.gl/')
from math import log, sqrt
sales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)
sales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)
sales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']
# In the dataset, 'floors' was defined with type string,
# so we'll convert them to float, before creating a new feature.
sales['floors'] = sales['floors'].astype(float)
sales['floors_square'] = sales['floors']*sales['floors']
all_features = ['bedrooms', 'bedrooms_square',
'bathrooms',
'sqft_living', 'sqft_living_sqrt',
'sqft_lot', 'sqft_lot_sqrt',
'floors', 'floors_square',
'waterfront', 'view', 'condition', 'grade',
'sqft_above',
'sqft_basement',
'yr_built', 'yr_renovated']
model_all = graphlab.linear_regression.create(sales, target='price', features=all_features,
validation_set=None,
l2_penalty=0., l1_penalty=1e10)
model_all_mask = model_all["coefficients"]["value"] > 0.0
model_all["coefficients"][model_all_mask].print_rows(num_rows=20)
(training_and_validation, testing) = sales.random_split(.9,seed=1) # initial train/test split
(training, validation) = training_and_validation.random_split(0.5, seed=1) # split training into train and validate
import numpy as np
def get_rss(model, data, outcome):
predictions = model.predict(data)
residuals = predictions - outcome
rss = sum(pow(residuals,2))
return(rss)
l1_rss = {}
for l1 in np.logspace(1, 7, num=13):
l1_rss[l1] = get_rss(graphlab.linear_regression.create(training, target='price', features=all_features, validation_set=None,
l2_penalty=0., l1_penalty=l1, verbose=False),
validation,
validation["price"])
min_value = min(l1_rss.values())
min_key = [key for key, value in l1_rss.iteritems() if value == min_value]
print "l1 value " + str(min_key) + " yielded rss of " + str(min_value)
model_best_l1 = graphlab.linear_regression.create(training, target='price', features=all_features, validation_set=None,
l2_penalty=0., l1_penalty=10, verbose=False)
rss_best_l1 = get_rss(model_best_l1,testing,testing["price"])
print rss_best_l1
model_best_l1_mask = model_best_l1["coefficients"]["value"] > 0.0
model_best_l1["coefficients"][model_best_l1_mask].print_rows(num_rows=20)
model_best_l1["coefficients"]["value"].nnz()
max_nonzeros = 7
l1_penalty_values = np.logspace(8, 10, num=20)
l1_penalty_values
l1_penalty_nnz = {}
for l1 in l1_penalty_values:
model_l1_penalty = graphlab.linear_regression.create(training, target='price', features=all_features,
validation_set=None, l2_penalty=0., l1_penalty=l1, verbose=False)
l1_penalty_nnz[l1] = model_l1_penalty["coefficients"]["value"].nnz()
print l1_penalty_nnz
from collections import OrderedDict
sorted_l1_penalty_nnz = OrderedDict(sorted(l1_penalty_nnz.items(), key=lambda t: t[0]))
print sorted_l1_penalty_nnz
l1_penalty_min = float('NaN')
l1_penalty_max = float('NaN')
for i in xrange(1,len(sorted_l1_penalty_nnz)):
if sorted_l1_penalty_nnz.values()[i-1] >= max_nonzeros and sorted_l1_penalty_nnz.values()[i] <= max_nonzeros:
l1_penalty_min = sorted_l1_penalty_nnz.keys()[i-1]
l1_penalty_max = sorted_l1_penalty_nnz.keys()[i]
break
print l1_penalty_min
print l1_penalty_max
l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)
print l1_penalty_values
l1_penalty_rss = {}
for l1 in l1_penalty_values:
l1_penalty_model = graphlab.linear_regression.create(training, target='price', features=all_features, validation_set=None, l2_penalty=0., l1_penalty=l1, verbose=False)
l1_penalty_rss[l1] = (get_rss(l1_penalty_model,validation,validation["price"]), l1_penalty_model["coefficients"])
sorted_l1_penalty_rss = OrderedDict(sorted(l1_penalty_rss.items(), key=lambda t: t[1][0]))
for item in sorted_l1_penalty_rss.items():
if( item[1][1]["value"].nnz() == max_nonzeros):
print ("l1", item[0])
print ("rss", item[1][0])
l1_penalty_model_mask = item[1][1]["value"] > 0.0
item[1][1][l1_penalty_model_mask].print_rows(num_rows=20)
#print ("coefficients", item[1][1])
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Create new features
Step3: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms. A small numeric sketch follows this step list.
Step4: Applying L1 penalty requires adding an extra parameter (l1_penalty) to the linear regression call in GraphLab Create. (Other tools may have separate implementations of LASSO.) Note that it's important to set l2_penalty=0 to ensure we don't introduce an additional L2 penalty.
Step5: Find what features had non-zero weight.
Step6: Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
Step7: Next, we write a loop that does the following
Step8: QUIZ QUESTIONS
Step9: QUIZ QUESTION
Step10: Exploring the larger range of values to find a narrow range with the desired sparsity
Step11: Now, implement a loop that searches through this space of possible l1_penalty values
Step12: Out of this large range, we want to find the two ends of our desired narrow range of l1_penalty. At one end, we will have l1_penalty values that have too few non-zeros, and at the other end, we will have an l1_penalty that has too many non-zeros.
Step13: QUIZ QUESTIONS
Step14: For l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20)
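A tiny numeric sketch of the Step3 point about bedrooms_square (values are illustrative only):
bedrooms = [1, 2, 3, 4]
print([b ** 2 for b in bedrooms])  # [1, 4, 9, 16] -- the gap between 1 and 4 bedrooms grows from 3 to 15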
|
2,880
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (None, None)
if os.getenv("IS_TESTING_DEPLOY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPLOY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: Boston Housing tabular regression\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for Boston Housing
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import numpy as np
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.001, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=20, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=100, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
parser.add_argument('--param-file', dest='param_file',
default='/tmp/param.txt', type=str,
help='Output file for parameters')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
def make_dataset():
# Scaling Boston Housing data features
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float)
return feature, max
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
params = []
for _ in range(13):
x_train[_], max = scale(x_train[_])
x_test[_], _ = scale(x_test[_])
params.append(max)
# store the normalization (max) value for each feature
with tf.io.gfile.GFile(args.param_file, 'w') as f:
f.write(str(params))
return (x_train, y_train), (x_test, y_test)
# Build the Keras model
def build_and_compile_dnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Dense(128, activation='relu', input_shape=(13,)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='linear')
])
model.compile(
loss='mse',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=args.lr))
return model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
BATCH_SIZE = 16
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_dnn_model()
# Train the model
(x_train, y_train), (x_test, y_test) = make_dataset()
model.fit(x_train, y_train, epochs=args.epochs, batch_size=GLOBAL_BATCH_SIZE)
model.save(args.model_dir)
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_boston.tar.gz
job = aip.CustomTrainingJob(
display_name="boston_" + TIMESTAMP,
script_path="custom/trainer/task.py",
container_uri=TRAIN_IMAGE,
requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"],
)
print(job)
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
if TRAIN_GPU:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=True,
)
else:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=True,
)
model_path_to_deploy = MODEL_DIR
import tensorflow as tf
local_model = tf.keras.models.load_model(MODEL_DIR)
import numpy as np
from tensorflow.keras.datasets import boston_housing
(_, _), (x_test, y_test) = boston_housing.load_data(
path="boston_housing.npz", test_split=0.2, seed=113
)
def scale(feature):
max = np.max(feature)
feature = (feature / max).astype(np.float32)
return feature
# Let's save one data item that has not been scaled
x_test_notscaled = x_test[0:1].copy()
for _ in range(13):
x_test[_] = scale(x_test[_])
x_test = x_test.astype(np.float32)
print(x_test.shape, x_test.dtype, y_test.shape)
print("scaled", x_test[0])
print("unscaled", x_test_notscaled)
local_model.evaluate(x_test, y_test)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
serving_output = list(loaded.signatures["serving_default"].structured_outputs.keys())[0]
print("Serving function output:", serving_output)
input_name = local_model.input.name
print("Model input name:", input_name)
output_name = local_model.output.name
print("Model output name:", output_name)
XAI = "ig" # [ shapley, ig, xrai ]
if XAI == "shapley":
PARAMETERS = {"sampled_shapley_attribution": {"path_count": 10}}
elif XAI == "ig":
PARAMETERS = {"integrated_gradients_attribution": {"step_count": 50}}
elif XAI == "xrai":
PARAMETERS = {"xrai_attribution": {"step_count": 50}}
parameters = aip.explain.ExplanationParameters(PARAMETERS)
from google.cloud.aiplatform.explain.metadata.tf.v2 import \
saved_model_metadata_builder
builder = saved_model_metadata_builder.SavedModelMetadataBuilder(MODEL_DIR)
metadata = builder.get_metadata_protobuf()
print(metadata)
model = aip.Model.upload(
display_name="boston_" + TIMESTAMP,
artifact_uri=MODEL_DIR,
serving_container_image_uri=DEPLOY_IMAGE,
explanation_parameters=parameters,
explanation_metadata=metadata,
sync=False,
)
model.wait()
DEPLOYED_NAME = "boston-" + TIMESTAMP
TRAFFIC_SPLIT = {"0": 100}
MIN_NODES = 1
MAX_NODES = 1
if DEPLOY_GPU:
endpoint = model.deploy(
deployed_model_display_name=DEPLOYED_NAME,
traffic_split=TRAFFIC_SPLIT,
machine_type=DEPLOY_COMPUTE,
accelerator_type=DEPLOY_GPU,
accelerator_count=DEPLOY_NGPU,
min_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
)
else:
endpoint = model.deploy(
deployed_model_display_name=DEPLOYED_NAME,
traffic_split=TRAFFIC_SPLIT,
machine_type=DEPLOY_COMPUTE,
accelerator_type=DEPLOY_GPU,
accelerator_count=0,
min_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
)
test_item = x_test[0]
test_label = y_test[0]
print(test_item.shape)
instances_list = [test_item.tolist()]
prediction = endpoint.explain(instances_list)
print(prediction)
value = prediction[0][0][0]
print("Predicted Value:", value)
from tabulate import tabulate
feature_names = [
"crim",
"zn",
"indus",
"chas",
"nox",
"rm",
"age",
"dis",
"rad",
"tax",
"ptratio",
"b",
"lstat",
]
attributions = (
prediction.explanations[0].attributions[0].feature_attributions[serving_input]
)
rows = []
for i, val in enumerate(feature_names):
rows.append([val, test_item[i], attributions[i]])
print(tabulate(rows, headers=["Feature name", "Feature value", "Attribution value"]))
# Prepare 10 test examples to your model for prediction
instances = []
for i in range(10):
instances.append(x_test[i].tolist())
response = endpoint.explain(instances)
import numpy as np
def sanity_check_explanations(
explanation, prediction, mean_tgt_value=None, variance_tgt_value=None
):
passed_test = 0
total_test = 1
# `attributions` is a dict where keys are the feature names
# and values are the feature attributions for each feature
baseline_score = explanation.attributions[0].baseline_output_value
print("baseline:", baseline_score)
# Sanity check 1
# The prediction at the input is equal to that at the baseline.
# Please use a different baseline. Some suggestions are: random input, training
# set mean.
if abs(prediction - baseline_score) <= 0.05:
print("Warning: example score and baseline score are too close.")
print("You might not get attributions.")
else:
passed_test += 1
print("Sanity Check 1: Passed")
print(passed_test, " out of ", total_test, " sanity checks passed.")
i = 0
for explanation in response.explanations:
try:
prediction = np.max(response.predictions[i]["scores"])
except TypeError:
prediction = np.max(response.predictions[i])
sanity_check_explanations(explanation, prediction)
i += 1
endpoint.undeploy_all()
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Set hardware accelerators
Step13: Set pre-built containers
Step14: Set machine type
Step15: Tutorial
Step16: Task.py contents
Step17: Store training script on your Cloud Storage bucket
Step18: Create and run custom training job
Step19: Prepare your command-line arguments
Step20: Run the custom training job
Step21: Load the saved model
Step22: Evaluate the model
Step23: Perform the model evaluation
Step24: Get the serving function signature
Step25: Explanation Specification
Step26: Explanation Metadata
Step27: Upload the model
Step28: Deploy the model
Step29: Get test item
Step30: Make the prediction with explanation
Step31: Understanding the explanations response
Step32: Examine feature attributions
Step33: Check your explanations and baselines
Step34: Sanity check
Step35: Undeploy the model
Step36: Cleaning up
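As a closing note on the evaluation steps above (Step22–Step24): the per-feature max-scaling that the notebook's scale() helper is meant to perform can be sketched on its own. This is a minimal, self-contained illustration with made-up values, not the project's data or code:
import numpy as np
# Hypothetical feature matrix: 4 samples x 3 features (illustrative values only)
x = np.array([[1.0, 10.0, 100.0],
              [2.0, 20.0, 200.0],
              [3.0, 30.0, 300.0],
              [4.0, 40.0, 400.0]], dtype=np.float32)
# Column-wise maxima; keepdims lets broadcasting divide each column by its own max
col_max = x.max(axis=0, keepdims=True)
x_scaled = x / col_max
print(col_max)   # [[  4.  40. 400.]]
print(x_scaled)  # every column now lies in (0, 1]
The stored maxima are what the training script writes to the --param-file so that serving-time inputs can be scaled the same way.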
|
2,881
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
from sklearn.externals import joblib
import utils.preprocessing as pp
import predictor.feature_extraction as fe
def generate_one_set(params):
# print(('-'*70 + '\n {}, {} \n' + '-'*70).format(params['base_days'].values, params['ahead_days'].values))
tic = time()
train_val_time = int(params['train_val_time'])
base_days = int(params['base_days'])
step_days = int(params['step_days'])
ahead_days = int(params['ahead_days'])
print('Generating: base{}_ahead{}'.format(base_days, ahead_days))
pid = 'base{}_ahead{}'.format(base_days, ahead_days)
# Getting the data
data_df = pd.read_pickle('../../data/data_train_val_df.pkl')
today = data_df.index[-1] # Real date
print(pid + ') data_df loaded')
# Drop symbols with many missing points
data_df = pp.drop_irrelevant_symbols(data_df, params['GOOD_DATA_RATIO'])
print(pid + ') Irrelevant symbols dropped.')
# Generate the intervals for the predictor
x, y = fe.generate_train_intervals(data_df,
train_val_time,
base_days,
step_days,
ahead_days,
today,
fe.feature_volume_one_to_one,
target_feature=fe.VOLUME_FEATURE)
print(pid + ') Intervals generated')
# Drop "bad" samples and fill missing data
x_y_df = pd.concat([x, y], axis=1)
x_y_df = pp.drop_irrelevant_samples(x_y_df, params['SAMPLES_GOOD_DATA_RATIO'])
x = x_y_df.iloc[:, :-1]
y = x_y_df.iloc[:, -1]
x = pp.fill_missing(x)
print(pid + ') Irrelevant samples dropped and missing data filled.')
# Pickle that
x.to_pickle('../../data/x_volume_{}.pkl'.format(pid))
y.to_pickle('../../data/y_volume_{}.pkl'.format(pid))
toc = time()
print('%s) %i intervals generated in: %i seconds.' % (pid, x.shape[0], (toc-tic)))
return pid, x, y
best_params_df = pd.read_pickle('../../data/best_params_final_df.pkl').loc[1,:]
to_drop = [
'model',
'mre',
'r2',
'x_filename',
'y_filename',
'train_days'
]
best_params_df.drop(to_drop, inplace=True)
best_params_df
generate_one_set(best_params_df)
x_volume = pd.read_pickle('../../data/x_volume_base112_ahead1.pkl')
print(x_volume.shape)
x_volume.head()
y_volume = pd.read_pickle('../../data/y_volume_base112_ahead1.pkl')
print(y_volume.shape)
y_volume.head()
def generate_one_test_set(params, data_df):
# print(('-'*70 + '\n {}, {} \n' + '-'*70).format(params['base_days'].values, params['ahead_days'].values))
tic = time()
train_val_time = int(params['train_val_time'])
base_days = int(params['base_days'])
step_days = int(params['step_days'])
ahead_days = int(params['ahead_days'])
print('Generating: base{}_ahead{}'.format(base_days, ahead_days))
pid = 'base{}_ahead{}'.format(base_days, ahead_days)
# Getting the data
today = data_df.index[-1] # Real date
print(pid + ') data_df loaded')
# Drop symbols with many missing points
y_train_df = pd.read_pickle('../../data/y_volume_{}.pkl'.format(pid))
kept_symbols = y_train_df.index.get_level_values(1).unique().tolist()
data_df = data_df.loc[:, (slice(None), kept_symbols)]
print(pid + ') Irrelevant symbols dropped.')
# Generate the intervals for the predictor
x, y = fe.generate_train_intervals(data_df,
train_val_time,
base_days,
step_days,
ahead_days,
today,
fe.feature_volume_one_to_one,
target_feature=fe.VOLUME_FEATURE)
print(pid + ') Intervals generated')
# Drop "bad" samples and fill missing data
x_y_df = pd.concat([x, y], axis=1)
x_y_df = pp.drop_irrelevant_samples(x_y_df, params['SAMPLES_GOOD_DATA_RATIO'])
x = x_y_df.iloc[:, :-1]
y = x_y_df.iloc[:, -1]
x = pp.fill_missing(x)
print(pid + ') Irrelevant samples dropped and missing data filled.')
# Pickle that
x.to_pickle('../../data/x_volume_{}_test.pkl'.format(pid))
y.to_pickle('../../data/y_volume_{}_test.pkl'.format(pid))
toc = time()
print('%s) %i intervals generated in: %i seconds.' % (pid, x.shape[0], (toc-tic)))
    return pid, x, y
data_test_df = pd.read_pickle('../../data/data_test_df.pkl')
generate_one_test_set(best_params_df, data_test_df)
x_volume_test = pd.read_pickle('../../data/x_volume_base112_ahead1_test.pkl')
print(x_volume_test.shape)
x_volume_test.head()
y_volume_test = pd.read_pickle('../../data/y_volume_base112_ahead1_test.pkl')
print(y_volume_test.shape)
y_volume_test.head()
best_params_df = pd.read_pickle('../../data/best_params_final_df.pkl')
import predictor.feature_extraction as fe
from predictor.linear_predictor import LinearPredictor
import utils.misc as misc
import predictor.evaluation as ev
ahead_days = 1
# Get some parameters
train_days = int(best_params_df.loc[ahead_days, 'train_days'])
GOOD_DATA_RATIO, \
train_val_time, \
base_days, \
step_days, \
ahead_days, \
SAMPLES_GOOD_DATA_RATIO, \
x_filename, \
y_filename = misc.unpack_params(best_params_df.loc[ahead_days,:])
pid = 'base{}_ahead{}'.format(base_days, ahead_days)
# Get the datasets
x_train = pd.read_pickle('../../data/x_volume_{}.pkl'.format(pid))
y_train = pd.read_pickle('../../data/y_volume_{}.pkl'.format(pid))
x_test = pd.read_pickle('../../data/x_volume_{}_test.pkl'.format(pid)).sort_index()
y_test = pd.DataFrame(pd.read_pickle('../../data/y_volume_{}_test.pkl'.format(pid))).sort_index()
# Let's cut the training set to use only the required number of samples
end_date = x_train.index.levels[0][-1]
start_date = fe.add_market_days(end_date, -train_days)
x_sub_df = x_train.loc[(slice(start_date,None),slice(None)),:]
y_sub_df = pd.DataFrame(y_train.loc[(slice(start_date,None),slice(None))])
# Create the estimator and train
estimator = LinearPredictor()
estimator.fit(x_sub_df, y_sub_df)
# Get the training and test predictions
y_train_pred = estimator.predict(x_sub_df)
y_test_pred = estimator.predict(x_test)
# Get the training and test metrics for each symbol
metrics_train = ev.get_metrics_df(y_sub_df, y_train_pred)
metrics_test = ev.get_metrics_df(y_test, y_test_pred)
# Show the mean metrics
metrics_df = pd.DataFrame(columns=['train', 'test'])
metrics_df['train'] = metrics_train.mean()
metrics_df['test'] = metrics_test.mean()
print('Mean metrics: \n{}\n{}'.format(metrics_df,'-'*70))
# Plot the metrics in time
metrics_train_time = ev.get_metrics_in_time(y_sub_df, y_train_pred, base_days + ahead_days)
metrics_test_time = ev.get_metrics_in_time(y_test, y_test_pred, base_days + ahead_days)
plt.plot(metrics_train_time[2], metrics_train_time[0], label='train', marker='.')
plt.plot(metrics_test_time[2], metrics_test_time[0], label='test', marker='.')
plt.title('$r^2$ metrics')
plt.legend()
plt.figure()
plt.plot(metrics_train_time[2], metrics_train_time[1], label='train', marker='.')
plt.plot(metrics_test_time[2], metrics_test_time[1], label='test', marker='.')
plt.title('MRE metrics')
plt.legend()
joblib.dump(estimator, '../../data/best_volume_predictor.pkl')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's generate the datasets
Step2: Let's generate the test dataset, also
Step3: Let's train a predictor with the same hyperparameters as for the 'Close' one.
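The r² and MRE numbers reported by the evaluation code in this record can be illustrated in isolation; the sketch below uses made-up arrays and one common definition of the mean relative error, so treat it as an assumption rather than the project's exact metric code:
import numpy as np
from sklearn.metrics import r2_score
# Hypothetical true and predicted values, for illustration only
y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])
r2 = r2_score(y_true, y_pred)                              # coefficient of determination
mre = np.mean(np.abs(y_pred - y_true) / np.abs(y_true))    # mean relative error
print(r2, mre)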
|
2,882
|
<ASSISTANT_TASK:>
Python Code:
data_dir = './data'
# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
#data_dir = '/input'
DON'T MODIFY ANYTHING IN THIS CELL
import helper
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)
show_n_images = 25
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
import os
from glob import glob
from matplotlib import pyplot
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
show_n_images = 25
DON'T MODIFY ANYTHING IN THIS CELL
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
import problem_unittests as tests
def model_inputs(image_width, image_height, image_channels, z_dim):
Create the model inputs
:param image_width: The input image width
:param image_height: The input image height
:param image_channels: The number of image channels
:param z_dim: The dimension of Z
:return: Tuple of (tensor of real input images, tensor of z data, learning rate)
# TODO: Implement Function
real_image = tf.placeholder(tf.float32, (None, image_width, image_height,
image_channels), name='real_image')
z_data = tf.placeholder(tf.float32, (None, z_dim), name="z_data")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
return real_image, z_data, learning_rate
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def discriminator(images, reuse=False, alpha=0.001):
Create the discriminator network
:param images: Tensor of input image(s)
:param reuse: Boolean if the weights should be reused
:return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)
# TODO: Implement Function
with tf.variable_scope('discriminator', reuse=reuse):
# layer 1 in 28*28*(1 or 3)
conv_1 = tf.layers.conv2d(images, 16, 2, 2, padding='same')
conv_1 = tf.maximum(conv_1, conv_1*alpha)
# layer 2 in 14x14*16
conv_2 = tf.layers.conv2d(conv_1, 32, 2, 2, padding='same')
conv_2 = tf.layers.batch_normalization(conv_2)
conv_2 = tf.maximum(conv_2, conv_2*alpha)
# layer 3 in 7x7*32
conv_3 = tf.layers.conv2d(conv_2, 64, 2, 2, padding='same')
conv_3 = tf.layers.batch_normalization(conv_3)
conv_3 = tf.maximum(conv_3, conv_3*alpha)
# output in 4x4x64
        flat = tf.reshape(conv_3, (-1, 4*4*64))
logits = tf.layers.dense(flat, 1)
output = tf.sigmoid(logits)
return output, logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_discriminator(discriminator, tf)
def generator(z, out_channel_dim, is_train=True, alpha=0.001):
Create the generator network
:param z: Input z
:param out_channel_dim: The number of channels in the output image
:param is_train: Boolean if generator is being used for training
:return: The tensor output of the generator
# TODO: Implement Function
with tf.variable_scope('generator', reuse=not is_train):
# layer 1 input is z, a flat vector
layer_1 = tf.layers.dense(z, 2*2*512)
layer_1 = tf.reshape(layer_1,(-1,2,2,512))
# layer 2 - 2x2x512
conv_1 = tf.layers.conv2d_transpose(layer_1, 256, 2, 2, padding='same')
conv_1 = tf.maximum(conv_1, conv_1*alpha)
# layer 3 - 4x4x256
conv_2 = tf.layers.conv2d_transpose(conv_1, 128, 4, 1, padding='valid')
conv_2 = tf.layers.batch_normalization(conv_2, training=is_train)
conv_2 = tf.maximum(conv_2, conv_2*alpha)
# layer 4 - 7x7x128
conv_3 = tf.layers.conv2d_transpose(conv_2, 64, 2, 2, padding='same')
conv_3 = tf.layers.batch_normalization(conv_3, training=is_train)
conv_3 = tf.maximum(conv_3, conv_3*alpha)
        # layer 5 - 14x14x64
conv_4 = tf.layers.conv2d_transpose(conv_3, 32, 2, 2, padding='same')
conv_4 = tf.maximum(conv_4, conv_4*alpha)
        # output - 28x28x32
logits = tf.layers.conv2d_transpose(conv_4, out_channel_dim, 2, 1, padding='same')
output = tf.tanh(logits)
return output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_generator(generator, tf)
def model_loss(input_real, input_z, out_channel_dim, smooth=0.9):
Get the loss for the discriminator and generator
:param input_real: Images from the real dataset
:param input_z: Z input
:param out_channel_dim: The number of channels in the output image
:return: A tuple of (discriminator loss, generator loss)
# TODO: Implement Function
g_model = generator(input_z, out_channel_dim)
d_output_real, d_logits_real = discriminator(input_real)
d_output_fake, d_logits_fake = discriminator(g_model, reuse=True)
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
return d_loss, g_loss
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_loss(model_loss)
def model_opt(d_loss, g_loss, learning_rate, beta1):
Get optimization operations
:param d_loss: Discriminator loss Tensor
:param g_loss: Generator loss Tensor
:param learning_rate: Learning Rate Placeholder
:param beta1: The exponential decay rate for the 1st moment in the optimizer
:return: A tuple of (discriminator training operation, generator training operation)
# TODO: Implement Function
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith("generator")]
d_vars = [var for var in t_vars if var.name.startswith("discriminator")]
# batch normalization needs to update the graph
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
g_updates = [opt for opt in update_ops if opt.name.startswith('generator')]
with tf.control_dependencies(g_updates):
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
return d_train_opt, g_train_opt
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_opt(model_opt, tf)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
Show example output for the generator
:param sess: TensorFlow session
:param n_images: Number of Images to display
:param input_z: Input Z Tensor
:param out_channel_dim: The number of channels in the output image
:param image_mode: The mode to use for images ("RGB" or "L")
cmap = None if image_mode == 'RGB' else 'gray'
z_dim = input_z.get_shape().as_list()[-1]
example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])
samples = sess.run(
generator(input_z, out_channel_dim, False),
feed_dict={input_z: example_z})
images_grid = helper.images_square_grid(samples, image_mode)
pyplot.imshow(images_grid, cmap=cmap)
pyplot.show()
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):
Train the GAN
:param epoch_count: Number of epochs
:param batch_size: Batch Size
:param z_dim: Z dimension
:param learning_rate: Learning Rate
:param beta1: The exponential decay rate for the 1st moment in the optimizer
:param get_batches: Function to get batches
:param data_shape: Shape of the data
:param data_image_mode: The image mode to use for images ("RGB" or "L")
# TODO: Build Model
# input shape
_, image_width, image_height, image_channels = data_shape
# model inputs
input_real, input_z, learn_rate = model_inputs(image_width, image_height, image_channels, z_dim)
# model losses
d_loss, g_loss = model_loss(input_real, input_z, image_channels)
# optimization
d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
saver = tf.train.Saver()
samples = []
losses = []
steps = 0
print_loss_every = 10
show_images_every = 80
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epoch_count):
for batch_images in get_batches(batch_size):
# TODO: Train Model
# Get images, reshape and rescale to pass to D
# images are b/w -0.5 to 0.5, so rescaling to -1 to 1
                batch_images = batch_images * 2.0
                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim))
# Run optimizers
_ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_opt, feed_dict={input_z: batch_z})
steps += 1
# get the losses and print them out every so often
if steps % print_loss_every == 0:
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(epoch_i+1, epoch_count),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# show generated images every so often
if steps % show_images_every == 0:
show_generator_output(sess, 20, input_z, image_channels, data_image_mode)
batch_size = 64
z_dim = 128
learning_rate = 0.001
beta1 = 0.5
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
epochs = 2
mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, mnist_dataset.get_batches,
mnist_dataset.shape, mnist_dataset.image_mode)
# Reusing the values that worked for the MNIST run above
batch_size = 64
z_dim = 128
learning_rate = 0.001
beta1 = 0.5
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
epochs = 1
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))
with tf.Graph().as_default():
train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches,
celeba_dataset.shape, celeba_dataset.image_mode)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Face Generation
Step3: Explore the Data
Step5: CelebA
Step7: Preprocess the Data
Step10: Input
Step13: Discriminator
Step16: Generator
Step19: Loss
Step22: Optimization
Step25: Neural Network Training
Step27: Train
Step29: MNIST
Step31: CelebA
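A brief aside on the preprocessing done inside train(): the helper is assumed to hand back images scaled to [-0.5, 0.5], so a single multiplication by 2 maps them onto the generator's tanh range, and the latent vectors are drawn uniformly from [-1, 1]. A minimal, self-contained sketch with dummy data (not the helper module):
import numpy as np
# Dummy batch standing in for helper.Dataset output, already scaled to [-0.5, 0.5]
batch_images = np.random.uniform(-0.5, 0.5, size=(64, 28, 28, 1))
# Rescale once to [-1, 1] to match the tanh output of the generator
batch_images = batch_images * 2.0
# Sample the latent vectors the generator consumes
z_dim = 100
batch_z = np.random.uniform(-1, 1, size=(64, z_dim))
print(batch_images.min(), batch_images.max(), batch_z.shape)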
|
2,883
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([1, 2, 3]) # Create a rank 1 array
print(type(a)) # Prints "<type 'numpy.ndarray'>"
print(a.shape) # Prints "(3,)"
print(a[0], a[1], a[2]) # Prints "1 2 3"
a[0] = 5 # Change an element of the array
print(a) # Prints "[5, 2, 3]"
a = np.zeros(10) # Create an array of all zeros
print(a)
b = np.ones(10) # Create an array of all ones
print(b) # Prints "[[ 1. 1.]]"
c = np.full(10, 7) # Create a constant array
print(c) # Prints "[[ 7. 7.]
# [ 7. 7.]]"
e = np.random.random(10) # Create an array filled with random values
print(e)
a = np.random.random(15) # Create a rank 1 array with 15 elements
print(a.shape)
print(a)
# Select elements 10, 11 and 15 of the array
print(a[9], a[10], a[14])
# Now take a slice: all elements, but the first and second
print('Taking a slice!')
b = a[2:]
print(b)
# And notice how the shape changed!
print('We have removed two elements!')
print(b.shape)
print('Take another slice: all elements in between the third and tenth')
c = a[2:11]
print(c)
a = np.full(10, 7.)
b = np.full(10, 4.)
print(a)
print(b)
print('Sum of a and b')
print(a + b)
print('Difference of a and b')
print(a - b)
print('Elementwise product of a and b')
print(a * b)
print('Elementwise division of a and b')
print(a / b)
print('Elementwise square root of b')
print(np.sqrt(b))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As you can see, we can easily access all this information. One-dimensional arrays in NumPy can be used to represent vectors, while two-dimensional arrays can be used to represent matrices.
Step2: Index counting in Python starts from 0, not from 1! As you can see, subscript notation can also be used to modify the value at a given index.
Step3: NumPy supports indexing arrays via slicing, much like Matlab does. While indexing with a single integer returns the element at that position of the array, i.e. a scalar, slicing returns another, usually smaller, array. For example
Step4: As you can see, we can quite easily select just parts of an array and either create new, smaller arrays or just obtain the value at a certain position.
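To make the indexing-versus-slicing distinction concrete, here is a small self-contained example (the values are chosen arbitrarily and are not part of the exercise above):
import numpy as np
a = np.arange(5) * 10   # array([ 0, 10, 20, 30, 40])
print(a[2])             # integer indexing -> the scalar 20
print(a[1:4])           # slicing -> another 1-D array: [10 20 30]
print(a[1:4].shape)     # (3,) -- fewer elements, but still one-dimensional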
|
2,884
|
<ASSISTANT_TASK:>
Python Code:
def secondary_polygon(Angle):
    edges_primary = 360 // Angle
    if edges_primary >= 6:
        edges_max_secondary = edges_primary // 2
        return edges_max_secondary + 3
    else:
        return "Not Possible"
if __name__ == '__main__':
    Angle = 45
    print(secondary_polygon(Angle))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,885
|
<ASSISTANT_TASK:>
Python Code:
one_to_ten = [1,2,3,4,5,6,7,8,9,10]
print one_to_ten
one_to_ten = []
one_to_ten.append(1)
print one_to_ten
one_to_ten.append(2)
print one_to_ten
one_to_ten.append(3)
print one_to_ten
one_to_ten.append(4)
print one_to_ten
one_to_ten.append(5)
print one_to_ten
one_to_ten.append(6)
print one_to_ten
one_to_ten.append(7)
print one_to_ten
one_to_ten.append(8)
print one_to_ten
one_to_ten.append(9)
print one_to_ten
one_to_ten.append(10)
print one_to_ten
i = 1
while(i <= 10):
print i
i = i+1
i = 1;
while(i <= 10):
print i
i = i + 3
#WRITE YOUR CODE HERE
#SOLUTION
i = 2
while(i <= 20):
print i
i = i+2
#WRITE YOUR CODE HERE
one_to_ten = []
#SOLUTION
i = 1
while(i <= 10):
one_to_ten.append(i)
i = i+1
print one_to_ten
#WRITE YOUR CODE BELOW
import time
#import board
temperatures = []
#SOLUTION
i = 0
while(i < 5):
    #temperatures.append(board.getTemperature())
    time.sleep(1)
    i = i+1
import matplotlib.pyplot as plot
plot.plot(temperatures)
plot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is great, because now we can remember values that our sensors returned at specific instants in time. There are many operations we can use to modify a list. One important one that we will use here is the append operation, which adds an element to the end of the list. An example is shown below: one_to_ten starts out empty, and we want to see how we can fill it up, so we first bind one_to_ten to an empty list, which is denoted by [].
Step2: We can see that every time we called one_to_ten.append(x), we appended x to the list. For more operations we can perform on lists, see the python documentation here.
Step3: Let me explain what the code is doing. If you remember the if/else paradigm from the previous lesson, you'll remember that the if statement takes in a boolean value and runs the code in the if block. A while loop also does this, except that when it finishes running the code it returns to the condition statement, and if it is still true, runs the code again, whereas an if statement just continues on. In the code above, i is initially equal to 1, which we print. We then increment i by 1, so now i = 2. Since 2 <= 10, we run the code again, print i, then increment. We keep doing this until i is incremented to 11, at which point 11 <= 10 is no longer true, so the while loop terminates. Note that it is important to increment i every time; otherwise the condition stays true forever and the loop never ends.
Step4: In the block below, write some code, using a while loop, that will print out every even number, starting from 2 and ending with 20. It should print out
Step5: Now we can easily create the list one_to_ten by using a loop! Try to see if you can make one_to_ten by using append and a while loop below.
Step6: Now we can see how we use these tools to log data from our sensors. Let's log the temperature of the room over 5 seconds.
Step7: Congratulations, you've made your first data logger! We could easily modify this code to take temperatures over an entire day, a week, a month, or even a year! Now we have to discuss a concept called sample rate. Simply put, the sample rate is how many samples we take per second. In the example above we had a sample rate of 1, because we took one sample every second. If we had taken 10 samples over 5 seconds, we would have had a 0.5 second delay between samples and a sample rate of 2, because we would have taken 2 samples every second. In general, $\text{sample rate} = \frac{\text{total samples}}{\text{total time}}$. A higher sample rate is usually better because it gives us more data points over the same time span, which makes changes and patterns easier to see. With too low a sample rate we can't see how the temperature changes between readings; it only tells us what the temperature was at a few isolated times. The sketch below shows a small logger running at 2 samples per second.
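A short sketch of such a logger, using the formula above; since no sensor board is assumed here, the temperature readings are faked with random numbers:
import time
import random
samples = []
total_time = 5          # seconds
sample_rate = 2         # samples per second
delay = 1.0 / sample_rate
i = 0
while(i < total_time * sample_rate):
    samples.append(random.uniform(20, 25))  # stand-in for a real temperature reading
    time.sleep(delay)
    i = i + 1
print("collected %d samples -> %.1f samples per second" % (len(samples), len(samples) / float(total_time)))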
|
2,886
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from math import sqrt
import sys
from bokeh.plotting import figure, show, ColumnDataSource, save
from bokeh.models import Range1d, HoverTool
from bokeh.io import output_notebook, output_file
import quandl
from gurobipy import *
# output_notebook() #To enable Bokeh output in notebook, uncomment this line
APIToken = "xxx-xxxxxx"
quandlcodes = ["GOOG/NASDAQ_AAPL.4","WIKI/GOOGL.4", "GOOG/NASDAQ_CSCO.4","GOOG/NASDAQ_FB.4",
"GOOG/NASDAQ_MSFT.4","GOOG/NASDAQ_TSLA.4","GOOG/NASDAQ_YHOO.4","GOOG/PINK_CSGKF.4",
"YAHOO/F_EOAN.4","YAHOO/F_BMW.4","YAHOO/F_ADS.4","GOOG/NYSE_ABB.4","GOOG/VTX_ADEN.4",
"GOOG/VTX_NOVN.4","GOOG/VTX_HOLN.4","GOOG/NYSE_UBS.4", "GOOG/NYSE_SAP.4", "YAHOO/SW_SNBN.4",
"YAHOO/IBM.4", "YAHOO/RIG.4" , "YAHOO/CTXS.4", "YAHOO/INTC.4","YAHOO/KO.4",
"YAHOO/NKE.4","YAHOO/MCD.4","YAHOO/EBAY.4","GOOG/VTX_NESN.4","YAHOO/MI_ALV.4","YAHOO/AXAHF.4",
"GOOG/VTX_SREN.4"]
data = quandl.get(quandlcodes,authtoken=APIToken, trim_start='2009-01-01', trim_end='2016-11-09', paginate=True, per_end_date={'gte': '2009-01-01'},
qopts={'columns':['ticker', 'per_end_date']})
GrowthRates = data.pct_change()*100
syms = GrowthRates.columns
Sigma = GrowthRates.cov()
stats = pd.concat((GrowthRates.mean(),GrowthRates.std()),axis=1)
stats.columns = ['Mean_return', 'Volatility']
extremes = pd.concat((stats.idxmin(),stats.min(),stats.idxmax(),stats.max()),axis=1)
extremes.columns = ['Minimizer','Minimum','Maximizer','Maximum']
stats
fig = figure(tools="pan,box_zoom,reset,resize")
source = ColumnDataSource(stats)
hover = HoverTool(tooltips=[('Symbol','@index'),('Volatility','@Volatility'),('Mean return','@Mean_return')])
fig.add_tools(hover)
fig.circle('Volatility', 'Mean_return', size=5, color='maroon', source=source)
fig.text('Volatility', 'Mean_return', syms, text_font_size='10px', x_offset=3, y_offset=-2, source=source)
fig.xaxis.axis_label='Volatility (standard deviation)'
fig.yaxis.axis_label='Mean return'
output_file("portfolio.html")
show(fig)
# Instantiate our model
m = Model("portfolio")
# Create one variable for each stock
portvars = [m.addVar(name=symb,lb=0.0) for symb in syms]
portvars[7]=m.addVar(name='GOOG/PINK_CSGKF - Close',lb=0.0,ub=0.5)
portvars = pd.Series(portvars, index=syms)
portfolio = pd.DataFrame({'Variables':portvars})
# Commit the changes to the model
m.update()
# The total budget
p_total = portvars.sum()
# The mean return for the portfolio
p_return = stats['Mean_return'].dot(portvars)
# The (squared) volatility of the portfolio
p_risk = Sigma.dot(portvars).dot(portvars)
# Set the objective: minimize risk
m.setObjective(p_risk, GRB.MINIMIZE)
# Fix the budget
m.addConstr(p_total, GRB.EQUAL, 1)
# Select a simplex algorithm (to ensure a vertex solution)
m.setParam('Method', 1)
m.optimize()
portfolio['Minimum risk'] = portvars.apply(lambda x:x.getAttr('x'))
portfolio
# Add the return target
ret50 = 0.5 * extremes.loc['Mean_return','Maximum']
fixreturn = m.addConstr(p_return, GRB.EQUAL, ret50)
m.optimize()
portfolio['50% Max'] = portvars.apply(lambda x:x.getAttr('x'))
m.setParam('OutputFlag',False)
# Determine the range of returns. Make sure to include the lowest-risk
# portfolio in the list of options
minret = extremes.loc['Mean_return','Minimum']
maxret = extremes.loc['Mean_return','Maximum']
riskret = extremes.loc['Volatility','Minimizer']
riskret = stats.loc[riskret,'Mean_return']
riskret =sum(portfolio['Minimum risk']*stats['Mean_return'])
returns = np.unique(np.hstack((np.linspace(minret,maxret,10000),riskret)))
# Iterate through all returns
risks = returns.copy()
for k in range(len(returns)):
fixreturn.rhs = returns[k]
m.optimize()
risks[k] = sqrt(p_risk.getValue())
fig = figure(tools="pan,box_zoom,reset,resize")
# Individual stocks
fig.circle(stats['Volatility'], stats['Mean_return'], size=5, color='maroon')
fig.text(stats['Volatility'], stats['Mean_return'], syms, text_font_size='10px', x_offset=3, y_offset=-2)
fig.circle('Volatility', 'Mean_return', size=5, color='maroon', source=source)
# Divide the efficient frontier into two sections: those with
# a return less than the minimum risk portfolio, those that are greater.
tpos_n = returns >= riskret
tneg_n = returns <= riskret
fig.line(risks[tneg_n], returns[tneg_n], color='red')
fig.line(risks[tpos_n], returns[tpos_n], color='blue')
fig.xaxis.axis_label='Volatility (standard deviation)'
fig.yaxis.axis_label='Mean return'
fig.legend.orientation='bottom_left'
output_file("efffront.html")
show(fig)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First of all, we need some data to proceed. For that purpose we use Quandl, so you're going to need the quandl package. This isn't strictly necessary, as pulling from the API is quite simple with or without the package, but it does make things a bit easier and knocks out a few steps. The Quandl package can be downloaded here. Once quandl is set up, the next thing to do is to choose some stocks to import. The following is a random selection of stocks.
Step2: The command to import those stocks is quandl.get(). With trim_start and trim_end we can choose a desired time horizon.
Step3: Let's now calculate the growth rates and some stats
Step4: As we move towards our Markowitz portfolio designs it makes sense to view the stocks on a mean/variance scatter plot.
Step5: Gurobi
Step6: Minimum Risk Model
Step7: The efficient frontier
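The two quantities the Gurobi model trades off along the frontier can be sketched with plain NumPy before handing anything to the solver; the weights and returns below are illustrative placeholders, not the Quandl series used above:
import numpy as np
# Hypothetical daily returns for 3 assets (rows = days, columns = assets)
returns = np.random.randn(250, 3) * 0.01
mu = returns.mean(axis=0)               # mean return per asset
Sigma = np.cov(returns, rowvar=False)   # covariance matrix
w = np.array([0.5, 0.3, 0.2])           # a candidate portfolio (weights sum to 1)
port_return = mu.dot(w)                  # the return that gets fixed on the frontier
port_risk = np.sqrt(w.dot(Sigma).dot(w)) # the volatility the model minimises
print(port_return, port_risk)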
|
2,887
|
<ASSISTANT_TASK:>
Python Code:
import sys, os
# verbose = os.environ.get('RADICAL_PILOT_VERBOSE', 'REPORT')
os.environ['RADICAL_PILOT_VERBOSE'] = 'ERROR'
from adaptivemd import Project
project = Project('test')
from adaptivemd import LocalCluster, AllegroCluster
resource_id = 'local.jhp'
if resource_id == 'local.jhp':
project.initialize(LocalJHP())
elif resource_id == 'local.sheep':
project.initialize(LocalSheep())
elif resource_id == 'fub.allegro':
project.initialize(AllegroCluster())
from adaptivemd.engine.openmm import OpenMMEngine
from adaptivemd.analysis.pyemma import PyEMMAAnalysis
from adaptivemd import File, Directory
pdb_file = File('file://../files/alanine/alanine.pdb').named('initial_pdb')
engine = OpenMMEngine(
pdb_file=pdb_file,
system_file=File('file://../files/alanine/system.xml'),
integrator_file=File('file://../files/alanine/integrator.xml'),
args='-r --report-interval 1 -p CPU --store-interval 1'
).named('openmm')
engine.name
modeller = PyEMMAAnalysis(
pdb_file=pdb_file
).named('pyemma')
import datetime
datetime.datetime.fromtimestamp(modeller.__time__).strftime("%Y-%m-%d %H:%M:%S")
project.generators.add(engine)
project.generators.add(modeller)
print project.generators
scheduler = project.get_scheduler(cores=1)
trajectory = project.new_trajectory(engine['pdb_file'], 100)
trajectory
task = engine.task_run_trajectory(trajectory)
scheduler(task)
scheduler.is_idle
print scheduler.generators
# scheduler.wait()
print project.files
print project.trajectories
scheduler.exit()
project.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We want to stop RP from reporting all sorts of stuff for this example so we set a specific environment variable to tell RP to do so. If you want to see what RP reports change it to REPORT.
Step2: We will import the appropriate parts from AdaptiveMD as we go along, so it is clear what is needed at what stage. Usually you will have the block of imports at the beginning of your script or notebook, as suggested in PEP8.
Step3: Let's open a project with a UNIQUE name. This will be the name used in the DB, so make sure it is new and not too short. Opening a project will create it if it does not exist yet and reopen it if it does. You cannot choose between opening modes as you would with a file. This is a precaution so that you do not accidentally delete your project.
Step4: Now we have a handle for our project. First thing is to set it up to work on a resource.
Step5: TaskGenerators
Step6: The engine
Step7: Here we used a special prefix that can point to specific locations.
Step8: To explain this: we now have an OpenMMEngine which uses the previously made pdb File object and the location defined in there. It likewise takes Files for the OpenMM XML files, plus some args to store every frame (to keep it fast) and to run on the CPU kernel.
Step9: The modeller
Step10: Again we name it pyemma for later reference.
Step11: Note, that you cannot add the same engine twice. But if you create a new engine it will be considered different and hence you can store it again.
Step12: Next we create the parameters for the engine to run the simulation. Since it seemed appropriate, we use a Trajectory object (a special File with an initial frame and a length) as the input. You could of course pass these things separately, but this way we can actually reference the not-yet-existing trajectory and work with it.
Step13: This says, initial is alanine.pdb run for 100 frames and is named xxxxxxxx.dcd.
Step14: That's it: we just take a trajectory description and turn it into a task that contains the shell commands, the needed files, etc.
Step15: Now we have to wait. To see if we are done, you can check whether the scheduler is still running tasks.
Step16: or you wait until it becomes idle using .wait()
Step17: If all went as expected we will now have our first trajectory.
Step18: Excellent, so clean up and close our queue
Step19: and close the project.
|
2,888
|
<ASSISTANT_TASK:>
Python Code:
import sys
import pandas as pd
import numpy as np
ALL = -1
# DEBUG = True
DEBUG = False
##============================================================
# Data file format:
# * tab-delimited input file
# * 1st line: dimension names and the last dimension is assumed to be the measure
# * rest of the lines: data values.
def read_data(filename):
df = pd.read_csv(filename, sep='\t')
dims = df.shape[1] - 1 # the last dim is the measure
return (df, dims)
def dump_input2(input):
if DEBUG:
print("\n.. BUC_rec invoked on:")
print(input)
print("......................\n")
# helper functions
def project_data(input, d):
# Return only the d-th column of INPUT
return input.iloc[:, d]
def select_data(input, d, val):
# SELECT * FROM INPUT WHERE INPUT.d = VAL
col_name = input.columns[d]
return input[input[col_name] == val]
def remove_first_dim(input):
# Remove the first dim of the input
return input.iloc[:, 1:]
def slice_data_dim0(input, v):
# syntactic sugar to get R_{ALL} in a less verbose way
df_temp = select_data(input, 0, v)
return remove_first_dim(df_temp)
def output(val):
print('=>\t{}'.format(val))
data, d = read_data('./asset/a_.txt')
print(d)
data
project_data(data, 0)
select_data(data, 1, 2)
slice_data_dim0(data, 2)
# Note that input is a DataFrame
def buc_rec(input):
# Note that input is a DataFrame
dump_input2(input)
dims = input.shape[1]
if dims == 1:
# only the measure dim
input_sum = sum( project_data(input, 0) )
output(input_sum)
else:
# the general case
dim0_vals = set(project_data(input, 0).values)
for dim0_v in dim0_vals:
sub_data = slice_data_dim0(input, dim0_v)
buc_rec(sub_data)
## for R_{ALL}
sub_data = remove_first_dim(input)
buc_rec(sub_data)
buc_rec(data)
data.pivot_table(index = ['A'], columns = ['B'], aggfunc = np.sum, margins = True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we can implement the buc_rec() algorithm and test it.
Step2: With the following pivot table, we can easily see the output is correct (i.e., all the (non-empty) aggregates are computed).
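To spell out what 'all the (non-empty) aggregates' means, here is a tiny brute-force sketch that groups a toy table by every subset of its dimensions; it is independent of buc_rec and only meant to illustrate the set of aggregates being computed:
import itertools
import pandas as pd
df = pd.DataFrame({'A': [1, 1, 2], 'B': [1, 2, 1], 'M': [10, 20, 30]})
dims = ['A', 'B']
for r in range(len(dims), -1, -1):
    for group in itertools.combinations(dims, r):
        if group:
            # group-by on this subset of dimensions, summing the measure M
            print(group, df.groupby(list(group))['M'].sum().to_dict())
        else:
            # the empty subset corresponds to the grand total (R_ALL)
            print('ALL', df['M'].sum())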
|
2,889
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csir-csiro', 'sandbox-1', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
2,890
|
<ASSISTANT_TASK:>
Python Code:
# Pipeline class
from quantopian.pipeline import Pipeline
def make_pipeline():
# Create and return an empty Pipeline
return Pipeline()
# Import Pipeline class and EquityPricing dataset
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import EquityPricing
from quantopian.pipeline.domain import US_EQUITIES
def make_pipeline():
# Get latest closing price
close_price = EquityPricing.close.latest
# Return Pipeline containing latest closing price
return Pipeline(
columns={
'close_price': close_price,
},
domain=US_EQUITIES,
)
# Import Pipeline class and datasets
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import EquityPricing
from quantopian.pipeline.domain import US_EQUITIES
from quantopian.pipeline.data.sentdex import sentiment
# Import built-in moving average calculation
from quantopian.pipeline.factors import SimpleMovingAverage
def make_pipeline():
# Get latest closing price
close_price = EquityPricing.close.latest
    # Calculate 3 day average of the sentiment signal
sentiment_score = SimpleMovingAverage(
inputs=[sentiment.sentiment_signal],
window_length=3,
)
# Return Pipeline containing close_price
# and sentiment_score
return Pipeline(
columns={
'close_price': close_price,
'sentiment_score': sentiment_score,
}
)
# Import Pipeline class and datasets
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data import EquityPricing
from quantopian.pipeline.domain import US_EQUITIES
from quantopian.pipeline.data.sentdex import sentiment
# Import built-in moving average calculation
from quantopian.pipeline.factors import SimpleMovingAverage
# Import built-in trading universe
from quantopian.pipeline.filters import QTradableStocksUS
def make_pipeline():
# Create a reference to our trading universe
base_universe = QTradableStocksUS()
# Get latest closing price
close_price = EquityPricing.close.latest
# Calculate 3 day average of sentiment scores
sentiment_score = SimpleMovingAverage(
inputs=[sentiment.sentiment_signal],
window_length=3,
)
# Return Pipeline containing close_price and
# sentiment_score that has our trading universe as screen
return Pipeline(
columns={
'close_price': close_price,
'sentiment_score': sentiment_score,
},
screen=base_universe & sentiment_score.notnan(),
domain=US_EQUITIES,
)
# Import run_pipeline method
from quantopian.research import run_pipeline
# Execute pipeline created by make_pipeline
# between start_date and end_date
pipeline_output = run_pipeline(
make_pipeline(),
start_date='2014-01-01',
    end_date='2017-01-01'
)
# Display first 10 rows
pipeline_output.head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To add an output to our pipeline we need to include a reference to a dataset, and specify the computations we want to carry out on that data. For example, we will add a reference to the close column from the EquityPricing dataset. Then, we can define our output to be the latest value from this column as follows
Step2: The Pipeline API also provides a number of built-in calculations, some of which are computed over trailing windows of data. For example, the following code imports the sentdex sentiment dataset and defines an output as the 3 day moving average of its sentiment_signal column
Step3: Universe Selection
Step4: Now that our pipeline definition is complete, we can execute it over a specific period of time using run_pipeline. The output will be a pandas DataFrame indexed by date and asset, with columns corresponding to the outputs we added to our pipeline definition
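As a rough illustration (not part of the original tutorial), the (date, asset) MultiIndexed output could be inspected as follows; pipeline_output is the DataFrame returned by run_pipeline in the code above:
# The two index levels hold the simulation dates and the assets
dates = pipeline_output.index.get_level_values(0).unique()
# Cross-section of all assets for the first date in the output
first_day = pipeline_output.loc[dates[0]]
# A single output column, still indexed by (date, asset)
scores = pipeline_output['sentiment_score']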
|
2,891
|
<ASSISTANT_TASK:>
Python Code:
import os
from nipype import Workflow, Node, Function
def sum(a, b):
return a + b
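# Build a workflow containing a single Function node that wraps the plain-Python sum() defined above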
wf = Workflow('hello')
adder = Node(Function(input_names=['a', 'b'],
output_names=['sum'],
function=sum),
name='a_plus_b')
adder.inputs.a = 1
adder.inputs.b = 3
wf.add_nodes([adder])
wf.base_dir = os.getcwd()
eg = wf.run()
list(eg.nodes())[0].result.outputs
def concat(a, b):
return [a, b]
concater = Node(Function(input_names=['a', 'b'],
output_names=['some_list'],
function=concat),
name='concat_a_b')
wf.connect(adder, 'sum', concater, 'a')
concater.inputs.b = 3
eg = wf.run()
print(eg.nodes())
list(eg.nodes())[-1].result.outputs
def plus_one(a):
return a + 1
plusone = Node(Function(input_names=['a'],
output_names=['out'],
function=plus_one),
name='add_1')
wf.connect(concater, 'some_list', plusone, 'a')
try:
eg = wf.run()
except(RuntimeError) as err:
print("RuntimeError:", err)
else:
raise
!nipypecli crash crash*
from nipype import MapNode
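# A MapNode runs the wrapped function once for every element of the inputs listed in iterfield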
plusone = MapNode(Function(input_names=['a'],
output_names=['out'],
function=plus_one),
iterfield=['a'],
name='add_1')
wf = Workflow('hello_mapnode')
adder = Node(Function(input_names=['a', 'b'],
output_names=['sum'],
function=sum),
name='a_plus_b')
adder.inputs.a = 1
adder.inputs.b = 3
wf.connect(adder, 'sum', concater, 'a')
concater.inputs.b = 3
wf.connect(concater, 'some_list', plusone, 'a')
wf.base_dir = os.getcwd()
eg = wf.run()
print(eg.nodes())
print(list(eg.nodes())[2].result.outputs)
adder.iterables = ('a', [1, 2])
adder.inputs.b = 2
eg = wf.run()
print(eg.nodes())
list(eg.nodes())[5].result.outputs
wf.write_graph(graph2use='exec')
from IPython.display import Image
Image("hello_mapnode/graph.png")
Image("hello_mapnode/graph_detailed.png")
concater.iterables = ('b', [3, 4])
eg = wf.run()
eg.nodes();
wf.write_graph(graph2use='exec')
Image("hello_mapnode/graph_detailed.png")
def merge_and_scale_data(data2):
import numpy as np
return (np.array(data2) * 1000).tolist()
from nipype import JoinNode
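# joinsource is the node that defined the iterables (adder); joinfield collects the 'data2' inputs from all its branches into one list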
joiner = JoinNode(Function(input_names=['data2'],
output_names=['data_scaled'],
function=merge_and_scale_data),
name='join_scale_data',
joinsource=adder,
joinfield=['data2'])
wf.connect(plusone, 'out', joiner, 'data2')
eg = wf.run()
eg.nodes()
list(eg.nodes())[0].result.outputs
wf.write_graph(graph2use='exec')
Image("hello_mapnode/graph.png")
Image("hello_mapnode/graph_detailed.png")
%time eg = wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})
wf.base_dir = os.path.join(os.getcwd(), 'alt')
%time eg = wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})
%time eg = wf.run(plugin='MultiProc', plugin_args={'n_procs': 2})
#write your code here
# 1. write 3 functions: one that returns a list of number from a specific range,
# second that returns n! (you can use math.factorial) and third, that sums the elements from a list
# 2. create a workflow and define the working directory
# 3. define 3 nodes using Node and MapNode and connect them within the workflow
# 4. run the workflow and check the results
from nipype import Workflow, Node, MapNode, Function
import os
def range_fun(n_min, n_max):
return list(range(n_min, n_max+1))
def factorial(n):
# print("FACTORIAL, {}".format(n))
import math
return math.factorial(n)
def summing(terms):
return sum(terms)
wf_ex1 = Workflow('ex1')
wf_ex1.base_dir = os.getcwd()
range_nd = Node(Function(input_names=['n_min', 'n_max'],
output_names=['range_list'],
function=range_fun),
name='range_list')
factorial_nd = MapNode(Function(input_names=['n'],
output_names=['fact_out'],
function=factorial),
iterfield=['n'],
name='factorial')
summing_nd = Node(Function(input_names=['terms'],
output_names=['sum_out'],
function=summing),
name='summing')
range_nd.inputs.n_min = 0
range_nd.inputs.n_max = 3
wf_ex1.add_nodes([range_nd])
wf_ex1.connect(range_nd, 'range_list', factorial_nd, 'n')
wf_ex1.connect(factorial_nd, 'fact_out', summing_nd, "terms")
eg = wf_ex1.run()
eg.nodes()
list(eg.nodes())[2].result.outputs
print(list(eg.nodes())[0].result.outputs)
print(list(eg.nodes())[1].result.outputs)
# write your solution here
# 1. write 3 functions: one that returns a list of number from a range between 0 and some n,
# second that returns a term for a specific k, and third, that sums the elements from a list
# 2. create a workflow and define the working directory
# 3. define 3 nodes using Node and MapNode and connect them within the workflow
# 4. use iterables for 4 values of x
# 5. run the workflow and check the final results for every value of x
# we can reuse function from previous exercise, but they need some edits
from nipype import Workflow, Node, MapNode, JoinNode, Function
import os
import math
def range_fun(n_max):
return list(range(n_max+1))
def term(k, x):
import math
fract = math.factorial(2 * k + 1)
polyn = x ** (2 * k + 1)
return (-1)**k * polyn / fract
def summing(terms):
return sum(terms)
wf_ex2 = Workflow('ex2')
wf_ex2.base_dir = os.getcwd()
range_nd = Node(Function(input_names=['n_max'],
output_names=['range_list'],
function=range_fun),
name='range_list')
term_nd = MapNode(Function(input_names=['k', 'x'],
output_names=['term_out'],
function=term),
iterfield=['k'],
name='term')
summing_nd = Node(Function(input_names=['terms'],
output_names=['sum_out'],
function=summing),
name='summing')
range_nd.inputs.n_max = 15
x_list = [0, 0.5 * math.pi, math.pi, 1.5 * math.pi, 2 * math.pi]
term_nd.iterables = ('x', x_list)
wf_ex2.add_nodes([range_nd])
wf_ex2.connect(range_nd, 'range_list', term_nd, 'k')
wf_ex2.connect(term_nd, 'term_out', summing_nd, "terms")
eg = wf_ex2.run()
eg.nodes()
print(list(eg.nodes())[2].result.outputs)
print(list(eg.nodes())[4].result.outputs)
print(list(eg.nodes())[6].result.outputs)
print(list(eg.nodes())[8].result.outputs)
print(list(eg.nodes())[10].result.outputs)
# write your code here
# 1. create an additional function that takes 2 lists and combines them into one container, e.g. dictionary
# 2. use JoinNode to define a new node that merges results from Exercise 2 and connect it to the workflow
# 3. run the workflow and check the results of the merging node
def merge_results(results, x):
return dict(zip(x, results))
join_nd = JoinNode(Function(input_names=['results', 'x'],
output_names=['results_cont'],
function=merge_results),
name='merge',
joinsource=term_nd, # this is the node that used iterables for x
joinfield=['results'])
# taking the list of arguments from the previous part
join_nd.inputs.x = x_list
# connecting a new node to the summing_nd
wf_ex2.connect(summing_nd, "sum_out", join_nd, "results")
eg = wf_ex2.run()
eg.nodes()
list(eg.nodes())[1].result.outputs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating Workflow with one Node that adds two numbers
Step2: Creating a second node and connecting to the hello Workflow
Step3: And we can check the results of our Workflow; we should see a list
Step4: We will try to add an additional Node that adds one
Step5: This time the workflow didn't execute cleanly and we got an error. We can use nipypecli to read the crashfile (note that if you have multiple crashfiles in the directory you'll have to provide the full name)
Step6: It clearly shows the problematic Node and its input. We tried to add an integer to a list, which is not allowed in Python (a plain-Python illustration of this error is sketched after this list).
Step7: Now the workflow finished without problems; let's see the results from hello.add_1
Step8: And now we will run the example with iterables
Step9: Now we have 6 nodes; we can check the results for hello.add_1.a1
Step10: We can plot a general structure of the workflow
Step11: And a more detailed structure with all nodes
Step12: We will introduce another iterable, for the concater Node
Step13: Now we will introduce JoinNode, which allows us to merge results together
Step14: Let's check the output of the hello.join_scale_data.a0 node
Step15: Exercise 1
Step16: let's print all nodes
Step17: the final result should be 10
Step18: we can also check the results of two other nodes
Step19: Exercise 2
Step20: let's check all nodes
Step21: let's print all results of ex2.summing
Step22: Great, we just implemented a pretty good sine function! Those numbers should be approximately 0, 1, 0, -1 and 0. If they are not, try increasing $n_{max}$.
Step23: let's print all nodes
Step24: and results from merge Node
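As a side note, the crash inspected in Step6 reduces to this plain-Python error (illustrative snippet only, not part of the workflow code):
[1, 3] + 1   # TypeError: can only concatenate list (not "int") to list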
|
2,892
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pastas as ps
import matplotlib.pyplot as plt
ps.set_log_level("ERROR")
ps.show_versions(numba=True, lmfit=True)
head = pd.read_csv("data_wagna/head_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2).loc["2006":]
evap = pd.read_csv("data_wagna/evap_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2)
rain = pd.read_csv("data_wagna/rain_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2)
ax = head.plot(figsize=(10,3), marker=".", linestyle=" ", color="k")
ax1 = plt.axes([0.95,0.2,0.3,0.68])
ax1.semilogx(ps.stats.acf(head).values, color="k") # Plot on log-scale
ax.set_title("Groundwater level [MASL]")
ax1.set_title("Autocorrelation");
mls_ar = {}
dts = 11
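# dt, the time interval between GWL observations in days, is varied over 1, 3, 5, 7 and 9 below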
# Model settings
tmin = "2007-01-01"
tmax = "2016-12-31"
solver = ps.LmfitSolve
# The two models we compare here
config = {
"Linear": [ps.FourParam, ps.rch.Linear()],
"Nonlinear": [ps.Exponential, ps.rch.FlexModel()],
}
for name, [rfunc, rch] in config.items():
for dt in range(1, dts, 2):
# Create the basic Pastas model
ml_name = f"{name}_{dt}"
ml = ps.Model(head.iloc[::dt], name=ml_name)
# Add the recharge model
sm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=rfunc,
name="rch")
ml.add_stressmodel(sm)
# Change parameter settings for non-linear recharge model
if name == "Nonlinear":
ml.set_parameter("rch_srmax", vary=False)
ml.set_parameter("rch_kv", vary=True)
ml.set_parameter("constant_d", initial=262)
# Solve the model
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls_ar[ml_name] = ml
mls_arma = {}
for ml_name, ml in mls_ar.items():
ml = ml.copy(name=ml.name)
#Change the noise model
ml.del_noisemodel()
ml.add_noisemodel(ps.ArmaModel())
# Solve the model
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls_arma[ml_name] = ml
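# Compute the Durbin-Watson statistic of the noise series for every model and observation interval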
data = pd.DataFrame(index=range(1, dts, 2), columns=config.keys())
for ml in mls_ar.values():
name, i = ml.name.split("_")
n = ml.noise(tmin=tmin, tmax=tmax).asfreq(f"{i}D").fillna(0.0)
data.loc[int(i), name] = ps.stats.durbin_watson(n)[0]
data2 = pd.DataFrame(index=range(1, dts, 2), columns=config.keys())
for ml in mls_arma.values():
name, i = ml.name.split("_")
n = ml.noise(tmin=tmin, tmax=tmax).asfreq(f"{i}D").fillna(0.0)
data2.loc[int(i), name] = ps.stats.durbin_watson(n)[0]
# Plot the results
fig, [ax1, ax2] = plt.subplots(2,1, sharex=True, figsize=(5, 4), sharey=True)
# AR1 Model
data.plot(ax=ax1, marker=".", legend=False)
ax1.set_ylabel("DW [-]")
ax1.axhline(2., c="k", linestyle="--", zorder=-1)
ax1.text(1, 2.07, "Line of no autocorrelation")
ax1.grid()
ax1.set_title("AR(1) Noise model")
# ArmaModel
data2.plot(ax=ax2, marker=".", legend=False)
ax2.set_ylabel("DW [-]")
ax2.set_yticks([1, 1.5, 2.])
ax2.axhline(2., c="k", linestyle="--", zorder=-10)
ax2.set_ylim(0.5, 2.3)
ax2.grid()
ax2.legend(ncol=3, loc=4)
ax2.set_xlabel("$\Delta t$ [days]")
ax2.set_title("ARMA(1,1) Noise model")
plt.tight_layout()
mls = {}
dt = 10 # Select the time interval between GWL observations
for name, [rfunc, rch] in config.items():
for start in range(0, dt, 2):
ml_name = f"{name}_{start+1}"
ml = ps.Model(head.iloc[start::dt], name=ml_name)
# Add the recharge model
sm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=rfunc, name="rch")
ml.add_stressmodel(sm)
if name == "Nonlinear":
ml.set_parameter("rch_srmax", vary=False)
ml.set_parameter("rch_kv", vary=True)
ml.set_parameter("constant_d", initial=262)
# Solve the model
ml.add_noisemodel(ps.ArmaModel())
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls[ml_name] = ml
# Extract the optimal parameters and estimated standard errors
data = {}
for name in config.keys():
ml = mls["{}_1".format(name)]
p = ml.parameters
mi = pd.MultiIndex.from_product([p.index[p.vary == True].to_list(), ["opt", "std"]])
data[name] = pd.DataFrame(index=range(1, dt, 2), columns=mi)
for ml in mls.values():
name, i = ml.name.split("_")
df = data[name]
for par in ml.parameters.index[ml.parameters.vary == True]:
df.loc[int(i), (par, "opt")] = ml.parameters.loc[par, "optimal"]
df.loc[int(i), (par, "std")] = ml.parameters.loc[par, "stderr"] * 1.96
df = pd.concat(data, axis=1)
# Plot the results
fig, axes = plt.subplots(8,2, sharex=True, figsize=(9,7))
axes = axes.flatten()
kwargs = dict(legend=False, color="0", capsize=2, linestyle="-", marker=".")
labels = [["$A$", "$n$", "$a$", "$b$", "$f$", "$d$", "$\\alpha$", "$\\beta$"],
["$A$", "$a$", "$k_s$", "$\\gamma$", "$k_v$", "$d$", "$\\alpha$", "$\\beta$"]]
for j, rch in enumerate(["Linear", "Nonlinear"]):
axes[j].set_title(rch)
for i, par in enumerate(df[rch].columns.get_level_values(0).unique()):
df.xs((rch, par), axis=1, level=[0, 1]).plot(ax=axes[i*2+j], yerr="std", **kwargs)
axes[i*2+j].set_ylabel(labels[j][i])
for i in range(2):
axes[-i-1].set_xlabel("Calibration")
plt.tight_layout()
rch = {"Linear": pd.DataFrame(columns=range(dt, 1)),
"Nonlinear": pd.DataFrame(columns=range(dt, 1))}
for ml in mls.values():
name, i = ml.name.split("_")
rch[name].loc[:, i] = ml.get_stress("rch", tmin=tmin,
tmax="2019-12-31").resample("A").sum()
df1 = pd.concat(rch, axis=1)
df1.index = df1.index.year
fig, [ax1, ax2, ax3] = plt.subplots(3,1, figsize=(6,6))
for ml in mls.values():
if ml.name.split("_")[0] == "Linear":
ax = ax1
color = "C0"
else:
ax = ax2
color = "C1"
ml.oseries.plot(ax=ax, linestyle="-", marker=" ", c="k")
ml.simulate(tmax="2020").plot(ax=ax, alpha=0.5, c=color, x_compat=True)
ax.set_xticks([])
ax.set_ylabel("GWL [m]")
ax.set_xlim("2007", "2020")
df1.groupby(level=0, axis=1).mean().plot.bar(yerr=1.96 * df1.groupby(level=0, axis=1).std(), ax=ax3, width=0.7)
plt.legend(ncol=3, loc=2, bbox_to_anchor=(0, 3.7))
plt.ylabel("R [mm yr$^{-1}$]")
plt.xlabel("");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Read Data and plot autocorrelation
Step2: 2. Run models with AR(1) noise model
Step3: 3. Run models with ARMA(1,1) noise model
Step4: 4. Plot and compare the the results
Step5: 5. Consistency of parameter estimates
Step6: The plot above shows the estimated optimal parameters and the 95% confidence intervals of the parameters. While most of the optimal parameters are relatively stable between calibrations, some parameters show larger variations. For the linear model these are, for example, $a$ and $n$, while for the non-linear model these are $k_s$ and $\gamma$. The values of these parameters seem correlated, and it might thus be difficult to estimate the individual parameter values.
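A minimal way to check the suspected correlation directly (an added sketch; it assumes the `df` of optimal parameter values constructed in the cells above is available):
# Correlation of the optimal parameter values across the calibrations
for rch_name in ["Linear", "Nonlinear"]:
    opt = df[rch_name].xs("opt", axis=1, level=1).astype(float)
    print(rch_name)
    print(opt.corr().round(2))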
|
2,893
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from shapely.geometry import Point
from geopandas import datasets, GeoDataFrame, read_file
# NYC Boros
zippath = datasets.get_path('nybb')
polydf = read_file(zippath)
# Generate some points
b = [int(x) for x in polydf.total_bounds]
N = 8
pointdf = GeoDataFrame([
{'geometry': Point(x, y), 'value1': x + y, 'value2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2] - b[0]) / N)),
range(b[1], b[3], int((b[3] - b[1]) / N)))])
# Make sure they're using the same projection reference
pointdf.crs = polydf.crs
pointdf
polydf
pointdf.plot()
polydf.plot()
join_left_df = pointdf.sjoin(polydf, how="left")
join_left_df
# Note the NaNs where the point did not intersect a boro
join_right_df = pointdf.sjoin(polydf, how="right")
join_right_df
# Note Staten Island is repeated
join_inner_df = pointdf.sjoin(polydf, how="inner")
join_inner_df
# Note the lack of NaNs; dropped anything that didn't intersect
pointdf.sjoin(polydf, how="left", op="within")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Joins
Step2: We're not limited to using the intersection binary predicate. Any of the Shapely geometry methods that return a Boolean can be used by specifying the op kwarg.
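For example (a hedged sketch; it assumes the geopandas version in use still accepts the op keyword, as in the cells above):
# Polygons that contain at least one point (predicate evaluated from the left frame)
polydf.sjoin(pointdf, how="inner", op="contains")
# Equivalent to the default predicate
pointdf.sjoin(polydf, how="inner", op="intersects")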
|
2,894
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
import os
# TODO 1
PROJECT_ID = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT_ID
REGION = 'us-central1'
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["BUCKET"] = BUCKET
!mkdir train
!touch train/__init__.py
%%writefile train/model_definition.py
import tensorflow as tf
import numpy as np
# Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten(input_shape=x_train.shape[1:]))
model.add(tf.keras.layers.Dense(1028))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(256))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation('softmax'))
return model
import os
import time
import tensorflow as tf
import numpy as np
from train import model_definition
#Get data
# TODO 2
# TODO -- Your code here.
print("Training time without GPUs locally: {}".format(time.time() - start))
%%writefile train/train_mult_worker_mirrored.py
import os
import time
import tensorflow as tf
import numpy as np
from . import model_definition
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
#Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_dataset(X, Y, epochs, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
return dataset
ds_train = create_dataset(x_train, y_train, 20, 5000)
ds_test = create_dataset(x_test, y_test, 1, 1000)
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_definition.create_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(
ds_train,
validation_data=ds_test,
verbose=2
)
print("Training time with multiple GPUs: {}".format(time.time() - start))
%%writefile config.yaml
# TODO 3a
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="cpu_only_fashion_minst_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
%%writefile config.yaml
# TODO 3b
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_2gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
%%writefile config.yaml
# TODO 3c
# TODO -- Your code here.
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_4gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we will configure our environment. Be sure to change the PROJECT_ID variable in the below cell to your Project ID. This will be the project to which the Cloud AI Platform resources will be billed. We will also create a bucket for our training artifacts (if it does not already exist).
Step2: Since we are going to submit our training job to Cloud AI Platform, we need to create our trainer package. We will create the train directory for our package and create a blank __init__.py file so Python knows that this folder contains a package.
Step3: Next we will create a module containing a function which will create our model. Note that we will be using the Fashion MNIST dataset. Since it's a small dataset, we will simply load it into memory for getting the parameters for our model.
Step4: Before we submit our training jobs to Cloud AI Platform, let's be sure our model runs locally. We will call the model_definition function to create our model and use tf.keras.datasets.fashion_mnist.load_data() to import the Fashion MNIST dataset.
Step5: Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy
Step6: Lab Task #3
Step7: If we go through the logs, we see that the training job will take around 5-7 minutes to complete. Let's now attach two Nvidia Tesla K80 GPUs and rerun the training job.
Step8: That was a lot faster! The training job will take up to 5-10 minutes to complete. Let's keep going and add more GPUs!
|
2,895
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import numpy as np
def test_case():
sim = rebound.Simulation()
sim.integrator = 'whfast'
sim.add(m=1.) # add the Sun
sim.add(m=3.e-6, a=1.) # add Earth
sim.move_to_com()
sim.dt = 0.2
return sim
sim = test_case()
sim.integrator_whfast_safe_mode = 0
import time
Porb = 2*np.pi # orbital period for Earth, using units of G = 1, solar masses, AU and yr/2pi
sim = test_case()
print("safe_mode = {0}".format(sim.integrator_whfast_safe_mode))
start_time = time.time()
sim.integrate(1.e5*Porb)
sim.status()
print("Safe integration took {0} seconds".format(time.time() - start_time))
sim = test_case()
sim.integrator_whfast_safe_mode = 0
start_time = time.time()
sim.integrate(1.e5*Porb)
sim.status()
print("Manual integration took {0} seconds".format(time.time() - start_time))
sim.init_megno(1e-16)
sim = test_case()
sim.integrator_whfast_safe_mode = 0
def integrate_mod(sim, t_final):
while sim.t < t_final:
sim.step()
sim.particles[1].m += 1.e-10
sim.integrator_whfast_recalculate_jacobi_this_timestep = 1
sim.integrator_synchronize()
sim = test_case()
sim.integrator_whfast_safe_mode = 1
def integrate_mod(sim, t_final):
while sim.t < t_final:
sim.step()
sim.particles[1].vx += 1.e-10*sim.dt
sim.integrator_whfast_recalculate_jacobi_this_timestep = 1
sim.integrator_synchronize()
sim.integrator_whfast_corrector = 11
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: By default WHFast synchronizes and recalculates the Jacobi coordinates from the inertial ones every timestep. This guarantees that the user always gets physical particle states for output, and ensures reliable output if the user decides to, e.g., grow the particles' masses between timesteps.
Step2: Now it becomes the user's responsibility to appropriately synchronize and recalculate Jacobi coordinates when needed. You can tell WHFast to recalculate Jacobi coordinates for a given timestep (say after you change a particle's mass) with the sim.integrator_whfast_recalculate_jacobi_this_timestep flag. After it recalculates Jacobi coordinates, WHFast will reset this flag to zero, so you just set it each time you mess with the particles.
Step3: In our test case with a single planet, there is effectively no interaction step, and by combining Kepler steps we get almost the full factor of 2 speedup we expect. Because Kepler steps are expensive (by virtue of having to solve the transcendental Kepler equation), this will always be an important performance boost for few-planet cases.
Step4: REBOUND will synchronize every timestep even if you set sim.integrator_whfast_safe_mode = 0 and never explicitly call sim.integrator_synchronize().
Step5: Here, because we grow the mass of the planet every timestep, we have to recalculate Jacobi coordinates every timestep (since they depend on the masses of the particles). We therefore manually set the flag to recalculate them the next timestep every time we make a change. Here we would actually get the same result if we just left sim.integrator_whfast_safe_mode = 1, since when recalculating Jacobi coordinates, WHFast automatically has to synchronize in order to get real positions and velocities for the planets. In this case WHFast is therefore synchronizing and recalculating Jacobi coordinates every timestep.
Step6: This would not give accurate results, because the sim.particles[1].vx we access after sim.step() isn't a physical velocity (it's missing a half-Kepler step). It's basically at an intermediate point in the calculation. In order to make this work, one would call sim.integrator_synchronize() between sim.step() and accessing sim.particles[1].vx, to ensure the velocity is physical.
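A sketch of the corrected loop described above (an added illustration; it reuses only the calls already shown in this notebook, and the function name is our own):
def integrate_mod_fixed(sim, t_final):
    while sim.t < t_final:
        sim.step()
        sim.integrator_synchronize()            # make positions/velocities physical before reading them
        sim.particles[1].vx += 1.e-10*sim.dt    # now safe to modify the velocity
        sim.integrator_whfast_recalculate_jacobi_this_timestep = 1
    sim.integrator_synchronize()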
|
2,896
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
2,897
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from IPython.display import HTML
from ipywidgets import interact
HTML('../style/code_toggle.html')
def loop_DFT(x):
"""Implementing the DFT in a double loop
Input: x = the vector we want to find the DFT of"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
X = np.zeros(N,dtype=complex)
for k in range(N):
for n in range(N):
X[k] += np.exp(-1j*2.0*np.pi*k*n/N)*x[n]
return X
def matrix_DFT(x):
"""Implementing the DFT in vectorised form
Input: x = the vector we want to find the DFT of"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
n = np.arange(N)
k = n.reshape((N,1))
K = np.exp(-1j*2.0*np.pi*k*n/N)
return K.dot(x)
x = np.random.random(256) #create random vector to take the DFT of
np.allclose(loop_DFT(x),matrix_DFT(x)) #compare the result using numpy's built in function
x = np.random.random(256) #create random vector to take the DFT of
np.allclose(np.fft.fft(x),matrix_DFT(x)) #compare the result using numpy's built in function
#First we simulate a time series as the sum of a number of sinusoids each with a different frequency
N = 512 #The number of samples of the time series
tmin = -10 #The minimum value of the time coordinate
tmax = 10 #The maximum value of the time coordinate
t = np.linspace(tmin,tmax,N) #The time coordinate
f1 = 1.0 #The frequency of the first sinusoid
f2 = 2.0 #The frequency of the second sinusoid
f3 = 3.0 #The frequency of the third sinusoid
#Generate the signal
y = np.sin(2.0*np.pi*f1*t) + np.sin(2.0*np.pi*f2*t) + np.sin(2.0*np.pi*f3*t)
#Take the DFT
Y = matrix_DFT(y)
#Plot the absolute value and the phase
plt.figure(figsize=(15, 6))
plt.subplot(121)
plt.stem(abs(Y))
plt.xlabel('$k$',fontsize=18)
plt.ylabel(r'$|Y_k|$',fontsize=18)
plt.subplot(122)
plt.stem(np.angle(Y))
plt.xlabel('$k$',fontsize=18)
plt.ylabel(r'phase$(Y_k)$',fontsize=18)
#Get the sampling frequency
delt = t[1] - t[0]
fs = 1.0/delt
k = np.arange(N)
fk = k*fs/N
plt.figure(figsize=(15, 6))
plt.subplot(121)
plt.stem(fk,abs(Y))
plt.xlabel('$f_k$',fontsize=18)
plt.ylabel(r'$|Y_k|$',fontsize=18)
plt.subplot(122)
plt.stem(fk,np.angle(Y))
plt.xlabel('$f_k$',fontsize=18)
plt.ylabel(r'phase$(Y_k)$',fontsize=18)
%timeit loop_DFT(x)
%timeit matrix_DFT(x)
%timeit np.fft.fft(x)
def one_layer_FFT(x):
"""An implementation of the 1D Cooley-Tukey FFT using one layer"""
N = x.size
if N%2>0:
print "Warning: length of x in not a power of two, returning DFT"
return matrix_DFT(x)
else:
X_even = matrix_DFT(x[::2])
X_odd = matrix_DFT(x[1::2])
factor = np.exp(-2j * np.pi * np.arange(N) / N)
return np.concatenate([X_even + factor[:N // 2] * X_odd, X_even + factor[N // 2:] * X_odd])
np.allclose(np.fft.fft(x),one_layer_FFT(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step3: 2.8. The Discrete Fourier Transform (DFT) and the Fast Fourier Transform (FFT)
Step5: Although this would produce the correct result, this way of implementing the DFT is going to be incredibly slow. The DFT can be implemented in matrix form. Convince yourself that a vectorised implementation of this operation can be achieved with the matrix product $X = K x$, where $K_{kn} = e^{-2\pi\imath k n/N}$ (this is exactly the kernel matrix constructed in matrix_DFT).
Step6: This function will be much faster than the previous implementation. We should check that they both return the same result
Step7: Just to be sure our DFT really works, let's also compare the output of our function to numpy's built in DFT function (note numpy automatically implements a faster version of the DFT called the FFT, see the discussion below)
Step8: Great! Our function is returning the correct result. Next we do an example to demonstrate the duality between the spectral (frequency domain) and temporal (time domain) representations of a function. As the following example shows, the Fourier transform of a time series returns the frequencies contained in the signal.
Step9: It is not immediately obvious that these are the frequencies contained in the signal. However, recall, from the definition given at the outset, that the frequencies are related to the index $k$ via $f_k = \frac{k f_s}{N}$, where $f_s = 1/\Delta t$ is the sampling frequency.
Step10: Here we see that the three main peaks correspond to the frequencies contained in the input signal viz. $f_1 = 1$Hz, $f_2 = 2$Hz and $f_3 = 3$Hz. But what do the other peaks mean? The additional frequency peaks arise because the input signal is real-valued, so its DFT is conjugate-symmetric: every peak at $f_k$ is mirrored at $f_s - f_k$ (i.e. at index $N-k$), which accounts for the second set of peaks.
Step11: That is almost a factor of ten difference. Let's compare this to numpy's built-in FFT
Step13: That seems amazing! The numpy FFT is about 1000 times faster than our vectorised implementation. But how does numpy achieve this speed up? Well, by using the fast Fourier transform of course.
Step14: Let's confirm that this function returns the correct result by comparing with numpy's FFT.
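For completeness, a fully recursive radix-2 Cooley-Tukey FFT can be sketched as follows (an added example; it assumes len(x) is a power of two and reuses matrix_DFT from above as the base case):
def recursive_FFT(x):
    N = x.size
    if N <= 2:
        return matrix_DFT(x)           # direct DFT for tiny inputs
    X_even = recursive_FFT(x[::2])     # DFT of even-indexed samples
    X_odd = recursive_FFT(x[1::2])     # DFT of odd-indexed samples
    factor = np.exp(-2j * np.pi * np.arange(N) / N)
    return np.concatenate([X_even + factor[:N // 2] * X_odd,
                           X_even + factor[N // 2:] * X_odd])
# np.allclose(np.fft.fft(x), recursive_FFT(x)) should return True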
|
2,898
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
from sympy import *
init_printing()
def linear_rgb(x):
return ((x+0.055)/1.055)**2.4
x = Symbol('x', real=True)
series(linear_rgb(x), x, x0=0.5, n=4)
fast_linear_rgb = lambdify([x], series(linear_rgb(x), x, x0=0.5, n=4).removeO())
X = np.linspace(0,1,1001)
ref = linear_rgb(X) # The (almost) correct implementation.
fast = fast_linear_rgb(X) # The Taylor approximation
square = X*X # The approximation by squaring.
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,4))
ax1.plot(X, ref, label='linear')
ax1.plot(X, fast, label='fast (max/avg err: {:.4f} / {:.4f})'.format(np.max(np.abs(ref - fast)),
np.mean(np.abs(ref - fast))))
ax1.plot(X, square, label='square (max/avg err: {:.4f} / {:.4f})'.format(np.max(np.abs(ref - square)),
np.mean(np.abs(ref - square))))
ax2.plot(X, ref)
ax2.plot(X, fast)
ax2.plot(X, square)
ax2.set_xlim(0, 0.1)
ax2.set_ylim(0, 0.1)
ax2.set_title("Left end")
ax3.plot(X, ref)
ax3.plot(X, fast, ls=':')
ax3.plot(X, square)
ax3.set_xlim(0.45, 0.55)
ax3.set_ylim(0.15, 0.25)
ax3.set_title("Middle")
ax4.plot(X, ref)
ax4.plot(X, fast)
ax4.plot(X, square)
ax4.set_xlim(0.9, 1)
ax4.set_ylim(0.9, 1)
ax4.set_title("Right end")
ax1.legend();
def delinear_rgb(x):
return 1.055*(x**(1.0/2.4)) - 0.055
fast_delinear_rgb_part1 = lambdify([x], series(delinear_rgb(x), x, x0=0.015, n=6).removeO())
fast_delinear_rgb_part2 = lambdify([x], series(delinear_rgb(x), x, x0=0.03, n=6).removeO())
fast_delinear_rgb_part3 = lambdify([x], series(delinear_rgb(x), x, x0=0.6, n=6).removeO())
ref = delinear_rgb(X)
fast1 = fast_delinear_rgb_part1(X)
fast2 = fast_delinear_rgb_part2(X)
fast3 = fast_delinear_rgb_part3(X)
sqrt = np.sqrt(X)
def plot(ax):
ax.plot(X, ref, label='linear')
l, = ax.plot(X, fast1, label='fast, part1', ls=':')
ax.plot(X, fast2, label='fast, part2', c=l.get_color(), ls='--')
ax.plot(X, fast3, label='fast, part3', c=l.get_color(), ls='-')
ax.plot(X, sqrt, label='sqrt')
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(20,4))
plot(ax1)
ax1.set_ylim(0, 1)
plot(ax2)
ax2.set_xlim(0, 0.05)
ax2.set_ylim(0, 0.25)
ax2.set_title("Left end")
plot(ax3)
ax3.set_xlim(0.45, 0.55)
ax3.set_ylim(0.65, 0.75)
ax3.set_title("Middle")
plot(ax4)
ax4.set_xlim(0.95, 1)
ax4.set_ylim(0.95, 1)
ax4.set_title("Right end")
ax1.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following is the conversion from RGB to linear RGB (a.k.a. gamma correction), where I'm dropping the conditional part for very small values of x as a first approximation (the full piecewise form is quoted after these steps).
Step2: Now, we can use SymPy to create a symbolic version of that equation, and compute a symbolic Taylor expansion around $0.5$ (the middle of our target range) up to the fourth degree
Step3: In order to use it numerically, we will "drop the O", which means do the actual approximation, and "lambdify" the function, which turns a symbolic function into a NumPy function
Step4: As additional heuristic approximations, we'll include simply squaring the values, which should also be very fast, but quite wrong. Then, plot all these functions in order to see their behaviour, and compute errors
Step5: The inverse function (for Lab->RGB)
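For reference, the full piecewise sRGB-to-linear conversion mentioned in the first step (quoted here from the standard sRGB definition, not from this notebook) is
$$C_\mathrm{linear} = \begin{cases} C/12.92, & C \le 0.04045,\\ \left(\dfrac{C + 0.055}{1.055}\right)^{2.4}, & C > 0.04045. \end{cases}$$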
|
2,899
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pycomlink as pycml
import xarray as xr
from tqdm import tqdm
import urllib.request
import io
import pycomlink.processing.blackout_gap_detection as blackout_detection
# Do show xarray.Dataset representation as text because gitlab/github
# do not (yet) render the html output correctly (or sometimes not at all...)
xr.set_options(display_style="text");
# get data from 500 CMLs and eleven days
data_path = pycml.io.examples.get_example_data_path()
cmls = xr.open_dataset(data_path + "/example_cml_data.nc")
# get recieved signal levels values without default fill values (-99 and -99.9)
rsl = cmls.rsl.where(cmls.rsl > -99).isel(channel_id=0)
gap_start_list = []
gap_end_list = []
gap_mask_list = []
for cml_id in tqdm(rsl.cml_id):
# select individual RSL time series
rsl_i = rsl.sel(cml_id=cml_id)
# check whether RSL before and after gaps are below rsl_threshold
# here we take -65 dBm as threshold as all CMLs have a median RSL > -60 dBm
gap_start, gap_end = blackout_detection.get_blackout_start_and_end(
rsl=rsl_i.values, rsl_threshold=-65
)
# create a mask for all gaps fullflilling the criteria above
mask = blackout_detection.created_blackout_gap_mask_from_start_end_markers(
rsl_i.values, gap_start, gap_end
)
mask_reverse = blackout_detection.created_blackout_gap_mask_from_start_end_markers(
rsl_i.values[::-1], gap_end[::-1], gap_start[::-1]
)
mask = mask | mask_reverse[::-1]
gap_start_list.append(gap_start)
gap_end_list.append(gap_end)
gap_mask_list.append(mask)
# parse gap starts, ends and the mask to one xarray dataset with the CML data
rsl = rsl.to_dataset()
rsl["gap_start"] = (["cml_id", "time"], gap_start_list)
rsl["gap_end_list"] = (["cml_id", "time"], gap_end_list)
rsl["mask"] = (["cml_id", "time"], gap_mask_list)
# get the CMLs with the many blackout minutes (here more than 35)
rsl_blackouts = rsl.isel(cml_id=(np.array(gap_mask_list).sum(axis=1) > 35))
rsl_blackouts
# plot the three CMLs with most blackouts minutes (more than 35 minutes)
for cml_id in rsl_blackouts.cml_id:
rsl_blackouts.rsl.sel(cml_id=cml_id, time="2018-05-13").plot(
figsize=(10, 4), label="rsl"
)
(rsl_blackouts.mask.sel(cml_id=cml_id, time="2018-05-13") * -20).plot(
label="detected blackout"
)
plt.legend()
# load data, e.g., with `curl -O` or `wget`
!curl -O https://zenodo.org/record/6337557/files/blackout_example_cmls.nc
!curl -O https://zenodo.org/record/6337557/files/blackout_example_radar_reference.nc
# open dataset with xarray
cmls = xr.open_dataset('blackout_example_cmls.nc')
reference = xr.open_dataset('blackout_example_radar_reference.nc')
cmls
reference
# remove default rsl values
cmls["rsl"] = cmls.rsl.where(cmls.rsl > -99).isel(channel_id=0)
cmls["tsl"] = cmls.tsl.where(cmls.rsl > -99).isel(channel_id=0)
# define a plotting function
def plt_ts(rsl, tsl, ref, mask, start, ax=None):
if ax is None:
fig, ax = plt.subplots()
ay = ax.twinx()
ay.bar(np.arange(len(ref)), ref * 60, color="#045a8d", alpha=0.6)
ay.set_ylim(-10, 180)
ay.set_yticks([0, 50, 100, 150])
ay.set_ylabel("rainfall intensity \n[mm/h]", color="#045a8d")
ax.plot(tsl, color="#238b45", label="transmitted \nsignal level", lw=2)
ax.plot(rsl, color="#cc4c02", label="recieved \nsignal level", lw=2)
ax.plot((mask * -5) + 33.5, lw=4, color="black")
ax.plot((mask + 100), color="black", lw=3, label="detected \nblackout gap")
ax.set_ylim(-92, 30)
ax.set_title("")
ax.set_xticks([0, 30, 60, 90, 120, 150, 180, 210, 240])
ax.set_xlabel("time [minutes]")
ax.legend(
loc="center left", ncol=3,
)
ax.set_title(str(start.values)[0:10] + " " + str(start.values)[11:16])
ax.set_yticks([20, 0, -20, -40, -60, -80])
ax.set_ylabel("signal level\n[dBm]")
plt.show()
for cml_id in cmls.cml_id:
rsl = cmls.sel(cml_id=cml_id).rsl
tsl = cmls.sel(cml_id=cml_id).tsl
ref = reference.sel(cml_id=cml_id)
# using the blackout gap detection as in the example above
gap_start, gap_end = blackout_detection.get_blackout_start_and_end(
rsl=rsl.values, rsl_threshold=-65
)
mask = blackout_detection.created_blackout_gap_mask_from_start_end_markers(
rsl.values, gap_start, gap_end
)
mask_reverse = blackout_detection.created_blackout_gap_mask_from_start_end_markers(
rsl.values[::-1], gap_end[::-1], gap_start[::-1]
)
mask = mask | mask_reverse[::-1]
rsl = rsl.to_dataset()
rsl["mask"] = ("time", mask)
print(
"For CML with the id "
+ str(cml_id.values)
+ " there are "
+ str(mask.sum())
+ " detected blackout minutes."
)
print("Plotting all detected gaps:")
# remove gap_start times which are less than 60 minutes after another gap_start
very_close_gaps = (
np.diff(rsl.sel(time=gap_start).time.values) / 60000000000
).astype(int) < 60
gap_time = rsl.sel(time=gap_start).time[~np.append(very_close_gaps, np.array(True))]
# plot each gap
for gap_start_time in gap_time:
start = gap_start_time - np.timedelta64(2, "h")
end = gap_start_time + np.timedelta64(2, "h")
plt_ts(
rsl=rsl.rsl.sel(time=slice(start, end)),
tsl=tsl.sel(time=slice(start, end)),
ref=ref.rainfall_amount.sel(time=slice(start, end)),
mask=rsl.mask.sel(time=slice(start, end)),
start=start,
)
print("#################################################\n")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CMLs with more than 35 minutes of blackout within the 10-day example data (a short per-CML summary sketch is added after these steps)
Step2: Investigate blackouts for two CMLs over three years
Step3: The data consists of RSL and TSL from two CMLs with one-minute resolution over three years and respective path-averaged reference data from RADKLIM-YW, a gauge-adjusted, climatologically correct weather radar product from the German Weather Service.
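A tiny summary sketch related to the first step (added here; it assumes the dataset-level rsl with the boolean "mask" variable built in the 10-day example, before the per-CML loop):
# Blackout minutes per CML, computed with xarray instead of the numpy sum above
blackout_minutes = rsl["mask"].sum(dim="time")
print(blackout_minutes.to_series().sort_values(ascending=False).head())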
|