| text_prompt (string, 168 – 30.3k chars) | code_prompt (string, 67 – 124k chars) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading My Saved CNN
Step2: Import Function To View Convolutional Filters
Step3: First Convolutional Layer Filters
Step4: Alrighty then... this looks like absolute garbage to me, i.e., to the untrained eye. None of it looks like much of anything in particular. I'm trying to look for edges and whatnot, and I can make some out if I squint, but honestly it all just looks like static... Will the second-layer filters shed some more light on the meaning of life?
|
<ASSISTANT_TASK:>
Python Code:
import cv2
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
# TFlearn libraries
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
# sentdex's code to build the neural net using tflearn
# Input layer --> conv layer w/ max pooling --> conv layer w/ max pooling --> fully connected layer --> output layer
convnet = input_data(shape = [None, 91, 91, 1], name = 'input')
convnet = conv_2d(convnet, 32, 10, activation = 'relu', name = 'conv_1')
convnet = max_pool_2d(convnet, 2, name = 'max_pool_1')
convnet = conv_2d(convnet, 64, 10, activation = 'relu', name = 'conv_2')
convnet = max_pool_2d(convnet, 2, name = 'max_pool_2')
convnet = fully_connected(convnet, 1024, activation = 'relu', name = 'fully_connected_1')
convnet = dropout(convnet, 0.8, name = 'dropout_1')
convnet = fully_connected(convnet, 2, activation = 'softmax', name = 'fully_connected_2')
convnet = regression(convnet, optimizer = 'sgd', learning_rate = 0.01, loss = 'categorical_crossentropy', name = 'targets')
# Define and load CNN
model = tflearn.DNN(convnet)
model.load('model_4_epochs_0.03_compression_99.6_named.tflearn')
import six
def display_convolutions(model, layer, padding=4, filename=''):
if isinstance(layer, six.string_types):
vars = tflearn.get_layer_variables_by_name(layer)
variable = vars[0]
else:
variable = layer.W
data = model.get_weights(variable)
# N is the total number of convolutions
N = data.shape[2] * data.shape[3]
print('There are {} filters'.format(N))
# Ensure the resulting image is square
filters_per_row = int(np.ceil(np.sqrt(N)))
# Assume the filters are square
filter_size = data.shape[0]
# Size of the result image including padding
result_size = filters_per_row * (filter_size + padding) - padding
# Initialize result image to all zeros
result = np.zeros((result_size, result_size))
# Tile the filters into the result image
filter_x = 0
filter_y = 0
for n in range(data.shape[3]):
for c in range(data.shape[2]):
if filter_x == filters_per_row:
filter_y += 1
filter_x = 0
for i in range(filter_size):
for j in range(filter_size):
result[filter_y * (filter_size + padding) + i, filter_x * (filter_size + padding) + j] = \
data[i, j, c, n]
filter_x += 1
# Normalize image to 0-1
min = result.min()
max = result.max()
result = (result - min) / (max - min)
# Plot figure
plt.figure(figsize=(10, 10))
plt.axis('off')
plt.imshow(result, cmap='gray', interpolation='nearest')
# Save plot if filename is set
if filename != '':
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
plt.show()
# Display first convolutional layer filters (32 filters)
display_convolutions(model, 'conv_1')
# Display second convolutional layer filters (64 filters)
display_convolutions(model, 'conv_2', filename = 'hello')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And what do they look like?
Step2: Things you'll note
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
sorted(plt.style.available)
def make_title(s):
return s.replace('-', ' ').replace('_', ' ').title()
def make_plots(style_str=None):
fig, axes_array = plt.subplots(2, 2)
((ax1, ax2), (ax3, ax4)) = axes_array
# sine and cosine line plots
x = np.arange(10)
y_sin = np.sin(x)
y_cos = np.cos(x)
ax1.plot(x, y_sin)
ax1.plot(x, y_cos)
ax1.set_title('sine and cosine line plots')
# a random scatter plot
points1 = np.random.rand(20,2)
points2 = np.random.rand(20,2)
ax2.scatter(points1[:, 0], points1[:, 1])
ax2.scatter(points2[:, 0], points2[:, 1])
ax2.set_title('a random scatter plot')
# a random bar graph
width = 0.4
middles = np.arange(1, 6)
heights1 = np.random.rand(5)
heights2 = np.random.rand(5)
ax3.bar(middles - width, heights1, width)
ax3.bar(middles, heights2, width)
ax3.set_title('a random bar graph')
# a random pie chart
values = np.random.rand(5)
frac_values = values/values.sum()
labels = ['A', 'B', 'C', 'D', 'E']
ax4.pie(frac_values, labels=labels)
ax4.set_title('a random pie chart')
# draw them!
fig.suptitle("{} (\'{}\')".format(make_title(style_str), style_str) if style_str else 'Default Settings', fontsize=20)
fig.subplots_adjust(hspace=0.3)
fig.set_size_inches(10, 10)
plt.show()
make_plots()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 21. Extract lines that contain category names
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import json
def get_article(title):
    for line in open('jawiki-country.json', 'r'):
        data = json.loads(line)
        if data['title'] == title:
            return data['text'].split('\n')
England = get_article('イギリス')
print(type(England), England)
# One straightforward way to pull out the lines that contain a category marker
categorys = [line for line in England if 'Category' in line]
categorys
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define AOI
Step2: Build Request
Step3: Search Planet API
Step4: In processing the items to scenes, we are only using a small subset of the product metadata.
Step5: Investigate Landsat Scenes
Step6: Show Landsat 8 Footprints on Map
Step7: This AOI is located in a region covered by 3 different path/row tiles. This means there is 3x the coverage compared to regions covered by only one path/row tile. This is particularly lucky!
Step8: It looks like the collection period is 16 days, which lines up with the Landsat 8 mission description.
Step9: So the tiles that are in the same path are collected very close together in time (24 sec apart) on the same day. Therefore, we would want to use only one tile and pick the best image.
Step10: What about overlap? We really only want images that overlap over 20% of the AOI.
Step11: Ideally, PS scenes have daily coverage over all regions. How many days have PS coverage and how many PS scenes were taken on the same day?
Step12: Looks like the multiple collects on the same day are just a few minutes apart. They are likely crossovers between different PS satellites. Cool! Since we only want to use one PS image for a crossover, we will choose the best collect for days with multiple collects.
Step13: Now that we have the crossovers, what we are really interested in is the IDs of the landsat and PS scenes, as well as how much they overlap the AOI.
Step14: Next, we filter to overlaps that cover a significant portion of the AOI.
Step15: Browsing through the crossovers, we see that in some instances, multiple crossovers take place on the same day. Really, we are interested in 'unique crossovers', that is, crossovers that take place on unique days. Therefore, we will look at the concurrent crossovers by day.
Step16: There are 6 unique crossovers between Landsat 8 and PS that cover over 90% of our AOI between January and August 2017. Not bad! That is definitely enough to perform a comparison.
|
<ASSISTANT_TASK:>
Python Code:
# Notebook dependencies
from __future__ import print_function
import datetime
import json
import os
import ipyleaflet as ipyl
import ipywidgets as ipyw
from IPython.core.display import HTML
from IPython.display import display
import pandas as pd
from planet import api
from planet.api import filters
from shapely import geometry as sgeom
aoi = {u'geometry': {u'type': u'Polygon', u'coordinates': [[[-121.3113248348236, 38.28911976564886], [-121.3113248348236, 38.34622533958], [-121.2344205379486, 38.34622533958], [-121.2344205379486, 38.28911976564886], [-121.3113248348236, 38.28911976564886]]]}, u'type': u'Feature', u'properties': {u'style': {u'opacity': 0.5, u'fillOpacity': 0.2, u'noClip': False, u'weight': 4, u'color': u'blue', u'lineCap': None, u'dashArray': None, u'smoothFactor': 1, u'stroke': True, u'fillColor': None, u'clickable': True, u'lineJoin': None, u'fill': True}}}
json.dumps(aoi)
# define the date range for imagery
start_date = datetime.datetime(year=2017,month=1,day=1)
stop_date = datetime.datetime(year=2017,month=8,day=23)
# filters.build_search_request() item types:
# Landsat 8 - 'Landsat8L1G'
# Sentinel - 'Sentinel2L1C'
# PS Orthotile = 'PSOrthoTile'
def build_landsat_request(aoi_geom, start_date, stop_date):
query = filters.and_filter(
filters.geom_filter(aoi_geom),
filters.range_filter('cloud_cover', lt=5),
# ensure has all assets, unfortunately also filters 'L1TP'
# filters.string_filter('quality_category', 'standard'),
filters.range_filter('sun_elevation', gt=0), # filter out Landsat night scenes
filters.date_range('acquired', gt=start_date),
filters.date_range('acquired', lt=stop_date)
)
return filters.build_search_request(query, ['Landsat8L1G'])
def build_ps_request(aoi_geom, start_date, stop_date):
query = filters.and_filter(
filters.geom_filter(aoi_geom),
filters.range_filter('cloud_cover', lt=0.05),
filters.date_range('acquired', gt=start_date),
filters.date_range('acquired', lt=stop_date)
)
return filters.build_search_request(query, ['PSOrthoTile'])
print(build_landsat_request(aoi['geometry'], start_date, stop_date))
print(build_ps_request(aoi['geometry'], start_date, stop_date))
def get_api_key():
return os.environ['PL_API_KEY']
# quick check that key is defined
assert get_api_key(), "PL_API_KEY not defined."
def create_client():
return api.ClientV1(api_key=get_api_key())
def search_pl_api(request, limit=500):
client = create_client()
result = client.quick_search(request)
# note that this returns a generator
return result.items_iter(limit=limit)
items = list(search_pl_api(build_ps_request(aoi['geometry'], start_date, stop_date)))
print(len(items))
# uncomment below to see entire metadata for a PS orthotile
# print(json.dumps(items[0], indent=4))
del items
items = list(search_pl_api(build_landsat_request(aoi['geometry'], start_date, stop_date)))
print(len(items))
# uncomment below to see entire metadata for a landsat scene
# print(json.dumps(items[0], indent=4))
del items
def items_to_scenes(items):
item_types = []
def _get_props(item):
props = item['properties']
props.update({
'thumbnail': item['_links']['thumbnail'],
'item_type': item['properties']['item_type'],
'id': item['id'],
'acquired': item['properties']['acquired'],
'footprint': item['geometry']
})
return props
scenes = pd.DataFrame(data=[_get_props(i) for i in items])
# acquired column to index, it is unique and will be used a lot for processing
scenes.index = pd.to_datetime(scenes['acquired'])
del scenes['acquired']
scenes.sort_index(inplace=True)
return scenes
scenes = items_to_scenes(search_pl_api(build_landsat_request(aoi['geometry'],
start_date, stop_date)))
# display(scenes[:1])
print(scenes.thumbnail.tolist()[0])
del scenes
landsat_scenes = items_to_scenes(search_pl_api(build_landsat_request(aoi['geometry'],
start_date, stop_date)))
# How many Landsat 8 scenes match the query?
print(len(landsat_scenes))
def landsat_scenes_to_features_layer(scenes):
features_style = {
'color': 'grey',
'weight': 1,
'fillColor': 'grey',
'fillOpacity': 0.15}
features = [{"geometry": r.footprint,
"type": "Feature",
"properties": {"style": features_style,
"wrs_path": r.wrs_path,
"wrs_row": r.wrs_row}}
for r in scenes.itertuples()]
return features
def create_landsat_hover_handler(scenes, label):
def hover_handler(event=None, id=None, properties=None):
wrs_path = properties['wrs_path']
wrs_row = properties['wrs_row']
path_row_query = 'wrs_path=={} and wrs_row=={}'.format(wrs_path, wrs_row)
count = len(scenes.query(path_row_query))
label.value = 'path: {}, row: {}, count: {}'.format(wrs_path, wrs_row, count)
return hover_handler
def create_landsat_feature_layer(scenes, label):
features = landsat_scenes_to_features_layer(scenes)
# Footprint feature layer
feature_collection = {
"type": "FeatureCollection",
"features": features
}
feature_layer = ipyl.GeoJSON(data=feature_collection)
feature_layer.on_hover(create_landsat_hover_handler(scenes, label))
return feature_layer
# Initialize map using parameters from above map
# and deleting map instance if it exists
try:
del fp_map
except NameError:
pass
zoom = 6
center = [38.28993659801203, -120.14648437499999] # lat/lon
# Create map, adding box drawing controls
# Reuse parameters if map already exists
try:
center = fp_map.center
zoom = fp_map.zoom
print(zoom)
print(center)
except NameError:
pass
# Change tile layer to one that makes it easier to see crop features
# Layer selected using https://leaflet-extras.github.io/leaflet-providers/preview/
map_tiles = ipyl.TileLayer(url='http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png')
fp_map = ipyl.Map(
center=center,
zoom=zoom,
default_tiles = map_tiles
)
label = ipyw.Label(layout=ipyw.Layout(width='100%'))
fp_map.add_layer(create_landsat_feature_layer(landsat_scenes, label)) # landsat layer
fp_map.add_layer(ipyl.GeoJSON(data=aoi)) # aoi layer
# Display map and label
ipyw.VBox([fp_map, label])
def time_diff_stats(group):
time_diff = group.index.to_series().diff() # time difference between rows in group
stats = {'median': time_diff.median(),
'mean': time_diff.mean(),
'std': time_diff.std(),
'count': time_diff.count(),
'min': time_diff.min(),
'max': time_diff.max()}
return pd.Series(stats)
landsat_scenes.groupby(['wrs_path', 'wrs_row']).apply(time_diff_stats)
def find_closest(date_time, data_frame):
# inspired by:
# https://stackoverflow.com/questions/36933725/pandas-time-series-join-by-closest-time
time_deltas = (data_frame.index - date_time).to_series().reset_index(drop=True).abs()
idx_min = time_deltas.idxmin()
min_delta = time_deltas[idx_min]
return (idx_min, min_delta)
def closest_time(group):
'''group: data frame with acquisition time as index'''
inquiry_date = datetime.datetime(year=2017,month=3,day=7)
idx, _ = find_closest(inquiry_date, group)
return group.index.to_series().iloc[idx]
# for accurate results, we look at the closest time for each path/row tile to a given time
# using just the first entry could result in a longer time gap between collects due to
# the timing of the first entries
landsat_scenes.groupby(['wrs_path', 'wrs_row']).apply(closest_time)
all_ps_scenes = items_to_scenes(search_pl_api(build_ps_request(aoi['geometry'], start_date, stop_date)))
# How many PS scenes match query?
print(len(all_ps_scenes))
all_ps_scenes[:1]
def aoi_overlap_percent(footprint, aoi):
aoi_shape = sgeom.shape(aoi['geometry'])
footprint_shape = sgeom.shape(footprint)
overlap = aoi_shape.intersection(footprint_shape)
return overlap.area / aoi_shape.area
overlap_percent = all_ps_scenes.footprint.apply(aoi_overlap_percent, args=(aoi,))
all_ps_scenes = all_ps_scenes.assign(overlap_percent = overlap_percent)
all_ps_scenes.head()
print(len(all_ps_scenes))
ps_scenes = all_ps_scenes[all_ps_scenes.overlap_percent > 0.20]
print(len(ps_scenes))
# ps_scenes.index.to_series().head()
# ps_scenes.filter(items=['id']).groupby(pd.Grouper(freq='D')).agg('count')
# Use PS acquisition year, month, and day as index and group by those indices
# https://stackoverflow.com/questions/14646336/pandas-grouping-intra-day-timeseries-by-date
daily_ps_scenes = ps_scenes.index.to_series().groupby([ps_scenes.index.year,
ps_scenes.index.month,
ps_scenes.index.day])
daily_count = daily_ps_scenes.agg('count')
daily_count.index.names = ['y', 'm', 'd']
# How many days is the count greater than 1?
daily_multiple_count = daily_count[daily_count > 1]
print('Out of {} days of coverage, {} days have multiple collects.'.format( \
len(daily_count), len(daily_multiple_count)))
daily_multiple_count.head()
def scenes_and_count(group):
entry = {'count': len(group),
'acquisition_time': group.index.tolist()}
return pd.DataFrame(entry)
daily_count_and_scenes = daily_ps_scenes.apply(scenes_and_count)
# need to rename indices because right now multiple are called 'acquired', which
# causes a bug when we try to run the query
daily_count_and_scenes.index.names = ['y', 'm', 'd', 'num']
multiplecoverage = daily_count_and_scenes.query('count > 1')
multiplecoverage.query('m == 7') # look at just occurrence in July
def find_crossovers(acquired_time, landsat_scenes):
'''landsat_scenes: pandas dataframe with acquisition time as index'''
closest_idx, closest_delta = find_closest(acquired_time, landsat_scenes)
closest_landsat = landsat_scenes.iloc[closest_idx]
crossover = {'landsat_acquisition': closest_landsat.name,
'delta': closest_delta}
return pd.Series(crossover)
# fetch PS scenes
ps_scenes = items_to_scenes(search_pl_api(build_ps_request(aoi['geometry'],
start_date, stop_date)))
# for each PS scene, find the closest Landsat scene
crossovers = ps_scenes.index.to_series().apply(find_crossovers, args=(landsat_scenes,))
# filter to crossovers within 1hr
concurrent_crossovers = crossovers[crossovers['delta'] < pd.Timedelta('1 hours')]
print(len(concurrent_crossovers))
concurrent_crossovers
def get_crossover_info(crossovers, aoi):
def get_scene_info(acquisition_time, scenes):
scene = scenes.loc[acquisition_time]
scene_info = {'id': scene.id,
'thumbnail': scene.thumbnail,
# we are going to use the footprints as shapes so convert to shapes now
'footprint': sgeom.shape(scene.footprint)}
return pd.Series(scene_info)
landsat_info = crossovers.landsat_acquisition.apply(get_scene_info, args=(landsat_scenes,))
ps_info = crossovers.index.to_series().apply(get_scene_info, args=(ps_scenes,))
footprint_info = pd.DataFrame({'landsat': landsat_info.footprint,
'ps': ps_info.footprint})
overlaps = footprint_info.apply(lambda x: x.landsat.intersection(x.ps),
axis=1)
aoi_shape = sgeom.shape(aoi['geometry'])
overlap_percent = overlaps.apply(lambda x: x.intersection(aoi_shape).area / aoi_shape.area)
crossover_info = pd.DataFrame({'overlap': overlaps,
'overlap_percent': overlap_percent,
'ps_id': ps_info.id,
'ps_thumbnail': ps_info.thumbnail,
'landsat_id': landsat_info.id,
'landsat_thumbnail': landsat_info.thumbnail})
return crossover_info
crossover_info = get_crossover_info(concurrent_crossovers, aoi)
print(len(crossover_info))
significant_crossovers_info = crossover_info[crossover_info.overlap_percent > 0.9]
print(len(significant_crossovers_info))
significant_crossovers_info
def group_by_day(data_frame):
return data_frame.groupby([data_frame.index.year,
data_frame.index.month,
data_frame.index.day])
unique_crossover_days = group_by_day(significant_crossovers_info.index.to_series()).count()
print(len(unique_crossover_days))
print(unique_crossover_days)
# https://stackoverflow.com/questions/36006136/how-to-display-images-in-a-row-with-ipython-display
def make_html(image):
    return '<img src="{0}" alt="{0}" style="display:inline;margin:1px"/>' \
        .format(image)
def display_thumbnails(row):
print(row.name)
display(HTML(''.join(make_html(t)
for t in (row.ps_thumbnail, row.landsat_thumbnail))))
_ = significant_crossovers_info.apply(display_thumbnails, axis=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pre-Processing
Step2: Due to memory and time constraints we'll use our quick and dirty tokenizer
Step3: First get the sentences
Step4: Now the words
Step5: We'll double check that we don't have any empty sentences
Step6: We should now have a list of lists with sentences and words
Step7: Word2Vec
Step8: Embeddings
Step9: gensim comes with some handy methods to analyze word relationships. similarity will give us a number from 0-1 based on how similar two words are. If this sounds like cosine similarity for words, you'd be right! It just takes the cosine similarity of these high dimensional vectors
Step10: We can also find cosine distance between two clusters of word vectors. Each cluster is measured as the mean of its words
Step11: We can find words that don't belong with doesnt_match. It finds the mean vector of the words in the list, and identifies the furthest away
Step12: The most famous implementation of this vector math is semantics. What happens if we take
Step13: Schmidt looked at words associated with male and female pronouns to investigate gender. Let's try take all the female pronouns and subtracting the male pronouns
Step14: And the opposite
Step15: How about together (genderless in Schmidt's sense)?
Step16: Homework
Step17: We need to get the vector from each word, just like above, and add that to a list
Step18: We can then calculate pairwise the cosine distance
Step19: We'll use MDS to reduce the dimensions to two
Step20: Some fancy matplotlib code...
Step21: What kinds of semantic relationships exist in the diagram above? Are there any words that seem out of place? How do you think they got there?
Step22: To load up a model, we just ask gensim. Here's a model trained on Eighteenth Century Collections Online corpus (~2500 texts) made available by Ryan Heuser
Step23: How does this differ from our novels model?
Step24: So how can we use word embeddings as features? Believe it or not, one of the most effective ways is to simply average each dimension of our embedding across all the words for a given document. Recall our w2v model for novels was trained for 100 dimensions. Creating the features for a specific document would entail first extracting the 100 dimensions for each word, then average each dimension across all words
Step25: This gives us a set X array with 100 features. We can write a function to do this for us for any given string
Step26: We can then featurize all of our documents
Step27: We can fit and score the machine learning model just as before
Step28: What about Heuser's model?
Step29: Cool! But wait, what if we wanted to know why the model was making decisions? If we ask for the most positive coefficients
Step30: And the negative
Step31: These are the indices for the important features. But what are these features now?
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed by the cells below (assumption: the metadata table is read with the
# Berkeley `datascience` package's Table, as in the rest of this course's notebooks)
import numpy as np
import matplotlib.pyplot as plt
import gensim
from datascience import Table

metadata_tb = Table.read_table('../09-Topic-Modeling/data/txtlab_Novel150_English.csv')
fiction_path = '../09-Topic-Modeling/data/txtlab_Novel150_English/'
novel_list = []
# Iterate through filenames in metadata table
for filename in metadata_tb['filename']:
    # Read in novel text as a single string
    with open(fiction_path + filename, 'r') as file_in:
        novel = file_in.read()
    # Add novel text as single string to master list
    novel_list.append(novel)
from nltk.tokenize import sent_tokenize
from string import punctuation  # needed by fast_tokenize below

def fast_tokenize(text):
    # Iterate through text removing punctuation characters
    no_punct = "".join([char for char in text if char not in punctuation])
    # Split text over whitespace into list of words
    tokens = no_punct.split()
    return tokens
sentences = [sentence for novel in novel_list for sentence in sent_tokenize(novel)]
words_by_sentence = [fast_tokenize(sentence.lower()) for sentence in sentences]
words_by_sentence = [sentence for sentence in words_by_sentence if sentence != []]
words_by_sentence[:2]
model = gensim.models.Word2Vec(words_by_sentence, size=100, window=5, \
min_count=5, sg=0, alpha=0.025, iter=5, batch_words=10000)
model['whale']
model.similarity('sense','sensibility')
model.n_similarity(['sense','sensibility'],['whale','harpoon'])
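# Quick sanity check (a hand-rolled sketch, not part of the original notebook flow):
# similarity() is just the cosine similarity of the two raw word vectors,
# which we can reproduce directly with numpy.
v1, v2 = model['sense'], model['sensibility']
print(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))  # should match model.similarity('sense','sensibility')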
model.doesnt_match(['pride','prejudice', 'harpoon'])
model.most_similar(positive=['woman', 'king'], negative=['man'])
model.most_similar(positive=['she','her','hers','herself'], negative=['he','him','his','himself'])
model.most_similar(positive=['he','him','his','himself'], negative=['she','her','hers','herself'])
model.most_similar(positive=['she','her','hers','herself','he','him','his','himself'], topn=50)
her_tokens = [token for token,weight in model.most_similar(positive=['she','her','hers','herself'], \
negative=['he','him','his','himself'], topn=50)]
vectors = [model[word] for word in her_tokens]
from sklearn.metrics import pairwise
dist_matrix = pairwise.pairwise_distances(vectors, metric='cosine')
from sklearn.manifold import MDS
mds = MDS(n_components = 2, dissimilarity='precomputed')
embeddings = mds.fit_transform(dist_matrix)
_, ax = plt.subplots(figsize=(10,10))
ax.scatter(embeddings[:,0], embeddings[:,1], alpha=0)
for i in range(len(vectors)):
    ax.annotate(her_tokens[i], (embeddings[i,0], embeddings[i,1]))
model.wv.save_word2vec_format('word2vec.txtalb_Novel150_English.txt')
ecco_model = gensim.models.KeyedVectors.load_word2vec_format('data/word2vec.ECCO-TCP.txt')
ecco_model.most_similar(positive=['woman', 'king'], negative=['man'])
ecco_model.most_similar(positive=['she','her','hers','herself'], negative=['he','him','his','himself'])
from nltk.corpus import movie_reviews
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.utils import shuffle
reviews = [movie_reviews.raw(fileid) for fileid in movie_reviews.fileids()]
judgements = [movie_reviews.categories(fileid)[0] for fileid in movie_reviews.fileids()]
np.random.seed(0)
X, y = shuffle(reviews, judgements, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=50)
# get tfidf values
tfidf = TfidfVectorizer()
tfidf.fit(X)
X_train_transformed = tfidf.transform(X_train)
X_test_transformed = tfidf.transform(X_test)
# build and test logit
logit_class = LogisticRegression(penalty='l2', C=1000)
logit_model = logit_class.fit(X_train_transformed, y_train)
logit_model.score(X_test_transformed, y_test)
np.mean([model[w] for w in fast_tokenize(X[0]) if w in model], axis=0)
def w2v_featurize(document, model):
    return np.mean([model[w] for w in fast_tokenize(document) if w in model], axis=0)
X_train_w2v = [w2v_featurize(d, model) for d in X_train]
X_test_w2v = [w2v_featurize(d, model) for d in X_test]
logit_class = LogisticRegression(random_state=0, penalty='l2', C=1000)
logit_model = logit_class.fit(X_train_w2v, y_train)
logit_model.score(X_test_w2v, y_test)
X_train_w2v = [w2v_featurize(d, ecco_model) for d in X_train]
X_test_w2v = [w2v_featurize(d, ecco_model) for d in X_test]
logit_class = LogisticRegression(random_state=0, penalty='l2', C=1000)
logit_model = logit_class.fit(X_train_w2v, y_train)
logit_model.score(X_test_w2v, y_test)
np.argsort(logit_model.coef_[0])[-10:]
np.argsort(logit_model.coef_[0])[:10]
X_train_transformed
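# The coefficient indices above come from the word2vec-averaged features, so they are just
# embedding dimensions with no word-level names. A hedged sketch of how to get interpretable
# features instead: refit on the tf-idf matrix, where each column index maps back to a term.
tfidf_logit = LogisticRegression(penalty='l2', C=1000).fit(X_train_transformed, y_train)
feature_names = np.array(tfidf.get_feature_names())
print(feature_names[np.argsort(tfidf_logit.coef_[0])[-10:]])  # most positive terms
print(feature_names[np.argsort(tfidf_logit.coef_[0])[:10]])   # most negative terms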
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To begin, we follow exactly the same steps we carried out "by hand". Let's summarize the "algorithm"
Step2: We switch on the prettier formula printing that SymPy provides.
Step3: From here on it becomes quite complicated to extract the solutions from the last result programmatically. If we are only interested in the solutions, we can skip the procedure above and simply use the solve function.
Step4: Graphical solution
Step5: Exercise
|
<ASSISTANT_TASK:>
Python Code:
import sympy as sym
x = sym.symbols("x") # the variable x is a mathematical symbol
enacba = sym.Eq(x+2/x,3)
enacba
sym.init_printing() # prettier formula output
enacba
# move all terms to the left-hand side and multiply by x
leva = (enacba.lhs - enacba.rhs)*x
leva
# expand/multiply out the left-hand side
leva = sym.expand(leva)
leva
# factor the left-hand side
leva = sym.factor(leva)
leva
# the easiest way to get the solutions of the equation is with the solve function
resitve = sym.solve(enacba)
resitve
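# A quick check (sketch): substitute each solution back into the original equation;
# both entries should evaluate to True.
[enacba.subs(x, r) for r in resitve]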
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
t = np.arange(-1,3,0.01) # sequence of x values at which we tabulate the function
leva_f = sym.lambdify(x,enacba.lhs) # lambdify turns the left-hand side of the equation into a Python function, which we apply to t
desna_f = sym.lambdify(x,enacba.rhs) # likewise for the right-hand side (rhs - right hand side, lhs - left hand side)
plt.plot(t,leva_f(t)) # the left-hand side / the function leva_f works element-wise on the list t
plt.plot(t,[desna_f(ti) for ti in t]) # the function desna_f is a constant (the number 3), so it does not return a list the same length as t
plt.ylim(0,5)
plt.plot(resitve,[leva_f(r) for r in resitve],'or')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Stephen Jesse, Suhas Somnath, and Chris R. Smith,
Step1: Load pycroscopy compatible ptychography dataset
Step2: Inspect the contents of this h5 data file
Step3: Read some basic parameters for visualization
Step4: Visualize the Raw Ronchigrams
Step5: Performing Singular Value Decomposition (SVD)
Step6: Visualize the SVD results
Step7: U (Eigenvalues or loading maps)
Step8: V (Eigenvectors)
Step9: Clustering
Step10: Visualize k-means results
Step11: Visualize the hierarchical clustering
Step12: Save and close
|
<ASSISTANT_TASK:>
Python Code:
# Ensure python 3 compatibility
from __future__ import division, print_function, absolute_import
# Import necessary libraries:
import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IPython.display import display
import ipywidgets as widgets
import pycroscopy as px
# set up notebook to show plots within the notebook
%matplotlib inline
# Select a file to work on:
# h5_path = px.io_utils.uiGetFile('*.h5', 'pycroscopy formatted Ptychography dataset')
h5_path = r"\\nanophase\IFgroup\SPM software development\Raw_Data\Ptychography\20120212_21_GB.h5"
print('Working on:\n' + h5_path)
# Open the file
h5_file = h5py.File(h5_path, mode='r+')
print('Datasets and datagroups within the file:\n------------------------------------')
px.hdf_utils.print_tree(h5_file)
print('\nThe main dataset:\n------------------------------------')
print(h5_file['/Measurement_000/Channel_000/Raw_Data'])
print('\nThe ancillary datasets:\n------------------------------------')
print(h5_file['/Measurement_000/Channel_000/Position_Indices'])
print(h5_file['/Measurement_000/Channel_000/Position_Values'])
print(h5_file['/Measurement_000/Channel_000/Spectroscopic_Indices'])
print(h5_file['/Measurement_000/Channel_000/Spectroscopic_Values'])
print('\nMetadata or attributes in a datagroup\n------------------------------------')
for key in h5_file['/Measurement_000'].attrs:
print('{} : {}'.format(key, h5_file['/Measurement_000'].attrs[key]))
# Select the dataset containing the raw data to start working with:
h5_main = px.hdf_utils.getDataSet(h5_file, 'Raw_Data')[-1]
# Read some necessary parameters:
h5_pos_inds = px.hdf_utils.getAuxData(h5_main, auxDataName=['Position_Indices'])[0]
num_rows = len(np.unique(h5_pos_inds[:, 0]))
num_cols = len(np.unique(h5_pos_inds[:, 1]))
h5_spec_inds = px.hdf_utils.getAuxData(h5_main, auxDataName=['Spectroscopic_Indices'])[0]
num_sensor_rows = len(np.unique(h5_spec_inds[0, :]))
num_sensor_cols = len(np.unique(h5_spec_inds[1, :]))
coarse_row = int(0.5*num_rows)
coarse_col = int(0.5*num_cols)
coarse_pos = coarse_row * num_rows + coarse_col
current_ronch = np.reshape(h5_main[coarse_pos], (num_sensor_rows, num_sensor_cols))
fig, axes = plt.subplots(ncols=2, figsize=(14,7))
axes[0].hold(True)
axes[0].set_title('Mean Response')
axes[0].imshow(np.reshape(h5_main.parent['Spectroscopic_Mean'], (num_rows, num_cols)),
cmap=px.plot_utils.cmap_jet_white_center(), origin='lower')
main_vert_line = axes[0].axvline(x=coarse_col, color='k')
main_hor_line = axes[0].axhline(y=coarse_row, color='k')
axes[1].set_title('Ronchigram at current pixel')
img_zoom = axes[1].imshow(current_ronch,cmap=px.plot_utils.cmap_jet_white_center(), origin='lower')
def move_zoom_box(coarse_row, coarse_col):
main_vert_line.set_xdata(coarse_col)
main_hor_line.set_ydata(coarse_row)
coarse_pos = coarse_row * num_rows + coarse_col
current_ronch = np.reshape(h5_main[coarse_pos], (num_sensor_rows, num_sensor_cols))
img_zoom.set_data(current_ronch)
#img_zoom.set_clim(vmax=ronch_max, vmin=ronch_min)
display(fig)
widgets.interact(move_zoom_box, coarse_row=(0, num_rows, 1),
coarse_col=(0, num_cols, 1));
# First check if SVD was already computed on this dataset:
h5_svd_group = px.hdf_utils.findH5group(h5_main, 'SVD')
if len(h5_svd_group) == 0:
print('No prior SVD results found - doing SVD now')
h5_svd_group = px.doSVD(h5_main, num_comps=256)
else:
print('Taking previous SVD results already present in file')
h5_svd_group = h5_svd_group[-1]
h5_u = h5_svd_group['U']
h5_v = h5_svd_group['V']
h5_s = h5_svd_group['S']
num_comps = 16
# Visualize variance of the principal components
fig, axes = px.plot_utils.plotScree(h5_s, title='Variance')
# Visualize the eigenvalues or loading maps from SVD:
loadings = np.reshape(h5_u[:, :num_comps], (num_rows, num_cols, -1))
fig, axes = px.plot_utils.plot_map_stack(loadings, num_comps=num_comps, heading='Eigenvalues',
cmap=px.plot_utils.cmap_jet_white_center())
# Visualize the eigenvectors from SVD:
eigenvectors = np.reshape(h5_v[:num_comps], (-1, num_sensor_rows, num_sensor_cols))
eigenvectors = np.transpose(eigenvectors, (1, 2, 0))
fig, axes = px.plot_utils.plot_map_stack(eigenvectors, num_comps=num_comps, heading='Eigenvectors',
cmap=px.plot_utils.cmap_jet_white_center())
# Attempt to find any previous computation
h5_kmeans_group = px.hdf_utils.findH5group(h5_u, 'Cluster')
if len(h5_kmeans_group) == 0:
print('No k-Means computation found. Doing K-Means now')
num_clusters = 32
num_comps_for_clustering = 128
estimator = px.Cluster(h5_u, 'KMeans', num_comps=num_comps_for_clustering, n_clusters=num_clusters)
h5_kmeans_group = estimator.do_cluster()
else:
print('Taking existing results of previous K-Means computation')
h5_kmeans_group = h5_kmeans_group[-1]
h5_labels = h5_kmeans_group['Labels']
h5_centroids = h5_kmeans_group['Mean_Response']
# In case we take existing results, we need to get these parameters
num_clusters = h5_centroids.shape[0]
num_comps_for_clustering = h5_centroids.shape[1]
label_mat = np.reshape(h5_labels, (num_rows, num_cols))
fig, axis = plt.subplots(figsize=(7,7))
axis.imshow(label_mat, cmap=px.plot_utils.cmap_jet_white_center())
axis.set_title('k-Means Cluster labels');
e_vals = np.reshape(h5_u[:, :num_comps_for_clustering],
(num_rows, num_cols, -1))
fig = px.plot_utils.plot_cluster_dendrogram(label_mat, e_vals,
num_comps_for_clustering,
num_clusters,
last=num_clusters);
h5_file.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrieving training and test data
Step2: Visualize the training data
Step3: Building the network
Step4: Training the network
Step5: Testing
|
<ASSISTANT_TASK:>
Python Code:
# Import Numpy, TensorFlow, TFLearn, and MNIST data
import numpy as np
import tensorflow as tf
import tflearn
import tflearn.datasets.mnist as mnist
# Retrieve the training and test data
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)
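# Quick shape check (sketch): each image is a flattened 28x28 = 784 vector and,
# because one_hot=True, each label is a 10-element one-hot vector.
print(trainX.shape, trainY.shape, testX.shape, testY.shape)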
# Visualizing the data
import matplotlib.pyplot as plt
%matplotlib inline
# Function for displaying a training image by it's index in the MNIST set
def show_digit(index):
    label = trainY[index].argmax(axis=0)
    # Reshape 784 array into 28x28 image
    image = trainX[index].reshape([28,28])
    plt.title('Training data, index: %d, Label: %d' % (index, label))
    plt.imshow(image, cmap='gray_r')
    plt.show()
# Display the first (index 0) training image
show_digit(0)
# Define the neural network
def build_model():
    # This resets all parameters and variables, leave this here
    tf.reset_default_graph()
    #### Your code ####
    # Include the input layer, hidden layer(s), and set how you want to train the model
    net = tflearn.input_data([None, trainX.shape[1]])
    net = tflearn.fully_connected(net, 128, activation='ReLU')
    # Output layer over the 10 digit classes plus the training setup
    # (one reasonable choice of optimizer and learning rate), without which
    # model.fit() below has nothing to optimize
    net = tflearn.fully_connected(net, 10, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy')
    # This model assumes that your network is named "net"
    model = tflearn.DNN(net)
    return model
# Build the model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=20)
# Compare the labels that our model predicts with the actual labels
# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.
predictions = np.array(model.predict(testX)).argmax(axis=1)
# Calculate the accuracy, which is the percentage of times the predicted labels matched the actual labels
actual = testY.argmax(axis=1)
test_accuracy = np.mean(predictions == actual, axis=0)
# Print out the result
print("Test accuracy: ", test_accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I used View Source in my browser to figure out where the age range is on the page, and what it looks like. The most predictable spot, that will work on every period's page, is in the infobox. It's given as a range, in italic text, with "million years ago" right after it.
Step2: Now we have the entire text of the webpage, along with some metadata. The text is stored in r.text, and I happen to know roughly where the relevant bit of text is
Step3: We can get at that bit of text using a regular expression
Step4: And if we're really cunning, we can get the start and end ages
Step5: An exercise for you, dear reader
Step6: You should be able to call your function like this
Step7: Now we can make a function that makes the sentence we made before, calling the function you just wrote
Step8: Natural gas prices from Yahoo Finance
Step9: The symbol s we're passing is HHG17.NYM. This means
Step10: This should work
|
<ASSISTANT_TASK:>
Python Code:
url = "http://en.wikipedia.org/wiki/Jurassic" # Line 1
import requests # I don't count these lines.
r = requests.get(url) # Line 2
r.text[7400:7600] # I don't count these lines either.
import re
s = re.search(r'<i>(.+?million years ago)</i>', r.text)
text = s.group(1)
text
start, end = re.search(r'<i>([\.0-9]+)–([\.0-9]+) million years ago</i>', r.text).groups() # Line 3
duration = float(start) - float(end) # Line 4
print("According to Wikipedia, the Jurassic lasted {:.2f} Ma.".format(duration)) # Line 5
def get_age(period):
    url =   # Make a URL out of a base URL and the period name
    r =   # Make the request.
    start, end =   # Provide the regex.
    return float(start), float(end)
period = "Jurassic"
get_age(period)
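# One possible completion of the exercise above (a sketch; it simply reuses the base URL
# and the regular expression from the earlier cells):
def get_age(period):
    url = "http://en.wikipedia.org/wiki/" + period      # Make a URL out of a base URL and the period name
    r = requests.get(url)                               # Make the request.
    start, end = re.search(r'<i>([\.0-9]+)–([\.0-9]+) million years ago</i>', r.text).groups()  # Provide the regex.
    return float(start), float(end)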
def duration(period):
t0, t1 = get_age(period)
duration = t0 - t1
response = "According to Wikipedia, the {0} lasted {1:.2f} Ma.".format(period, duration)
return response
duration('Cretaceous')
import requests
url = "http://download.finance.yahoo.com/d/quotes.csv" # Line 6
params = {'s': 'HHG17.NYM', 'f': 'l1'} # Line 7
r = requests.get(url, params=params) # Line 8
price = float(r.text) # Line 9
print("Henry Hub price for Feb 2017: ${:.2f}".format(price)) # Line 10
import time
def get_symbol(benchmark):
    # I'll help you with the time.
    # We compute a time 90 days in the future for a price
    future = time.gmtime(time.time() + 90*24*60*60)
    month = future.tm_mon
    year = future.tm_year
    month_codes = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
    # This is where you come in.
    month =   #### Get the appropriate code for the month.
    year =   #### Make a string for the year.
    return benchmark + month + year + ".NYM"
get_symbol('CL')
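# One possible completion of the exercise above (a sketch; it assumes two-digit years as in
# 'HHG17.NYM' and the futures month letters listed in month_codes):
def get_symbol(benchmark):
    future = time.gmtime(time.time() + 90*24*60*60)
    month_codes = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
    month = month_codes[future.tm_mon - 1]   # the appropriate futures code for the month
    year = str(future.tm_year)[-2:]          # two-digit year as a string
    return benchmark + month + year + ".NYM"

get_symbol('CL')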
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we download the data, packed in NixIO structure, from https
Step2: The data is represented as a neo.Block with one neo.Segment inside, which contains raw neo.SpikeTrains. For more information on neo.Block, neo.Segment, and neo.SpikeTrain refer to https
Step3: Even though we see an increase in the firing rate, we cannot identify propagating activity just by looking at the raster plot above.
Step4: 2.2. Analytical probability matrix
Step5: 2.3. Joint probability matrix
Step6: 2.4. Mask matrix
Step7: 2.5. Find clusters in the mask matrix
Step8: 2.6. Sequences of synchronous events
Step9: With the sequences of synchronous events sses, we found a permutation of the spiketrains that reveals the synfire chain, matching the ground-truth ordering shown in the last figure below.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import quantities as pq
import neo
import elephant
from elephant import asset
%load_ext autoreload
plt.style.use('dark_background')
plt.rcParams['figure.autolayout'] = False
plt.rcParams['figure.figsize'] = 20, 12
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 14
plt.rcParams['lines.linewidth'] = 1.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
plt.rcParams['text.latex.preamble'] = r"\usepackage{subdepth}, \usepackage{type1cm}"
plt.rcParams['mathtext.fontset'] = 'cm'
!curl https://web.gin.g-node.org/INM-6/elephant-data/raw/master/dataset-2/asset_showcase_500.nix --output asset_showcase_500.nix --location
with neo.NixIO('asset_showcase_500.nix', 'ro') as f:
    block = f.read_block()
segment = block.segments[0]
spiketrains = segment.spiketrains
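# A quick look at the loaded container (a sketch; attribute names follow the neo API)
print(block)
print(len(spiketrains), 'spike trains; first train stops at', spiketrains[0].t_stop)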
plt.figure()
plt.eventplot([st.magnitude for st in spiketrains], linewidths=5, linelengths=5)
plt.xlabel('time [ms]')
plt.ylabel('neuron id')
plt.title('Raw spikes')
plt.show()
# 2.1.1) create ASSET analysis object
# hint: try different bin sizes, e.g. bin_size=2.5, 3.5, 4.0 ms
asset_obj = asset.ASSET(spiketrains, bin_size=3*pq.ms)
# 2.1.2) compute the intersection matrix
imat = asset_obj.intersection_matrix()
plt.matshow(imat)
plt.colorbar();
pmat = asset_obj.probability_matrix_analytical(imat, kernel_width=50*pq.ms)
plt.matshow(pmat)
plt.colorbar();
# hint: try different filter_shapes, e.g. filter_shape=(7,3)
jmat = asset_obj.joint_probability_matrix(pmat, filter_shape=(11, 3), n_largest=3)
plt.matshow(jmat)
plt.colorbar();
# hint: try different alphas for pmat and jmat
# hint: try alphas in range [0.99, 1-1e-6]
# hint: you can call 'asset.ASSET.mask_matrices(...)' without creating the asset_obj
alpha = .99
mmat = asset_obj.mask_matrices([pmat, jmat], [alpha, alpha])
plt.matshow(mmat);
# hint: you can call asset.ASSET.cluster_matrix_entries(...) without creating the asset_obj
cmat = asset_obj.cluster_matrix_entries(mmat, max_distance=11, min_neighbors=10, stretch=5)
plt.matshow(cmat)
plt.colorbar();
sses = asset_obj.extract_synchronous_events(cmat)
sses.keys()
cluster_id = 1
cluster_chain = []
for chain in sses[cluster_id].values():
    cluster_chain.extend(chain)
_, st_indices = np.unique(cluster_chain, return_index=True)
st_indices = np.take(cluster_chain, np.sort(st_indices))
reordered_sts = [spiketrains[idx] for idx in st_indices]
spiketrains_not_a_pattern = [spiketrains[idx] for idx in range(len(spiketrains))
if idx not in st_indices]
reordered_sts.extend(spiketrains_not_a_pattern)
plt.figure()
plt.eventplot([st.magnitude for st in reordered_sts], linewidths=5, linelengths=5)
plt.xlabel('time [ms]')
plt.ylabel('reordered neuron id')
plt.title('Reconstructed ordering of the neurons (y-axis) with synfire chains');
ordering_true = segment.annotations['spiketrain_ordering']
spiketrains_ordered = [spiketrains[idx] for idx in ordering_true]
plt.figure()
plt.eventplot([st.magnitude for st in spiketrains_ordered], linewidths=5, linelengths=5)
plt.xlabel('time [ms]')
plt.ylabel('neuron id')
plt.title('True (unknown) ordering of the neurons (y-axis)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set the path for storing the data-set on your computer.
Step2: Load the class-names.
Step3: Load the training-set. This returns the images, the class-numbers as integers, and the class-numbers as One-Hot encoded arrays called labels.
Step4: Load the test-set.
Step5: The CIFAR-10 data-set has now been loaded and consists of 60,000 images and associated labels (i.e. classifications of the images). The data-set is split into 2 mutually exclusive sub-sets, the training-set and the test-set.
Step6: The data dimensions are used in several places in the source-code below. They have already been defined in the cifar10 module, so we just need to import them.
Step7: The images are 32 x 32 pixels, but we will crop the images to 24 x 24 pixels.
Step8: Function used to plot 9 images in a 3x3 grid, and writing the true and predicted classes below each image.
Step9: Plot a few images to see if data is correct
Step10: The pixelated images above are what the neural network will get as input. The images might be a bit easier for the human eye to recognize if we smoothen the pixels.
Step11: Data augmentation for images
Step12: The function above is called for each image in the input batch using the following function.
Step13: In order to plot the distorted images, we create the pre-processing graph for TensorFlow, so we may execute it later.
Step14: Creating Main Processing
Step15: Creating Neural Network
Step16: Create Neural Network for Training Phase
Step17: Create the neural network to be used for training. The create_network() function returns both y_pred and loss, but we only need the loss-function during training.
Step18: Create an optimizer which will minimize the loss-function. Also pass the global_step variable to the optimizer so it will be increased by one after each iteration.
Step19: Create Neural Network for Test Phase / Inference
Step20: We then calculate the predicted class number as an integer. The output of the network y_pred is an array with 10 elements. The class number is the index of the largest element in the array.
Step21: Then we create a vector of booleans telling us whether the predicted class equals the true class of each image.
Step22: The classification accuracy is calculated by first type-casting the vector of booleans to floats, so that False becomes 0 and True becomes 1, and then taking the average of these numbers.
Step23: Saver
Step24: Getting the Weights
Step25: Using this helper-function we can retrieve the variables. These are TensorFlow objects. In order to get the contents of the variables, you must do something like
Step26: Getting the Layer Outputs
Step27: Get the output of the convoluational layers so we can plot them later.
Step28: TensorFlow Run
Step29: Restore or initialize variables
Step30: Create the directory if it does not exist.
Step31: This is the base-filename for the checkpoints, TensorFlow will append the iteration number, etc.
Step32: First try to restore the latest checkpoint. This may fail and raise an exception e.g. if such a checkpoint does not exist, or if you have changed the TensorFlow graph.
Step33: Helper-function to get a random training-batch
Step34: Function for selecting a random batch of images from the training-set.
Step35: Optimization
Step36: Plot example errors
Step37: Plot confusion matrix
Step38: Calculating classifications
Step39: Calculate the predicted class for the test-set.
Step40: Helper-functions for the classification accuracy
Step41: Helper-function for showing the performance
Step42: Helper-function for plotting convolutional weights
Step43: Helper-function for plotting the output of convolutional layers
Step44: Examples of distorted input images
Step45: Helper-function for getting an image and its class-number from the test-set.
Step46: Get an image and its true class from the test-set.
Step47: Plot 9 random distortions of the image. If you re-run this code you will get slightly different results.
Step48: Perform optimization
Step49: Results
Step50: Convolutional Weights
Step51: Plot some of the weights (or filters) for the second convolutional layer. These are apparently closer to zero than the weights for the first convolutional layers, see the lower standard deviation.
Step52: Output of convolutional layers
Step53: Plot an image from the test-set. The raw pixelated image is used as input to the neural network.
Step54: Use the raw image as input to the neural network and plot the output of the first convolutional layer.
Step55: Using the same image as input to the neural network, now plot the output of the second convolutional layer.
Step56: Predicted class-labels
Step57: Print the predicted class-label.
Step58: The predicted class-label is an array of length 10, with each element indicating how confident the neural network is that the image is the given class.
Step59: Close TensorFlow Session
|
<ASSISTANT_TASK:>
Python Code:
import cifar10
cifar10.maybe_download_and_extract()
class_names = cifar10.load_class_names()
class_names
images_train, cls_train, labels_train = cifar10.load_training_data()
images_test, cls_test, labels_test = cifar10.load_test_data()
print("Size of:")
print("- Training-set:\t\t{}".format(len(images_train)))
print("- Test-set:\t\t{}".format(len(images_test)))
from cifar10 import img_size, num_channels, num_classes
img_size_cropped = 24
def plot_images(images, cls_true, cls_pred=None, smooth=True):
assert len(images) == len(cls_true) == 9
# Create figure with sub-plots.
fig, axes = plt.subplots(3, 3)
# Adjust vertical spacing if we need to print ensemble and best-net.
if cls_pred is None:
hspace = 0.3
else:
hspace = 0.6
fig.subplots_adjust(hspace=hspace, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Interpolation type.
if smooth:
interpolation = 'spline16'
else:
interpolation = 'nearest'
# Plot image.
ax.imshow(images[i, :, :, :],
interpolation=interpolation)
# Name of the true class.
cls_true_name = class_names[cls_true[i]]
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true_name)
else:
# Name of the predicted class.
cls_pred_name = class_names[cls_pred[i]]
xlabel = "True: {0}\nPred: {1}".format(cls_true_name, cls_pred_name)
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
# Get the first images from the test-set.
images = images_test[0:9]
# Get the true classes for those images.
cls_true = cls_test[0:9]
# Plot the images and labels using our helper-function above.
plot_images(images=images, cls_true=cls_true, smooth=False)
plot_images(images=images, cls_true=cls_true, smooth=True)
x = tf.placeholder(tf.float32, shape=[None, img_size, img_size, num_channels], name='x')
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)
def pre_process_image(image, training):
# This function takes a single image as input,
# and a boolean whether to build the training or testing graph.
if training:
# For training, add the following to the TensorFlow graph.
# Randomly crop the input image.
image = tf.random_crop(image, size=[img_size_cropped, img_size_cropped, num_channels])
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
# Randomly adjust hue, contrast and saturation.
image = tf.image.random_hue(image, max_delta=0.05)
image = tf.image.random_contrast(image, lower=0.3, upper=1.0)
image = tf.image.random_brightness(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.0, upper=2.0)
# Some of these functions may overflow and result in pixel
# values beyond the [0, 1] range. It is unclear from the
# documentation of TensorFlow whether this is
# intended. A simple solution is to limit the range.
# Limit the image pixels between [0, 1] in case of overflow.
image = tf.minimum(image, 1.0)
image = tf.maximum(image, 0.0)
else:
# For training, add the following to the TensorFlow graph.
# Crop the input image around the centre so it is the same
# size as images that are randomly cropped during training.
image = tf.image.resize_image_with_crop_or_pad(image,
target_height=img_size_cropped,
target_width=img_size_cropped)
return image
def pre_process(images, training):
# Use TensorFlow to loop over all the input images and call
# the function above which takes a single image as input.
images = tf.map_fn(lambda image: pre_process_image(image, training), images)
return images
distorted_images = pre_process(images=x, training=True)
def main_network(images, training):
# Wrap the input images as a Pretty Tensor object.
x_pretty = pt.wrap(images)
# Pretty Tensor uses special numbers to distinguish between
# the training and testing phases.
if training:
phase = pt.Phase.train
else:
phase = pt.Phase.infer
# Create the convolutional neural network using Pretty Tensor.
with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
y_pred, loss = x_pretty.\
conv2d(kernel=5, depth=64, name='layer_conv1', batch_normalize=True).\
max_pool(kernel=2, stride=2).\
conv2d(kernel=5, depth=64, name='layer_conv2').\
max_pool(kernel=2, stride=2).\
flatten().\
fully_connected(size=256, name='layer_fc1').\
fully_connected(size=128, name='layer_fc2').\
softmax_classifier(num_classes=num_classes, labels=y_true)
return y_pred, loss
def create_network(training):
# Wrap the neural network in the scope named 'network'.
# Create new variables during training, and re-use during testing.
with tf.variable_scope('network', reuse=not training):
# Just rename the input placeholder variable for convenience.
images = x
# Create TensorFlow graph for pre-processing.
images = pre_process(images=images, training=training)
# Create TensorFlow graph for the main processing.
y_pred, loss = main_network(images=images, training=training)
return y_pred, loss
global_step = tf.Variable(initial_value=0,
name='global_step', trainable=False)
_, loss = create_network(training=True)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss, global_step=global_step)
y_pred, _ = create_network(training=False)
y_pred_cls = tf.argmax(y_pred, dimension=1)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def get_weights_variable(layer_name):
# Retrieve an existing variable named 'weights' in the scope
# with the given layer_name.
# This is awkward because the TensorFlow function was
# really intended for another purpose.
with tf.variable_scope("network/" + layer_name, reuse=True):
variable = tf.get_variable('weights')
return variable
weights_conv1 = get_weights_variable(layer_name='layer_conv1')
weights_conv2 = get_weights_variable(layer_name='layer_conv2')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(weights_conv1).shape)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(weights_conv2).shape)
def get_layer_output(layer_name):
# The name of the last operation of the convolutional layer.
# This assumes you are using Relu as the activation-function.
tensor_name = "network/" + layer_name + "/Relu:0"
# Get the tensor with this name.
tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)
return tensor
output_conv1 = get_layer_output(layer_name='layer_conv1')
output_conv2 = get_layer_output(layer_name='layer_conv2')
session = tf.Session()
save_dir = 'checkpoints/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
save_path = os.path.join(save_dir, 'cifar10_cnn')
try:
print("Trying to restore last checkpoint ...")
# Use TensorFlow to find the latest checkpoint - if any.
last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=save_dir)
# Try and load the data in the checkpoint.
saver.restore(session, save_path=last_chk_path)
# If we get to this point, the checkpoint was successfully loaded.
print("Restored checkpoint from:", last_chk_path)
except:
# If the above failed for some reason, simply
# initialize all the variables for the TensorFlow graph.
print("Failed to restore checkpoint. Initializing variables instead.")
session.run(tf.global_variables_initializer())
train_batch_size = 64
def random_batch():
# Number of images in the training-set.
num_images = len(images_train)
# Create a random index.
idx = np.random.choice(num_images,
size=train_batch_size,
replace=False)
# Use the random index to select random images and labels.
x_batch = images_train[idx, :, :, :]
y_batch = labels_train[idx, :]
return x_batch, y_batch
def optimize(num_iterations):
# Start-time used for printing time-usage below.
start_time = time.time()
for i in range(num_iterations):
# Get a batch of training examples.
# x_batch now holds a batch of images and
# y_true_batch are the true labels for those images.
x_batch, y_true_batch = random_batch()
# Put the batch into a dict with the proper names
# for placeholder variables in the TensorFlow graph.
feed_dict_train = {x: x_batch,
y_true: y_true_batch}
# Run the optimizer using this batch of training data.
# TensorFlow assigns the variables in feed_dict_train
# to the placeholder variables and then runs the optimizer.
# We also want to retrieve the global_step counter.
i_global, _ = session.run([global_step, optimizer],
feed_dict=feed_dict_train)
# Print status to screen every 100 iterations (and last).
if (i_global % 100 == 0) or (i == num_iterations - 1):
# Calculate the accuracy on the training-batch.
batch_acc = session.run(accuracy,
feed_dict=feed_dict_train)
# Print status.
msg = "Global Step: {0:>6}, Training Batch Accuracy: {1:>6.1%}"
print(msg.format(i_global, batch_acc))
# Save a checkpoint to disk every 1000 iterations (and last).
if (i_global % 1000 == 0) or (i == num_iterations - 1):
# Save all variables of the TensorFlow graph to a
# checkpoint. Append the global_step counter
# to the filename so we save the last several checkpoints.
saver.save(session,
save_path=save_path,
global_step=global_step)
print("Saved checkpoint.")
# Ending time.
end_time = time.time()
# Difference between start and end-times.
time_dif = end_time - start_time
# Print the time-usage.
print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))
def plot_example_errors(cls_pred, correct):
# This function is called from print_test_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# correct is a boolean array whether the predicted class
# is equal to the true class for each image in the test-set.
# Negate the boolean array.
incorrect = (correct == False)
# Get the images from the test-set that have been
# incorrectly classified.
images = images_test[incorrect]
# Get the predicted classes for those images.
cls_pred = cls_pred[incorrect]
# Get the true classes for those images.
cls_true = cls_test[incorrect]
# Plot the first 9 images.
plot_images(images=images[0:9],
cls_true=cls_true[0:9],
cls_pred=cls_pred[0:9])
def plot_confusion_matrix(cls_pred):
# This is called from print_test_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the test-set.
# Get the confusion matrix using sklearn.
cm = confusion_matrix(y_true=cls_test, # True class for test-set.
y_pred=cls_pred) # Predicted class.
# Print the confusion matrix as text.
for i in range(num_classes):
# Append the class-name to each line.
class_name = "({}) {}".format(i, class_names[i])
print(cm[i, :], class_name)
# Print the class-numbers for easy reference.
class_numbers = [" ({0})".format(i) for i in range(num_classes)]
print("".join(class_numbers))
# Split the data-set in batches of this size to limit RAM usage.
batch_size = 256
def predict_cls(images, labels, cls_true):
# Number of images.
num_images = len(images)
# Allocate an array for the predicted classes which
# will be calculated in batches and filled into this array.
cls_pred = np.zeros(shape=num_images, dtype=np.int)
# Now calculate the predicted classes for the batches.
# We will just iterate through all the batches.
# There might be a more clever and Pythonic way of doing this.
# The starting index for the next batch is denoted i.
i = 0
while i < num_images:
# The ending index for the next batch is denoted j.
j = min(i + batch_size, num_images)
# Create a feed-dict with the images and labels
# between index i and j.
feed_dict = {x: images[i:j, :],
y_true: labels[i:j, :]}
# Calculate the predicted class using TensorFlow.
cls_pred[i:j] = session.run(y_pred_cls, feed_dict=feed_dict)
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Create a boolean array whether each image is correctly classified.
correct = (cls_true == cls_pred)
return correct, cls_pred
def predict_cls_test():
return predict_cls(images = images_test,
labels = labels_test,
cls_true = cls_test)
def classification_accuracy(correct):
# When averaging a boolean array, False means 0 and True means 1.
# So we are calculating: number of True / len(correct) which is
# the same as the classification accuracy.
# Return the classification accuracy
# and the number of correct classifications.
return correct.mean(), correct.sum()
def print_test_accuracy(show_example_errors=False,
show_confusion_matrix=False):
# For all the images in the test-set,
# calculate the predicted classes and whether they are correct.
correct, cls_pred = predict_cls_test()
# Classification accuracy and the number of correct classifications.
acc, num_correct = classification_accuracy(correct)
# Number of images being classified.
num_images = len(correct)
# Print the accuracy.
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, num_correct, num_images))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_example_errors(cls_pred=cls_pred, correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusion_matrix(cls_pred=cls_pred)
def plot_conv_weights(weights, input_channel=0):
# Assume weights are TensorFlow ops for 4-dim variables
# e.g. weights_conv1 or weights_conv2.
# Retrieve the values of the weight-variables from TensorFlow.
# A feed-dict is not necessary because nothing is calculated.
w = session.run(weights)
# Print statistics for the weights.
print("Min: {0:.5f}, Max: {1:.5f}".format(w.min(), w.max()))
print("Mean: {0:.5f}, Stdev: {1:.5f}".format(w.mean(), w.std()))
# Get the lowest and highest values for the weights.
# This is used to correct the colour intensity across
# the images so they can be compared with each other.
w_min = np.min(w)
w_max = np.max(w)
abs_max = max(abs(w_min), abs(w_max))
# Number of filters used in the conv. layer.
num_filters = w.shape[3]
# Number of grids to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_filters))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot all the filter-weights.
for i, ax in enumerate(axes.flat):
# Only plot the valid filter-weights.
if i<num_filters:
# Get the weights for the i'th filter of the input channel.
# The format of this 4-dim tensor is determined by the
img = w[:, :, input_channel, i]
# Plot image.
ax.imshow(img, vmin=-abs_max, vmax=abs_max,
interpolation='nearest', cmap='seismic')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def plot_layer_output(layer_output, image):
# Assume layer_output is a 4-dim tensor
# e.g. output_conv1 or output_conv2.
# Create a feed-dict which holds the single input image.
# Note that TensorFlow needs a list of images,
# so we just create a list with this one image.
feed_dict = {x: [image]}
# Retrieve the output of the layer after inputting this image.
values = session.run(layer_output, feed_dict=feed_dict)
# Get the lowest and highest values.
# This is used to correct the colour intensity across
# the images so they can be compared with each other.
values_min = np.min(values)
values_max = np.max(values)
# Number of image channels output by the conv. layer.
num_images = values.shape[3]
# Number of grid-cells to plot.
# Rounded-up, square-root of the number of filters.
num_grids = math.ceil(math.sqrt(num_images))
# Create figure with a grid of sub-plots.
fig, axes = plt.subplots(num_grids, num_grids)
# Plot all the filter-weights.
for i, ax in enumerate(axes.flat):
# Only plot the valid image-channels.
if i<num_images:
# Get the images for the i'th output channel.
img = values[0, :, :, i]
# Plot image.
ax.imshow(img, vmin=values_min, vmax=values_max,
interpolation='nearest', cmap='binary')
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def plot_distorted_image(image, cls_true):
# Repeat the input image 9 times.
image_duplicates = np.repeat(image[np.newaxis, :, :, :], 9, axis=0)
# Create a feed-dict for TensorFlow.
feed_dict = {x: image_duplicates}
# Calculate only the pre-processing of the TensorFlow graph
# which distorts the images in the feed-dict.
result = session.run(distorted_images, feed_dict=feed_dict)
# Plot the images.
plot_images(images=result, cls_true=np.repeat(cls_true, 9))
def get_test_image(i):
return images_test[i, :, :, :], cls_test[i]
img, cls = get_test_image(16)
plot_distorted_image(img, cls)
# if False:
optimize(num_iterations=1000)
print_test_accuracy(show_example_errors=True,
show_confusion_matrix=True)
plot_conv_weights(weights=weights_conv1, input_channel=0)
plot_conv_weights(weights=weights_conv2, input_channel=1)
def plot_image(image):
# Create figure with sub-plots.
fig, axes = plt.subplots(1, 2)
# References to the sub-plots.
ax0 = axes.flat[0]
ax1 = axes.flat[1]
# Show raw and smoothened images in sub-plots.
ax0.imshow(image, interpolation='nearest')
ax1.imshow(image, interpolation='spline16')
# Set labels.
ax0.set_xlabel('Raw')
ax1.set_xlabel('Smooth')
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
img, cls = get_test_image(16)
plot_image(img)
plot_layer_output(output_conv1, image=img)
plot_layer_output(output_conv2, image=img)
label_pred, cls_pred = session.run([y_pred, y_pred_cls],
feed_dict={x: [img]})
# Set the rounding options for numpy.
np.set_printoptions(precision=3, suppress=True)
# Print the predicted label.
print(label_pred[0])
class_names[3]
class_names[5]
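# Optional sketch (assumes cls_pred and class_names from above): look the
# predicted class name up directly instead of indexing class_names by hand.
print("Predicted class:", class_names[cls_pred[0]])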
# This has been commented out in case you want to modify and experiment
# with the Notebook without having to restart it.
# session.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The multi-modal LogGamma distribution is useful for stress testing the effectiveness of bounding distributions. It is defined as a product of mixtures of LogGamma and Normal components (the exact form is implemented as the log-likelihood in the code below).
Step2: We will now sample from this distribution using the 'multi' bound with 'rwalk' sampling (as configured below) in $d=2$ and $d=10$ dimensions.
Step3: Now let's see how we did!
|
<ASSISTANT_TASK:>
Python Code:
# system functions that are always useful to have
import time, sys, os
import warnings
# basic numeric setup
import numpy as np
# inline plotting
%matplotlib inline
# plotting
import matplotlib
from matplotlib import pyplot as plt
# seed the random number generator
rstate = np.random.default_rng(1028)
# re-defining plotting defaults
from matplotlib import rcParams
rcParams.update({'xtick.major.pad': '7.0'})
rcParams.update({'xtick.major.size': '7.5'})
rcParams.update({'xtick.major.width': '1.5'})
rcParams.update({'xtick.minor.pad': '7.0'})
rcParams.update({'xtick.minor.size': '3.5'})
rcParams.update({'xtick.minor.width': '1.0'})
rcParams.update({'ytick.major.pad': '7.0'})
rcParams.update({'ytick.major.size': '7.5'})
rcParams.update({'ytick.major.width': '1.5'})
rcParams.update({'ytick.minor.pad': '7.0'})
rcParams.update({'ytick.minor.size': '3.5'})
rcParams.update({'ytick.minor.width': '1.0'})
rcParams.update({'font.size': 30})
import dynesty
from scipy.stats import loggamma, norm
def lng(x):
lng1 = loggamma.logpdf(x[0], c=1., loc=1./3., scale=1./30.)
lng2 = loggamma.logpdf(x[0], c=1., loc=2./3., scale=1./30.)
return np.logaddexp(lng1, lng2) + np.log(0.5)
def lnn(x):
lnn1 = norm.logpdf(x[1], loc=1./3., scale=1./30.)
lnn2 = norm.logpdf(x[1], loc=2./3., scale=1./30.)
return np.logaddexp(lnn1, lnn2) + np.log(0.5)
def lnd_i(x_i, i):
if i >= 3:
if i <= (ndim + 2) / 2.:
return loggamma.logpdf(x_i, c=1., loc=2./3., scale=1./30.)
else:
return norm.logpdf(x_i, loc=2./3., scale=1./30.)
else:
return 0.
def lnd(x):
return sum([lnd_i(x_i, i) for i, x_i in enumerate(x)])
def loglike(x):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return lng(x) + lnn(x) + lnd(x)
# define the prior transform
def prior_transform(x):
return x
# plot the log-likelihood surface
plt.figure(figsize=(10., 10.))
axes = plt.axes(aspect=1)
xx, yy = np.meshgrid(np.linspace(0., 1., 200),
np.linspace(0., 1., 200))
logL = np.array([loglike(np.array([x, y]))
for x, y in zip(xx.flatten(), yy.flatten())])
L = np.exp(logL.reshape(xx.shape))
axes.contourf(xx, yy, L, 200, cmap=plt.cm.Purples)
plt.title('Likelihood Surface')
plt.xlabel(r'$x$')
plt.ylabel(r'$y$');
ndim = 2
nlive = 250
sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=ndim,
bound='multi', sample='rwalk',
walks=100, nlive=nlive, rstate=rstate)
sampler.run_nested(dlogz=0.01)
res = sampler.results
ndim = 10
nlive = 250
sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=ndim,
bound='multi', sample='rwalk',
walks=100, nlive=nlive, rstate=rstate)
sampler.run_nested(dlogz=0.01)
res2 = sampler.results
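# Optional numeric summary (a sketch, using `res` and `res2` from the runs above).
# For this normalized likelihood the analytic log-evidence is 0 (the runplots
# below use lnz_truth=0), so logz[-1] should land near 0 within logzerr.
for label, r in [('d=2', res), ('d=10', res2)]:
    print(label, 'logZ = {:.3f} +/- {:.3f}'.format(r.logz[-1], r.logzerr[-1]))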
from dynesty import plotting as dyplot
# plot 2-D
fig, axes = dyplot.runplot(res, color='blue',
lnz_truth=0., truth_color='black')
fig.tight_layout()
fig, axes = plt.subplots(2, 2, figsize=(14, 8))
fig, axes = dyplot.traceplot(res, truths=[[1./3., 2./3.], [1./3., 2./3.]],
quantiles=None, fig=(fig, axes))
fig.tight_layout()
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
fig, axes = dyplot.cornerplot(res, truths=[[1./3., 2./3.], [1./3., 2./3.]],
quantiles=None, fig=(fig, axes))
# plot 10-D
fig, axes = dyplot.runplot(res2, color='red',
lnz_truth=0., truth_color='black')
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some explanations. To start, let's look at the columns in this dataset
Step2: I simply used Axes3D to obtain a three-dimensional chart object. The "scatter" method takes on three dimensions (xs, ys and zs), each assigned to one of the columns of the new dataframe. The "c" parameter is the color and the "s" is the size of each point. I informed the labels of each axis and that's it! We have a 3D graph showing the spatial distribution of dropouts, with respect to the three variables.
Step3: First we'll see how this would look in 2D view
Step4: Ok ... Nothing much ... A positive non-linear correlation, right? But now, let's see this with the Z matrix included
Step5: And it gets more interesting when we overlay a prediction on the actual data. Let's use a Decision Tree Regressor to create a predictive model for this data
Step6: We plot the predictions using a star-type marker. It was very interesting, right?
Step7: We can create a color table, indexed by the percentage of scholarship
Step8: This "trick" deserves an explanation. I created a dictionary indexed by the value of the scholarship. So, we get the corresponding color code. I just need to include a column in the dataframe with that value, in order to use it in the chart. There is only one problem
Step9: Ok! There we have the color of the ball giving the fourth dimension
Step10: This shows us a curious fact. We have students with good performance (big balls) in all periods, without repeating any discipline, who left. What would have made them do this? Maybe it's financial conditions, or dissatisfaction with the course. A fact to be investigated, which was only revealed thanks to this visualization.
Step11: The columns are
Step12: We can make the point size proportional to the number of cases, adding another layer of information to the plot
Step13: We can manipulate color and intensity to create a "heat map" of Dengue
Step14: And we will sort the data so that the largest quantities are drawn last
Step15: Ok! A heat map of Dengue in 2018. But something is missing right? Where's the map of Rio de Janeiro?
Step16: Now for a slightly smarter part. I have the coordinates of the center of Rio de Janeiro (the geographical center, not downtown). I will build a request to the Static Maps API to download a map. Note that you must register an API key to use this API; I purposely omitted mine. Here are the instructions for that
Step17: Well, the map was saved, now I need to know the coordinates of the limits. The Google API only allows you to enter the center (latitude and longitude) and the dimensions of the image in pixels. But, to adjust the map to the coordinates in latitudes and longitudes, you need to know the coordinates of the image rectangle. There are several examples of how to calculate this and I use a Javascript example that I converted to Python some time ago. This calculation is based on the script from
Step18: The function "calculateBbox" returns a dictionary containing the Northeast and Southwest points, with the latitude and longitude of each one.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D # Objects for 3D charts
%matplotlib inline
df = pd.read_csv('../datasets/evasao.csv') # School dropout data I collected
df.head()
df2 = df[['periodo','repetiu','desempenho']][df.abandonou == 1]
df2.head()
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig)  # For Matplotlib 0.99
ax.scatter(xs=df2['periodo'],ys=df2['repetiu'],zs=df2['desempenho'], c='r',s=8)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
import numpy as np
np.random.seed(42)
X = np.linspace(1.5,3.0,num=100)
Y = np.array([x**4 + (np.random.rand()*6.5) for x in X])
Z = np.array([(X[i]*Y[i]) + (np.random.rand()*3.2) for i in range(0,100)])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(X, Y, c='b', s=20)
ax.set_xlabel('X')
ax.set_ylabel('Y')
plt.show()
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X, Y, Z, c='r',s=8)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
features = pd.DataFrame({'X':X, 'Z':Z})
labels = pd.DataFrame({'Y':Y})
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.33, random_state=42)
dtr3d = DecisionTreeRegressor(max_depth=4, random_state=42)
dtr3d.fit(X_train,y_train)
print('R2',dtr3d.score(X_train,y_train))
yhat3d = dtr3d.predict(X_test)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z, c='r',s=8)
ax.scatter(X_test['X'], yhat3d, X_test['Z'], c='k', marker='*',s=100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
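# Optional check (assumes dtr3d and the train/test split from above): also
# report R^2 on the held-out split, since only the training R^2 was printed.
print('R2 (test)', dtr3d.score(X_test, y_test))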
print(df.groupby("bolsa").count())
from decimal import Decimal
bolsas = {0.00: 'b',0.05: 'r', 0.10: 'g', 0.15: 'm', 0.20: 'y', 0.25: 'k'}
df['cor'] = [bolsas[float(round(Decimal(codigo),2))] for codigo in df['bolsa']]
df.head()
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig)  # For Matplotlib 0.99
ax.scatter(xs=df['periodo'],ys=df['repetiu'],zs=df['desempenho'], c=df['cor'],s=50)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
ax.set_zlabel('desempenho')
plt.show()
fig, ax = plt.subplots()
ax.scatter(df['periodo'],df['repetiu'], c='r',s=df['desempenho']*30)
ax.set_xlabel('periodo')
ax.set_ylabel('repetiu')
plt.show()
df_dengue = pd.read_csv('./dengue2018.csv',decimal=',', sep=';')
df_dengue.head()
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=15)
plt.show()
fig, ax = plt.subplots()
ax.scatter(df_dengue['longitude'],df_dengue['latitude'], c='r',s=5+df_dengue['quantidade'])
plt.show()
def calcular_cor(valor):
cor = 'r'
if valor <= 10:
cor = '#ffff00'
elif valor <= 30:
cor = '#ffbf00'
elif valor <= 50:
cor = '#ff8000'
return cor
df_dengue['cor'] = [calcular_cor(codigo) for codigo in df_dengue['quantidade']]
df_dengue.head()
dfs = df_dengue.sort_values(['quantidade'])
dfs.head()
fig, ax = plt.subplots()
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.show()
!pip install requests
import requests
latitude = -22.9137528
longitude = -43.526409
zoom = 10
size = 800
scale = 1
apikey = "**HERE TYPE YOUR API KEY**"
gmapas = "https://maps.googleapis.com/maps/api/staticmap?center=" + str(latitude) + "," + str(longitude) + \
"&zoom=" + str(zoom) + \
"&scale=" + str(scale) + \
"&size=" + str(size) + "x" + str(size) + "&key=" + apikey
with open('mapa.jpg', 'wb') as handle:
response = requests.get(gmapas, stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
import math
_C = { 'x': 128, 'y': 128 };
_J = 256 / 360;
_L = 256 / (2 * math.pi);
def tb(a):
return 180 * a / math.pi
def sb(a):
return a * math.pi / 180
def bounds(a, b, c):
if b != None:
a = max(a,b)
if c != None:
a = min(a,c)
return a
def latlonToPt(ll):
a = bounds(math.sin(sb(ll[0])), -(1 - 1E-15), 1 - 1E-15);
return {'x': _C['x'] + ll[1] * _J,'y': _C['y'] + 0.5 * math.log((1 + a) / (1 - a)) * - _L}
def ptToLatlon(pt):
return [tb(2 * math.atan(math.exp((pt['y'] - _C['y']) / -_L)) - math.pi / 2),(pt['x'] - _C['x']) / _J]
def calculateBbox(ll, zoom, sizeX, sizeY, scale):
cp = latlonToPt(ll)
pixelSize = math.pow(2, -(zoom + 1));
pwX = sizeX*pixelSize;
pwY = sizeY*pixelSize;
return {'ne': ptToLatlon({'x': cp['x'] + pwX, 'y': cp['y'] - pwY}),'sw': ptToLatlon({'x': cp['x'] - pwX, 'y': cp['y'] + pwY})}
limites = calculateBbox([latitude,longitude],zoom, size, size, scale)
print(limites)
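# A small sketch of how these bounds are consumed below: the extent list
# (west, east, south, north) that imshow expects, built from `limites`.
extent = [limites['sw'][1], limites['ne'][1], limites['sw'][0], limites['ne'][0]]
print(extent)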
import matplotlib.image as mpimg
fig, ax = plt.subplots(figsize=(10, 10))
rio_mapa=mpimg.imread('./mapa.jpg')
plt.imshow(rio_mapa, extent=[limites['sw'][1],limites['ne'][1],limites['sw'][0],limites['ne'][0]], alpha=1.0)
ax.scatter(dfs['longitude'],dfs['latitude'], c=dfs['cor'],s=10+dfs['quantidade'])
plt.ylabel("Latitude", fontsize=14)
plt.xlabel("Longitude", fontsize=14)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's consider random values for the time between traffic-light switches.
Step2: Let's look at the effect of phaseOffset on the average speed.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt

data = pd.read_table("./1.data", sep=" ")
plt.plot(data['multiplier'], data['avg_speed'], '-o')
data = pd.read_table("./2.data", sep=" ")
plt.plot(data['it'], data['avg_speed'], '-o')
data = pd.read_table("./3.data", sep=" ")
data = data.sort_values(by='it')
plt.plot(data['it'], data['avg_speed'], '-o')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Show event-related fields images
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
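# Optional comparison (a sketch, reusing epochs and the picks above): the same
# image plot without spectral reordering (order=None), to see what the
# reordering adds.
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
                          order=None, vmin=-250, vmax=250,
                          overlay_times=plt_times, show=True)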
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Show the head of the dataframe
Step2: What are the column names?
Step3: How many States (ST) are represented in this data set?
Step4: Get a list or array of all the states in the data set.
Step5: What are the top 5 states with the most failed banks?
Step6: What are the top 5 acquiring institutions?
Step7: How many banks has the State Bank of Texas acquired? How many of them were actually in Texas?
Step8: What is the most common city in California for a bank to fail in?
Step9: How many failed banks don't have the word "Bank" in their name?
Step10: How many bank names start with the letter 's' ?
Step11: How many CERT values are above 20000 ?
Step12: How many bank names consist of just two words? (e.g. "First Bank" , "Bank Georgia" )
Step13: Bonus
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('./banklist.csv')
# CODE HERE
df.head()
# CODE HERE
df.columns
# CODE HERE
df['ST'].unique().shape[0]
# or
# len(df['ST'].unique())
# CODE HERE
df['ST'].unique()
# CODE HERE
df['ST'].value_counts().head()
# CODE HERE
df['Acquiring Institution'].value_counts().head()
# CODE HERE
df[df['Acquiring Institution'] == 'State Bank of Texas']
len(df[df['Acquiring Institution'] == 'State Bank of Texas'])
len(df[(df['Acquiring Institution'] == 'State Bank of Texas') & (df['ST'] == 'TX')])
# CODE HERE
df[df['ST'] == 'CA']['City'].value_counts().head(1)
# CODE HERE
len(df[~df['Bank Name'].str.contains('Bank')]['Bank Name'].unique())
# CODE HERE
df['Bank Name'].apply(lambda name: name[0].lower() == 's').value_counts()
# CODE HERE
sum(df['CERT'] > 20000)
# CODE HERE
sum(df['Bank Name'].apply(lambda name: len(name.split()) == 2))
# CODE HERE
sum(df['Closing Date'].apply(lambda date: date[-2:] == '08'))
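# An alternative for the bonus (assumes the 2-digit years in banklist.csv are
# parsed as 20xx): parse the dates properly instead of slicing strings.
sum(pd.to_datetime(df['Closing Date']).dt.year == 2008)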
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part One
Step2: In its first test, the new Alien Blaster 9000 takes 10 shots and hits 2 targets. Taking into account this data, what is the posterior distribution of $x$ for this missile? What is the value in the posterior with the highest probability, also known as the MAP?
Step4: Now suppose the new ultra-secret Alien Blaster 10K is being tested. In a press conference, an EDF general reports that the new design has been tested twice, taking two shots during each test. The results of the test are confidential, so the general won't say how many targets were hit, but they report
Step5: If we start with a uniform prior, we can see what the likelihood function looks like
Step6: A tie is most likely if they are both terrible shots or both very good.
Step7: The posterior mean and MAP are lower than in the prior.
Step8: So if we learn that the new design is "consistent", it is more likely to be consistently bad (in this case).
Step9: The answer is a value drawn from the mixture of the two distributions.
Step10: Here's what the distribution looks like.
Step11: The mean should be near 3.7. We can run this simulation more efficiently using NumPy. First we generate a sample of xs
Step12: Then for each x we generate a k
Step13: And the results look similar.
Step14: One more way to do the same thing is to make a meta-Pmf, which contains the two binomial Pmf objects
Step15: Here's how we can draw samples from the meta-Pmf
Step16: And here are the results, one more time
Step17: This result, which we have estimated three ways, is a predictive distribution, based on our uncertainty about x.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
% matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from thinkbayes2 import Hist, Pmf, Cdf, Suite, Beta
import thinkplot
prior = Beta(2, 3)
thinkplot.Pdf(prior.MakePmf())
prior.Mean()
posterior = Beta(2, 3)
posterior.Update((2, 8))
posterior.MAP()
from scipy import stats
class AlienBlaster(Suite):
def Likelihood(self, data, hypo):
        """Computes the likelihood of data under hypo.
        data: number of shots they took
        hypo: probability of a hit, p
        """
n = data
x = hypo
# specific version for n=2 shots
likes = [x**4, (1-x)**4, (2*x*(1-x))**2]
# general version for any n shots
likes = [stats.binom.pmf(k, n, x)**2 for k in range(n+1)]
return np.sum(likes)
pmf = Beta(1, 1).MakePmf()
blaster = AlienBlaster(pmf)
blaster.Update(2)
thinkplot.Pdf(blaster)
pmf = Beta(2, 3).MakePmf()
blaster = AlienBlaster(pmf)
blaster.Update(2)
thinkplot.Pdf(blaster)
prior.Mean(), blaster.Mean()
prior.MAP(), blaster.MAP()
k = 3
n = 10
x1 = 0.3
x2 = 0.4
0.3 * stats.binom.pmf(k, n, x1) + 0.7 * stats.binom.pmf(k, n, x2)
def flip(p):
return np.random.random() < p
def simulate_shots(n, p):
return np.random.binomial(n, p)
ks = []
for i in range(1000):
if flip(0.3):
k = simulate_shots(n, x1)
else:
k = simulate_shots(n, x2)
ks.append(k)
pmf = Pmf(ks)
thinkplot.Hist(pmf)
len(ks), np.mean(ks)
xs = np.random.choice(a=[x1, x2], p=[0.3, 0.7], size=1000)
Hist(xs)
ks = np.random.binomial(n, xs)
pmf = Pmf(ks)
thinkplot.Hist(pmf)
np.mean(ks)
from thinkbayes2 import MakeBinomialPmf
pmf1 = MakeBinomialPmf(n, x1)
pmf2 = MakeBinomialPmf(n, x2)
metapmf = Pmf({pmf1:0.3, pmf2:0.7})
metapmf.Print()
ks = [metapmf.Random().Random() for _ in range(1000)]
pmf = Pmf(ks)
thinkplot.Hist(pmf)
np.mean(ks)
from thinkbayes2 import MakeMixture
mix = MakeMixture(metapmf)
thinkplot.Hist(mix)
mix.Mean()
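# Optional check (uses x1, x2 and n defined above): the predictive mean should
# be close to the analytic value 0.3*n*x1 + 0.7*n*x2 = 0.3*10*0.3 + 0.7*10*0.4 = 3.7.
print(0.3 * n * x1 + 0.7 * n * x2)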
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Easily switch between interactive and static matplotlib plots
Step2: Rectangle region of interests.
Step3: Draw annual (ring-shaped) regions of interest
Step4: Test when there is same spacing between rings
Step5: Test when there is different spacing between rings
Step6: Test when there is no spacing between rings
Step7: Generate a ROI of Segmented Ringsยถ
Step8: find the inner and outer radius of each ring
Step9: Generate a ROI of Pies
|
<ASSISTANT_TASK:>
Python Code:
import skxray.core.roi as roi
import skxray.core.correlation as corr
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LogNorm
import xray_vision.mpl_plotting as mpl_plot
interactive_mode = False
import matplotlib as mpl
if interactive_mode:
%matplotlib notebook
else:
%matplotlib inline
backend = mpl.get_backend()
cmap='Dark2'
# Image shape which is used to determine the maximum extent of output pixel coordinates
shape = (15, 26)
# coordinates of the upper-left corner and width and height of each rectangle
roi_data = np.array(([2, 2, 6, 3], [6, 7, 8, 5], [8, 18, 5, 10]),
dtype=np.int64)
#Elements not inside any ROI are zero; elements inside each ROI are 1, 2, 3, corresponding
# to the order they are specified in coords.
label_array = roi.rectangles(roi_data, shape)
roi_inds, pixel_list = roi.extract_label_indices(label_array)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
center = (100., 100.) # center of the rings
# Image shape which is used to determine the maximum extent of output pixel coordinates
img_shape = (200, 205)
first_q = 10.0 # inner radius of the inner-most ring
delta_q = 5.0 #ring thickness
num_rings = 7 # number of Q rings
# step or spacing, spacing between rings
one_step_q = 5.0 # one spacing between rings
step_q = [2.5, 3.0, 5.8] # differnt spacing between rings
# inner and outer radius for each ring
edges = roi.ring_edges(first_q, width=delta_q, spacing=one_step_q,
num_rings=num_rings)
edges
#Elements not inside any ROI are zero; elements inside each
#ROI are 1, 2, 3, corresponding to the order they are specified in edges.
label_array = roi.rings(edges, center, img_shape)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
axes.set_title("Same spacing between rings")
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
# inner and outer radius for each ring
edges = roi.ring_edges(first_q, width=delta_q, spacing=step_q,
num_rings=4)
print("edges when there is different spacing between rings", edges)
#Elements not inside any ROI are zero; elements inside each
#ROI are 1, 2, 3, corresponding to the order they are specified in edges.
label_array = roi.rings(edges, center, img_shape)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
axes.set_title("Different spacing between rings")
axes.set_xlim(50, 150)
axes.set_ylim(50, 150)
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
# inner and outer radius for each ring
edges = roi.ring_edges(first_q, width=delta_q, num_rings=num_rings)
edges
#Elements not inside any ROI are zero; elements inside each
#ROI are 1, 2, 3, corresponding to the order they are specified in edges.
label_array = roi.rings(edges, center, img_shape)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
axes.set_title("There is no spacing between rings")
axes.set_xlim(50, 150)
axes.set_ylim(50, 150)
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
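# Optional sketch: pixel count per ring in the last label array above
# (label 0 is the background outside all rings), using plain numpy.
counts = np.bincount(label_array.ravel().astype(int))
for ring, n_pix in enumerate(counts[1:], start=1):
    print('ring', ring, 'pixels', n_pix)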
center = (75, 75) # center of the rings
#Image shape which is used to determine the maximum extent of output pixel coordinates
img_shape = (150, 140)
first_q = 5.0 # inner radius of the inner-most ring
delta_q = 5.0 #ring thickness
num_rings = 4 # number of rings
slicing = 4 # number of pie slices or list of angles in radians
spacing = 4 # margin between rings, 0 by default
# inner and outer radius for each ring
edges = roi.ring_edges(first_q, width=delta_q, spacing=spacing,
num_rings=num_rings)
edges
#Elements not inside any ROI are zero; elements inside each
#ROI are 1, 2, 3, corresponding to the order they are specified in edges.
label_array = roi.segmented_rings(edges, slicing, center,
img_shape, offset_angle=0)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
axes.set_title("Segmented Rings")
axes.set_xlim(38, 120)
axes.set_ylim(38, 120)
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
first_q = 0
# inner and outer radius for each ring
edges = roi.ring_edges(first_q, width=50, num_rings=1)
edges
slicing = 10 # number of pie slices or list of angles in radians
#Elements not inside any ROI are zero; elements inside each
#ROI are 1, 2, 3, corresponding to the order they are specified in edges.
label_array = roi.segmented_rings(edges, slicing, center,
img_shape, offset_angle=0)
# plot the figure
fig, axes = plt.subplots(figsize=(6, 5))
axes.set_title("Pies")
axes.set_xlim(20, 140)
axes.set_ylim(20, 140)
im = mpl_plot.show_label_array(axes, label_array, cmap)
plt.show()
plt.figure()
import skxray
print(skxray.__version__)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Data Pre-Processing
Step3: Task II
Step5: Target Pre-Processing
Step6: Now that we have both added an extra dimension to the input data as well as one-hot encoded the target values, let's take a look at the shapes of the data matrices.
Step7: Build an Artificial Neural Network with Convolutions and Max-Pooling
Step8:
Step9: Task V
Step10: Should We Use Max-Pooling?
Step11:
Step12: Task VII
Step13: CIFAR
Step14: Task VIII
Step15: Task IX
Step16: Task X
Step17: Task XI
|
<ASSISTANT_TASK:>
Python Code:
# Plots will be displayed within the notebook
%matplotlib notebook
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
# NumPy is a package for manipulating N-dimensional array objects
import numpy as np
# Pandas is a data analysis package
import pandas as pd
# Library to test/verify some tasks
import problem_unittests as tests # Used to test our answers
# Used for the short pause after drawing figures below
import time
# Mnist wrapper
from keras.datasets import mnist
# Code to load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Print data shape
print('Shape of x_train {}'.format(x_train.shape))
print('Shape of y_train {}'.format(y_train.shape))
print('Shape of x_test {}'.format(x_test.shape))
print('Shape of y_test {}'.format(y_test.shape))
# Code to plot the 5th training sample.
fig,ax1 = plt.subplots(1,1, figsize=(7, 7))
ax1.imshow(x_train[5], cmap='gray')
title = 'Target = {}'.format(y_train[5])
ax1.set_title(title)
ax1.grid(which='Major')
ax1.xaxis.set_major_locator(MaxNLocator(28))
ax1.yaxis.set_major_locator(MaxNLocator(28))
fig.canvas.draw()
time.sleep(0.1)
def normalise_images(images):
    """Normalise input images."""
# Normalise image here
return images
### Do *not* modify the following lines ###
tests.test_normalize_images(normalise_images)
# Normalize the data for future use
x_train = normalise_images(x_train)
x_test = normalise_images(x_test)
# Write your code here
x_train = None
x_test = None
### Do *not* modify the following lines ###
print('Shape of x_train {}'.format(x_train.shape))
print('Shape of y_train {}'.format(y_train.shape))
print('Shape of x_test {}'.format(x_test.shape))
print('Shape of y_test {}'.format(y_test.shape))
def one_hot(vector, number_classes):
    """Return a one-hot encoded matrix given the argument vector."""
# Where we will store our one-hots
one_hot = []
# One-hot encode `vector` here
# Transform list to numpy array and return it
return np.array(one_hot)
### Do *not* modify the following line ###
tests.test_one_hot(one_hot)
# One-hot encode the MNIST target values
y_train = one_hot (y_train, 10)
y_test = one_hot(y_test, 10)
print('Shape of x_train {}'.format(x_train.shape))
print('Shape of y_train {}'.format(y_train.shape))
print('Shape of x_test {}'.format(x_test.shape))
print('Shape of y_test {}'.format(y_test.shape))
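# Hedged aside (not required for the task): Keras ships a helper that produces
# the same one-hot layout, handy for sanity-checking a hand-rolled one_hot().
from keras.utils import to_categorical
print(to_categorical([0, 2, 1], num_classes=3))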
# Import Keras library
import keras
from keras.models import Model
from keras.layers import *
def net_1(sample_shape, nb_classes):
# Define the network input to have `sample_shapeยด shape
input_x = None
# Create network internals here
x = None
# Dense `nb_classes`
probabilities = Dense(nb_classes, activation='softmax')(x)
# Define the output
model = Model(inputs=input_x, outputs=probabilities)
return model
# Shape of sample
sample_shape = x_train[0].shape
# Construct net
model = net_1(sample_shape, 10)
model.summary()
# Define hyperparameters
batch_size = None
epochs = None
### Do *not* modify the following lines ###
# There is no learning rate because we are using the recommended
# values for the Adadelta optimiser more information here:
# https://keras.io/optimizers/
# We need to compile our model
model.compile(loss='categorical_crossentropy',
optimizer='Adadelta',
metrics=['accuracy'])
# Train
logs = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_split=0.1)
# Plot our losses and accuracy
fig, ax = plt.subplots(1,1)
pd.DataFrame(logs.history).plot(ax=ax)
ax.grid(linestyle='dotted')
ax.legend()
plt.show()
# Assess performance
print('='*80)
print('Assesing Test dataset...')
print('='*80)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
def net_2(sample_shape, nb_classes):
# Define the network input to have `sample_shape` shape
input_x = None
# Create network internals here
x = None
# Dense number_classes
probabilities = Dense(nb_classes, activation='softmax')(x)
# Define the output
model = Model(inputs=input_x, outputs=probabilities)
return model
# Shape of sample
sample_shape = x_train[0].shape
# Construct net
model = net_2(sample_shape, 10)
model.summary()
# Define hyperparameters
batch_size = None
epochs = None
### Do *not* modify the following lines ###
# As always we need to compile our model
model.compile(loss='categorical_crossentropy',
optimizer='Adadelta',
metrics=['accuracy'])
# Train
logs = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_split = 0.1,)
# Plot our losses and accuracy
fig, ax = plt.subplots(1,1)
pd.DataFrame(logs.history).plot(ax=ax)
ax.grid(linestyle='dotted')
ax.legend()
fig.canvas.draw()
# Assess performance
print('='*80)
print('Assesing Test dataset...')
print('='*80)
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
from keras.datasets import cifar10
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
target_2_class = {0:'airplane',
1:'automobile',
2:'bird',
3:'cat',
4:'deer',
5:'dog',
6:'frog',
7:'horse',
8:'ship',
9:'truck'}
# Code to plot the 5th training sample.
fig,ax1 = plt.subplots(1,1, figsize=(7,7))
ax1.imshow(x_train[5])
target = y_train[5][0]
title = 'Target is {} - Class {}'.format(target_2_class[target],target )
ax1.set_title(title)
ax1.grid(which='Major')
ax1.xaxis.set_major_locator(MaxNLocator(32))
ax1.yaxis.set_major_locator(MaxNLocator(32))
fig.canvas.draw()
time.sleep(0.1)
print('Shape of x_train {}'.format(x_train.shape))
print('Shape of y_train {}'.format(y_train.shape))
print('Shape of x_test {}'.format(x_test.shape))
print('Shape of y_test {}'.format(y_test.shape))
y_train = None
y_test = None
### Do *not* modify the following line ###
# Print data sizes
print('Shape of x_train {}'.format(x_train.shape))
print('Shape of y_train {}'.format(y_train.shape))
print('Shape of x_test {}'.format(x_test.shape))
print('Shape of y_test {}'.format(y_test.shape))
x_train = None
x_test = None
# Shape of samples
sample_shape = x_train[0].shape
# Construct net
model = None
model.summary()
# We need to compile our model network:
model.compile(loss='categorical_crossentropy',
optimizer='Adam',
metrics=['accuracy'])
# Build the code within this cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Why Python for Scientific Computing?
Step2: SciPy Stack<br>
Step3: Statistical Analysis
Step4: Data Visualization
Step5: Integrated Development Environment
|
<ASSISTANT_TASK:>
Python Code:
import this
print("Hello, World!")
# Import relevant libraries from the SciPy Stack
import numpy as np
# Specify parametrization
num_agents = 1000
num_covars = 3
betas_true = np.array([0.22, 0.30, -0.1]).T
# Set a seed to ensure recomputability in light of randomness
np.random.seed(4292367295)
# Sample exogenous agent characteristics from a uniform distribution in
# a given shape
X = np.random.rand(num_agents, num_covars)
# Sample random disturbances from a standard normal distribution and rescale
eps = np.random.normal(scale=0.1, size=num_agents)
# Construct endogenous agent characteristic
Y = np.dot(X, betas_true) + eps
# Import relevant libraries from the SciPy Stack
import statsmodels.api as sm
# Specify and fit the model
rslt = sm.OLS(Y, X).fit()
# Provide some summary information
print(rslt.summary())
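# Optional check (uses betas_true and rslt from above): compare the OLS
# estimates with the true coefficients used to simulate the data; with 1,000
# observations they should be close.
for true, est in zip(betas_true, rslt.params):
    print('true {:+.3f}   estimated {:+.3f}'.format(true, est))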
# Import relevant libraries from the SciPy Stack
import matplotlib.pyplot as plt
# Initialize canvas
ax = plt.figure(figsize=(12, 8)).add_subplot(111, axisbg='white')
# Plot actual and fitted values
ax.plot(np.dot(X, rslt.params), Y, 'o', label='True')
ax.plot(np.dot(X, rslt.params), rslt.fittedvalues, 'r--.', label="Predicted")
# Set axis labels and ranges
ax.set_xlabel(r'$X\hat{\beta}$', fontsize=20)
ax.set_ylabel(r'$Y$', fontsize=20)
# Remove first element on y-axis
ax.yaxis.get_major_ticks()[0].set_visible(False)
# Add legend
plt.legend(loc='upper center', bbox_to_anchor=(0.50, -0.10),
fancybox=False, frameon=False, shadow=False, ncol=2, fontsize=20)
# Add title
plt.suptitle('Synthetic Sample', fontsize=20)
# Save figure
plt.savefig('images/scatterplot.png', bbox_inches='tight', format='png')
from IPython.display import Image
Image(filename='images/scatterplot.png', width=700, height=700)
import urllib; from IPython.core.display import HTML
HTML(urllib.urlopen('http://bit.ly/1K5apRH').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Also, we can find the main parameters of the chicane with chicane_RTU(yoke_len, dip_dist, r, type)
Step2: <a id='compression'></a>
Step3: Now we update the cavity parameters in the lattice
Step4: Generate electron beam
Step5: Tracking up to undulator
Step6: <a id='coherent'></a>
Step7: Beam after undulator.
Step8: Electron trajectories
|
<ASSISTANT_TASK:>
Python Code:
# To activate interactive matplotlib in notebook
# %matplotlib notebook
from ocelot import *
from ocelot.gui import *
import time
#Initial Twiss parameters
tws0 = Twiss()
tws0.beta_x = 29.171
tws0.beta_y = 29.171
tws0.alpha_x = 10.955
tws0.alpha_y = 10.955
tws0.gamma_x = 4.148367385417024
tws0.gamma_y = 4.148367385417024
tws0.E = 0.005
# Drifts
D0 = Drift(l=3.52)
D1 = Drift(l=0.3459)
D2 = Drift(l=0.2043)
D3 = Drift(l=0.85)
D4 = Drift(l=0.202)
D5 = Drift(l=0.262)
D6 = Drift(l=2.9)
D8 = Drift(l=1.8)
D9 = Drift(l=0.9)
D11 = Drift(l=1.31)
D12 = Drift(l=0.81)
D13 = Drift(l=0.50)
D14 = Drift(l=1.0)
D15 = Drift(l=1.5)
D18 = Drift(l=0.97)
D19 = Drift(l=2.3)
D20 = Drift(l=2.45)
# Quadrupoles
q1 = Quadrupole(l=0.3, k1=-1.537886, eid='Q1')
q2 = Quadrupole(l=0.3, k1=1.435078, eid='Q2')
q3 = Quadrupole(l=0.2, k1=1.637, eid='Q3')
q4 = Quadrupole(l=0.2, k1=-2.60970, eid='Q4')
q5 = Quadrupole(l=0.2, k1=3.4320, eid='Q5')
q6 = Quadrupole(l=0.2, k1=-1.9635, eid='Q6')
q7 = Quadrupole(l=0.2, k1=-0.7968, eid='Q7')
q8 = Quadrupole(l=0.2, k1=2.7285, eid='Q8')
q9 = Quadrupole(l=0.2, k1=-3.4773, eid='Q9')
q10 = Quadrupole(l=0.2, k1=0.780, eid='Q10')
q11 = Quadrupole(l=0.2, k1=-1.631, eid='Q11')
q12 = Quadrupole(l=0.2, k1=1.762, eid='Q12')
q13 = Quadrupole(l=0.2, k1=-1.8, eid='Q13')
q14 = Quadrupole(l=0.2, k1=1.8, eid='Q14')
q15 = Quadrupole(l=0.2, k1=-1.8, eid='Q15')
# SBends
b1 = SBend(l=0.501471120927, angle=0.1327297047, e2=0.132729705, tilt=1.570796327, eid='B1')
b2 = SBend(l=0.501471120927, angle=-0.1327297047, e1=-0.132729705, tilt=1.570796327, eid='B2')
b3 = SBend(l=0.501471120927, angle=-0.1327297047, e2=-0.132729705, tilt=1.570796327, eid='B3')
b4 = SBend(l=0.501471120927, angle=0.1327297047, e1=0.132729705, tilt=1.570796327, eid='B4')
# Cavitys
c1 = Cavity(l=1.0377, v=0.01815975, freq=1300000000.0, eid='C1')
c3 = Cavity(l=0.346, v=0.0024999884, phi=180.0, freq=3900000000.0, eid='C3')
und = Undulator(lperiod=0.2, nperiods=20, Kx=30)
start_und = Marker()
end = Marker()
# Lattice
cell = (D0, c1, D1, c1, D1, c1, D1, c1, D1, c1, D1, c1, D1, c1, D1, c1, D2, q1, D3,
q2, D4, c3, D5, c3, D5, c3, D5, c3, D5, c3, D5, c3, D5, c3, D5, c3, D6, q3, D6,
q4, D8, q5, D9, q6, D9, q7, D11, q8, D12, q9, D13, b1, D14, b2, D15, b3, D14, b4, D13,
q10, D9, q11, D18, q12, D19, q13, D19, q14, D19, q15, D20, start_und, und, D14, end)
lat = MagneticLattice(cell, stop=start_und)
tws = twiss(lat, tws0)
plot_opt_func(lat, tws, legend=False, fig_name=100)
plt.show()
from ocelot.utils import *
R56, T566, U5666, Sref = chicane_RTU(yoke_len=0.5, dip_dist=D14.l * np.cos(b1.angle), r=b1.l/b1.angle, type="c")
print("bunch compressor R56 = ", R56, " m")
import scipy.optimize
# M*a = b
k = 2*np.pi/3e8*1.3e9
n = 3
M = np.array([[1, 0, 1, 0],
[0, -k, 0, -(n*k)],
[-k**2, 0, -(n*k)**2, 0],
[0, k**3, 0, (n*k)**3]])
b = np.array([125, -1300, 0, 0])
def F(x):
V1 = x[0]
phi1 = x[1]
V13 = x[2]
phi3 = x[3]
V = np.array([V1*np.cos(phi1*np.pi/180),
V1*np.sin(phi1*np.pi/180),
V13*np.cos(phi3*np.pi/180),
V13*np.sin(phi3*np.pi/180)]).T
return np.dot(M, V) - b
x = scipy.optimize.broyden1(F, [150, 10, 20, 190])
V1, phi1, V13, phi13 = x
print("V1 = ", V1, " MeV")
print("phi1 = ", phi1)
print("V13 = ", V13, " MeV")
print("phi13 = ", phi13)
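# Optional check (uses F and x from above): the residual of the nonlinear
# system at the broyden1 solution should be close to zero.
print("residual:", F(x))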
# set the new parameters;
# NOTE: in OCELOT the cavity voltage is in [GeV], so the calculated voltage needs a factor of 1/1000,
# and the main RF module and the linearizer each consist of 8 cavities
c1.v = V1/8/1000
c1.phi = phi1
c3.v = V13/8/1000
c3.phi = phi13
# and update lattice
lat.update_transfer_maps()
np.random.seed(10)
parray = generate_parray(sigma_x=0.0001, sigma_px=2e-05, sigma_y=None, sigma_py=None,
sigma_tau=0.001, sigma_p=0.0001, chirp=0.0, charge=0.5e-09,
nparticles=300, energy=0.005, tau_trunc=None)
show_e_beam(parray,nparts_in_slice=50,smooth_param=0.1, nbins_x=50, nbins_y=50, nfig=10)
plt.show()
navi = Navigator(lat)
tws_track, parray = track(lat, parray, navi)
show_e_beam(parray, nfig=201)
plt.show()
parray.E
from ocelot.rad import *
lat = MagneticLattice(cell, start=start_und, stop=end)
screen = Screen()
screen.z = 1000.0
screen.size_x = 15
screen.size_y = 15
screen.nx = 1
screen.ny = 1
screen.start_energy = 0.001 # eV
screen.end_energy = 3e-3 # eV
screen.num_energy = 1001
# to estimate radiation properties we need to create beam class
beam = Beam()
beam.E = 0.13
# NOTE: this function only estimates the spontaneous emission
print_rad_props(beam, K=und.Kx, lu=und.lperiod, L=und.l, distance=screen.z)
start = time.time()
screen_i = coherent_radiation(lat, screen, parray, accuracy=1)
print()
print("time exec: ", time.time() - start, " s")
show_flux(screen_i, unit="mm", title="")
show_e_beam(parray, nfig=203)
plt.show()
n = 100
x = screen.beam_traj.x(n)
y = screen.beam_traj.y(n)
z = screen.beam_traj.z(n)
plt.title("trajectory of " + str(n)+"th particle")
plt.plot(z, x, label="X")
plt.plot(z, y, label="Y")
plt.xlabel("Z [m]")
plt.ylabel("X/Y [m]")
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Amazon S3 bucket info
Step2: We are going to check if we have the right bucket and if we have the right permissions.
Step3: Prepare data
Step4: Ensure that the newly created directories containing the downloaded data are structured as shown at the beginning of this tutorial.
Step5: Prepare "list" files with train-val split
Step6: Save lst files to S3
Step7: Retrieve dataset size
Step8: This marks the end of the data preparation phase.
Step9: Apart from the above set of parameters, there are hyperparameters that are specific to the algorithm. These are
Step10: Input data specification
Step11: Start the training
Step12: The output from the above command will have the model accuracy and the time it took to run the training.
Step13: Deploy to a Sagemaker endpoint
Step14: Test the images against the endpoint
Step15: Display confusion matrix showing 'true' and 'predicted' labels
Step16: Approximate costs
Step17: Rename model to deploy to AWS DeepLens
|
<ASSISTANT_TASK:>
Python Code:
import os
import urllib.request
import boto3, botocore
import sagemaker
from sagemaker import get_execution_role
import mxnet as mx
mxnet_path = mx.__file__[ : mx.__file__.rfind('/')]
print(mxnet_path)
role = get_execution_role()
print(role)
sess = sagemaker.Session()
BUCKET = 'deeplens-<Your-Test-Bucket>'
PREFIX = 'deeplens-trash-test'
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sess.boto_region_name, 'image-classification', repo_version="latest")
print (training_image)
test_data = 'TestData'
s3 = boto3.resource('s3')
object = s3.Object(BUCKET, PREFIX+"/test.txt")
try:
object.put(Body=test_data)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "AccessDenied":
#cannot write on the bucket
print("Bucket "+BUCKET+"is not writeable, make sure you have the right permissions")
else:
if e.response['Error']['Code'] == "NoSuchBucket":
#Bucket does not exist
print("Bucket"+BUCKET+" does not exist")
else:
raise
else:
print("Bucket access is Ok")
object.delete()
!wget https://deeplens-public.s3.amazonaws.com/samples/deeplens-trash/trash-images.zip
!rm -rf data/ && mkdir -p data
!mkdir -p data/images
!unzip -qq trash-images.zip -d data/images
!rm trash-images.zip
import matplotlib.pyplot as plt
%matplotlib inline
def show_images(item_name, images_to_show=-1):
_im_list = !ls $IMAGES_DIR/$item_name
NUM_COLS = 3
if images_to_show == -1:
IM_COUNT = len(_im_list)
else:
IM_COUNT = images_to_show
print('Displaying images category ' + item_name + ' count: ' + str(IM_COUNT) + ' images.')
NUM_ROWS = int(IM_COUNT / NUM_COLS)
if ((IM_COUNT % NUM_COLS) > 0):
NUM_ROWS += 1
fig, axarr = plt.subplots(NUM_ROWS, NUM_COLS)
fig.set_size_inches(10.0, 10.0, forward=True)
curr_row = 0
for curr_img in range(IM_COUNT):
# fetch the url as a file type object, then read the image
f = IMAGES_DIR + item_name + '/' + _im_list[curr_img]
a = plt.imread(f)
        # find the grid position by taking the current index modulo the number of rows
col = curr_img % NUM_ROWS
# plot on relevant subplot
if NUM_ROWS == 1:
axarr[curr_row].imshow(a)
else:
axarr[col, curr_row].imshow(a)
if col == (NUM_ROWS - 1):
# we have finished the current row, so increment row counter
curr_row += 1
fig.tight_layout()
plt.show()
# Clean up
plt.clf()
plt.cla()
plt.close()
IMAGES_DIR = 'data/images/'
show_images("Compost", images_to_show=3)
show_images("Landfill", images_to_show=3)
show_images("Recycling", images_to_show=3)
DEST_BUCKET = 's3://'+BUCKET+'/'+PREFIX+'/images/'
!aws s3 cp --recursive data/images $DEST_BUCKET --quiet
!aws s3 ls $DEST_BUCKET
!python $mxnet_path/tools/im2rec.py --list --recursive --test-ratio=0.02 --train-ratio 0.7 trash data/images
s3train_lst = 's3://{}/{}/train_lst/'.format(BUCKET, PREFIX)
s3validation_lst = 's3://{}/{}/validation_lst/'.format(BUCKET, PREFIX)
# upload the lst files to train_lst and validation_lst channels
!aws s3 cp trash_train.lst $s3train_lst --quiet
!aws s3 cp trash_val.lst $s3validation_lst --quiet
f = open('trash_train.lst', 'r')
train_samples = sum(1 for line in f)
f.close()
f = open('trash_val.lst', 'r')
val_samples = sum(1 for line in f)
f.close()
f = open('trash_test.lst', 'r')
test_samples = sum(1 for line in f)
f.close()
print('train_samples:', train_samples)
print('val_samples:', val_samples)
print('test_samples:', test_samples)
s3_output_location = 's3://{}/{}/output'.format(BUCKET, PREFIX)
ic = sagemaker.estimator.Estimator(training_image,
role,
train_instance_count=1,
train_instance_type='ml.p2.xlarge',
train_volume_size = 50,
train_max_run = 360000,
input_mode= 'File',
output_path=s3_output_location,
sagemaker_session=sess,
base_job_name='ic-trash')
ic.set_hyperparameters(num_layers=18,
use_pretrained_model=1,
image_shape = "3,224,224",
num_classes=3,
mini_batch_size=128,
epochs=10,
learning_rate=0.01,
top_k=2,
num_training_samples=train_samples,
resize = 224,
precision_dtype='float32')
s3images = 's3://{}/{}/images/'.format(BUCKET, PREFIX)
train_data = sagemaker.session.s3_input(s3images, distribution='FullyReplicated',
content_type='application/x-image', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3images, distribution='FullyReplicated',
content_type='application/x-image', s3_data_type='S3Prefix')
train_data_lst = sagemaker.session.s3_input(s3train_lst, distribution='FullyReplicated',
content_type='application/x-image', s3_data_type='S3Prefix')
validation_data_lst = sagemaker.session.s3_input(s3validation_lst, distribution='FullyReplicated',
content_type='application/x-image', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data,
'train_lst': train_data_lst, 'validation_lst': validation_data_lst}
ic.fit(inputs=data_channels, logs=True)
MODEL_PATH = ic.model_data
print(MODEL_PATH)
ic_infer = ic.deploy(initial_instance_count=1, instance_type='local')
object_categories = ['Compost', 'Landfill', 'Recycling']
from IPython.display import Image, display
import json
import numpy as np
def test_model():
preds = []
acts = []
num_errors = 0
with open('trash_test.lst', 'r') as f:
for line in f:
stripped_line = str(line.strip()).split("\t")
file_path = stripped_line[2]
category = int(float(stripped_line[1]))
with open(IMAGES_DIR + stripped_line[2], 'rb') as f:
payload = f.read()
payload = bytearray(payload)
ic_infer.content_type = 'application/x-image'
result = json.loads(ic_infer.predict(payload))
# the result will output the probabilities for all classes
# find the class with maximum probability and print the class index
index = np.argmax(result)
act = object_categories[category]
pred = object_categories[index]
conf = result[index]
print("Result: Predicted: {}, Confidence: {:.2f}, Actual: {} ".format(pred, conf, act))
acts.append(category)
preds.append(index)
if (pred != act):
num_errors += 1
print('ERROR on image -- Predicted: {}, Confidence: {:.2f}, Actual: {}'.format(pred, conf, act))
display(Image(filename=IMAGES_DIR + stripped_line[2], width=100, height=100))
return num_errors, preds, acts
num_errors, preds, acts = test_model()
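# Quick summary of the test run (an added check, not part of the original notebook):
# overall accuracy on the held-out test list.
total_tested = len(preds)
print("Test accuracy: {:.2%} ({} errors out of {})".format(
    (total_tested - num_errors) / total_tested, num_errors, total_tested))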
from sklearn.metrics import confusion_matrix
import numpy as np
import itertools
COLOR = 'green'
plt.rcParams['text.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
plt.rcParams['xtick.color'] = COLOR
plt.rcParams['ytick.color'] = COLOR
def plot_confusion_matrix(cm, classes,
class_name_list,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.GnBu):
plt.figure(figsize=(7,7))
plt.grid(False)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]),
range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.gca().set_xticklabels(class_name_list)
plt.gca().set_yticklabels(class_name_list)
plt.ylabel('True label')
plt.xlabel('Predicted label')
def create_and_plot_confusion_matrix(actual, predicted):
cnf_matrix = confusion_matrix(actual, np.asarray(predicted),labels=range(len(object_categories)))
plot_confusion_matrix(cnf_matrix, classes=range(len(object_categories)), class_name_list=object_categories)
create_and_plot_confusion_matrix(acts, preds)
sess.delete_endpoint(ic_infer.endpoint)
print("Completed")
import glob
!rm -rf data/$PREFIX/tmp && mkdir -p data/$PREFIX/tmp
!aws s3 cp $MODEL_PATH data/$PREFIX/tmp
!tar -xzvf data/$PREFIX/tmp/model.tar.gz -C data/$PREFIX/tmp
params_file_name = glob.glob('./data/' + PREFIX + '/tmp/*.params')[0]
!mv $params_file_name data/$PREFIX/tmp/image-classification-0000.params
!tar -cvzf ./model.tar.gz -C data/$PREFIX/tmp ./image-classification-0000.params ./image-classification-symbol.json
!aws s3 cp model.tar.gz $MODEL_PATH
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can also express this query in multiple ways using aggregations
Step2: Annotate
Step3: Compare this to what we had before
Step4: Note
Step5: Select and Transmute
Step6: We can also create a new field that stores the age relative to the average. Note that new fields must be assigned a name (in this case mean_shifted_age)
Step7: transmute replaces any fields mentioned on the right-hand side with the new fields, but leaves unmentioned fields unchanged. transmute is useful for transforming data into a new form. Compare the following two snippets of code. The second is identical to the first, with transmute replacing select.
Step8: Global Fields
|
<ASSISTANT_TASK:>
Python Code:
import hail as hl
hl.utils.get_movie_lens('data/')
users = hl.read_table('data/users.ht')
users.filter(users.occupation == 'programmer').count()
users.aggregate(hl.agg.filter(users.occupation == 'programmer', hl.agg.count()))
users.aggregate(hl.agg.counter(users.occupation == 'programmer'))[True]
missing_occupations = hl.set(['other', 'none'])
t = users.annotate(
cleaned_occupation = hl.if_else(missing_occupations.contains(users.occupation),
hl.missing('str'),
users.occupation))
t.show()
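# Optional sanity check (added sketch): count how many users fall into each
# cleaned occupation; the 'other'/'none' rows should now show up as missing.
t.aggregate(hl.agg.counter(t.cleaned_occupation))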
users.show()
users.describe()
users.select(users.sex, users.occupation).show()
mean_age = round(users.aggregate(hl.agg.stats(users.age)).mean)
users.select(users.sex, users.occupation, mean_shifted_age = users.age - mean_age).show()
missing_occupations = hl.set(['other', 'none'])
t = users.select(
cleaned_occupation = hl.if_else(missing_occupations.contains(users.occupation),
hl.missing('str'),
users.occupation))
t.show()
missing_occupations = hl.set(['other', 'none'])
t = users.transmute(
cleaned_occupation = hl.if_else(missing_occupations.contains(users.occupation),
hl.missing('str'),
users.occupation))
t.show()
t = users.annotate_globals(cohort = 5, cloudable = hl.set(['sample1', 'sample10', 'sample15']))
t.describe()
t.cloudable
hl.eval(t.cloudable)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the PDF
Step2: Get the first page
Step3: What data would we get if we used the default settings?
Step4: The default settings correctly identify the table's vertical demarcations, but don't capture the horizontal demarcations between each group of five states/territories. So
Step5: Cleaning up the data
Step6: The first row
Step7: The last
Step8: Now, let's turn those rows into dictionaries, and also convert strings-representing-numbers to the numbers themselves, e.g., "18,870" -> 18870
Step9: Now here's the first row, parsed
Step10: Sort the data
Step11: Use extract_text to extract the report month
|
<ASSISTANT_TASK:>
Python Code:
import pdfplumber
print(pdfplumber.__version__)
pdf = pdfplumber.open("../pdfs/background-checks.pdf")
p0 = pdf.pages[0]
im = p0.to_image()
im
im.reset().debug_tablefinder()
table_settings = {
"vertical_strategy": "lines",
"horizontal_strategy": "text",
"snap_y_tolerance": 5,
"intersection_x_tolerance": 15,
}
im.reset().debug_tablefinder(table_settings)
table = p0.extract_table(table_settings)
for row in table[:5]:
print(row)
core_table = table[4:4+56]
" โข ".join(core_table[0])
" โข ".join(core_table[-1])
COLUMNS = [
"state",
"permit",
"handgun",
"long_gun",
"other",
"multiple",
"admin",
"prepawn_handgun",
"prepawn_long_gun",
"prepawn_other",
"redemption_handgun",
"redemption_long_gun",
"redemption_other",
"returned_handgun",
"returned_long_gun",
"returned_other",
"rentals_handgun",
"rentals_long_gun",
"private_sale_handgun",
"private_sale_long_gun",
"private_sale_other",
"return_to_seller_handgun",
"return_to_seller_long_gun",
"return_to_seller_other",
"totals"
]
def parse_value(i, x):
if i == 0: return x
if x == "": return None
return int(x.replace(",", ""))
from collections import OrderedDict
def parse_row(row):
return {COLUMNS[i]:parse_value(i, cell)
for i, cell in enumerate(row)}
data = [ parse_row(row) for row in core_table ]
data[0]
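# Added sanity check (sketch): the reported "totals" column should equal the
# sum of the individual count columns for each row.
row = data[0]
component_sum = sum(v for k, v in row.items() if k not in ("state", "totals") and v is not None)
print(row["state"], component_sum, row["totals"])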
for row in list(reversed(sorted(data, key=lambda x: x["handgun"])))[:6]:
print("{state}: {handgun:,d} handgun-only checks".format(**row))
month_crop = p0.within_bbox((0, 35, p0.width, 65))
month_crop.to_image()
month_chars = month_crop.extract_text()
month_chars
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description
Step2: Make reference to the data from the server
Step3: ReferenceSet Name (chromosome) & ReadGroupSet Reads
Step4: Functions to obtain ReadGroupSet ID by name.
Step5: Function to call multiple ReferenceSets.
Step6: Cigar-Unit interpreter function.
Step7: Variant Call Function
Step8: Pile up function
Step9: Function to calculate occurrence frequency
Step10: Precursor function
Step11: Plotting Function
Step12: Widget Interface Setup
|
<ASSISTANT_TASK:>
Python Code:
Widget()
from ga4gh.client import protocol
from ga4gh.client import client
c = client.HttpClient("http://1kgenomes.ga4gh.org")
dataset = c.search_datasets().next()
reference_set = c.search_reference_sets().next()
references = [r for r in c.search_references(reference_set_id= reference_set.id)]
contig ={}
for i in references:
contig[i.name] = str(i.id)
def get_reads_for_name(Name):
Name = str(Name)
if type(get_read_groups_by_read_group_set_name(Name)) == str:
return get_read_groups_by_read_group_set_name(Name)
else:
return [i for i in get_read_groups_by_read_group_set_name(Name)]
def read_group_set_by_name(name):
result = None
for rgs in c.search_read_group_sets(name=name, dataset_id= dataset.id):
return rgs
return result
## [name=name, dataset_id= dataset.id]
def get_read_groups_by_read_group_set_name(read_group_set_name):
if None == read_group_set_by_name(read_group_set_name):
return "Sorry, bad request for {}".format(read_group_set_name)
else:
return read_group_set_by_name(read_group_set_name).read_groups
def chrfunct(chromo):
chr1 = filter(lambda x: x.name == str(chromo), references)[0]
return chr1
def cigar_interpreter(sequence, observe, ReferBase):
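    # Added description: walk the CIGAR units of an aligned read to recover the
    # base (or the inserted/deleted sequence) at offset `observe` within the read,
    # using the reference bases in `ReferBase` for deletions. Returns "N" when the
    # position cannot be resolved from this read.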
# print "Sequence Val: {}".format(sequence)
# print "Observe Val: {}".format(observe)
# print "RefereBase Val: {}".format(ReferBase)
Temp = 0
BaseCounter = 0
Variant = ""
AligSeq = sequence.aligned_sequence
InterpArr = list([])
Iter = 0
type(sequence)
for i in sequence.alignment.cigar:
Length = i.operation_length
if protocol.CigarUnit.Operation.Name(i.operation) == "ALIGNMENT_MATCH":
InterpArr[len(InterpArr):len(InterpArr)+Length] = AligSeq[Temp:Temp+Length]
Temp += Length
BaseCounter += Length
elif protocol.CigarUnit.Operation.Name(i.operation) == "CLIP_SOFT":
Temp += Length
elif protocol.CigarUnit.Operation.Name(i.operation) == "DELETE":
int_iter = 0
for i in range(Length):
InterpArr[len(InterpArr) : len(InterpArr)+1] = "N"
BaseCounter += 1
int_iter += 1
if BaseCounter == observe:
Variant = ReferBase[BaseCounter:BaseCounter+int_iter]
return Variant
elif protocol.CigarUnit.Operation.Name(i.operation) == "INSERT":
for i in range(Length):
InterpArr[len(InterpArr):len(InterpArr)+1] = AligSeq[Temp : Temp+1]
Temp += 1
if (Temp == observe) and (len(InterpArr) >= Temp+Length+1):
Variant = "".join(InterpArr[Temp:Temp+Length+1])
return Variant
Iter += 1
if (Temp >= observe) and (len(sequence.alignment.cigar) == Iter) :
return InterpArr[observe]
else:
return "N"
list_of_callset_ids =[]
def find_variants(Start, End, RdGrpSetName, ChromoSm):
for variant_set in c.search_variant_sets(datasetId=dataset.id):
if variant_set.name == "phase3-release":
release = variant_set
print variant_set
for callSet in c.search_call_sets(variant_set_id= release.id, name= str(RdGrpSetName)):
mycallset = callSet
list_of_callset_ids.append(callSet.id)
for variant in c.search_variants(release.id, reference_name=ChromoSm, start=Start, end=End, call_set_ids=list_of_callset_ids):
print variant
if len(variant.alternate_bases[0]) == 1 and len(variant.reference_bases) == 1:
print "\nA VARIANT WAS FOUND"
print "Variant Name: {}, Start: {}, End: {} \nAlternate Bases: {} \nGenotypes: {}".format(str(variant.names[0]), str(variant.start), str(variant.end), str(variant.alternate_bases[0]), str(variant.calls[0].genotype))
return
return False
def pileUp(contig, position, rgset, Chromosm):
alleles = []
rgset = get_reads_for_name(rgset)
if type(rgset) != str:
for i in rgset:
for sequence in c.search_reads(read_group_ids=[i.id],start = position, end = position+1, reference_id=contig):
if sequence.alignment != None:
start = sequence.alignment.position.position
observe = position - sequence.alignment.position.position
end = start+len(sequence.aligned_sequence)
if observe > 100 or observe < 0:
continue
if len(sequence.alignment.cigar) > 1:
allele = cigar_interpreter(sequence, observe,c.list_reference_bases(chrfunct(Chromosm).id, start=start, end= end))
else:
allele = sequence.aligned_sequence[observe]
alleles.append({"allele": str(allele), "readGroupId":i.id})
return Calc_Freq(alleles)
else:
return rgset
def Calc_Freq(Test):
tot = len(Test)
AutCalc = {}
Arr = []
for i in range(tot):
if AutCalc.has_key(Test[i]["allele"]) == False and (Test[i]['allele'] != "N"):
AutCalc.setdefault(Test[i]["allele"], 1)
Arr.append(Test[i]['allele'])
else:
if Test[i]['allele'] == "N":
tot -= 1
else:
AutCalc[Test[i]["allele"]] = float(AutCalc.get(Test[i]["allele"]) + 1)
Freq = {}
print "\n{} Reads where used, to determine pile-up".format(tot)
tot = float(tot)
for i in Arr:
Freq.setdefault(i,float(AutCalc.get(i)/tot))
return Freq
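# Tiny illustration of Calc_Freq (added example with hypothetical reads): two "G"
# calls and one "A" call yield frequencies of about 0.67 and 0.33; "N" calls are ignored.
example_reads = [{"allele": "G", "readGroupId": "rg1"},
                 {"allele": "G", "readGroupId": "rg2"},
                 {"allele": "A", "readGroupId": "rg3"}]
print Calc_Freq(example_reads)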
def Variant_Comp(Position, ReadGroupSetName, Chromosm):
RdGrp = get_reads_for_name(ReadGroupSetName)
Chrm = contig.get(Chromosm, None)
if (Chrm != None) and type(RdGrp) != (str) :
base = c.list_reference_bases(Chrm, start = Position, end = Position+1)
var = pileUp(Chrm, Position, ReadGroupSetName, Chromosm)
return (str(base), var)
else:
if RdGrp == None:
print"Read Group Set '{}' is not in the API".format(ReadGroupSetName)
else:
print"Chromosome '{}' is not in the API".format(Chromosm)
def plot_vars(Position, RdGrpName, Chromo):
%matplotlib inline
import matplotlib.pyplot as plt
Refer, Freqs = Variant_Comp(int(Position), str(RdGrpName),str(Chromo))
labels = Freqs.keys()
sizes = Freqs.values()
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
Expl= {}
Legend = []
print "Reference Bases:", Refer
for i in labels:
if Freqs.get(i) != max(sizes):
find_variants(int(Position), int(Position)+1, str(RdGrpName), str(Chromo))
Expl.setdefault(i, .15)
Legend.append("{}: {} %".format(i, str(Freqs.get(i)*100)[:4]))
elif i == Refer:
Expl.setdefault(i,0.8)
Legend.append("{}: {} %".format(i, str(Freqs.get(i)*100)[:4]))
else:
Expl.setdefault(i,0.0)
Legend.append("{}: {} %".format(i, str(Freqs.get(i)*100)[:4]))
explode = Expl.values()
plt.pie(sizes, explode=explode, labels=labels, colors=colors,autopct='%1.1f%%', shadow=True, startangle=0)
plt.axis('equal')
plt.legend(['%s' % str(x) for x in (Legend)])
plt.show()
def Widget():
from ipywidgets import widgets
from ipywidgets import interact
from IPython.display import display
t0 = widgets.Text(value="Position Exaple: '120394'", disabled=True)
text0 = widgets.Text()
t1 = widgets.Text(value="ReadGroupName Example: 'NA19102'", disabled=True)
text1 = widgets.Text()
t2 = widgets.Text(value= "ReferenceSets Example: '1'", disabled=True)
text2 = widgets.Text()
display(t0, text0, t1, text1, t2, text2)
button = widgets.Button(description="Submit")
exit = widgets.Button(description="Exit")
display(button, exit)
def exitFunct(c):
import sys
sys.exit(["Thank you, you have exited the function"])
def Submit(sender):
Pos, RgSetNm, Chrom = text0.value, text1.value, text2.value
chr1 = chrfunct(Chrom)
print "NEXT PLOT VARS FUNCTION W/ PARAMS {}, {}, {}".format(Pos, RgSetNm, Chrom)
plot_vars(Pos, RgSetNm, Chrom)
def button_clicked(b):
print "Position: {}, ReadGrpSet: {}, Chrom: {}".format(text0.value, text1.value, text2.value)
Submit(b)
button.on_click(button_clicked)
exit.on_click(exitFunct)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Can we predict salmon populations?
Step2: To get this data into a Pandas Series, I'll also make a range of years to use as an index.
Step3: And here's the series.
Step4: Here's what it looks like
Step5: Modeling changes
Step6: We can compute relative differences by dividing by the original series elementwise.
Step7: These relative differences are observed annual net growth rates. So let's drop the 0 and save them.
Step8: A simple way to model this system is to draw a random value from this series of observed rates each year. We can use the NumPy function choice to make a random choice from a series.
Step9: Simulation
Step10: I'll create a System object with variables t_0, p_0, rates, and duration=10 years.
Step11: Write an update function that takes as parameters pop, t, and system.
Step12: Test your update function and run it a few times
Step14: Here's a version of run_simulation that stores the results in a TimeSeries and returns it.
Step15: Use run_simulation to generate a prediction for the next 10 years.
Step17: To get a sense of how much the results vary, we can run the model several times and plot all of the results.
Step18: The plot option alpha=0.1 makes the lines semi-transparent, so they are darker where they overlap.
Step20: The results are highly variable
Step21: Test your function by running it with iters=5.
Step22: Now we can run 1000 simulations and describe the distribution of the results.
Step23: If we subtract off the initial population, we get the distribution of changes.
Step24: The median is negative, which indicates that the population decreases more often than it increases.
Step25: Or we can use mean to compute the fraction of runs where net_changes is positive.
Step26: And here's the fraction where it's negative.
Step27: So, based on observed past changes, this model predicts that the population is more likely to decrease than increase over the next 10 years, by about 2
Step28: I'll add the weights to the System object, since they are parameters of the model.
Step29: We can pass these weights as a parameter to np.random.choice (see the documentation)
Step30: Write an update function that takes the weights into account.
Step31: Use plot_many_simulations to plot the results.
Step32: Use run_many_simulations to collect the results and describe to summarize the distribution of net changes.
Step33: Does the refined model have much effect on the probability of population decline?
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
pops = [2749, 2845, 4247, 1843, 2562, 1774, 1201, 1284, 1287,
2339, 1177, 962, 1176, 2149, 1404, 969, 1237, 1615, 1201]
years = linrange(1997, 2015)
years
pop_series = TimeSeries(pops, index=years)
pop_series
def plot_population(series):
series.plot(label='Estimated population')
decorate(xlabel='Year',
ylabel='Population estimate',
title='Narraguacus River',
ylim=[0, 5000])
plot_population(pop_series)
abs_diffs = pop_series.diff()
abs_diffs
rel_diffs = abs_diffs / pop_series
rel_diffs
rates = rel_diffs.dropna()
rates
np.random.choice(rates)
t_0 = 2015
p_0 = pop_series[t_0]
system = System(t_0=t_0,
p_0=p_0,
duration=10,
rates=rates)
# Solution goes here
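# One possible update function (an added sketch; the original solution cell is left
# as an exercise): draw a random observed growth rate and apply it to the population.
def update_func1(pop, t, system):
    rate = np.random.choice(system.rates)
    return pop + rate * pop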
update_func1(p_0, t_0, system)
def run_simulation(system, update_func):
    """Simulate the system over its duration.

    system: System object
    update_func: function object
    """
t_0 = system.t_0
t_end = t_0 + system.duration
results = TimeSeries()
results[t_0] = system.p_0
for t in linrange(t_0, t_end):
results[t+1] = update_func(results[t], t, system)
return results
# Solution goes here
def plot_many_simulations(system, update_func, iters):
    """Runs simulations and plots the results.

    system: System object
    update_func: function object
    iters: number of simulations to run
    """
for i in range(iters):
results = run_simulation(system, update_func)
results.plot(color='gray', label='_nolegend',
linewidth=1, alpha=0.3)
# Solution goes here
def run_many_simulations(system, update_func, iters):
    """Runs simulations and report final populations.

    system: System object
    update_func: function object
    iters: number of simulations to run

    returns: series of final populations
    """
# FILL THIS IN
# Solution goes here
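# A possible implementation (added sketch; the stub above is the exercise prompt):
def run_many_simulations(system, update_func, iters):
    last_pops = TimeSeries()
    for i in range(iters):
        results = run_simulation(system, update_func)
        last_pops[i] = results[results.index[-1]]
    return last_pops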
run_many_simulations(system, update_func1, 5)
last_pops = run_many_simulations(system, update_func1, 1000)
last_pops.describe()
net_changes = last_pops - p_0
net_changes.describe()
np.sum(net_changes > 0)
np.mean(net_changes > 0)
np.mean(net_changes < 0)
weights = linspace(0, 1, len(rates))
weights /= sum(weights)
weights
system.weights = weights
np.random.choice(system.rates, p=system.weights)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
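# Possible answers for the weighted model (added sketch): sample rates using the
# weights, then reuse the plotting and summary helpers defined above.
def update_func2(pop, t, system):
    rate = np.random.choice(system.rates, p=system.weights)
    return pop + rate * pop

plot_many_simulations(system, update_func2, 30)
decorate(xlabel='Year', ylabel='Population')

last_pops2 = run_many_simulations(system, update_func2, 1000)
(last_pops2 - p_0).describe()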
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris Dataset
Step2: Make Iris Dataset Imbalanced
Step3: Upsampling Minority Class To Match Majority
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
import numpy as np
from sklearn.datasets import load_iris
# Load iris data
iris = load_iris()
# Create feature matrix
X = iris.data
# Create target vector
y = iris.target
# Remove first 40 observations
X = X[40:,:]
y = y[40:]
# Create binary target vector indicating if class 0
y = np.where((y == 0), 0, 1)
# Look at the imbalanced target vector
y
# Indices of each class' observations
i_class0 = np.where(y == 0)[0]
i_class1 = np.where(y == 1)[0]
# Number of observations in each class
n_class0 = len(i_class0)
n_class1 = len(i_class1)
# For every observation in class 1, randomly sample from class 0 with replacement
i_class0_upsampled = np.random.choice(i_class0, size=n_class1, replace=True)
# Join together class 0's upsampled target vector with class 1's target vector
np.concatenate((y[i_class0_upsampled], y[i_class1]))
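# Added note: the same indices would normally be used to upsample the feature
# matrix as well, so X and y stay aligned (sketch).
X_upsampled = np.concatenate((X[i_class0_upsampled], X[i_class1]))
X_upsampled.shape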
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 0 (Example)
Step2: Question 1
Step3: Question 2
Step4: Question 3
Step5: Question 4
Step6: Part 2
Step7: Question 6
Step8: Question 7
Step9: Question 8
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('olympics.csv', index_col=0, skiprows=1)
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold'+col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver'+col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze'+col[4:]}, inplace=True)
    if col[:1]=='№':
df.rename(columns={col:'#'+col[1:]}, inplace=True)
names_ids = df.index.str.split('\s\(') # split the index by '('
df.index = names_ids.str[0] # the [0] element is the country name (new index)
df['ID'] = names_ids.str[1].str[:3] # the [1] element is the abbreviation or ID (take first 3 characters from that)
df = df.drop('Totals')
df.head()
# You should write your whole answer within the function provided. The autograder will call
# this function and compare the return value against the correct solution value
def answer_zero():
# This function returns the row for Afghanistan, which is a Series object. The assignment
# question description will tell you the general format the autograder is expecting
return df.iloc[0]
# You can examine what your function returns by calling it in the cell. If you have questions
# about the assignment formats, check out the discussion forums for any FAQs
answer_zero()
def answer_one():
maxim = df['Gold'].max()
most_gold = df.index[df['Gold'] == maxim]
return most_gold[0]
answer_one()
def answer_two():
gold_sum_win_diff = abs(df['Gold'] - df['Gold.1'])
biggest_idx = gold_sum_win_diff.idxmax()
return biggest_idx
answer_two()
def answer_three():
new_df = df[(df['Gold'] > 0) & (df['Gold.1'] > 0)]
summer_gold = new_df['Gold']
wint_gold = new_df['Gold.1']
tot_gold = new_df['Gold.2']
measure = (summer_gold - wint_gold)/tot_gold
return measure.idxmax()
answer_three()
df.shape
def answer_four():
gold_2_val = df['Gold.2']*3
silver_2_val = df['Silver.2']*2
bronze_2_val = df['Bronze.2']*1
Points = pd.Series(gold_2_val+silver_2_val+bronze_2_val, name='Points')
return Points
#answer_four()
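# Added example check (sketch): the five highest-scoring countries under this weighting.
answer_four().sort_values(ascending=False).head()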
census_df = pd.read_csv('census.csv', encoding='iso-8859-1')
census_df.head()
df_level = census_df[census_df['SUMLEV'] == 50]
group = df_level.groupby(['STNAME']).size()
#group
def answer_five():
df_level = census_df[census_df['SUMLEV'] == 50]
group = df_level.groupby(['STNAME']).size().reset_index(name='freq')
max_count_state = group['freq'].idxmax()
return group['STNAME'][max_count_state]
answer_five()
def answer_six():
df_level = census_df[census_df['SUMLEV'] == 50]
top_count = df_level.sort_values(by=['STNAME', 'CENSUS2010POP'], ascending=False)
top_count = top_count.groupby('STNAME').head(3) #based on first 3 for each state
maxim = top_count.groupby('STNAME').sum()
maxim = maxim.sort_values(by='CENSUS2010POP', ascending=False).head(3)
return maxim.index.tolist()
answer_six()
df_county = census_df[census_df['SUMLEV'] == 50]
df_county['max'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',
'POPESTIMATE2014', 'POPESTIMATE2013',
'POPESTIMATE2012', 'POPESTIMATE2011',
'POPESTIMATE2010']].max(axis=1)
df_county['min'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',
'POPESTIMATE2014', 'POPESTIMATE2013',
'POPESTIMATE2012', 'POPESTIMATE2011',
'POPESTIMATE2010']].min(axis=1)
df_county['max_diff'] = abs(df_county['max'] - df_county['min'])
df_county = df_county.sort_values(by='max_diff', ascending=False)
df_county.head()
def answer_seven():
df_county = census_df[census_df['SUMLEV'] == 50]
df_county['max'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',
'POPESTIMATE2014', 'POPESTIMATE2013',
'POPESTIMATE2012', 'POPESTIMATE2011',
'POPESTIMATE2010']].max(axis=1)
df_county['min'] = df_county[['STNAME','CTYNAME', 'POPESTIMATE2015',
'POPESTIMATE2014', 'POPESTIMATE2013',
'POPESTIMATE2012', 'POPESTIMATE2011',
'POPESTIMATE2010']].min(axis=1)
df_county['max_diff'] = abs(df_county['max'] - df_county['min'])
    df_county = df_county.sort_values(by='max_diff', ascending=False)
return df_county['CTYNAME'].iloc[0]
answer_seven()
def answer_eight():
counties_df = census_df[census_df['SUMLEV'] == 50]
ans = counties_df[((counties_df['REGION']==1)|(counties_df['REGION']==2))&(counties_df['CTYNAME']=='Washington County')&(counties_df['POPESTIMATE2015']>counties_df['POPESTIMATE2014'])][['STNAME','CTYNAME']]
return ans
answer_eight()
def answer_seven():
cty = census_df[census_df['SUMLEV'] == 50]
cty['pop_change'] = abs(cty['POPESTIMATE2015'] - cty['POPESTIMATE2014'])+abs(cty['POPESTIMATE2014'] - cty['POPESTIMATE2013'])+abs(cty['POPESTIMATE2013'] - cty['POPESTIMATE2012'])+abs(cty['POPESTIMATE2012'] - cty['POPESTIMATE2011'])+abs(cty['POPESTIMATE2011'] - cty['POPESTIMATE2010'])
maxim = max(cty['pop_change'])
ans = cty['CTYNAME'][cty['pop_change']==maxim].tolist()
return ans[0]
answer_seven()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting access to the spreadsheet (Method 1)
Step3: Calculations on the spreadsheet
Step4: cloning repos
Step7: rtc covers
Step8: Getting covers into repos
Step9: Getting covers into repos
Step10: Generalized structure for iterating over repos
Step11: Travis work
Step12: Calculating URL for latest epub for each repo
Step13: Misc
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import gspread
import json
# rtc50_settings.py holds URL related to the Google spreadsheet
from rtc50_settings import (g_name, g_url, g_key)
OFFICIAL_NAME_KEY = "Name in rtc/books.json, Official Name"
g_url
import json
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
json_key = json.load(open('nypl50-gspread.json'))
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(json_key['client_email'], json_key['private_key'], scope)
gc = gspread.authorize(credentials)
wks = gc.open_by_key(g_key).sheet1
wks
# load the rows
all_rows = wks.get_all_values()
# use pandas
import pandas as pd
from pandas import (DataFrame, Series)
df = DataFrame(all_rows[2:], columns=all_rows[1])
df.index = df.index + 3 # shift index to match spreadsheet
df.head()
# what does the status mean?
df[df["RTC Good Cover?"] == 'YES']["Gitenberg Status"].value_counts()
# "RTC 1st GEN" vs "RTC 2nd GEN" vs "RTC Other Gen"
len(df[df["RTC 1st GEN"] == 'X'])
from second_folio import all_repos
set(all_repos) - set(df['Gitenberg URL'].map(lambda u: u.split("/")[-1]))
# just forget the whole part 1/part 2 -- figure out what repos are ready to work on haven't yet been done.
from github3 import (login, GitHub)
from github_settings import (username, password, token)
from itertools import islice
#gh = login(username, password=password)
gh = login(token=token)
def asciidoc_in_repo_root(repo, branch ='master'):
    """return list of asciidocs in the root of repo"""
repo_branch = repo.branch(branch)
tree = repo.tree(repo_branch.commit.sha)
return [hash_.path
for hash_ in tree.tree
if hash_.path.endswith('.asciidoc')]
def asciidocs_for_repo_name(repo_name):
try:
repo = gh.repository('GITenberg', repo_name)
return asciidoc_in_repo_root(repo, branch ='master')
except Exception as e:
return e
# copy CSV to clipboard, making it easy to then paste it to
# https://github.com/gitenberg-dev/Second-Folio/blob/master/Gitenberg%20Book%20List.csv
df.to_clipboard(encoding='utf-8', sep=',', index=False)
import sh
sh.cd("/Users/raymondyee/C/src/gitenberg/Adventures-of-Huckleberry-Finn_76")
len(sh.grep (sh.git.remote.show("-n", "origin"),
"git@github-GITenberg:GITenberg/Adventures-of-Huckleberry-Finn_76.git", _ok_code=[0,1]))
from itertools import islice
from second_folio import (repo_cloned, clone_repo)
repos_to_clone = (repo for repo in all_repos if not repo_cloned(repo)[0])
for (i, repo) in enumerate(islice(repos_to_clone,None)):
output = clone_repo(repo)
print ("\r{} {} {} {}".format(i, repo, output, repo_cloned(repo)))
import requests
# rtc_covers_url = "https://raw.githubusercontent.com/plympton/rtc/master/books.json"
rtc_covers_url = "https://raw.githubusercontent.com/rdhyee/rtc/master/books.json"
covers = requests.get(rtc_covers_url).json()
covers_dict = dict([(cover['name'], cover) for cover in covers])
len(covers_dict)
# Are there any covers in the Plymton repo not in books.json?
df
# not that many covers
cover_names = set([cover['name'] for cover in covers])
# read off cover_map from df
# http://stackoverflow.com/a/9762084
cover_map = dict(filter(lambda (k,v):v,
[tuple(x) for x in df[['Title', OFFICIAL_NAME_KEY]].values]
))
repos_with_covers = list(df[df[OFFICIAL_NAME_KEY].map(lambda s: len(s) > 0)]['Gitenberg URL'].map(lambda u: u.split("/")[-1]))
repos_with_covers
len(repos_with_covers)
# compare list of cover repo data in
# https://raw.githubusercontent.com/gitenberg-dev/Second-Folio/master/covers_data.json
import requests
r = requests.get("https://raw.githubusercontent.com/gitenberg-dev/Second-Folio/master/covers_data.json")
covers_data = r.json()
covers_data
set(repos_with_covers) - set([c['GitHub repo'] for c in covers_data])
set([c['GitHub repo'] for c in covers_data]) - set(repos_with_covers)
mapped_cover_names = set(cover_map.values())
(cover_names - mapped_cover_names), (mapped_cover_names - cover_names)
[v['covers'][0]['filename']
for (k,v) in covers_dict.items()]
# Have I downloaded all the big images?
img_path = "/Users/raymondyee/Downloads/rtc/full_images/"
cover_names
from IPython.display import HTML
from PIL import Image
import jinja2
# let's look at the images for the books
# https://cdn.rawgit.com/plympton/rtc/master/rtc_books/
# https://cdn.rawgit.com/plympton/rtc/master/rtc_books_resized/
cover_url_base = "https://cdn.rawgit.com/plympton/rtc/master/rtc_books/"
small_cover_url_base = "https://cdn.rawgit.com/plympton/rtc/master/rtc_books_resized/"
from functools import partial
def cover_name_to_url(name, reduce=False):
if reduce:
url = small_cover_url_base
else:
url = cover_url_base
cover = covers_dict.get(name)
if cover is not None:
return url + cover['covers'][0]["filename"]
else:
return None
def cover_name_to_artist(name):
cover = covers_dict.get(name)
if cover is not None:
return cover['covers'][0]['artist']
else:
return None
cover_name_to_url_small = partial(cover_name_to_url, reduce=True)
cover_name_to_url_big = partial(cover_name_to_url, reduce=False)
df['big_image_url'] = df[OFFICIAL_NAME_KEY].map(cover_name_to_url_big)
df['small_image_url'] = df[OFFICIAL_NAME_KEY].map(cover_name_to_url_small)
rtc50 = df[df["RTC Good Cover?"] == 'YES']
rtc50.head()
results = rtc50[['Title', 'big_image_url']].T.to_dict().values()
results
from IPython.display import HTML
from jinja2 import Template
CSS = """
<style>
.wrap img {
    margin-left: 0px;
    margin-right: 0px;
    display: inline-block;
    width: 100px;
}
</style>
"""
IMAGES_TEMPLATE = CSS + """
<div class="wrap">
{% for item in items %}<img title="{{item.Title}}" src="{{item.big_image_url}}"/>{% endfor %}
</div>
"""
template = Template(IMAGES_TEMPLATE)
HTML(template.render(items=results))
#let's try looping over all the images and convert them to png
def download_big_images(limit=None):
import requests
from itertools import islice
import os
img_path = "/Users/raymondyee/Downloads/rtc/full_images/"
for image in islice(results,limit):
# check whether we have the cover already before downloading
url = image['big_image_url']
if url is not None:
name = url.split("/")[-1]
dest_path = img_path + name
if not os.path.exists(dest_path):
print (dest_path)
content = requests.get(url).content
with open(img_path + name, "wb") as f:
f.write(content)
download_big_images(limit=None)
# loop over jpg and convert to png
def convert_small_jpg_to_png():
import glob
for f in glob.glob("/Users/raymondyee/Downloads/rtc/resized/*.jp*g"):
im = Image.open(f)
png_path = ".".join(f.split(".")[:-1]) + ".png"
if im.mode not in ["1", "L", "P", "RGB", "RGBA"]:
im = im.convert("RGB")
im.save(png_path)
# image types in covers
from collections import Counter
Counter(c['filename'].split(".")[-1] for cover in covers for c in cover['covers'])
df['GitHub repo']=df['Gitenberg URL'].map(lambda u:u.split("/")[-1])
import numpy as np
df['local_big_file'] = df['big_image_url'].map(lambda u:u.split("/")[-1] if u is not None and u is not np.nan else None)
df['cover_artist'] = df[OFFICIAL_NAME_KEY].map(cover_name_to_artist)
import re
df['local_big_file'] = df['local_big_file'].map(lambda s: re.sub(r".png$", ".jpg", s) if s is not None else s)
def write_covers_data():
import json
rtc50 = df[df["RTC Good Cover?"] == 'YES']
covers_data_path = "/Users/raymondyee/C/src/gitenberg/Second-Folio/covers_data.json"
with open(covers_data_path, "w") as f:
f.write(json.dumps(rtc50[['GitHub repo', 'cover_artist', 'local_big_file']].T.to_dict().values(),
sort_keys=True,indent=2, separators=(',', ': ')))
#write_covers_data()
import sh
# can control tty settings for sh
# https://amoffat.github.io/sh/#ttys
sh.ls("-1", _tty_out=False ).split()
dict([(c['GitHub repo'], c) for c in covers_data])
s = Series(repos)
list(s.map(lambda r: covers_data_dict.get(r).get('local_big_file')))
import os
import os
import shutil
import sh
from pandas import DataFrame, Series
from itertools import islice
REPOS_LIST = "/Users/raymondyee/C/src/gitenberg/Second-Folio/list_of_repos.txt"
COVERS_DATA = "/Users/raymondyee/C/src/gitenberg/Second-Folio/covers_data.json"
GITENBERG_DIR = "/Users/raymondyee/C/src/gitenberg/"
COVERS_DIR = "/Users/raymondyee/Downloads/rtc/full_images/"
repos=open(REPOS_LIST).read().strip().split("\n")
covers_data = json.loads(open(COVERS_DATA).read())
covers_data_dict = dict([(c['GitHub repo'], c) for c in covers_data])
def copy_repo_cover(repo, dry_run=False):
cover_file = covers_data_dict[repo]['local_big_file']
local_cover_path = None
copied = False
if cover_file is not None:
local_cover_path = os.path.join(COVERS_DIR, cover_file)
destination = os.path.join(GITENBERG_DIR, repo, "cover.jpg")
if os.path.exists(local_cover_path) and not os.path.exists(destination):
if not dry_run:
shutil.copyfile(local_cover_path, destination)
copied = True
return (local_cover_path, copied)
def git_pull(repo):
sh.cd(os.path.join(GITENBERG_DIR, repo))
return sh.git("pull")
def copy_covers():
for (i,repo) in enumerate(islice(repos,None)):
print (i, repo, copy_repo_cover(repo, dry_run=False))
copy_covers()
# let's compute missing covers
for repo in repos:
destination = os.path.join(GITENBERG_DIR, repo, "cover.jpg")
if not os.path.exists(destination):
print (repo)
def git_add_cover_commit_push(repo):
cover_path = os.path.join(GITENBERG_DIR, repo, "cover.jpg")
try:
if os.path.exists(cover_path):
sh.cd(os.path.join(GITENBERG_DIR, repo))
print ("add")
sh.git("add", "cover.jpg")
print ("commit")
try:
sh.git("commit", "-m", "add cover.jpg")
except:
pass
print ("push")
sh.git.push()
else:
return None
except Exception as e:
return e
for (i,repo) in enumerate(islice(repos,None)):
print (i, repo)
print (git_add_cover_commit_push(repo))
def git_pull(repo):
sh.cd(os.path.join(GITENBERG_DIR, repo))
sh.git("pull")
for (i,repo) in enumerate(islice(repos,None)):
print (i, repo)
git_pull(repo)
sh.cd("/Users/raymondyee/C/src/gitenberg/Jane-Eyre_1260")
sh.git.push()
import os
import json
import shutil
import sh
import yaml
from pandas import DataFrame, Series
from itertools import islice
REPOS_LIST = "/Users/raymondyee/C/src/gitenberg/Second-Folio/list_of_repos.txt"
GITENBERG_DIR = "/Users/raymondyee/C/src/gitenberg/"
METADATA_DIR = "/Users/raymondyee/C/src/gitenberg-dev/giten_site/metadata"
COVERS_DATA = "/Users/raymondyee/C/src/gitenberg/Second-Folio/covers_data.json"
import os
import glob
import sh
import yaml
from gitenberg import metadata
import jinja2
from second_folio import (GITENBERG_DIR,
all_repos,
apply_to_repos,
travis_setup_releases,
git_pull,
apply_travis,
finish_travis,
repo_is_buildable,
has_travis_with_gitenberg_build,
slugify,
latest_epub,
repo_version
)
from github_settings import (username, password)
from itertools import islice, izip
repos = list(islice(all_repos,0,None))
# determine which repos are "buildable"
repos_statues = list(izip(repos,
apply_to_repos(repo_is_buildable, repos=repos),
apply_to_repos(has_travis_with_gitenberg_build, repos=repos) ))
# we want to apply travis to repos that are buildable but that don't yet have .travis.yml.
repos_to_travisfy = [repo[0] for repo in repos_statues if repo[1] and not repo[2]]
repos_to_travisfy
from __future__ import print_function
for (i, repo) in enumerate(islice(repos_to_travisfy,1)):
print (i, repo, end=" ")
r1 = apply_travis(repo, username, password, overwrite_travis=True)
print (r1, end=" ")
if r1:
r2 = finish_travis(repo)
print (r2)
else:
print ("n/a")
import requests
url = "https://github.com/GITenberg/Adventures-of-Huckleberry-Finn_76/releases/download/0.0.17/Adventures-of-Huckleberry-Finn.epub"
r = requests.head(url)
r.status_code, r.url, r.url == url
epub_urls = list(apply_to_repos(latest_epub))
import pandas as pd
from pandas import DataFrame
df = DataFrame({'epub_url':epub_urls}, index=all_repos)
df.head()
df['status_code'] = df.epub_url.apply(lambda u: requests.head(u).status_code)
df['buildable'] = df.index.map(repo_is_buildable)
k = df[df['status_code'] == 404][:3]
k['status_code'] = k.epub_url.apply(lambda u: requests.head(u).status_code)
k.head()
df.ix[k.index] = k
list(k.epub_url)
df[(df.status_code == 404) & (df.buildable)]
df['metadata_url'] = df.index.map(lambda repo: "https://github.com/GITenberg/{}/raw/master/metadata.yaml".format(repo))
print "\n".join(list(df[~df.buildable].index))
df.buildable.value_counts()
df.to_clipboard(index_label="repo", sep=',')
df[df.status_code == 404]
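# Added summary (sketch): cross-tabulate buildability against the HTTP status of the
# latest epub URL to see which buildable repos are still missing a release.
pd.crosstab(df.buildable, df.status_code)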
md.metadata.get("title"), md.metadata.get("_repo"), md.metadata.get("_version"),
# figure out what elements to feed to template
#
from jinja2 import Environment, PackageLoader, meta
env = Environment()
parsed_content = env.parse(template)
meta.find_undeclared_variables(parsed_content)
import sh
sh.cd("/Users/raymondyee/C/src/gitenberg/Adventures-of-Huckleberry-Finn_76")
sh.travis.whoami()
from itertools import islice, izip
repos = list(islice(second_folio.all_repos,1,None))
list(izip(repos, apply_to_repos(git_mv_asciidoc, repos=repos)))
list(apply_to_repos(git_pull))
from __future__ import print_function
line = "Detected repository as GITenberg/Don-Quixote_996, is this correct? |yes| "
"Detected" in line
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Who is involved?
Step3: The best is to have one function drawing the same plots for different input dataframes, i.e. for different conditions.
Step4: We can see that men are a lot more involved in accidents than women. Let's try to understand this distribution by looking at the distribution for pedestrians, drivers and passagers separately.
Step5: The ratio men/women displays some unexpected variations below 40. This can maybe be explained by different activities and lifestyles at different ages for men and women. The fact that the ratio is less than one above 60 could be due to women living longer than men in average. Numbers on the population by gender of France would be useful here.
Step6: The dataframe contains all vehicle types, including bicycles which is why we have accidents involving drivers under the age of 16. This analysis of the drivers will be easier to interpret if we separate also by vehicle type. However the vehicle type is not stored in the Users dataframe. We need to import the vehicle type from the Vehicles dataframe.
Step7: Cars are identified with the number 7 in the vehicle type column.
Step8: Passengers involved in accidents
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# Provides better color palettes
import seaborn as sns
from pandas import DataFrame,Series
import matplotlib as mpl
import matplotlib.pyplot as plt
# Command to display the plots in the iPython Notebook
%matplotlib inline
import matplotlib.patches as mpatches
mpl.style.use('seaborn-whitegrid')
plt.style.use('seaborn-talk')
# Extract the list of colors from this style for later use
cycl = mpl.rcParams['axes.prop_cycle']
colors = cycl.by_key()['color']
from CSVtoSQLconverter import load_sql_engine
sqlEngine = load_sql_engine()
users_df = pd.read_sql_query('''SELECT * FROM safer_roads.users
WHERE `accident id` > 2014E+8 AND `accident id` < 2015E+8''',
sqlEngine)
users_df.head()
def men_women_dist(in_df, title=''):
    """Plot the age distribution for men and women, and the ratio."""
# Extract age for each gender
women_sr = in_df[in_df['sex']==2]['age'].dropna()
men_sr = in_df[in_df['sex']==1]['age'].dropna()
# Bin the ages in bins with 2 years of width
bin_range = range(0,102,2)
women_bins = pd.cut(women_sr, bin_range,right=False).value_counts().sort_index()
men_bins = pd.cut(men_sr, bin_range,right=False).value_counts().sort_index()
# Calculate the ratio between men and women
ratio_bins = men_bins/women_bins
error_bins = ratio_bins * np.sqrt((men_bins+women_bins)/(men_bins*women_bins))
# Plot the stacked distribution of age for men and women
plt.subplots(figsize=(14,8))
plt.subplot(211)
plt.hist([women_sr,men_sr],bins=50,range=(0,100) , stacked=True, normed = False, histtype='bar',
rwidth=1.0, color=[colors[1],colors[0]]);
plt.title(title)
plt.ylabel('counts')
leg_men = mpatches.Patch(color=colors[0], label='Men')
leg_women = mpatches.Patch(color=colors[1], label='Women')
plt.legend(handles=[leg_men,leg_women],frameon=True)
# Plot the ratio men/women
plt.subplot(212)
plt.errorbar(range(1,100,2),ratio_bins,xerr=1.0,yerr=error_bins, fmt='none')
plt.xlim([0,100])
plt.xlabel('age in years')
plt.ylabel('ratio men/women')
men_women_dist(users_df, 'All accidents, all users')
men_women_dist(users_df[(users_df['user type'] == 3) | (users_df['user type'] == 4)] , 'All accidents, pedestrians')
men_women_dist(users_df[users_df['user type'] == 1] , 'All accidents, drivers')
vehicles_df = pd.read_sql_query('''SELECT * FROM safer_roads.vehicles
WHERE `accident id` > 2014E+8 AND `accident id` < 2015E+8''',
sqlEngine)
users_vt_df = users_df.merge(vehicles_df[['vehicle type','accident id','vehicle id']],
on=['accident id','vehicle id'],how='inner')
users_vt_df.head()
men_women_dist(users_vt_df[(users_vt_df['user type'] == 1) & (users_vt_df['vehicle type'] == 7)] ,
               'All accidents, drivers of cars')
men_women_dist(users_vt_df[(users_vt_df['user type'] == 2) & (users_vt_df['vehicle type'] == 7)] ,
               'All accidents, passengers of cars')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But first, an aside about joining strings
Step2: Another Aside
Step3: Scraping the Faculty: what percentage of the CJ faculty are adjunct faculty?
Step4: very first task
Step5: Now, we want to make a list of dictionaries of faculty members along with their titles
Step6: String Indexing
Step7: lost count of asides
|
<ASSISTANT_TASK:>
Python Code:
!pip3 install bs4
from bs4 import BeautifulSoup
from urllib.request import urlopen
html_str = urlopen("http://static.decontextualize.com/kittens.html").read()
print(html_str)
document = BeautifulSoup(html_str,"html.parser")
type(document)
h1_tag = document.find('h1')
h1_tag.string
img_tag = document.find('img')
img_tag.string
img_tag('src')
img_tag['src']
document.find_all('img')
img_tags=document.find_all('img')
type(img_tags)
first_img = img_tags[0]
first_img['src']
second_img = img_tags[1]
second_img['src']
for item in img_tags:
print(item['src'])
h2_tags = document.find_all('h2')
for item in h2_tags:
print(item.string)
checkups = document.find_all('span',{'class':'lastcheckup'})
for item in checkups:
print(item.string)
kittens = document.find_all('div', {'class': 'kitten'})
for item in kittens:
h2_tag = item.find('h2')
print(h2_tag.string)
checkup = item.find('span')
print(checkup.string)
kittens = document.find_all('div', {'class': 'kitten'})
first_kitten = kittens[0]
first_kitten_h2 = first_kitten.find('h2')
print(first_kitten_h2.string)
planets = ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"]
separator = ","
separator.join(planets)
print("&\n".join(planets))
print("&\n".join(planets[:4]))
kittens = document.find_all('div', {'class': 'kitten'})
for item in kittens:
h2_tag = item.find('h2')
print(h2_tag.string)
a_tags = item.find_all('a') #anchor tag, ancestor
all_shows_str = [] #create a new list
for a_tag_item in a_tags:
#print("-", a_tag_item.string)
tag_str = a_tag_item.string
all_shows_str.append(tag_str)
string_with_all_show_names = ",".join(all_shows_str)
print(h2_tag.string+ ":", string_with_all_show_names)
kittens_data = list()#create an empty list
kittens = document.find_all('div', {'class': 'kitten'})
for item in kittens:
h2_tag = item.find('h2')
print(h2_tag.string)
a_tags = item.find_all('a') #anchor tag, ancestor
all_shows_str = [] #create a new list
for a_tag_item in a_tags:
#print("-", a_tag_item.string)
tag_str = a_tag_item.string
all_shows_str.append(tag_str)
#1 create a dictionary and add to it the relevant key/value pairs
#kitten_map = {}
#kitten_map["name"] = h2_tag.string
#kitten_map["tvshows"] = all_shows_str
kitten_map = {"name":h2_tag.string, "tvshows":all_shows_str }
#2 append that dictionary to the kittens_data
string_with_all_show_names = ",".join(all_shows_str)
#print(h2_tag.string+ ":", string_with_all_show_names
kittens_data
kittens_data = list()#create an empty list
kittens = document.find_all('div', {'class': 'kitten'})
for item in kittens:
h2_tag = item.find('h2')
print(h2_tag.string)
a_tags = item.find_all('a') #anchor tag, ancestor
all_shows_str = []
for a_tag_item in a_tags:
tag_str = a_tag_item.string
all_shows_str.append(tag_str)
#create a dictionary adding kittens checkups
checkup = item.find('span')# get the string with checkup.string
kittens_data.append(
{"name":h2_tag.string,
"tvshows":all_shows_str,
"last_checkup": checkup.string})
kittens_data
# Our next goal is to create a data structure that looks like this:
# [
#   {'name': 'Fluffy',
#    'tv shows': ['Deep Space Nine', 'Mr.Belvedere']},
#   ...
# ]
x = ["a", "b", "c", "d"]
x[0]
x.append("e")
len(x)
x[4]
numbers = [1,2,3,4,5,6]
# end up with: [1,4,9,16,25,36]
squared = [item * item for item in numbers]
for item in numbers:
s = item*item
squared.append(s)
squared
## Aside the Third: Making dictionaries
#declaring a dictionary
x = {'a':1, 'b':2, 'c':3}
#get a value out of a dictionary
x['a']
x.keys()
for key in x.keys():
print(key) #print out keys
# target: {1:1, 2:4, 3:9, 4:16, 5:25,...}
squares = {}
for n in range(1,11):
squares[n] = n*n
squares
squares[7]
names = ["Aaron", "Bob", "Caroline", "Daphne"]
#target: {"Aaron": 5} #show the name and how many characters each name has
name_length_map = {}#map is
for item in names:
name_length_map[item] = len(item)
name_length_map #evaluate the dictionary, Python 3 # take a list and create a new dictionary
from urllib.request import urlopen
faculty_html = urlopen("http://www.journalism.columbia.edu/page/10/10?category_ids%5B%5D=2&category_ids%5B%5D=3&category_ids%5B%5D=37").read()
document = BeautifulSoup(faculty_html, "html.parser")
document.find('h2').string
h2_tag = document.find('h2')
h2_tag.string
# this doesn't work,
ul_tag = document.find('ul', {'class': 'experts-list'})
li_tags= ul_tag.find_all('li')
for item in li_tags:
h4_tag = item.find('h4')
if h4_tag: #none counts as false in python, only proceed if we actually found a h4-tag under li tags
a_tag = h4_tag.find('a')#name of adjunct
p_tag = item.find('p', {'class':'description'})#position of adjunct
print(a_tag.string, "/", p_tag.string)
profs = []
ul_tag = document.find('ul', {'class': 'experts-list'})
li_tags= ul_tag.find_all('li')
for item in li_tags:
h4_tag = item.find('h4')
if h4_tag: #none counts as false in python, only proceed if we actually found a h4-tag under li tags
a_tag = h4_tag.find('a')
p_tag = item.find('p', {'class':'description'})
prof_map = {'name': a_tag.string, 'title': p_tag.string}
profs.append(prof_map)
profs
for item in profs:
print(item['name'])
# print all of the professors whose last name start with 'M'
m_profs = []
mcount = 0
for item in profs:
prof_name = item['name']
if prof_name[0]=='M':
print(item['name'])
mcount += 1 #mcount= mcount+1
print(mcount)
# find all of the professors listed as "Adjunct Faculty"
adjunct_profs = []
#same as where clause
mcount=0
for item in profs:
if item['title'] is not None and ("Adjunct" in item['title']):
adjunct_profs.append(item)
len(adjunct_profs)
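# Added calculation (sketch): the share of listed faculty who are adjuncts,
# which answers the question posed above.
print("{:.1f}% of the listed faculty are adjuncts".format(100 * len(adjunct_profs) / len(profs)))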
message = "bungalow"
message[0]
message[2:6]
message[-1]
message[0:3]
message[:3]
message[4:]
message[-5:-2]
x=5
x
x = x-1
x
x -= 1
x
x *=2
x
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot ratio of update norms to parameter norms across epochs for different layers
|
<ASSISTANT_TASK:>
Python Code:
# Assumed imports for this plotting notebook (the cell that loads `model` from a
# pickled pylearn2 model is not shown here):
import io
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import Image, display
from pylearn2.gui.get_weights_report import get_weights_report

tr = np.array(model.monitor.channels['valid_y_y_1_nll'].time_record) / 3600.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(111)
ax1.plot(model.monitor.channels['valid_y_y_1_nll'].val_record)
ax1.plot(model.monitor.channels['train_y_y_1_nll'].val_record)
ax1.set_xlabel('Epochs')
ax1.legend(['Valid', 'Train'])
ax1.set_ylabel('NLL')
ax1.set_ylim(0., 5.)
ax1.grid(True)
ax2 = ax1.twiny()
ax2.set_xticks(np.arange(0,tr.shape[0],20))
ax2.set_xticklabels(['{0:.2f}'.format(t) for t in tr[::20]])
ax2.set_xlabel('Hours')
print(model.yaml_src)
pv = get_weights_report(model=model)
img = pv.get_img()
img = img.resize((4*img.size[0], 4*img.size[1]))
img_data = io.BytesIO()
img.save(img_data, format='png')
display(Image(data=img_data.getvalue(), format='png'))
plt.plot(model.monitor.channels['learning_rate'].val_record)
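# The per-layer cells below all repeat the same pattern; a small helper like this
# (added sketch) would make the weight-norm / update-norm ratio plot reusable:
def plot_update_ratio(update_channel, norm_channel):
    updates = np.array([float(v) for v in model.monitor.channels[update_channel].val_record])
    norms = np.array([float(v) for v in model.monitor.channels[norm_channel].val_record])
    plt.plot(norms / updates)
    plt.show()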
h1_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h1_W_kernel_norm_mean'].val_record])
h1_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h1_kernel_norms_mean'].val_record])
plt.plot(h1_W_norms / h1_W_up_norms)
plt.ylim(0,1000)
plt.show()
plt.plot(model.monitor.channels['valid_h1_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h1_kernel_norms_max'].val_record)
h2_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h2_W_kernel_norm_mean'].val_record])
h2_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h2_kernel_norms_mean'].val_record])
plt.plot(h2_W_norms / h2_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h2_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h2_kernel_norms_max'].val_record)
h3_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h3_W_kernel_norm_mean'].val_record])
h3_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h3_kernel_norms_mean'].val_record])
plt.plot(h3_W_norms / h3_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h3_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h3_kernel_norms_max'].val_record)
h4_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h4_W_kernel_norm_mean'].val_record])
h4_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h4_kernel_norms_mean'].val_record])
plt.plot(h4_W_norms / h4_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h4_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h4_kernel_norms_max'].val_record)
h5_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h5_W_kernel_norm_mean'].val_record])
h5_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h5_kernel_norms_mean'].val_record])
plt.plot(h5_W_norms / h5_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h5_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h5_kernel_norms_max'].val_record)
h6_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h6_W_col_norm_mean'].val_record])
h6_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h6_col_norms_mean'].val_record])
plt.plot(h6_W_norms / h6_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h6_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h6_col_norms_max'].val_record)
y_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_softmax_W_col_norm_mean'].val_record])
y_W_norms = np.array([float(v) for v in model.monitor.channels['valid_y_y_1_col_norms_mean'].val_record])
plt.plot(y_W_norms / y_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_y_y_1_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_y_y_1_col_norms_max'].val_record)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem
Step2: Problem
Step3: OSMH Dual Formulation
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy as np
center1 = np.array([3.0,3.0])
center2 = np.array([-3.0,-3.0])
X = np.zeros((100,2)); Y = np.zeros((100,))
X[:50,:] = np.random.multivariate_normal(center1, np.eye(2),(50,))
Y[:50] = +1
X[50:,:] = np.random.multivariate_normal(center2, np.eye(2),(50,))
Y[50:] = -1
plt.scatter(X[:,0], X[:,1], c = Y)
wvec = np.array([-4.0,7.0])
bval = -2.4
# Does this wvec and b correctly classify data within margin?
# put some code in here!
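# One way to check (added sketch): a point is correctly classified when
# y_i * (w . x_i + b) > 0, and it lies outside the margin when that product is >= 1.
margins = Y * (X.dot(wvec) + bval)
print("all correctly classified:", np.all(margins > 0))
print("all outside the margin:  ", np.all(margins >= 1.0))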
# let's find some alphas!
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input Parameter
Step2: Preparation
Step3: Create space and time vector
Step4: Source signal - Ricker-wavelet
Step5: Time stepping
Step6: Save seismograms
Step7: Plotting
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import time as tm
import matplotlib.pyplot as plt
# Discretization
c1=20 # Number of grid points per dominant wavelength
c2=0.5 # CFL-Number
nx=200 # Number of grid points in X
ny=200 # Number of grid points in Y
T=1 # Total propagation time
# Source Signal
f0= 5 # Center frequency Ricker-wavelet
q0= 100 # Maximum amplitude Ricker-Wavelet
xscr = 100 # Source position (in grid points) in X
yscr = 100 # Source position (in grid points) in Y
# Receiver
xrec1=100; yrec1=80;  # Position Receiver 1 (in grid points)
xrec2=100; yrec2=100; # Position Receiver 2 (in grid points)
xrec3=100; yrec3=120; # Position Receiver 3 (in grid points)
# Velocity and density
modell_v = 3000*np.ones((ny,nx))
rho=2.2*np.ones((ny,nx))
# Init wavefields
vx=np.zeros((ny,nx))
vy=np.zeros((ny,nx))
p=np.zeros((ny,nx))
vx_x=np.zeros((ny,nx))
vy_y=np.zeros((ny,nx))
p_x=np.zeros((ny,nx))
p_y=np.zeros((ny,nx))
# Calculate first Lame-Paramter
l=rho * modell_v * modell_v
cmin=min(modell_v.flatten()) # Lowest P-wave velocity
cmax=max(modell_v.flatten()) # Highest P-wave velocity
fmax=2*f0 # Maximum frequency
dx=cmin/(fmax*c1) # Spatial discretization (in m)
dy=dx # Spatial discretization (in m)
dt=dx/(cmax)*c2 # Temporal discretization (in s)
lampda_min=cmin/fmax # Smallest wavelength
# Output model parameter:
print("Model size: x:",dx*nx,"in m, y:",dy*ny,"in m")
print("Temporal discretization: ",dt," s")
print("Spatial discretization: ",dx," m")
print("Number of gridpoints per minimum wavelength: ",lampda_min/dx)
x=np.arange(0,dx*nx,dx) # Space vector in X
y=np.arange(0,dy*ny,dy) # Space vector in Y
t=np.arange(0,T,dt) # Time vector
nt=np.size(t) # Number of time steps
# Plotting model
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.subplots_adjust(wspace=0.4,right=1.6)
ax1.plot(x,modell_v)
ax1.set_ylabel('VP in m/s')
ax1.set_xlabel('Depth in m')
ax1.set_title('P-wave velocity')
ax2.plot(x,rho)
ax2.set_ylabel('Density in g/cm^3')
ax2.set_xlabel('Depth in m')
ax2.set_title('Density');
tau=np.pi*f0*(t-1.5/f0)
q=q0*(1.0-2.0*tau**2.0)*np.exp(-tau**2)
# Plotting source signal
plt.figure(3)
plt.plot(t,q)
plt.title('Source signal Ricker-Wavelet')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.draw()
# Init Seismograms
Seismogramm=np.zeros((3,nt)); # Three seismograms
# Calculation of some coefficients
i_dx=1.0/(dx)
i_dy=1.0/(dy)
c1=9.0/(8.0*dx)
c2=1.0/(24.0*dx)
c3=9.0/(8.0*dy)
c4=1.0/(24.0*dy)
c5=1.0/np.power(dx,3)
c6=1.0/np.power(dy,3)
c7=1.0/np.power(dx,2)
c8=1.0/np.power(dy,2)
c9=np.power(dt,3)/24.0
## Time stepping
print("Starting time stepping...")
for n in range(2,nt):
# Update velocity
for kx in range(5,nx-4):
for ky in range(5,ny-4):
p_x=c1*(p[ky,kx+1]-p[ky,kx])-c2*(p[ky,kx+2]-p[ky,kx-1])
p_y=c3*(p[ky+1,kx]-p[ky,kx])-c4*(p[ky+2,kx]-p[ky-1,kx])
vx[ky,kx]=vx[ky,kx]-dt/rho[ky,kx]*p_x
vy[ky,kx]=vy[ky,kx]-dt/rho[ky,kx]*p_y
# Inject source wavelet
p[yscr,xscr]=p[yscr,xscr]+q[n]
# Update pressure
for kx in range(5,nx-4):
for ky in range(5,ny-4):
vx_x=c1*(vx[ky,kx]-vx[ky,kx-1])-c2*(vx[ky,kx+1]-vx[ky,kx-2])
vy_y=c3*(vy[ky,kx]-vy[ky-1,kx])-c4*(vy[ky+1,kx]-vy[ky-2,kx])
p[ky,kx]=p[ky,kx]-l[ky,kx]*dt*(vx_x+vy_y)
# Save seismograms
Seismogramm[0,n]=p[yrec1,xrec1]
Seismogramm[1,n]=p[yrec2,xrec2]
Seismogramm[2,n]=p[yrec3,xrec3]
print("Finished time stepping!")
## Save seismograms
np.save("Seismograms/FD_2D_DX4_DT2",Seismogramm)
## Image plot
fig, ax = plt.subplots(1,1)
img = ax.imshow(p);
ax.set_title('P-Wavefield')
ax.set_xticks(range(0,nx+1,int(nx/5)))
ax.set_yticks(range(0,ny+1,int(ny/5)))
ax.set_xlabel('Grid-points in X')
ax.set_ylabel('Grid-points in Y')
fig.colorbar(img)
## Plot seismograms
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.4,right=1.6, top = 2 )
ax1.plot(t,Seismogramm[0,:])
ax1.set_title('Seismogram 1')
ax1.set_ylabel('Amplitude')
ax1.set_xlabel('Time in s')
ax1.set_xlim(0, T)
ax2.plot(t,Seismogramm[1,:])
ax2.set_title('Seismogram 2')
ax2.set_ylabel('Amplitude')
ax2.set_xlabel('Time in s')
ax2.set_xlim(0, T)
ax3.plot(t,Seismogramm[2,:])
ax3.set_title('Seismogram 3')
ax3.set_ylabel('Amplitude')
ax3.set_xlabel('Time in s')
ax3.set_xlim(0, T);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vertex Training using Vertex SDK and Custom Container
Step2: Initialize Vertex SDK
Step3: Create a Vertex Tensorboard Instance
Step4: Option
Step5: Training Output Artifact
Step6: Clean Up Artifact
|
<ASSISTANT_TASK:>
Python Code:
PROJECT_ID = "YOUR PROJECT ID"
BUCKET_NAME = "gs://YOUR BUCKET NAME"
REGION = "YOUR REGION"
SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT"
! gsutil ls -al $BUCKET_NAME
content_name = "pt-img-cls-multi-node-ddp-cust-cont"
hostname = "gcr.io"
image_name = content_name
tag = "latest"
custom_container_image_uri = f"{hostname}/{PROJECT_ID}/{image_name}:{tag}"
! pip install -r requirements.txt
from google.cloud import aiplatform
aiplatform.init(
project=PROJECT_ID,
staging_bucket=BUCKET_NAME,
location=REGION,
)
content_name = content_name + "-gpu"
tensorboard = aiplatform.Tensorboard.create(
display_name=content_name,
)
display_name = content_name
gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}"
replica_count = 1
machine_type = "n1-standard-4"
accelerator_count = 4
accelerator_type = "NVIDIA_TESLA_K80"
args = [
"--backend",
"nccl",
"--batch-size",
"128",
"--epochs",
"25",
]
custom_container_training_job = aiplatform.CustomContainerTrainingJob(
display_name=display_name,
container_uri=custom_container_image_uri,
)
custom_container_training_job.run(
args=args,
base_output_dir=gcs_output_uri_prefix,
replica_count=replica_count,
machine_type=machine_type,
accelerator_count=accelerator_count,
accelerator_type=accelerator_type,
tensorboard=tensorboard.resource_name,
service_account=SERVICE_ACCOUNT,
)
print(f"Custom Training Job Name: {custom_container_training_job.resource_name}")
print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}")
! gsutil ls $gcs_output_uri_prefix
! gsutil rm -rf $gcs_output_uri_prefix
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Random Seed
Step2: Tensors
Step3: Operations
Step4: Matrix multiplication
Step5: More operations
Step6: Automatic differentiation with Autograd
Step7: x.grad will now contain the gradient $\partial o/ \partial x$, and this will say how a change in $x$ will affect output $o$
Step8: Verifying the gradient by hand
Step9: In NLP, we are often doing classification, rather than regression (predicting a value). So, even though the name can be misleading, logistic regression is a classifier
Step10: Now, this is a very simple Neural Network! Actually, it is so simple maybe we should not call it a Neural network. But let's do so anyway.
Step11: You can see that
Step12: Loss
Step13: Now we'll ask PyTorch to update the weights (parameters) of our neural network so that our next prediction is closer to that target.
Step14: Now, if we check the output for the same input vector $\mathbf{x}$, the output should be closer to the target
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import torch
torch.manual_seed(42)
# create uninitialized 3-D tensor (values can be anything that is in memory!)
x = torch.Tensor(2, 3, 3)
print(x)
# a randomly initialized 2-D tensor (a matrix)
x = torch.rand(4, 3)
print(x)
# how to get its size
print(x.size())
# or if you know there are 2 dimensions:
time, dim = x.size()
print(time, dim)
# You can add tensors in many ways.
# The easiest is to simply use a python + operator:
y = torch.rand(4, 3)
print(x + y)
# But you can also use torch.add:
print(torch.add(x, y))
# Provide an output Tensor and save the result there:
result = torch.Tensor(4, 3)
torch.add(x, y, out=result)
print(result)
# Or add in-place (this changes y!)
# Note: Any operation that mutates a tensor in-place is post-fixed with an "_", like "add_" here.
y.add_(x)
print(y)
# Let's create a vector x with values 0..5
# We can use the arange function for that:
x = torch.arange(0, 6)
print(x)
print(x.shape)
# Now, we will reshape x to have shape 2x3
# That is, it will become a matrix!
# The values will be the same, we will just look at them differently.
x = x.view((2, 3))
print(x)
print(x.shape)
# Now, let's create a square matrix W:
W = torch.arange(0, 9).view((3, 3))
print(W)
# Now we can perform matrix multiplication, since we have 2x3 and 3x3 matrices!
# Verify if you can do this multiplication by hand, too!
# If you need some help, you can check here: https://www.mathsisfun.com/algebra/matrix-multiplying.html
h = torch.matmul(x, W)
print(h)
print(result[:, 1]) # second column
print(result[0]) # first row
print(result[:2, -1]) # first two rows, last column
print(result[1:3, :]) # middle two rows
from torch.autograd import Variable
# create an input vector x
x = Variable(torch.ones(3), requires_grad=True)
print(x)
# Now we define our function
# Note that, even though x is a vector, we can still add a single value to it.
# PyTorch will just add that value to each element of the vector.
y = (x + 2)**2 + 3
print(y)
# y has a grad_fn since it was created by an operation
# this grad_fn will be used by PyTorch for obtaining the gradient
print(y.grad_fn)
# Our final output o is the mean
o = y.mean()
print(o)
# now we can take the gradients by calling o.backward()
# this will populate x.grad
o.backward()
print(x.grad)
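# Quick hand check: o = mean((x + 2)**2 + 3), so do/dx_i = 2 * (x_i + 2) / 3.
# With x = [1, 1, 1] each entry should be 2.0, matching x.grad printed above.
manual_grad = 2 * (x.data + 2) / 3
print(manual_grad)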
import numpy as np
import matplotlib.pyplot as plt
sigmoid = lambda x: 1 / (1 + np.exp(-x))
x = np.arange(-10., 10., 0.2)
plt.plot(x, sigmoid(x), 'b', label='sigmoid')
import torch.nn as nn
import torch.nn.functional as F
class ExampleNN(nn.Module):
def __init__(self):
super(ExampleNN, self).__init__()
# an affine operation: y = Wx + b
self.linear = nn.Linear(3, 1)
def forward(self, x):
x = self.linear(x)
x = F.sigmoid(x) # output values are squashed between 0 and 1
return x
net = ExampleNN()
print(net)
params = list(net.parameters())
for p in params:
print(p)
x = Variable(torch.randn(3))
print("input:", x)
out = net(x)
print("output:", out)
out = net(x)
target = Variable(torch.zeros(1)) # a dummy target (0.)
criterion = nn.MSELoss() # this is our criterion
loss = criterion(out, target)
print("output:", out)
print("loss:", loss)
net.zero_grad() # reset gradients
loss.backward() # compute gradients
# update weights
learning_rate = 0.5
for f in net.parameters():
# for each parameter, take a small step in the opposite dir of the gradient
# sub_ substracts in-place
f.data.sub_(f.grad.data * learning_rate)
new_out = net(x)
new_loss = criterion(new_out, target)
print("target:", target)
print("out:", out)
print("new out (should be closer to target):", new_out)
print("\nloss:", loss)
print("new loss (should be lower):", new_loss)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Preparing the Inputs
Step2: The first few lines of the xyz.in file are
Step3: Plot HAC (heat current autocorrelations) & RTC (running thermal conductivity)
|
<ASSISTANT_TASK:>
Python Code:
from pylab import *
from ase.build import graphene_nanoribbon
from thermo.gpumd.data import load_hac
from thermo.gpumd.io import ase_atoms_to_gpumd
gnr = graphene_nanoribbon(60, 36, type='armchair', sheet=True, vacuum=3.35/2, C_C=1.44)
gnr.euler_rotate(theta=90)
l = gnr.cell.lengths()
gnr.cell = gnr.cell.new((l[0], l[2], l[1]))
l = l[2]
gnr.center()
gnr.pbc = [True, True, False]
gnr
ase_atoms_to_gpumd(gnr, M=3, cutoff=2.1)
aw = 2
fs = 16
font = {'size' : fs}
matplotlib.rc('font', **font)
matplotlib.rc('axes' , linewidth=aw)
def set_fig_properties(ax_list):
tl = 8
tw = 2
tlm = 4
for ax in ax_list:
ax.tick_params(which='major', length=tl, width=tw)
ax.tick_params(which='minor', length=tlm, width=tw)
ax.tick_params(which='both', axis='both', direction='in', right=True, top=True)
hac = load_hac([50000]*3, [10]*3)
print(hac.keys())
print(hac['run0'].keys())
t = hac['run0']['t']
hac_ave_i = np.zeros(hac['run0']['jxijx'].shape[0])
hac_ave_o = np.zeros_like(hac_ave_i)
ki_ave, ko_ave = np.zeros_like(hac_ave_i), np.zeros_like(hac_ave_o)
for runkey in hac.keys():
hac_ave_i += hac[runkey]['jxijx']+hac[runkey]['jyijy']
hac_ave_o += hac[runkey]['jxojx']+hac[runkey]['jyojy']
ki_ave += (hac[runkey]['kxi']+hac[runkey]['kyi'])
ko_ave += (hac[runkey]['kxo']+hac[runkey]['kyo'])
hac_ave_i /= hac_ave_i.max()
hac_ave_o /= hac_ave_o.max()
ki_ave /= 6.
ko_ave /= 6.
figure(figsize=(12,10))
subplot(2,2,1)
set_fig_properties([gca()])
loglog(t, hac_ave_i, color='C3')
loglog(t, hac_ave_o, color='C0')
xlim([1e-1, 1e3])
ylim([1e-4, 1])
xlabel('Correlation Time (ps)')
ylabel('Normalized HAC')
title('(a)')
subplot(2,2,2)
set_fig_properties([gca()])
for runkey in hac.keys():
plot(hac[runkey]['t'], (hac[runkey]['kxi']+hac[runkey]['kyi'])/2, color='C7',alpha=0.5)
plot(t, ki_ave, color='C3', linewidth=3)
xlim([0, 1000])
gca().set_xticks(range(0,1001,200))
ylim([0, 1500])
gca().set_yticks(range(0,1501,500))
xlabel('Correlation Time (ps)')
ylabel(r'$\kappa^{in}$ (W/m/K)')
title('(b)')
subplot(2,2,3)
set_fig_properties([gca()])
for runkey in hac.keys():
plot(hac[runkey]['t'], (hac[runkey]['kxo']+hac[runkey]['kyo'])/2, color='C7',alpha=0.5)
plot(t, ko_ave, color='C0', linewidth=3)
xlim([0, 1000])
gca().set_xticks(range(0,1001,200))
ylim([0, 1500])
gca().set_yticks(range(0,4001,1000))
xlabel('Correlation Time (ps)')
ylabel(r'$\kappa^{out}$ (W/m/K)')
title('(c)')
subplot(2,2,4)
set_fig_properties([gca()])
plot(t, ko_ave, color='C0', linewidth=3)
plot(t, ki_ave, color='C3', linewidth=3)
plot(t, ki_ave + ko_ave, color='k', linewidth=3)
xlim([0, 1000])
gca().set_xticks(range(0,1001,200))
ylim([0, 1500])
gca().set_yticks(range(0,4001,1000))
xlabel('Correlation Time (ps)')
ylabel(r'$\kappa$ (W/m/K)')
title('(d)')
tight_layout()
show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Maps
|
<ASSISTANT_TASK:>
Python Code:
from linearset import Set
smith = Set()
smith.add("CSCI-112")
smith.add("MATH-121")
smith.add("HIST-340")
smith.add("ECON-101")
robert = Set()
robert.add('POL-101')
robert.add('ANTH-230')
robert.add('CSCI-112')
robert.add('ECON-101')
if smith == robert:
print("Smith and Robert are taking the same courses.")
else:
sameCourses = smith.intersect(robert)
if sameCourses.isEmpty():
print("Smith and Robert are not taking any of classes together.")
else:
print("Smith and Robert are taking some of the same courses")
for course in sameCourses:
print(course)
def func( *args):
print("number of arguments: ", len(args))
_sum = 0
for value in args:
_sum += value
print ("Sum of the arguments: ", _sum)
func(12)
func(5,8,2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic python syntax
Step2: Exercise
Step3: Variables
Step4: Variable types
Step5: When writing Python code we will often run into error messages like this. The errors come in many different types, and learning to understand these messages can help the development process significantly. Normally the description at the end gives a lot of information. Here we see that we cannot add an integer to a string
Step6: Exercise
Step7: If statements
Step8: If we need to check for multiple conditions we can use the if - elif - else construction.
Step9: Exercise
Step10: We can access the elements of a list using the following syntax
Step11: Lists have various useful functions that can be used to manipulate them. To see them all you can use the Python documentation at https
Step12: Loops
Step13: Often we want to loop a certain number of times. Then Python's range function is very useful. This function returns a generator (almost a list) of all the integers.
Step14: Exercise
Step15: Exercise
Step16: Dictionaries
Step17: Note that a dictionary is an unordered data type. So we can not expect a particular order if we want to loop through the dictionary
Step18: For both dictionaries and lists it is easy to test if an element exists by using the in operator
Step19: Exercise
Step22: Functions and modules
Step24: Modules and packages
Step25: Many very useful standard libraries can be found at
|
<ASSISTANT_TASK:>
Python Code:
print("Hello, World")
5 + 3
9 + 16
400*321
height = 1.8
weight = 78
(height+weight)*2
# This is a comment
# We can store the result of calculation in a new variable
bmi = weight / height ** 2
bmi
string_variable = "test" # A string
int_variable = 4 # An intenger
float_variable = 3.14 # a floating point number
height = int_variable * 2
age = 2
name = "Rakoto"
print(int_variable + 4) # As we want to print out multiple things from this cell we use the print() statement
print(string_variable + " again")
print ("the name is "+ name +"and age =" + str(age))
# We can use the type() function to determine the type of the variable
print(type(string_variable))
print(type(int_variable))
print(type(float_variable))
# Sometimes python can convert between types automatically as in the BMI example above
print(type(height))
print(type(weight))
print(type(bmi))
# Other times not
string_variable + int_variable # This line will give a python error
# If we wanted to create the string "test4" we need to explicitly convert the initeger to a string
print(string_variable + str(int_variable))
# We can also convert from string to integer or float
print(float("3.14")**2)
t = True # Boolean true
f = False # Boolean false
print(type(t))
print("Equality " ,"test" == "test") # Test for equality with ==
print("Not Equal " ,"test" != "test") # Test for not equal with !=
print("And ", t and f) # Boolean and
print("Or ", t or f) # Boolean or
# Some more examples
print("3 == 3 is", 3 == 3)
print('"3" = 3 is',"3" == 3) # This will be false as "3" is a string and 3 is an integer
print ("3 > 2 is", 3 > 2) # Larger than
print ("2 >= 2 is", 2 >= 2) # Larger than or equal
print("2 < 3 is",2 < 3) # Smaller than
print("3 <= 2 is",3 <= 2) # Smaller than or equal
name = "Rania"
if name == "Rania":
print("Hi Rania")
else:
print("Who are you?")
name = "John"
if name == "Rania":
print("Hi Rania")
else:
print("Who are you?")
name = "Gunnar"
if name == "Rania":
print("Hi Rania")
elif name == "Gunnar":
print("Hi Gunnar")
else:
print("Who are you?")
l = [1, 5, 9] # Defining a list
print(l)
print(type(l))
l = [1, 5, 9]
print("First element", l[0])
print("Second element", l[1])
print("Thrid element", l[2])
# If we try to access an element that does not exist we get an error
print(l[5])
# We can edit individual elements in the list by assigning to them
l2 = ["a", "b", "c"]
print(l2)
l2[1] = "d"
print(l2)
l = ["a", "b"]
print(l)
l.append("c") # add an element to the end of the list
print(l)
l.remove("b") # Removes the element "b" from the list
print(l)
print(len(l)) # len(l) gives the length of a list
developers = ["Jonathan", "Jyri", "Mix", "Gunnar"]
for developer in developers:
print(developer)
print(list(range(10))) # We need the extra list command as range returns a generator
# range(a) returns all the integers starting at 0 and ending a a-1
# We can also use range(3,6) to return the integer 3,4,5
for i in range(3,10):
print(i)
i = 0
while i < 10:
print(i)
i = i + 1 # We can also write this as i += 1
while True:
number = int(input("Input a number and I will double it, type 0 to quit "))
# We add the int() function as input returns a string
if number == 0:
break # We can use break to break out of a loop
print(number * 2)
num = int(input("Type in a number "))
# Your code goes here:
dictionary = {"key1": 3, "key2": 5} # Defining a dictionary
print(dictionary)
print(type(dictionary))
# We can access items in the same way as for lists:
print(dictionary["key1"])
for d in dictionary: # Loops through the keys
print(d)
for d in dictionary.values(): #Looping through the values
print(d)
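# We can also loop over key/value pairs together with .items():
for key, value in dictionary.items():
    print(key, "->", value)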
d = {"key1": "a"}
print("key1 in d", "key1" in d)
print("key2 in d", "key2" in d)
l = [1, 4]
print("4 in l", 4 in l)
print("2 in l", 2 in l)
l = [1,2,3,5,5,2,7,1,1,1]
d = {}
# Your code goes here
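# One possible solution, assuming the exercise asks you to count how often each
# number occurs in the list (the exact task text is not shown here):
for item in l:
    if item in d:
        d[item] = d[item] + 1
    else:
        d[item] = 1
print(d)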
def double(x):
"""Doubles the input value."""
return x * 2
print(double(4))
print(double(17))
def alternative_len(l):
"""Calculates the length of a list."""
length = 0
for _ in l:
length += 1
return length
l = list(range(20,400))
print("Normal len", len(l))
print("Alternative len", alternative_len(l))
import math
print(math.pi)
double = [x * 2 for x in range(10)]
double
# or a list of sine values
from math import sin, pi
sin_values = [sin(pi * i / 4) for i in range(9)]
sin_values
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create features and labels
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
iris = sns.load_dataset('iris')
iris.head()
iris.shape
iris['species'].value_counts().plot(kind='bar')
%matplotlib inline
sns.pairplot(iris, hue='species')
X_iris = iris.drop('species', axis=1)
X_iris.shape
y_iris = iris['species']
y_iris.shape
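# A possible next step (a sketch, not spelled out above): split the data and fit
# any simple classifier on the features and labels defined here.
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
Xtrain, Xtest, ytrain, ytest = train_test_split(X_iris, y_iris, random_state=1)
model = GaussianNB()
model.fit(Xtrain, ytrain)
print("test accuracy:", model.score(Xtest, ytest))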
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: To compute most measures, the data (i.e., the objectives) is normalized. Normalization is scaling the data between 0 and 1. Why do we normalize?
Step10: Data Format
Step13: Reference Set
Step17: Spread
Step20: IGD = inter-generational distance; i.e. how good are you compared to the best known?
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# All the imports
from __future__ import print_function, division
import pom3_ga, sys
import pickle
# TODO 1: Enter your unity ID here
__author__ = "<sbiswas4>"
def normalize(problem, points):
"""Normalize all the objectives
in each point and return them.
"""
meta = problem.objectives
all_objs = []
for point in points:
objs = []
for i, o in enumerate(problem.evaluate(point)):
low, high = meta[i].low, meta[i].high
# TODO 3: Normalize 'o' between 'low' and 'high'; Then add the normalized value to 'objs'
if high==low:
objs.append(0)
continue
else:
objs.append((o - low)/(high-low))
all_objs.append(objs)
return all_objs
# Performing experiments for [5, 10, 50] generations.
problem = pom3_ga.POM3()
pop_size = 10
repeats = 10
test_gens = [5, 10, 50]
def save_data(file_name, data):
"""Save 'data' to 'file_name.pkl'."""
with open(file_name + ".pkl", 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def load_data(file_name):
"""Retrieve data from 'file_name.pkl'."""
with open(file_name + ".pkl", 'rb') as f:
return pickle.load(f)
def build(problem, pop_size, repeats, test_gens):
"""Repeat the experiment 'repeats' times for each value in 'test_gens'."""
tests = {t: [] for t in test_gens}
tests[0] = [] # For Initial Population
for _ in range(repeats):
init_population = pom3_ga.populate(problem, pop_size)
pom3_ga.say(".")
for gens in test_gens:
tests[gens].append(normalize(problem, pom3_ga.ga(problem, init_population, retain_size=pop_size, gens=gens)[1]))
tests[0].append(normalize(problem, init_population))
print("\nCompleted")
return tests
# Repeat experiments.
# tests = build(problem, pop_size, repeats, test_gens)
# Save experiment data into a file.
# save_data("dump", tests)
# Load the experiment data from dump.
tests = load_data("dump")
def make_reference(problem, *fronts):
"""Make a reference set comparing all the fronts.
Here the comparison we use is bdom. It can
be altered to use cdom as well.
"""
retain_size = len(fronts[0])
reference = []
for front in fronts:
reference+=front
def bdom(one, two):
"""Return True if 'one' dominates 'two',
else return False.
:param one - [pt1_obj1, pt1_obj2, pt1_obj3, pt1_obj4]
:param two - [pt2_obj1, pt2_obj2, pt2_obj3, pt2_obj4]
"""
dominates = False
for i, obj in enumerate(problem.objectives):
gt, lt = pom3_ga.gt, pom3_ga.lt
better = lt if obj.do_minimize else gt
            # TODO 3 (completed): 'one' dominates 'two' if it is no worse in
            # every objective and strictly better in at least one.
            if better(two[i], one[i]):
                return False
            if better(one[i], two[i]):
                dominates = True
        return dominates
def fitness(one, dom):
return len([1 for another in reference if dom(one, another)])
fitnesses = []
for point in reference:
fitnesses.append((fitness(point, bdom), point))
reference = [tup[1] for tup in sorted(fitnesses, reverse=True)]
return reference[:retain_size]
make_reference(problem, tests[5][0], tests[10][0], tests[50][0])
def eucledian(one, two):
"""Compute the Euclidean distance between
2 vectors. We assume the input vectors
are normalized.
:param one: Vector 1
:param two: Vector 2
:return:
"""
# TODO 4: Code up the eucledian distance. https://en.wikipedia.org/wiki/Euclidean_distance
#dist = 0
return (sum([(o-t)**2 for o,t in zip(one, two)]) / len(one))**0.5
#return dist
def sort_solutions(solutions):
"""Sort a list of lists before computing spread."""
def sorter(lst):
m = len(lst)
weights = reversed([10 ** i for i in xrange(m)])
return sum([element * weight for element, weight in zip(lst, weights)])
return sorted(solutions, key=sorter)
def closest(one, many):
min_dist = sys.maxint
closest_point = None
for this in many:
dist = eucledian(this, one)
if dist < min_dist:
min_dist = dist
closest_point = this
return min_dist, closest_point
def spread(obtained, ideals):
"""Calculate the spread (a.k.a. diversity)
for a set of solutions.
"""
s_obtained = sort_solutions(obtained)
s_ideals = sort_solutions(ideals)
d_f = closest(s_ideals[0], s_obtained)[0]
d_l = closest(s_ideals[-1], s_obtained)[0]
n = len(s_ideals)
distances = []
for i in range(len(s_obtained)-1):
distances.append(eucledian(s_obtained[i], s_obtained[i+1]))
d_bar = sum(distances)/len(distances)
# TODO 5: Compute the value of spread using the definition defined in the previous cell.
d_sum = sum([abs(d_i - d_bar) for d_i in distances])
delta = (d_f + d_l + d_sum) / (d_f + d_l + (n-1)*d_bar)
return delta
ref = make_reference(problem, tests[5][0], tests[10][0], tests[50][0])
print(spread(tests[5][0], ref))
print(spread(tests[10][0], ref))
print(spread(tests[50][0], ref))
def igd(obtained, ideals):
"""Compute the IGD for a
set of solutions.
:param obtained: Obtained pareto front
:param ideals: Ideal pareto front
:return:
"""
# TODO 6: Compute the value of IGD using the definition defined in the previous cell.
igd_val = sum([closest(ideal, obtained)[0] for ideal in ideals]) / len(ideals)
return igd_val
# igd_val = 0
# return igd_val
ref = make_reference(problem, tests[5][0], tests[10][0], tests[50][0])
print(igd(tests[5][0], ref))
print(igd(tests[10][0], ref))
print(igd(tests[50][0], ref))
import sk
sk = reload(sk)
def format_for_sk(problem, data, measure):
"""Convert the experiment data into the format
required for sk.py and compute the desired
'measure' for all the data.
"""
gens = data.keys()
reps = len(data[gens[0]])
measured = {gen:["gens_%d"%gen] for gen in gens}
for i in range(reps):
ref_args = [data[gen][i] for gen in gens]
ref = make_reference(problem, *ref_args)
for gen in gens:
measured[gen].append(measure(data[gen][i], ref))
return measured
def report(problem, tests, measure):
measured = format_for_sk(problem, tests, measure).values()
sk.rdivDemo(measured)
print("*** IGD ***")
report(problem, tests, igd)
print("\n*** Spread ***")
report(problem, tests, spread)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This collection of rates has the main CNO rates plus a breakout rate into the hot CNO cycle
Step2: To evaluate the rates, we need a composition. This is defined using a list of Nuclei objects.
Step3: Interactive exploration is enabled through the Explorer class, which takes a RateCollection and a Composition
|
<ASSISTANT_TASK:>
Python Code:
import pynucastro as pyrl
files = ["c12-pg-n13-ls09",
"c13-pg-n14-nacr",
"n13--c13-wc12",
"n13-pg-o14-lg06",
"n14-pg-o15-im05",
"n15-pa-c12-nacr",
"o14--n14-wc12",
"o15--n15-wc12",
"o14-ap-f17-Ha96c",
"f17-pg-ne18-cb09",
"ne18--f18-wc12",
"f18-pa-o15-il10"]
rc = pyrl.RateCollection(files)
comp = pyrl.Composition(rc.get_nuclei())
comp.set_solar_like()
re = pyrl.Explorer(rc, comp, size=(1000,1000),
ydot_cutoff_value=1.e-25)
re.explore()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: These simple operations on numbers in Python 3 work exactly as you'd expect, but that's not true across all programming languages.
Step2: Next, let's create a list of numbers and do math to that list.
Step3: How many elements or numbers does the list c contain? Yes, this is easy to count now, but you will eventually work with lists that contains MANY numbers. To get the length of a list (or array), use len().
Step4: What exactly is c? It looks like an array because of the square brackets, but it isn't. To see what any variable is, use type().
Step5: Now, some math... Let's square each value in c and put those values in a new list called d. To square a variable (or number), you use **. So $3^{**}2=9$. The rest of the math operations (+ / - x) are 'normal.'
Step6: This should not have worked. Why? The short answer is that a list is very useful, but it is not an array. However, you can convert your lists to arrays (and back again if you feel you need to). In order to do this conversion (and just about anything else), we need something extra.
Step7: To convert our list $c = [0,1,2,3,4,5,6,7,8,9]$ to an array we use numpy.array(),
Step8: Great! However, typing numpy over and over again can get tiresome, so we can import it and give it a shorter name. It is common to use the following
Step9: In this notation, converting a list to an array would be np.array(c).
Step10: C. Arrays of numbers
Step11: Next make an array with endpoints 0 and 1 (include 0 and 1), that has 50 values in it. You can use either (both?) np.arange or np.linspace. Which is easier to you? How many numbers do you get? Are these numbers integers or floats (decimal place)?
Step12: For all the possible plotting symbols, see
Step13: D. A more complicated function
Step14: Would this code be easy to edit for other temperatures?
Step15: Plotting Multiple Curves
Step16: Next, let's have you try an example. We mentioned above that wavelength and frequency are related by the speed of light,
|
<ASSISTANT_TASK:>
Python Code:
## You can use Python as a calculator:
5*7 #This is a comment and does not affect your code.
#You can have as many as you want.
#No worries.
5+7
5-7
5/7
a = 5
b = 7
print(a)
print(b)
print(a*b , a+b, a/b)
a = 5.
b = 7
print(a*b, a+b, a/b)
c = [0,1,2,3,4,5,6,7,8,9]
print(c)
len(c)
type(c) #pick a variable: a, b, or c and type it in the parentheses
d = c**2
import numpy
c = numpy.array(c)
d = c**2
print(d)
type(d)
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
x = c
y = d
p = plt.plot(x,y)
p = plt.plot(x,y**2)
np.arange(0,10,2) #here the step size is 2. So you'll get even numbers.
np.linspace(0,10,2) #here you're asking for 2 values. Guess what they'll be!
import ___ as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
___ = np.linspace(____)
___ = np.arange(____)
# Clear the plotting field.
plt.clf() # No need to add anything inside these parentheses.
plt.plot(__,__,'ro') # The 'ro' says you want to use Red o plotting symbols.
import ___ as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
x = np.linspace(-1,1,100)
y = np.sqrt(______)
# Clear the plotting field.
plt.clf() # No need to add anything inside these parentheses.
plt.plot(x,y,'ro') # The 'ro' says you want to use Red o plotting symbols.
plt.xlim([-2,2])
plt.ylim([-2,2])
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
x = np.linspace(100,2000,10000)*1e-9 #wavelength, we want a range of 100 nm to 2000 nm, but in METERS
Blam = 2.0*6.626e-34*2.998e8**2/x**5/(np.exp(6.626e-34*2.998e8/(x*1.381e-23*5800.0))-1.0)
plt.clf()
p = plt.plot(x*1e9,Blam) #we multiply by 1e9 so that the x axis shows nm
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel('Spectral Radiance ()') #What are the units?
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 # J s
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's pick the sun. YOU will need to PICK the temperature and then a range of
# frequencies or wavelengths that "make sense" for that temperature.
# We know that the sun peaks in visible part of the spectrum. This wavelength
# is close to 500 nm. Let's have the domain (x values) go from 100 nm to 2000 nm.
# 1 nm = 10^-9 m = 10^-7 cm.
lam = np.linspace(100,2000,10000)*1e-9 #wavelength in nm
nu = c/lam
T = 5800.0
exp = np.exp(h*c/(lam*k*T))
num = 2.0 * h * c**2
denom = lam**5 * (exp - 1.0)
Blam = num/denom
plt.clf()
p = plt.plot(lam*1e9,Blam)
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel(r'Spectral Radiance (W m$^{-3}$)') #What are the units?
# Try a log-log plot.
#p = plt.loglog(wav,Bnu)
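# Sanity check (a small addition): Wien's displacement law gives the peak of the
# blackbody curve at lambda_max = b / T with b ~ 2.898e-3 m K. For T = 5800 K
# that is roughly 500 nm, which should match the peak of the curve plotted above.
lam_peak = 2.898e-3 / T
print("Wien peak:", lam_peak * 1e9, "nm")
print("Peak of computed curve:", lam[np.argmax(Blam)] * 1e9, "nm")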
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 #
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's try to recreate the plot above.
# Pick temperatures: T1 = 7000 K , T2= 5800 K, and T3 = 4000 K.
# Let's have the domain (x values) go from 100 nm to 2000 nm.
# 1 nm = 10^-9 m.
wav = np.linspace(100,2000,10000)*1e-9 #in meters
T1 = 7000.
T2 = 5800.
T3 = 4000.
num = 2.0 * h * c**2
exp1 = np.exp(h*c/(wav*k*T1))
denom1 = wav**5 * (exp1 - 1.0)
exp2 = np.exp(h*c/(wav*k*T2))
denom2 = wav**5 * (exp2 - 1.0)
exp3 = np.exp(h*c/(wav*k*T3))
denom3 = wav**5 * (exp3 - 1.0)
Bnu1 = num/denom1
Bnu2 = num/denom2
Bnu3 = num/denom3
plt.clf()
p1 = plt.plot(wav*1e9,Bnu1,label='T =7000 K')
p2 = plt.plot(wav*1e9,Bnu2,label='T = 5800 K')
p3 = plt.plot(wav*1e9,Bnu3,label='T = 4000 K')
xl = plt.xlabel('Wavelength (nm)')
yl = plt.ylabel(r'Spectral Radiance (W m$^{-3}$)')
l = plt.legend()
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Constants in MKS (meters, kilograms, & seconds)
h = 6.626e-34 #
c = 2.998e8 # m/s
k = 1.381e-23 # J/K
# Let's try to recreate the plot above.
# Pick three temperatures.
# Decide on a domain in Hertz (frequency) that makes sense.
# c = nu x lambda, nu = c/lambda
#### Put your code here ###
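# One possible solution sketch (the exercise leaves the temperatures and the
# frequency range up to you): Planck's law per unit frequency is
# B_nu = 2 h nu**3 / c**2 / (exp(h nu / (k T)) - 1).
nu = np.linspace(1e13, 3e15, 10000)  # roughly 30 um down to 100 nm in wavelength
for T in [7000., 5800., 4000.]:
    Bnu = 2.0 * h * nu**3 / c**2 / (np.exp(h * nu / (k * T)) - 1.0)
    plt.plot(nu, Bnu, label='T = {:.0f} K'.format(T))
plt.xlabel('Frequency (Hz)')
plt.ylabel(r'Spectral Radiance (W m$^{-2}$ Hz$^{-1}$ sr$^{-1}$)')
plt.legend()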
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Expected
Step2: In class, we looked at a passwords database that doesn't save actual passwords, only hashes thereof. Even system administrators with the keys to the database have no means to force a hash to run backwards to regain the phrase behind it. A hash is a one-way street.
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
m = hashlib.sha256()
m.update(b"Nobody inspects")
m.update(b" the spammish repetition")
m.digest()
result = hashlib.sha256(b"Nobody inspects the spammish repetition").hexdigest()
result
print("Digest size", m.digest_size)
print("Block size ", m.block_size)
# Uncomment me to check your result
# assert result == 'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
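# Sketch: real password databases do not store a bare hash either; they store a
# salted, slow hash. Using only the standard library, one option is
# hashlib.pbkdf2_hmac (the salt and iteration count here are illustrative):
import os
salt = os.urandom(16)
stored = hashlib.pbkdf2_hmac('sha256', b'my secret password', salt, 100_000)
print(salt.hex(), stored.hex())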
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Integrated gradients
Step2: Download Inception V1 from TF-Hub
Step4: From the TF Hub module page, you need to keep in mind the following about Inception V1 for image classification
Step6: Load and preprocess images with tf.image
Step9: Applying integrated gradients
Step 1
Step10: Step 2
Step12: Step 3
Step13: Generate interpolated images along a linear path at alpha intervals between a black baseline image and the example "Giant Panda" image.
Step14: The interpolated images are visualized below. Note that another way of thinking about the $\alpha$ constant is that it is monotonically and consistently increasing each interpolated image's intensity.
Step16: Compute gradients
Step17: Compute gradients between your model Inception V1's predicted probabilities for the target class on each interpolated image with respect to each interpolated input. Recall that your model returns a (1, 1001) shaped Tensor of logits that you will convert to predicted probabilities for every class. You need to pass the correct ImageNet target class index to the compute_gradients function below in order to identify the specific output tensor you wish to explain in relation to your input and baseline.
Step18: Note the output shape (n_interpolated_images, img_height, img_width, RGB).
Step19: Why not just use gradients for attribution? Saturation
Step21: Notice in the left plot above, how the model prediction function quickly learns the correct "Giant Panda" class when alpha is between 0.0 and 0.3 and then largely flattens between 0.3 and 1.0. There could still be features that the model relies on for correct prediction that differ from the baseline but the magnitudes of those feature gradients become really small and bounce around 0 starting from 0.3 to 1.0.
Step22: Recall that a feature's gradient will vary in magnitude over the interpolated images between the baseline and input. You want to choose a method to best approximate the area of difference, also known as the integral, between your baseline and input in the feature space. Let's consider the down facing parabola function $y = sin(x*\pi)$ varying between 0 and 1 as a proxy for how a feature gradient could vary in magnitude and sign over different alphas. To implement IG, you care about approximation accuracy and convergence. Left, Right, and Midpoint Riemann Sums utilize rectangles to approximate areas under the function while Trapezoidal Riemann Sums utilize trapezoids.
Step24: Which integral approximation method should you choose for IG?
Step26: $IntegratedGrads^{approx}_{i}(x) ::= (x_{i} - x'_{i}) \times \sum_{k=1}^{m} \frac{\partial F(x' + \frac{k}{m}\times(x - x'))}{\partial x_{i}} \times \frac{1}{m}$
Step27: The integral_approximation function takes the gradients of the predicted probability of the "Giant Panda" class with respect to the interpolated images between the baseline and "Giant Panda" image.
Step28: You can confirm averaging across the gradients of m interpolated images returns an integrated gradients tensor with the same shape as the original "Giant Panda" image.
Step30: Putting it all together
Step32: Again, you can check that the IG feature attributions have the same shape as the input "Giant Panda" image.
Step 4
Step34: Through utilizing the completeness axiom and the corresponding convergence function above, you were able to identify that you needed about 50 steps to approximate feature importances within 5% error for the "Giant Panda" image.
Step 5
Step35: Visual inspection of the IG attributions on the "Fireboat" image shows that Inception V1 identifies the water cannons and spouts as contributing to its correct prediction.
Step36: IG attributions on the "School Bus" image highlight the shape, front lighting, and front stop sign.
Step37: Returning to the "Giant Panda" image, IG attributions hightlight the texture, nose shape, and white fur of the Panda's face.
Step38: How do different baselines impact interpretation of IG attributions?
Step39: Returning to the school bus image, a black baseline really highlighted the school bus shape and stop sign as strongly distinguishing features. In contrast, a random noise baseline makes interpretation of the IG attribution mask significantly more difficult. In particular, this attribution mask would wrongly lead you to believe that the model found a small area of pixels along the side of the bus significant.
Step40: Are there any scenarios where you prefer a non-black baseline? Yes.
Step41: A white baseline is a better contrastive choice here to highlight the important pixels on the beetle.
Step42: Ultimately, picking any constant color baseline has potential interpretation problems through just visual inspection alone without consideration of the underlying values and their signs. Baseline selection is still an area of active research with various proposals e.g. averaging multiple random baselines, blurred inputs, etc. discussed in depth in the distill.pub article Visualizing the Impact of Feature Attribution Baselines.
Step43: Without any prior understanding of how to differentiate these dogs or the features to do so, what can you learn from IG's feature importances?
Step44: Comparatively, IG also highlights the face and body shape of the Labrador Retriever with a density of bright pixels on its straight and short hair coat. This provides additional evidence toward the length and texture of the coats being key differentiators between these 2 breeds.
Step45: From visual inspection of the IG attributions, you now have insight into the underlying causal structure behind distinguishing Golden Retrievers and Yellow Labrador Retrievers without any prior knowledge. Going forward, you can use this insight to improve your model's performance further through refining its learned representations of these 2 breeds by retraining with additional examples of each dog breed and augmenting your training data through random perturbations of each dog's coat textures and colors.
Step46: Inception V1 correctly classifies this image of United States Rear Admiral and Computer Scientist, Grace Hopper, under the class "military uniform" above. From visual inspection of the IG feature attributions, you can see that the brightest intensity pixels are focused around the shirt collar and tie, military insignia on the jacket and hat, and various pixel areas around her face. Note that there are potentially spurious pixels also highlighted in the background worth investigating empirically to refine the model's learned representation of military uniforms. However, IG does not provide insight into how these pixels were combined into the final prediction, so it's possible these pixels helped the model distinguish between military uniform and other similar classes such as the Windsor tie and suit.
Step47: Below is an image of the United States General Ulysses S. Grant circa 1865. He is wearing a military uniform for the same country as Rear Admiral Hopper above, but how well can the model identify a military uniform in this image, with different coloring and taken 120+ years earlier? From the model predictions above, you can see not very well, as the model incorrectly predicts a trench coat and suit above a military uniform.
Step48: Yikes! Inception V1 incorrectly predicted the image of a Greek Presidential Guard as a vestment with low confidence. The underlying training data does not appear to have sufficient representation and density of Greek military uniforms. In fact, the lack of geo-diversity in large public image datasets, including ImageNet, was studied in the paper S. Shankar, Y. Halpern, E. Breck, J. Atwood, J. Wilson, and D. Sculley. "No classification without representation
Step49: Using the IG attributions above, you can see the model focused primarily on the face and high contrast white wavy kilt in the front and vest rather than the military insignia on the red hat or sword hilt. While IG attributions alone will not identify or fix data skew or bias, when combined with model evaluation performance metrics and dataset statistics, IG attributions provide you with a guided path forward to collecting more and diverse data to improve model performance.
Step50: These rotation and zooming examples serve to highlight an important limitation of convolutional neural networks like Inception V1 - CNNs are not naturally rotationally or scale invariant. All of these examples resulted in incorrect predictions. Now you will see an example of how comparing 2 example attributions - one incorrect prediction vs. one known correct prediction - gives a deeper feature-level insight into why the model made an error to take corrective action.
Step51: Zooming in on the Labrador Retriever image causes Inception V1 to incorrectly predict a different dog breed, a Saluki. Compare the IG attributions on the incorrect and correct predictions below. You can see the IG attributions on the zoomed image still focus on the legs but they are now much further apart and the midsection is proportionally narrower. Compared to the IG attributions on the original image, the visible head size is significantly smaller as well. Armed with deeper feature-level understanding of your model's error, you can improve model performance by pursuing strategies such as training data augmentation to make your model more robust to changes in object proportions, or checking that your image preprocessing code is the same during training and serving to prevent data skew introduced by zooming or resizing operations.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pylab as plt
import numpy as np
import math
import sys
import tensorflow as tf
import tensorflow_hub as hub
inception_v1_url = "https://tfhub.dev/google/imagenet/inception_v1/classification/4"
inception_v1_classifier = tf.keras.Sequential([
hub.KerasLayer(name='inception_v1',
handle=inception_v1_url,
trainable=False),
])
inception_v1_classifier.build([None, 224, 224, 3])
inception_v1_classifier.summary()
def load_imagenet_labels(file_path):
"""
Args:
  file_path(str): A URL download path.
Returns:
  imagenet_label_array(numpy.ndarray): Array of strings with shape (1001,).
"""
labels_file = tf.keras.utils.get_file('ImageNetLabels.txt', file_path)
with open(labels_file, "r") as reader:
f = reader.read()
labels = f.splitlines()
imagenet_label_array = np.array(labels)
return imagenet_label_array
imagenet_label_vocab = load_imagenet_labels('https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt')
def parse_image(file_name):
"""This function downloads and standardizes input JPEG images for the
inception_v1 model. It applies the following processing:
  - Reads JPG file.
  - Decodes JPG file into colored image.
  - Converts data type to standard tf.float32.
  - Resizes image to expected Inception V1 input dimension of
    (224, 224, 3) with preserved aspect ratio. E.g. don't stretch image.
  - Pads image to (224, 224, 3) shape with black pixels.
Args:
  file_name(str): Direct URL path to the JPG image.
Returns:
  image(Tensor): A Tensor of floats with shape (224, 224, 3).
"""
image = tf.io.read_file(file_name)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.image.resize(image, (224, 224), preserve_aspect_ratio=True)
image = tf.image.resize_with_pad(image, target_height=224, target_width=224)
return image
# img_name_url {image_name: origin_url}
img_name_url = {
'Fireboat': 'https://storage.googleapis.com/applied-dl/temp/San_Francisco_fireboat_showing_off.jpg',
'School Bus': 'https://storage.googleapis.com/applied-dl/temp/Thomas_School_Bus_Bus.jpg',
'Giant Panda': 'https://storage.googleapis.com/applied-dl/temp/Giant_Panda_2.jpeg',
'Black Beetle': 'https://storage.googleapis.com/applied-dl/temp/Lucanus.jpeg',
'Golden Retriever': 'https://storage.googleapis.com/applied-dl/temp/Golden_retriever.jpg',
'Yellow Labrador Retriever': 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg',
'Military Uniform (Grace Hopper)': 'https://storage.googleapis.com/download.tensorflow.org/example_images/grace_hopper.jpg',
'Military Uniform (General Ulysses S. Grant)': 'https://storage.googleapis.com/applied-dl/temp/General_Ulysses_S._Grant%2C_Union_Army_(6186252896).jpg',
'Military Uniform (Greek Presidential Guard)': 'https://storage.googleapis.com/applied-dl/temp/Greek_guard_uniforms_1.jpg',
}
# img_name_path {image_name: downloaded_image_local_path}
img_name_path = {name: tf.keras.utils.get_file(name, url) for (name, url) in img_name_url.items()}
# img_name_tensors {image_name: parsed_image_tensor}
img_name_tensors = {name: parse_image(img_path) for (name, img_path) in img_name_path.items()}
plt.figure(figsize=(14,14))
for n, (name, img_tensors) in enumerate(img_name_tensors.items()):
ax = plt.subplot(3,3,n+1)
ax.imshow(img_tensors)
ax.set_title(name)
ax.axis('off')
plt.tight_layout()
# stack images into a batch for processing.
image_titles = tf.convert_to_tensor(list(img_name_tensors.keys()))
image_batch = tf.convert_to_tensor(list(img_name_tensors.values()))
image_batch.shape
def top_k_predictions_scores_labels(model, img, label_vocab, top_k=3):
"""
Args:
  model(tf.keras.Model): Trained Keras model.
  img(tf.Tensor): A 4D tensor of floats with the shape
    (img_n, img_height, img_width, 3).
  label_vocab(numpy.ndarray): An array of strings with shape (1001,).
  top_k(int): Number of results to return.
Returns:
  k_predictions_idx(tf.Tensor): A tf.Tensor [n_images, top_k] of tf.int32
    prediction indices.
  k_predictions_proba(tf.Tensor): A tf.Tensor [n_images, top_k] of tf.float32
    prediction probabilities.
  k_predictions_label(tf.Tensor): A tf.Tensor [n_images, top_k] of tf.string
    prediction labels.
"""
# These are logits (unnormalized scores).
predictions = model(img)
# Convert logits into probabilities.
predictions_proba = tf.nn.softmax(predictions, axis=-1)
# Filter top k prediction probabilities and indices.
k_predictions_proba, k_predictions_idx = tf.math.top_k(
input=predictions_proba, k=top_k)
# Lookup top k prediction labels in label_vocab array.
k_predictions_label = tf.convert_to_tensor(
label_vocab[k_predictions_idx.numpy()],
dtype=tf.string)
return k_predictions_idx, k_predictions_label, k_predictions_proba
def plot_img_predictions(model, img, img_titles, label_vocab, top_k=3):
"""Plot images with top_k predictions.
Args:
  model(tf.keras.Model): Trained Keras model.
  img(Tensor): A 4D Tensor of floats with the shape
    (img_n, img_height, img_width, 3).
  img_titles(Tensor): A 1D Tensor of strings with the shape (img_n,).
  label_vocab(numpy.ndarray): An array of strings with shape (1001,).
  top_k(int): Number of results to return.
Returns:
  fig(matplotlib.pyplot.figure): fig object to utilize for displaying, saving
    plots.
"""
pred_idx, pred_label, pred_proba = \
top_k_predictions_scores_labels(
model=model,
img=img,
label_vocab=label_vocab,
top_k=top_k)
img_arr = img.numpy()
title_arr = img_titles.numpy()
pred_idx_arr = pred_idx.numpy()
pred_label_arr = pred_label.numpy()
pred_proba_arr = pred_proba.numpy()
n_rows = img_arr.shape[0]
# Preserve image height by converting pixels to inches based on dpi.
size = n_rows * (224 // 48)
fig, axs = plt.subplots(nrows=img_arr.shape[0], ncols=1, figsize=(size, size), squeeze=False)
for idx, image in enumerate(img_arr):
axs[idx, 0].imshow(image)
axs[idx, 0].set_title(title_arr[idx].decode('utf-8'), fontweight='bold')
axs[idx, 0].axis('off')
for k in range(top_k):
k_idx = pred_idx_arr[idx][k]
k_label = pred_label_arr[idx][k].decode('utf-8')
k_proba = pred_proba_arr[idx][k]
if k==0:
s = 'Prediction {:}: ({:}-{:}) Score: {:.1%}'.format(k+1, k_idx, k_label, k_proba)
axs[idx, 0].text(244 + size, 102+(k*40), s, fontsize=12, fontweight='bold')
else:
s = 'Prediction {:}: ({:}-{:}) Score: {:.1%}'.format(k+1, k_idx, k_label, k_proba)
axs[idx, 0].text(244 + size, 102+(k*20), s, fontsize=12)
plt.tight_layout()
return fig
_ = plot_img_predictions(
model=inception_v1_classifier,
img=image_batch,
img_titles=image_titles,
label_vocab=imagenet_label_vocab,
top_k=5
)
# name_baseline_tensors. Set random seed for reproducibility of random baseline image and associated attributions.
tf.random.set_seed(42)
name_baseline_tensors = {
'Baseline Image: Black': tf.zeros(shape=(224,224,3)),
'Baseline Image: Random': tf.random.uniform(shape=(224,224,3), minval=0.0, maxval=1.0),
'Baseline Image: White': tf.ones(shape=(224,224,3)),
}
plt.figure(figsize=(12,12))
for n, (name, baseline_tensor) in enumerate(name_baseline_tensors.items()):
ax = plt.subplot(1,3,n+1)
ax.imshow(baseline_tensor)
ax.set_title(name)
ax.axis('off')
plt.tight_layout()
m_steps=20
alphas = tf.linspace(start=0.0, stop=1.0, num=m_steps+1)
def generate_path_inputs(baseline,
input,
alphas):
"""Generate m interpolated inputs between baseline and input features.
Args:
  baseline(Tensor): A 3D image tensor of floats with the shape
    (img_height, img_width, 3).
  input(Tensor): A 3D image tensor of floats with the shape
    (img_height, img_width, 3).
  alphas(Tensor): A 1D tensor of uniformly spaced floats with the shape
    (m_steps,).
Returns:
  path_inputs(Tensor): A 4D tensor of floats with the shape
    (m_steps, img_height, img_width, 3).
"""
# Expand dimensions for vectorized computation of interpolations.
alphas_x = alphas[:, tf.newaxis, tf.newaxis, tf.newaxis]
baseline_x = tf.expand_dims(baseline, axis=0)
input_x = tf.expand_dims(input, axis=0)
delta = input_x - baseline_x
path_inputs = baseline_x + alphas_x * delta
return path_inputs
path_inputs = generate_path_inputs(
baseline=name_baseline_tensors['Baseline Image: Black'],
input=img_name_tensors['Giant Panda'],
alphas=alphas)
path_inputs.shape
fig, axs = plt.subplots(nrows=1, ncols=5, squeeze=False, figsize=(24, 24))
axs[0,0].set_title('Baseline \n alpha: {:.2f}'.format(alphas[0]))
axs[0,0].imshow(path_inputs[0])
axs[0,0].axis('off')
axs[0,1].set_title('=> Interpolated Image # 1 \n alpha: {:.2f}'.format(alphas[1]))
axs[0,1].imshow(path_inputs[1])
axs[0,1].axis('off')
axs[0,2].set_title('=> Interpolated Image # 2 \n alpha: {:.2f}'.format(alphas[2]))
axs[0,2].imshow(path_inputs[2])
axs[0,2].axis('off')
axs[0,3].set_title('... => Interpolated Image # 10 \n alpha: {:.2f}'.format(alphas[10]))
axs[0,3].imshow(path_inputs[10])
axs[0,3].axis('off')
axs[0,4].set_title('... => Input Image \n alpha: {:.2f}'.format(alphas[-1]))
axs[0,4].imshow(path_inputs[-1])
axs[0,4].axis('off')
plt.tight_layout();
def compute_gradients(model, path_inputs, target_class_idx):
"""Compute gradients of model predicted probabilities with respect to inputs.
Args:
  model(tf.keras.Model): Trained Keras model.
  path_inputs(Tensor): A 4D tensor of floats with the shape
    (m_steps, img_height, img_width, 3).
  target_class_idx(Tensor): A 0D tensor of an int corresponding to the correct
    ImageNet target class index.
Returns:
  gradients(Tensor): A 4D tensor of floats with the shape
    (m_steps, img_height, img_width, 3).
"""
with tf.GradientTape() as tape:
tape.watch(path_inputs)
predictions = model(path_inputs)
# Note: IG requires softmax probabilities; converting Inception V1 logits.
outputs = tf.nn.softmax(predictions, axis=-1)[:, target_class_idx]
gradients = tape.gradient(outputs, path_inputs)
return gradients
path_gradients = compute_gradients(
model=inception_v1_classifier,
path_inputs=path_inputs,
target_class_idx=389)
fig, axs = plt.subplots(nrows=1, ncols=5, squeeze=False, figsize=(24, 24))
for i in range(5):
axs[0,i].imshow(tf.cast(255 * path_gradients[i], tf.uint8), cmap=plt.cm.inferno)
axs[0,i].axis('off')
plt.tight_layout()
pred = inception_v1_classifier(path_inputs)
pred_proba = tf.nn.softmax(pred, axis=-1)[:, 389]
plt.figure(figsize=(10,4))
ax1 = plt.subplot(1,2,1)
ax1.plot(alphas, pred_proba)
ax1.set_title('Target class predicted probability over alpha')
ax1.set_ylabel('model p(target class)')
ax1.set_xlabel('alpha')
ax1.set_ylim([0,1])
ax2 = plt.subplot(1,2,2)
# Average across interpolation steps
average_grads = tf.math.reduce_mean(path_gradients, axis=[1,2,3])
# Normalize average gradients to 0 to 1 scale. E.g. (x - min(x))/(max(x)-min(x))
average_grads_norm = (average_grads-tf.math.reduce_min(average_grads))/(tf.math.reduce_max(average_grads)-tf.reduce_min(average_grads))
ax2.plot(alphas, average_grads_norm)
ax2.set_title('Average pixel gradients (normalized) over alpha')
ax2.set_ylabel('Average pixel gradients')
ax2.set_xlabel('alpha')
ax2.set_ylim([0,1]);
def plot_riemann_sums(fn, start_val, end_val, m_steps=10):
"""Plot Riemann Sum integral approximations for single variable functions.
Args:
  fn(function): Any single variable function.
  start_val(float): Lower bound of the integration interval.
  end_val(float): Upper bound of the integration interval.
  m_steps(int): Linear interpolation steps for approximation.
Returns:
  fig(matplotlib.pyplot.figure): fig object to utilize for displaying, saving
    plots.
"""
# fn plot args
x = tf.linspace(start_val, end_val, m_steps**2+1)
y = fn(x)
fig = plt.figure(figsize=(16,4))
# Left Riemann Sum
lr_ax = plt.subplot(1,4,1)
lr_ax.plot(x, y)
lr_x = tf.linspace(0.0, 1.0, m_steps+1)
lr_point = lr_x[:-1]
lr_height = fn(lr_x[:-1])
lr_ax.plot(lr_point, lr_height, 'b.', markersize=10)
lr_ax.bar(lr_point, lr_height, width=(end_val-start_val)/m_steps, alpha=0.2, align='edge', edgecolor='b')
lr_ax.set_title('Left Riemann Sum \n m_steps = {}'.format(m_steps))
lr_ax.set_xlabel('alpha')
# Right Riemann Sum
rr_ax = plt.subplot(1,4,2)
rr_ax.plot(x, y)
rr_x = tf.linspace(0.0, 1.0, m_steps+1)
rr_point = rr_x[1:]
rr_height = fn(rr_x[1:])
rr_ax.plot(rr_point, rr_height, 'b.', markersize=10)
rr_ax.bar(rr_point, rr_height, width=-(end_val-start_val)/m_steps, alpha=0.2, align='edge', edgecolor='b')
rr_ax.set_title('Right Riemann Sum \n m_steps = {}'.format(m_steps))
rr_ax.set_xlabel('alpha')
# Midpoint Riemann Sum
mr_ax = plt.subplot(1,4,3)
mr_ax.plot(x, y)
mr_x = tf.linspace(0.0, 1.0, m_steps+1)
mr_point = (mr_x[:-1] + mr_x[1:])/2
mr_height = fn(mr_point)
mr_ax.plot(mr_point, mr_height, 'b.', markersize=10)
mr_ax.bar(mr_point, mr_height, width=(end_val-start_val)/m_steps, alpha=0.2, edgecolor='b')
mr_ax.set_title('Midpoint Riemann Sum \n m_steps = {}'.format(m_steps))
mr_ax.set_xlabel('alpha')
# Trapezoidal Riemann Sum
tp_ax = plt.subplot(1,4,4)
tp_ax.plot(x, y)
tp_x = tf.linspace(0.0, 1.0, m_steps+1)
tp_y = fn(tp_x)
for i in range(m_steps):
xs = [tp_x[i], tp_x[i], tp_x[i+1], tp_x[i+1]]
ys = [0, tp_y[i], tp_y[i+1], 0]
tp_ax.plot(tp_x,tp_y,'b.',markersize=10)
tp_ax.fill_between(xs, ys, color='C0', edgecolor='blue', alpha=0.2)
tp_ax.set_title('Trapezoidal Riemann Sum \n m_steps = {}'.format(m_steps))
tp_ax.set_xlabel('alpha')
return fig
_ = plot_riemann_sums(lambda x: tf.math.sin(x*math.pi), 0.0, 1.0, m_steps=5)
_ = plot_riemann_sums(lambda x: tf.math.sin(x*math.pi), 0.0, 1.0, m_steps=10)
def generate_alphas(m_steps=50,
method='riemann_trapezoidal'):
Args:
m_steps(Tensor): A 0D tensor of an int corresponding to the number of linear
interpolation steps for computing an approximate integral. Default is 50.
method(str): A string representing the integral approximation method. The
following methods are implemented:
- riemann_trapezoidal(default)
- riemann_left
- riemann_midpoint
- riemann_right
Returns:
alphas(Tensor): A 1D tensor of uniformly spaced floats with the shape
(m_steps,).
m_steps_float = tf.cast(m_steps, float) # cast to float for division operations.
if method == 'riemann_trapezoidal':
alphas = tf.linspace(0.0, 1.0, m_steps+1) # needed to make m_steps intervals.
elif method == 'riemann_left':
alphas = tf.linspace(0.0, 1.0 - (1.0 / m_steps_float), m_steps)
elif method == 'riemann_midpoint':
alphas = tf.linspace(1.0 / (2.0 * m_steps_float), 1.0 - 1.0 / (2.0 * m_steps_float), m_steps)
elif method == 'riemann_right':
alphas = tf.linspace(1.0 / m_steps_float, 1.0, m_steps)
else:
raise AssertionError("Provided Riemann approximation method is not valid.")
return alphas
alphas = generate_alphas(m_steps=20, method='riemann_trapezoidal')
alphas.shape
def integral_approximation(gradients,
method='riemann_trapezoidal'):
Compute numerical approximation of integral from gradients.
Args:
gradients(Tensor): A 4D tensor of floats with the shape
(m_steps, img_height, img_width, 3).
method(str): A string representing the integral approximation method. The
following methods are implemented:
- riemann_trapezoidal(default)
- riemann_left
- riemann_midpoint
- riemann_right
Returns:
integrated_gradients(Tensor): A 3D tensor of floats with the shape
(img_height, img_width, 3).
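# For the left/right/midpoint rules the gradients were already computed at the matching alphas
# (see generate_alphas), so a plain mean approximates the integral; the trapezoidal rule first
# averages adjacent gradient samples before taking the mean.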
if method == 'riemann_trapezoidal':
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
elif method == 'riemann_left':
grads = gradients
elif method == 'riemann_midpoint':
grads = gradients
elif method == 'riemann_right':
grads = gradients
else:
raise AssertionError("Provided Riemann approximation method is not valid.")
# Average integration approximation.
integrated_gradients = tf.math.reduce_mean(grads, axis=0)
return integrated_gradients
ig = integral_approximation(
gradients=path_gradients,
method='riemann_trapezoidal')
ig.shape
@tf.function
def integrated_gradients(model,
baseline,
input,
target_class_idx,
m_steps=50,
method='riemann_trapezoidal',
batch_size=32
):
Args:
model(keras.Model): A trained model to generate predictions and inspect.
baseline(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3) with the same shape as the input tensor.
input(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3).
target_class_idx(Tensor): An integer that corresponds to the correct
ImageNet class index in the model's output predictions tensor.
m_steps(Tensor): A 0D tensor of an integer corresponding to the number of
linear interpolation steps for computing an approximate integral. Default
value is 50 steps.
method(str): A string representing the integral approximation method. The
following methods are implemented:
- riemann_trapezoidal(default)
- riemann_left
- riemann_midpoint
- riemann_right
batch_size(Tensor): A 0D tensor of an integer corresponding to a batch
size for alpha to scale computation and prevent OOM errors. Note: needs to
be tf.int64 and should be < m_steps. Default value is 32.
Returns:
integrated_gradients(Tensor): A 3D tensor of floats with the same
shape as the input tensor (image_height, image_width, 3).
# 1. Generate alphas.
alphas = generate_alphas(m_steps=m_steps,
method=method)
# Initialize TensorArray outside loop to collect gradients. Note: this data structure
# is similar to a Python list but more performant and supports backpropagation.
# See https://www.tensorflow.org/api_docs/python/tf/TensorArray for additional details.
gradient_batches = tf.TensorArray(tf.float32, size=m_steps+1)
# Iterate alphas range and batch computation for speed, memory efficiency, and scaling to larger m_steps.
# Note: this implementation opted for lightweight tf.range iteration with @tf.function.
# Alternatively, you could also use tf.data, which adds performance overhead for the IG
# algorithm but provides more functionality for working with tensors and image data pipelines.
for alpha in tf.range(0, len(alphas), batch_size):
from_ = alpha
to = tf.minimum(from_ + batch_size, len(alphas))
alpha_batch = alphas[from_:to]
# 2. Generate interpolated inputs between baseline and input.
interpolated_path_input_batch = generate_path_inputs(baseline=baseline,
input=input,
alphas=alpha_batch)
# 3. Compute gradients between model outputs and interpolated inputs.
gradient_batch = compute_gradients(model=model,
path_inputs=interpolated_path_input_batch,
target_class_idx=target_class_idx)
# Write batch indices and gradients to TensorArray. Note: writing batch indices with
# scatter() allows for uneven batch sizes. Note: this operation is similar to a Python list extend().
# See https://www.tensorflow.org/api_docs/python/tf/TensorArray#scatter for additional details.
gradient_batches = gradient_batches.scatter(tf.range(from_, to), gradient_batch)
# Stack path gradients together row-wise into single tensor.
total_gradients = gradient_batches.stack()
# 4. Integral approximation through averaging gradients.
avg_gradients = integral_approximation(gradients=total_gradients,
method=method)
# 5. Scale integrated gradients with respect to input.
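# (Completeness: these scaled attributions should sum approximately to f(input) - f(baseline);
# this is verified in convergence_check below.)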
integrated_gradients = (input - baseline) * avg_gradients
return integrated_gradients
ig_attributions = integrated_gradients(model=inception_v1_classifier,
baseline=name_baseline_tensors['Baseline Image: Black'],
input=img_name_tensors['Giant Panda'],
target_class_idx=389,
m_steps=55,
method='riemann_trapezoidal')
ig_attributions.shape
def convergence_check(model, attributions, baseline, input, target_class_idx):
Args:
model(keras.Model): A trained model to generate predictions and inspect.
baseline(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3) with the same shape as the input tensor.
input(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3).
target_class_idx(Tensor): An integer that corresponds to the correct
ImageNet class index in the model's output predictions tensor.
Returns:
(none): Prints scores and convergence delta to sys.stdout.
# Your model's prediction on the baseline tensor. Ideally, the baseline score
# should be close to zero.
baseline_prediction = model(tf.expand_dims(baseline, 0))
baseline_score = tf.nn.softmax(tf.squeeze(baseline_prediction))[target_class_idx]
# Your model's prediction and score on the input tensor.
input_prediction = model(tf.expand_dims(input, 0))
input_score = tf.nn.softmax(tf.squeeze(input_prediction))[target_class_idx]
# Sum of your IG prediction attributions.
ig_score = tf.math.reduce_sum(attributions)
delta = ig_score - (input_score - baseline_score)
try:
# Check that your IG score is within 5% (relative tolerance) of the input minus baseline score.
tf.debugging.assert_near(ig_score, (input_score - baseline_score), rtol=0.05)
tf.print('Approximation accuracy within 5%.', output_stream=sys.stdout)
except tf.errors.InvalidArgumentError:
tf.print('Increase or decrease m_steps to increase approximation accuracy.', output_stream=sys.stdout)
tf.print('Baseline score: {:.3f}'.format(baseline_score))
tf.print('Input score: {:.3f}'.format(input_score))
tf.print('IG score: {:.3f}'.format(ig_score))
tf.print('Convergence delta: {:.3f}'.format(delta))
convergence_check(model=inception_v1_classifier,
attributions=ig_attributions,
baseline=name_baseline_tensors['Baseline Image: Black'],
input=img_name_tensors['Giant Panda'],
target_class_idx=389)
def plot_img_attributions(model,
baseline,
img,
target_class_idx,
m_steps=50,
cmap=None,
overlay_alpha=0.4):
Args:
model(keras.Model): A trained model to generate predictions and inspect.
baseline(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3) with the same shape as the input tensor.
img(Tensor): A 3D image tensor with the shape
(image_height, image_width, 3).
target_class_idx(Tensor): An integer that corresponds to the correct
ImageNet class index in the model's output predictions tensor.
m_steps(Tensor): A 0D tensor of an integer corresponding to the number of
linear interpolation steps for computing an approximate integral. Default
value is 50 steps.
cmap(matplotlib.cm): Defaults to None. Reference for colormap options -
https://matplotlib.org/3.2.1/tutorials/colors/colormaps.html. Interesting
options to try are None and high contrast 'inferno'.
overlay_alpha(float): A float between 0 and 1 that represents the intensity
of the original image overlay.
Returns:
fig(matplotlib.pyplot.figure): fig object to utilize for displaying, saving
plots.
# Attributions
ig_attributions = integrated_gradients(model=model,
baseline=baseline,
input=img,
target_class_idx=target_class_idx,
m_steps=m_steps)
convergence_check(model, ig_attributions, baseline, img, target_class_idx)
# Per the original paper, take the absolute sum of the attributions across
# color channels for visualization. The attribution mask shape is a greyscale image
# with shape (224, 224).
attribution_mask = tf.reduce_sum(tf.math.abs(ig_attributions), axis=-1)
# Visualization
fig, axs = plt.subplots(nrows=2, ncols=2, squeeze=False, figsize=(8, 8))
axs[0,0].set_title('Baseline Image')
axs[0,0].imshow(baseline)
axs[0,0].axis('off')
axs[0,1].set_title('Original Image')
axs[0,1].imshow(img)
axs[0,1].axis('off')
axs[1,0].set_title('IG Attribution Mask')
axs[1,0].imshow(attribution_mask, cmap=cmap)
axs[1,0].axis('off')
axs[1,1].set_title('Original + IG Attribution Mask Overlay')
axs[1,1].imshow(attribution_mask, cmap=cmap)
axs[1,1].imshow(img, alpha=overlay_alpha)
axs[1,1].axis('off')
plt.tight_layout()
return fig
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Fireboat'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=555,
m_steps=240,
cmap=plt.cm.inferno,
overlay_alpha=0.4)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['School Bus'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=780,
m_steps=100,
cmap=None,
overlay_alpha=0.2)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Giant Panda'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=389,
m_steps=55,
cmap=None,
overlay_alpha=0.5)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Fireboat'],
baseline=name_baseline_tensors['Baseline Image: Random'],
target_class_idx=555,
m_steps=240,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['School Bus'],
baseline=name_baseline_tensors['Baseline Image: Random'],
target_class_idx=780,
m_steps=100,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Black Beetle'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=307,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Black Beetle'],
baseline=name_baseline_tensors['Baseline Image: White'],
target_class_idx=307,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_predictions(
model=inception_v1_classifier,
img=tf.stack([img_name_tensors['Golden Retriever'],
img_name_tensors['Yellow Labrador Retriever']]),
img_titles=tf.stack(['Golden Retriever',
'Yellow Labrador Retriever']),
label_vocab=imagenet_label_vocab,
top_k=5
)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Golden Retriever'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=208,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Yellow Labrador Retriever'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=209,
m_steps=100,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_predictions(
model=inception_v1_classifier,
img=tf.stack([img_name_tensors['Military Uniform (Grace Hopper)'],
img_name_tensors['Military Uniform (General Ulysses S. Grant)'],
img_name_tensors['Military Uniform (Greek Presidential Guard)']]),
img_titles=tf.stack(['Military Uniform (Grace Hopper)',
'Military Uniform (General Ulysses S. Grant)',
'Military Uniform (Greek Presidential Guard)']),
label_vocab=imagenet_label_vocab,
top_k=5
)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Military Uniform (Grace Hopper)'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=653,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Military Uniform (General Ulysses S. Grant)'],
baseline=name_baseline_tensors['Baseline Image: White'],
target_class_idx=870,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
_ = plot_img_attributions(model=inception_v1_classifier,
img=img_name_tensors['Military Uniform (Greek Presidential Guard)'],
baseline=name_baseline_tensors['Baseline Image: Black'],
target_class_idx=653,
m_steps=200,
cmap=None,
overlay_alpha=0.3)
rotate90_labrador_retriever_img = tf.image.rot90(img_name_tensors['Yellow Labrador Retriever'])
upsidedown_labrador_retriever_img = tf.image.flip_up_down(img_name_tensors['Yellow Labrador Retriever'])
zoom_labrador_retriever_img = tf.keras.preprocessing.image.random_zoom(x=img_name_tensors['Yellow Labrador Retriever'], zoom_range=(0.45,0.45))
_ = plot_img_predictions(
model=inception_v1_classifier,
img=tf.stack([img_name_tensors['Yellow Labrador Retriever'],
rotate90_labrador_retriever_img,
upsidedown_labrador_retriever_img,
zoom_labrador_retriever_img]),
img_titles=tf.stack(['Yellow Labrador Retriever (original)',
'Yellow Labrador Retriever (rotated 90 degrees)',
'Yellow Labrador Retriever (flipped upsidedown)',
'Yellow Labrador Retriever (zoomed in)']),
label_vocab=imagenet_label_vocab,
top_k=5
)
labrador_retriever_attributions = integrated_gradients(model=inception_v1_classifier,
baseline=name_baseline_tensors['Baseline Image: Black'],
input=img_name_tensors['Yellow Labrador Retriever'],
target_class_idx=209,
m_steps=200,
method='riemann_trapezoidal')
zoom_labrador_retriever_attributions = integrated_gradients(model=inception_v1_classifier,
baseline=name_baseline_tensors['Baseline Image: Black'],
input=zoom_labrador_retriever_img,
target_class_idx=209,
m_steps=200,
method='riemann_trapezoidal')
fig, axs = plt.subplots(nrows=1, ncols=3, squeeze=False, figsize=(16, 12))
axs[0,0].set_title('IG Attributions - Incorrect Prediction: Saluki')
axs[0,0].imshow(tf.reduce_sum(tf.abs(zoom_labrador_retriever_attributions), axis=-1), cmap=plt.cm.inferno)
axs[0,0].axis('off')
axs[0,1].set_title('IG Attributions - Correct Prediction: Labrador Retriever')
axs[0,1].imshow(tf.reduce_sum(tf.abs(labrador_retriever_attributions), axis=-1), cmap=None)
axs[0,1].axis('off')
axs[0,2].set_title('IG Attributions - both predictions overlayed')
axs[0,2].imshow(tf.reduce_sum(tf.abs(zoom_labrador_retriever_attributions), axis=-1), cmap=plt.cm.inferno, alpha=0.99)
axs[0,2].imshow(tf.reduce_sum(tf.abs(labrador_retriever_attributions), axis=-1), cmap=None, alpha=0.5)
axs[0,2].axis('off')
plt.tight_layout();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introducing TinyImageNet
Step2: TinyImageNet-100-A classes
Step3: Visualize Examples
Step4: Pretrained model
Step5: Pretrained model performance
Step7: Saliency Maps
Step8: Once you have completed the implementation in the cell above, run the following to visualize some class saliency maps on the validation set of TinyImageNet-100-A.
Step10: Fooling Images
Step11: Run the following to choose a random validation set image that is correctly classified by the network, and then make a fooling image.
|
<ASSISTANT_TASK:>
Python Code:
# As usual, a bit of setup
import time, os, json
import numpy as np
import skimage.io
import matplotlib.pyplot as plt
from cs231n.classifiers.pretrained_cnn import PretrainedCNN
from cs231n.data_utils import load_tiny_imagenet
from cs231n.image_utils import blur_image, deprocess_image
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True)
for i, names in enumerate(data['class_names']):
print i, ' '.join('"%s"' % name for name in names)
# Visualize some examples of the training data
classes_to_show = 7
examples_per_class = 5
class_idxs = np.random.choice(len(data['class_names']), size=classes_to_show, replace=False)
for i, class_idx in enumerate(class_idxs):
train_idxs, = np.nonzero(data['y_train'] == class_idx)
train_idxs = np.random.choice(train_idxs, size=examples_per_class, replace=False)
for j, train_idx in enumerate(train_idxs):
img = deprocess_image(data['X_train'][train_idx], data['mean_image'])
plt.subplot(examples_per_class, classes_to_show, 1 + i + classes_to_show * j)
if j == 0:
plt.title(data['class_names'][class_idx][0])
plt.imshow(img)
plt.gca().axis('off')
plt.show()
model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5')
batch_size = 100
# Test the model on training data
mask = np.random.randint(data['X_train'].shape[0], size=batch_size)
X, y = data['X_train'][mask], data['y_train'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Training accuracy: ', (y_pred == y).mean()
# Test the model on validation data
mask = np.random.randint(data['X_val'].shape[0], size=batch_size)
X, y = data['X_val'][mask], data['y_val'][mask]
y_pred = model.loss(X).argmax(axis=1)
print 'Validation accuracy: ', (y_pred == y).mean()
def compute_saliency_maps(X, y, model):
Compute a class saliency map using the model for images X and labels y.
Input:
- X: Input images, of shape (N, 3, H, W)
- y: Labels for X, of shape (N,)
- model: A PretrainedCNN that will be used to compute the saliency map.
Returns:
- saliency: An array of shape (N, H, W) giving the saliency maps for the input
images.
saliency = None
##############################################################################
# TODO: Implement this function. You should use the forward and backward #
# methods of the PretrainedCNN class, and compute gradients with respect to #
# the unnormalized class score of the ground-truth classes in y. #
##############################################################################
N, _, H, W = X.shape
saliency = np.zeros([N, H, W])
scores, cache = model.forward(X)
dscores = np.zeros(scores.shape)
dscores[np.arange(N), y] = 1  # select each image's own ground-truth class score
dX, grads = model.backward(dscores, cache)
saliency = np.max(np.abs(dX), axis=1)
##############################################################################
# END OF YOUR CODE #
##############################################################################
return saliency
def show_saliency_maps(mask):
mask = np.asarray(mask)
X = data['X_val'][mask]
y = data['y_val'][mask]
saliency = compute_saliency_maps(X, y, model)
for i in xrange(mask.size):
plt.subplot(2, mask.size, i + 1)
plt.imshow(deprocess_image(X[i], data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[i]][0])
plt.subplot(2, mask.size, mask.size + i + 1)
plt.title(mask[i])
plt.imshow(saliency[i])
plt.axis('off')
plt.gcf().set_size_inches(10, 4)
plt.show()
# Show some random images
mask = np.random.randint(data['X_val'].shape[0], size=5)
show_saliency_maps(mask)
# These are some cherry-picked images that should give good results
show_saliency_maps([128, 3225, 2417, 1640, 4619])
def make_fooling_image(X, target_y, model):
Generate a fooling image that is close to X, but that the model classifies
as target_y.
Inputs:
- X: Input image, of shape (1, 3, 64, 64)
- target_y: An integer in the range [0, 100)
- model: A PretrainedCNN
Returns:
- X_fooling: An image that is close to X, but that is classifed as target_y
by the model.
X_fooling = X.copy()
##############################################################################
# TODO: Generate a fooling image X_fooling that the model will classify as #
# the class target_y. Use gradient ascent on the target class score, using #
# the model.forward method to compute scores and the model.backward method #
# to compute image gradients. #
# #
# HINT: For most examples, you should be able to generate a fooling image #
# in fewer than 100 iterations of gradient ascent. #
##############################################################################
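# Plain gradient ascent on the raw target-class score; the step size lr is an empirical choice,
# and the loop stops as soon as the prediction flips to target_y or after 100 iterations.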
it = 1
y_pred = -1
lr = 200
while it < 100 and y_pred != target_y:
score, cache = model.forward(X_fooling)
y_pred = np.argmax(score[0])
if it % 10 == 0:
print 'Iter:', it, ', Predicted class:', ' '.join('"%s"' % name for name in data['class_names'][y_pred])
dscore = np.zeros(score.shape)
dscore[:, target_y] = 1
dX, grads = model.backward(dscore, cache)
X_fooling += lr * dX
it += 1
##############################################################################
# END OF YOUR CODE #
##############################################################################
return X_fooling
# Find a correctly classified validation image
while True:
i = np.random.randint(data['X_val'].shape[0])
X = data['X_val'][i:i+1]
y = data['y_val'][i:i+1]
y_pred = model.loss(X)[0].argmax()
if y_pred == y: break
target_y = 67
X_fooling = make_fooling_image(X, target_y, model)
# Make sure that X_fooling is classified as y_target
scores = model.loss(X_fooling)
assert scores[0].argmax() == target_y, 'The network is not fooled!'
# Show original image, fooling image, and difference
plt.subplot(1, 3, 1)
plt.imshow(deprocess_image(X, data['mean_image']))
plt.axis('off')
plt.title(data['class_names'][y[0]][0])
plt.subplot(1, 3, 2)
plt.imshow(deprocess_image(X_fooling, data['mean_image'], renorm=True))
plt.title(data['class_names'][target_y][0])
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Difference')
plt.imshow(deprocess_image(X - X_fooling, data['mean_image']))
plt.axis('off')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check that the gradient is computed correctly
Step2: Choose an initial point that is feasible with respect to the constraints and the domain of the objective function
Step3: Check convergence
Step4: Implementation of the barrier method
Step5: Comparison of running times
Step6: Comments
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import scipy.optimize as scopt
import scipy.linalg as sclin
USE_COLAB = False
if not USE_COLAB:
plt.rc("text", usetex=True)
def NewtonLinConstraintsFeasible(f, gradf, hessf, A, x0, line_search, linsys_solver, args=(),
disp=False, disp_conv=False, callback=None, tol=1e-6, max_iter=100, **kwargs):
x = x0.copy()
n = x0.shape[0]
iteration = 0
lam = np.random.randn(A.shape[0])
while True:
gradient, hess = gradf(x, *args), hessf(x, *args)
h = linsys_solver(hess, A, gradient)
descent_dir = h[:n]
decrement = descent_dir.dot(hessf(x, *args).dot(descent_dir))
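# Squared Newton decrement lambda^2 = d^T H d, used as the stopping criterion.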
if decrement < tol:
if disp_conv:
print("Tolerance achieved! Decrement = {}".format(decrement))
break
alpha = line_search(x, descent_dir, f, gradf, args, **kwargs)
if alpha < 1e-16:
if disp_conv:
print("Step is too small!")
x = x + alpha * descent_dir
if callback is not None:
callback((descent_dir, x))
iteration += 1
if disp:
print("Current function val = {}".format(f(x, *args)))
print("Newton decrement = {}".format(decrement))
if iteration >= max_iter:
if disp_conv:
print("Maxiter exceeds!")
break
res = {"x": x, "num_iter": iteration, "tol": decrement}
return res
def simple_solver(hess, A, gradient):
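# Direct solve of the full (n + m) x (n + m) KKT system [[H, A^T], [A, 0]] [v; w] = [-g; 0].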
n = hess.shape[0]
n_lin_row, n_lin_col = A.shape
modified_hess = np.zeros((n + n_lin_row, n + n_lin_row))
modified_hess[:n, :n] = hess
modified_hess[n:n + n_lin_row, :n_lin_col] = A
modified_hess[:n_lin_col, n:n + n_lin_row] = A.T
rhs = np.zeros(n + n_lin_row)
rhs[:n] = -gradient
h = np.linalg.solve(modified_hess, rhs)
return h
def elimination_solver(hess, A, gradient):
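# Block elimination for the same KKT system assuming a diagonal Hessian: form the Schur
# complement S = A H^{-1} A^T, solve S w = -A H^{-1} g via a Cholesky factorization, then
# recover the primal step v = -H^{-1}(g + A^T w).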
inv_hess_diag = np.divide(1.0, np.diag(hess))
inv_hess_grad = np.multiply(-inv_hess_diag, gradient)
rhs = A.dot(inv_hess_grad)
L_inv_hess = np.sqrt(inv_hess_diag)
AL_inv_hess = A * L_inv_hess
# print(AL_inv_hess.shape)
S = AL_inv_hess.dot(AL_inv_hess.T)
cho_S = sclin.cho_factor(S)
w = sclin.cho_solve(cho_S, rhs)
# w = np.linalg.solve(S, rhs)
v = np.subtract(inv_hess_grad, np.multiply(inv_hess_diag, A.T.dot(w)))
# h = np.zeros(hess.shape[1] + A.shape[0])
# h[:hess.shape[1]] = v
# h[hess.shape[1]:hess.shape[1] + A.shape[0]] = w
return v
def backtracking(x, descent_dir, f, grad_f, args, **kwargs):
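# Backtracking line search with the Armijo sufficient-decrease condition; the step also keeps
# shrinking while f returns NaN, which keeps the iterate inside the domain of the log barrier.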
beta1 = kwargs["beta1"]
rho = kwargs["rho"]
alpha = 1
while f(x + alpha * descent_dir, *args) >= f(x, *args) + beta1 * alpha * grad_f(x, *args).dot(descent_dir) \
or np.isnan(f(x + alpha * descent_dir, *args)):
alpha *= rho
if alpha < 1e-16:
break
return alpha
def generate_KleeMinty_test_problem(n):
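# Klee-Minty cube: a linear program on which the simplex method visits about 2^n vertices,
# so its running time grows exponentially with n; it is used below to compare the simplex
# method against the barrier method.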
c = np.array([2**i for i in range(n)])
c = -c[::-1]
bounds = [(0, None) for i in range(n)]
b = np.array([5**(i+1) for i in range(n)])
a = np.array([1] + [2**(i+1) for i in range(1, n)])
A = np.zeros((n, n))
for i in range(n):
A[i:, i] = a[:n-i]
return c, A, b, bounds
n = 7
c, A, b, _ = generate_KleeMinty_test_problem(n)
eps = 1e-10
def f(x, c, mu):
n = c.shape[0]
return c.dot(x[:n]) - mu * np.sum(np.log(eps + x))
def gradf(x, c, mu):
grad = np.zeros(len(x))
n = c.shape[0]
grad[:n] = c - mu / (eps + x[:n])
grad[n:] = -mu / (eps + x[n:])
return grad
def hessf(x, c, mu):
return mu * np.diag(1. / (eps + x)**2)
A_lin = np.zeros((n, n + A.shape[0]))
A_lin[:n, :n] = A
A_lin[:n, n:n + A.shape[0]] = np.eye(A.shape[0])
mu = 0.1
scopt.check_grad(f, gradf, np.random.rand(n), c, mu)
x0 = np.zeros(2*n)
x0[:n] = np.random.rand(n)
x0[n:2*n] = b - A.dot(x0[:n])
print(np.linalg.norm(A_lin.dot(x0) - b))
print(np.sum(x0 <= 1e-6))
hist_conv = []
def cl(x):
hist_conv.append(x)
res = NewtonLinConstraintsFeasible(f, gradf, hessf, A_lin, x0, backtracking, elimination_solver, (c, mu), callback=cl,
max_iter=2000, beta1=0.1, rho=0.7)
print("Decrement value = {}".format(res["tol"]))
fstar = f(res["x"], c, mu)
hist_conv_f = [np.abs(fstar - f(descdir_x[1], c, mu)) for descdir_x in hist_conv]
plt.figure(figsize=(12, 5))
plt.subplot(1,2,1)
plt.semilogy(hist_conv_f)
plt.xlabel("Number of iteration, $k$", fontsize=18)
plt.ylabel("$f^* - f_k$", fontsize=18)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
hist_conv_x = [np.linalg.norm(res["x"] - x[1]) for x in hist_conv]
plt.subplot(1,2,2)
plt.semilogy(hist_conv_x)
plt.xlabel("Number of iteration, $k$", fontsize=18)
plt.ylabel("$\| x_k - x^*\|_2$", fontsize=18)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
plt.tight_layout()
def BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, linesearch, linsys_solver,
tol=1e-8, max_iter=500, disp_conv=False, **kwargs):
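# Primal barrier (interior-point) method: minimize c^T x - mu * sum(log x) subject to A x = b
# with Newton's method, then shrink mu by the factor rho_mu and repeat until n * mu < tol.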
x = x0.copy()
n = x0.shape[0]
mu = mu0
while True:
res = NewtonLinConstraintsFeasible(f, gradf, hessf, A, x, linesearch, linsys_solver, (c, mu),
disp_conv=disp_conv, max_iter=max_iter, beta1=0.01, rho=0.5)
x = res["x"].copy()
if n * mu < tol:
break
mu *= rho_mu
return x
mu0 = 5
rho_mu = 0.5
x = BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, elimination_solver, max_iter=100)
%timeit BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, elimination_solver, max_iter=100)
%timeit BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, simple_solver, max_iter=100)
print(x[:n])
mu0 = 2
rho_mu = 0.5
n_list = range(3, 10)
n_iters = np.zeros(len(n_list))
times_simplex = np.zeros(len(n_list))
times_barrier_simple = np.zeros(len(n_list))
for i, n in enumerate(n_list):
print("Current dimension = {}".format(n))
c, A, b, bounds = generate_KleeMinty_test_problem(n)
res = scopt.linprog(c, A, b, bounds=bounds, options={"maxiter": 2**max(n_list) + 1})
time = %timeit -o -q scopt.linprog(c, A, b, bounds=bounds, options={"maxiter": 2**max(n_list) + 1})
times_simplex[i] = time.best
A_lin = np.zeros((n, n + A.shape[0]))
A_lin[:n, :n] = A
A_lin[:n, n:n + A.shape[0]] = np.eye(A.shape[0])
x0 = np.zeros(2*n)
x0[:n] = np.random.rand(n)
x0[n:2*n] = b - A.dot(x0[:n])
time = %timeit -o -q BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, simple_solver)
times_barrier_simple[i] = time.best
plt.figure(figsize=(8, 5))
plt.semilogy(n_list, times_simplex, label="Simplex")
plt.semilogy(n_list, times_barrier_simple, label="Primal barrier")
plt.legend(fontsize=18)
plt.xlabel("Dimension, $n$", fontsize=18)
plt.ylabel("Computation time, sec.", fontsize=18)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
def generate_linprog_problem(n, m=10):
x = np.random.rand(n)
A = np.random.randn(m, n)
b = A.dot(x)
c = np.random.randn(n)
c[c < 0] = 1
return c, A, b, x
n = 20
c, A, b, x0 = generate_linprog_problem(n)
res = scopt.linprog(c, A_eq=A, b_eq=b, bounds=[(0, None) for i in range(n)])
m = A.shape[0]
# x0 = np.random.rand(n)
# resid = b - A[:, m:].dot(x0[m:])
# x0[:m] = np.linalg.solve(A[:m, :m], resid)
print(np.linalg.norm(A.dot(x0) - b))
mu0 = 1
rho_mu = 0.5
x_bar_simple = BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, backtracking,
simple_solver, max_iter=100, tol=1e-8)
x_bar_elim = BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, backtracking,
elimination_solver, tol=1e-8)
print(np.linalg.norm(res["x"] - x_bar_simple))
print(np.linalg.norm(res["x"] - x_bar_elim))
print(c.dot(res["x"]))
print(c.dot(x_bar_elim))
n_list = [10*i for i in range(2, 63, 10)]
times_simple = np.zeros(len(n_list))
times_elim = np.zeros(len(n_list))
mu0 = 5
rho_mu = 0.5
for i, n in enumerate(n_list):
print("Current dimension = {}".format(n))
c, A, b, x0 = generate_linprog_problem(n)
time = %timeit -o BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, backtracking, simple_solver)
times_simple[i] = time.average
time = %timeit -o BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, backtracking, elimination_solver)
times_elim[i] = time.average
plt.semilogy(n_list, times_elim, label="Elimination solver")
plt.semilogy(n_list, times_simple, label="Simple solver")
# plt.semilogy(n_list, 10**(-6)*np.array(n_list)**3 / 3)
plt.legend(fontsize=18)
plt.xlabel("Dimension, $n$", fontsize=18)
plt.ylabel("Computation time, sec.", fontsize=18)
plt.xticks(fontsize=18)
_ = plt.yticks(fontsize=18)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What this function does is to get the relevant inputs, which are the recorded Vp, Vs and density logs, the in-situ and replacement fluid properties (density and bulk modulus), the mineral bulk modulus k0 and the porosity log
Step2: I can use the same function to also compute the fluid bulk modulus log which is usually done via Reuss average (the lower bound k_l in the vrh function above)
Step3: Then I calculate the original (insitu) fluid density rho_fl and bulk modulus k_fl, and the average mineral bulk modulus k0
Step4: ...and put it all together using the frm function defined above
Step5: Now I create 3 sets of copies of the original elastic logs stored in my DataFrame logs (logs.VP, logs.VS, logs.RHO) for the three fluid scenarios investigated (and I will append an appropriate suffix to identify these 3 cases, i.e. _FRMB for brine, _FRMG for gas and _FRMO for oil, respectively). These three sets will be placeholders to store the values of the actual fluid-replaced logs (vpb, vsb, rhob, etc.).
Step6: The syntax I use to do this is boolean indexing with the facies flags, e.g. logs['VP_FRMB'][brine_sand|oil_sand] = vpb[brine_sand|oil_sand]
Step7: Finally, I will add three more LFC logs that will be companions to the new fluid-replaced logs.
Step8: And this is the same summary plot that I have used above, updated to show the fluid changes in the elastic logs Ip and Vp/Vs. It is also zoomed into the reservoir between 2150 and 2200 m, and the LFC log is the original one, i.e. it reflects the insitu case.
Step9: Let's have a look at the results in the Ip versus Vp/Vs crossplot domain; I will now plot 4 different plots to compare the initial situation to the results of the 4 fluid replacements
Step10: statistical analysis
Step11: What I have done here is to first define 4 lists containing the names of the logs we want to extract (lines 1-4). Then I extract into 4 separate temporary DataFrames (lines 5-8) different sets of logs, e.g. ww0 will contain only the logs LFC,IP,VPVS, and ww1 will hold only LFC_B,IP_FRMB, VPVS_FRMB. I will also rename the fluid-replaced logs to have the same name as my insitu logs using ww1.columns=[lognames0]. In this way, when I merge all these 4 DataFrames together (line 9) I will have created a megalog (ww) that includes all values of Ip and Vp/Vs that are both measured for a certain facies, and synthetically created through fluid substitution.
Step12: With the code above I simply build the headers for a Pandas DataFrame to store mean and covariances for each class.
Step13: This is how the stat DataFrame looks now
Step14: So it's like an empty box, made of four rows (because we have 4 classes: brine sand, oil sand, gas sand and shale)
Step15: Now let's look back at stat and see how it has been filled up with all the information I need
Step16: I can also interrogate stat to know for example the average Ip for the litho-fluid class 2 (brine sands)
Step17: Obviously I need to remember that the first property is Ip, so that's why I am querying the column mean0 (mean1 holds the average values for the second property, in this case Vp/Vs).
Step18: creation of synthetic datasets
Step19: First I define how many samples per class I want (line 1), then I create an empty Pandas DataFrame (lines 3-5) dimensioned like this
Step20: And these are the results, comparing the original, augmented dataset (i.e. the results of fluid replacement merged with the insitu log, all stored in the DataFrame ww defined earlier when calculating the statistics) with the newly created synthetic data
|
<ASSISTANT_TASK:>
Python Code:
def frm(vp1, vs1, rho1, rho_f1, k_f1, rho_f2, k_f2, k0, phi):
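# Gassmann fluid substitution: strip the in-situ fluid (rho_f1, k_f1) from the saturated rock to get
# the dry-rock bulk modulus, then re-saturate with the new fluid (rho_f2, k_f2).
# Velocities are converted from m/s to km/s so that, with densities in g/cc, the moduli come out in GPa;
# the shear modulus is unaffected by the fluid change.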
vp1 = vp1 / 1000.
vs1 = vs1 / 1000.
mu1 = rho1 * vs1**2.
k_s1 = rho1 * vp1**2 - (4./3.)*mu1
# The dry rock bulk modulus
kdry = (k_s1 * ((phi*k0)/k_f1+1-phi)-k0) / ((phi*k0)/k_f1+(k_s1/k0)-1-phi)
# Now we can apply Gassmann to get the new values
k_s2 = kdry + (1- (kdry/k0))**2 / ( (phi/k_f2) + ((1-phi)/k0) - (kdry/k0**2) )
rho2 = rho1-phi * rho_f1+phi * rho_f2
mu2 = mu1
vp2 = np.sqrt(((k_s2+(4./3)*mu2))/rho2)
vs2 = np.sqrt((mu2/rho2))
return vp2*1000, vs2*1000, rho2, k_s2
def vrh(volumes,k,mu):
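# Voigt-Reuss-Hill averaging: k_u/mu_u are the Voigt (upper) bounds, k_l/mu_l the Reuss (lower) bounds,
# and k0/mu0 their arithmetic means (Hill average), used here as the effective mineral moduli.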
f = np.array(volumes).T
k = np.resize(np.array(k),np.shape(f))
mu = np.resize(np.array(mu),np.shape(f))
k_u = np.sum(f*k, axis=1)
k_l = 1. / np.sum(f/k, axis=1)
mu_u = np.sum(f*mu, axis=1)
mu_l = 1. / np.sum(f/mu, axis=1)
k0 = (k_u+k_l) / 2.
mu0 = (mu_u+mu_l) / 2.
return k_u, k_l, mu_u, mu_l, k0, mu0
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
logs = pd.read_csv('qsiwell2_lfc.csv')
rho_qz=2.65; k_qz=37; mu_qz=44 # mineral properties, quartz (i.e., sands)
rho_sh=2.81; k_sh=15; mu_sh=5 # mineral properties, clay (i.e., shales)
rho_b=1.09; k_b=2.8 # fluid properties, brine
rho_o=0.78; k_o=0.94 # fluid properties, oil
rho_g=0.25; k_g=0.06 # fluid properties, gas
# mineral mixture bulk and shear moduli, k0 and mu0
shale = logs.VSH.values
sand = 1 - shale - logs.PHI.values
shaleN = shale / (shale+sand) # normalized shale and sand volumes
sandN = sand / (shale+sand)
k_u, k_l, mu_u, mu_l, k0, mu0 = vrh([shaleN, sandN], [k_sh, k_qz], [mu_sh, mu_qz])
# fluid mixture bulk modulus, using the same vrh function but capturing the Reuss average (second output)
water = logs.SW.values
hc = 1 - logs.SW.values
tmp, k_fl, tmp, tmp, tmp, tmp = vrh([water, hc], [k_b, k_o], [0, 0])
# fluid mixture density
rho_fl = water*rho_b + hc*rho_o
vpb, vsb, rhob, kb = frm(logs.VP, logs.VS, logs.RHO, rho_fl, k_fl, rho_b, k_b, k0, logs.PHI)
vpo, vso, rhoo, ko = frm(logs.VP, logs.VS, logs.RHO, rho_fl, k_fl, rho_o, k_o, k0, logs.PHI)
vpg, vsg, rhog, kg = frm(logs.VP, logs.VS, logs.RHO, rho_fl, k_fl, rho_g, k_g, k0, logs.PHI)
sand_cutoff = 0.20
brine_sand = ((logs.VSH <= sand_cutoff) & (logs.SW >= 0.9))
oil_sand = ((logs.VSH <= sand_cutoff) & (logs.SW < 0.9))
shale = (logs.VSH > sand_cutoff)
logs['VP_FRMB'] = logs.VP
logs['VS_FRMB'] = logs.VS
logs['RHO_FRMB'] = logs.RHO
logs['VP_FRMB'][brine_sand|oil_sand] = vpb[brine_sand|oil_sand]
logs['VS_FRMB'][brine_sand|oil_sand] = vsb[brine_sand|oil_sand]
logs['RHO_FRMB'][brine_sand|oil_sand] = rhob[brine_sand|oil_sand]
logs['IP_FRMB'] = logs.VP_FRMB*logs.RHO_FRMB
logs['IS_FRMB'] = logs.VS_FRMB*logs.RHO_FRMB
logs['VPVS_FRMB'] = logs.VP_FRMB/logs.VS_FRMB
logs['VP_FRMO'] = logs.VP
logs['VS_FRMO'] = logs.VS
logs['RHO_FRMO'] = logs.RHO
logs['VP_FRMO'][brine_sand|oil_sand] = vpo[brine_sand|oil_sand]
logs['VS_FRMO'][brine_sand|oil_sand] = vso[brine_sand|oil_sand]
logs['RHO_FRMO'][brine_sand|oil_sand] = rhoo[brine_sand|oil_sand]
logs['IP_FRMO'] = logs.VP_FRMO*logs.RHO_FRMO
logs['IS_FRMO'] = logs.VS_FRMO*logs.RHO_FRMO
logs['VPVS_FRMO'] = logs.VP_FRMO/logs.VS_FRMO
logs['VP_FRMG'] = logs.VP
logs['VS_FRMG'] = logs.VS
logs['RHO_FRMG'] = logs.RHO
logs['VP_FRMG'][brine_sand|oil_sand] = vpg[brine_sand|oil_sand]
logs['VS_FRMG'][brine_sand|oil_sand] = vsg[brine_sand|oil_sand]
logs['RHO_FRMG'][brine_sand|oil_sand] = rhog[brine_sand|oil_sand]
logs['IP_FRMG'] = logs.VP_FRMG*logs.RHO_FRMG
logs['IS_FRMG'] = logs.VS_FRMG*logs.RHO_FRMG
logs['VPVS_FRMG'] = logs.VP_FRMG/logs.VS_FRMG
temp_lfc_b = np.zeros(np.shape(logs.VSH))
temp_lfc_b[brine_sand.values | oil_sand.values] = 1 # LFC is 1 when either brine_sand (brine sand flag) or oil_sand (oil) is True
temp_lfc_b[shale.values] = 4 # LFC 4=shale
logs['LFC_B'] = temp_lfc_b
temp_lfc_o = np.zeros(np.shape(logs.VSH))
temp_lfc_o[brine_sand.values | oil_sand.values] = 2 # LFC is now 2 when there's sand (brine_sand or oil_sand is True)
temp_lfc_o[shale.values] = 4 # LFC 4=shale
logs['LFC_O'] = temp_lfc_o
temp_lfc_g = np.zeros(np.shape(logs.VSH))
temp_lfc_g[brine_sand.values | oil_sand.values] = 3 # LFC 3=gas sand
temp_lfc_g[shale.values] = 4 # LFC 4=shale
logs['LFC_G'] = temp_lfc_g
import matplotlib.colors as colors
# 0=undef 1=bri 2=oil 3=gas 4=shale
ccc = ['#B3B3B3','blue','green','red','#996633',]
cmap_facies = colors.ListedColormap(ccc[0:len(ccc)], 'indexed')
ztop = 2150; zbot = 2200
ll = logs.ix[(logs.DEPTH>=ztop) & (logs.DEPTH<=zbot)]
cluster=np.repeat(np.expand_dims(ll['LFC'].values,1),100,1)
f, ax = plt.subplots(nrows=1, ncols=4, figsize=(8, 12))
ax[0].plot(ll.VSH, ll.DEPTH, '-g', label='Vsh')
ax[0].plot(ll.SW, ll.DEPTH, '-b', label='Sw')
ax[0].plot(ll.PHI, ll.DEPTH, '-k', label='phi')
ax[1].plot(ll.IP_FRMG, ll.DEPTH, '-r')
ax[1].plot(ll.IP_FRMB, ll.DEPTH, '-b')
ax[1].plot(ll.IP, ll.DEPTH, '-', color='0.5')
ax[2].plot(ll.VPVS_FRMG, ll.DEPTH, '-r')
ax[2].plot(ll.VPVS_FRMB, ll.DEPTH, '-b')
ax[2].plot(ll.VPVS, ll.DEPTH, '-', color='0.5')
im=ax[3].imshow(cluster, interpolation='none', aspect='auto',cmap=cmap_facies,vmin=0,vmax=4)
cbar=plt.colorbar(im, ax=ax[3])
# cbar.set_label('0=undef,1=brine,2=oil,3=gas,4=shale')
# cbar.set_ticks(range(0,4+1));
cbar.set_label((12*' ').join(['undef', 'brine', 'oil', 'gas', 'shale']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in ax[:-1]:
i.set_ylim(ztop,zbot)
i.invert_yaxis()
i.grid()
i.locator_params(axis='x', nbins=4)
ax[0].legend(fontsize='small', loc='lower right')
ax[0].set_xlabel("Vcl/phi/Sw"), ax[0].set_xlim(-.1,1.1)
ax[1].set_xlabel("Ip [m/s*g/cc]"), ax[1].set_xlim(3000,9000)
ax[2].set_xlabel("Vp/Vs"), ax[2].set_xlim(1.5,3)
ax[3].set_xlabel('LFC')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([]); ax[3].set_xticklabels([]);
f, ax = plt.subplots(nrows=1, ncols=4, sharey=True, sharex=True, figsize=(16, 4))
ax[0].scatter(logs.IP,logs.VPVS,20,logs.LFC,marker='o',edgecolors='none',alpha=0.5,cmap=cmap_facies,vmin=0,vmax=4)
ax[1].scatter(logs.IP_FRMB,logs.VPVS_FRMB,20,logs.LFC_B,marker='o',edgecolors='none',alpha=0.5,cmap=cmap_facies,vmin=0,vmax=4)
ax[2].scatter(logs.IP_FRMO,logs.VPVS_FRMO,20,logs.LFC_O,marker='o',edgecolors='none',alpha=0.5,cmap=cmap_facies,vmin=0,vmax=4)
ax[3].scatter(logs.IP_FRMG,logs.VPVS_FRMG,20,logs.LFC_G,marker='o',edgecolors='none',alpha=0.5,cmap=cmap_facies,vmin=0,vmax=4)
ax[0].set_xlim(3000,9000); ax[0].set_ylim(1.5,3);
ax[0].set_title('original data');
ax[1].set_title('FRM to brine');
ax[2].set_title('FRM to oil');
ax[3].set_title('FRM to gas');
for i in ax: i.grid()
lognames0 = ['LFC','IP','VPVS']
lognames1 = ['LFC_B','IP_FRMB', 'VPVS_FRMB']
lognames2 = ['LFC_O','IP_FRMO', 'VPVS_FRMO']
lognames3 = ['LFC_G','IP_FRMG', 'VPVS_FRMG']
ww0 = logs[pd.notnull(logs.LFC)].ix[:,lognames0];
ww1 = logs[pd.notnull(logs.LFC)].ix[:,lognames1]; ww1.columns=[lognames0]
ww2 = logs[pd.notnull(logs.LFC)].ix[:,lognames2]; ww2.columns=[lognames0]
ww3 = logs[pd.notnull(logs.LFC)].ix[:,lognames3]; ww3.columns=[lognames0]
ww = pd.concat([ww0, ww1, ww2, ww3])
import itertools
list(itertools.product(['a', 'b'], ['a', 'b']))
nlfc = int(ww.LFC.max())
nlogs = len(ww.columns) - 1 # my merged data always contain a facies log...
# ...that needs to be excluded from the statistical analysis
means, covs = [], []
for col in ww.columns[1:]:
means.append(col + '_mean')
import itertools
covariances = list(itertools.product(ww.columns[1:], ww.columns[1:]))
print covariances
for element in covariances:
if element[0] == element[1]:
covs.append(element[0] + '_var')
else:
covs.append(element[0] + '-' + element[1] + '_cov')
covs
stat = pd.DataFrame(data=None,
columns=['LFC']+means+covs+['SAMPLES'],
index=np.arange(nlfc))
stat['LFC'] = range(1, nlfc+1)
stat
np.math.factorial(3)
for i in range(1, 1+nlfc):
temp = ww[ww.LFC==i].drop('LFC',1)
stat.ix[(stat.LFC==i),'SAMPLES'] = temp.count()[0]
stat.ix[stat.LFC==i,means[0]:means[-1]] = np.mean(temp.values,0)
stat.ix[stat.LFC==i,covs[0]:covs[-1]] = np.cov(temp,rowvar=0).flatten()
print (temp.describe().ix['mean':'std'])
print ("LFC=%d, number of samples=%d" % (i, temp.count()[0]))
stat
stat.ix[stat.LFC==2, 'VPVS_mean']
i = 2
pd.scatter_matrix(ww[ww.LFC==i].drop('LFC',1),
color='black',
diagonal='kde',
alpha=0.1,
density_kwds={'color':'#000000','lw':2})
plt.suptitle('LFC=%d' % i)
NN = 300
mc = pd.DataFrame(data=None,
columns=lognames0,
index=np.arange(nlfc*NN),
dtype='float')
for i in range(1, nlfc+1):
mc.loc[NN*i-NN:NN*i-1, 'LFC'] = i
from numpy.random import multivariate_normal
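# Draw NN synthetic (Ip, Vp/Vs) samples per class from a multivariate Gaussian parameterized by
# that class's mean vector and covariance matrix stored in stat.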
for i in range(1, nlfc+1):
mean = stat.loc[i-1,
means[0]:means[-1]].values
sigma = np.reshape(stat.loc[i-1,
covs[0]:covs[-1]].values,
(nlogs,nlogs))
m = multivariate_normal(mean,sigma,NN)
mc.ix[mc.LFC==i,1:] = m
mc.describe()
f, ax = plt.subplots(nrows=1, ncols=2, sharey=True, sharex=True, figsize=(8, 4))
scatt1 = ax[0].scatter(ww.IP, ww.VPVS,
s=20,
c=ww.LFC,
marker='o',
edgecolors='none',
alpha=0.2,
cmap=cmap_facies,
vmin=0,vmax=4)
scatt2 = ax[1].scatter(mc.IP, mc.VPVS,
s=20,
c=mc.LFC,
marker='o',
edgecolors='none',
alpha=0.5,
cmap=cmap_facies,
vmin=0,vmax=4)
ax[0].set_xlim(3000,9000); ax[0].set_ylim(1.5,3.0);
ax[0].set_title('augmented well data');
ax[1].set_title('synthetic data');
for i in ax: i.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's show the symbols data, to see how good the recommender has to be.
Step2: Let's run the trained agent, with the test set
Step3: And now a "realistic" test, in which the learner continues to learn from past samples in the test set (it even makes some random moves, though very few).
|
<ASSISTANT_TASK:>
Python Code:
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
from multiprocessing import Pool
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
import recommender.simulator as sim
from utils.analysis import value_eval
from recommender.agent import Agent
from functools import partial
NUM_THREADS = 1
LOOKBACK = 252*2 + 28
STARTING_DAYS_AHEAD = 20
POSSIBLE_FRACTIONS = [0.0, 0.5, 1.0]
# Get the data
SYMBOL = 'SPY'
total_data_train_df = pd.read_pickle('../../data/data_train_val_df.pkl').stack(level='feature')
data_train_df = total_data_train_df[SYMBOL].unstack()
total_data_test_df = pd.read_pickle('../../data/data_test_df.pkl').stack(level='feature')
data_test_df = total_data_test_df[SYMBOL].unstack()
if LOOKBACK == -1:
total_data_in_df = total_data_train_df
data_in_df = data_train_df
else:
data_in_df = data_train_df.iloc[-LOOKBACK:]
total_data_in_df = total_data_train_df.loc[data_in_df.index[0]:]
# Create many agents
index = np.arange(NUM_THREADS).tolist()
env, num_states, num_actions = sim.initialize_env(total_data_in_df,
SYMBOL,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS)
agents = [Agent(num_states=num_states,
num_actions=num_actions,
random_actions_rate=0.98,
random_actions_decrease=0.999,
dyna_iterations=0,
name='Agent_{}'.format(i)) for i in index]
def show_results(results_list, data_in_df, graph=False):
for values in results_list:
total_value = values.sum(axis=1)
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(total_value))))
print('-'*100)
initial_date = total_value.index[0]
compare_results = data_in_df.loc[initial_date:, 'Close'].copy()
compare_results.name = SYMBOL
compare_results_df = pd.DataFrame(compare_results)
compare_results_df['portfolio'] = total_value
std_comp_df = compare_results_df / compare_results_df.iloc[0]
if graph:
plt.figure()
std_comp_df.plot()
print('Sharpe ratio: {}\nCum. Ret.: {}\nAVG_DRET: {}\nSTD_DRET: {}\nFinal value: {}'.format(*value_eval(pd.DataFrame(data_in_df['Close'].iloc[STARTING_DAYS_AHEAD:]))))
# Simulate (with new envs, each time)
n_epochs = 4
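# Each epoch resets the environment to the same starting point and replays the in-sample period,
# so the agent sees the same training data n_epochs times before being evaluated on the test set.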
for i in range(n_epochs):
tic = time()
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL,
agents[0],
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_in_df)
env.reset(STARTING_DAYS_AHEAD)
results_list = sim.simulate_period(total_data_in_df,
SYMBOL, agents[0],
learn=False,
starting_days_ahead=STARTING_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
other_env=env)
show_results([results_list], data_in_df, graph=True)
TEST_DAYS_AHEAD = 20
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=False,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
env.set_test_data(total_data_test_df, TEST_DAYS_AHEAD)
tic = time()
results_list = sim.simulate_period(total_data_test_df,
SYMBOL,
agents[0],
learn=True,
starting_days_ahead=TEST_DAYS_AHEAD,
possible_fractions=POSSIBLE_FRACTIONS,
verbose=False,
other_env=env)
toc = time()
print('Epoch: {}'.format(i))
print('Elapsed time: {} seconds.'.format((toc-tic)))
print('Random Actions Rate: {}'.format(agents[0].random_actions_rate))
show_results([results_list], data_test_df, graph=True)
import pickle
with open('../../data/simple_q_learner_fast_learner_3_actions.pkl', 'wb') as best_agent:
pickle.dump(agents[0], best_agent)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading things from storage
Step2: A lot of information can be recovered from the old storage, and so we don't have to recreate it. However, we did not save our network, so we'll have to create a new one. Since the network creates the ensembles, that means we will have to translate the trajectories from the old ensembles to the new ensembles.
Step3: Loading from storage is very easy. Each store is a list. We take the 0th snapshot as a template (it doesn't actually matter which one) for the next storage we'll create. There's only one engine stored, so we take the only one.
Step4: Named objects can be found in storage by using their name as a dictionary key. This allows us to load our old collective variables and states.
Step5: Once again, we have everything we need to build the MSTIS network. Recall that this will create all the ensembles we need for the simulation. However, even though the ensembles are semantically the same, these are not the same objects. We'll need to deal with that later.
Step6: Now we need to set up real trajectories that we can use for each of these. We can start by loading the stored sampleset.
Step7: Remapping old ensembles to new ensembles
Step8: Setting up special ensembles
Step9: Minus interface ensemble
Step10: Now that all the innermost ensembles are safe to use for extending into a minus interface, we extend them into a minus interface
Step11: Equilibration
Step12: Running RETIS
Step13: Now make and show the default move scheme, so we can later compare it to the scheme with the constrained shooting.
Step14: Now make the interface constrained shooting move scheme.
Step15: Visualise the scheme that uses interface constrained shooting. Note that the only difference with the default scheme is in the shooting part, where we only use forward shooting.
Step16: By looking at the to_dict method for a mover, we can see that it does, in fact, use the InterfaceConstrainedSelector.
Step17: The next block sets up a live visualization. This is optional, and only recommended if you're using OPS interactively (which would only be for very small systems). Some of the same tools can be used to play back the behavior after the fact if you want to see the behavior for more complicated systems. You can create a background (here we use the PES contours), and the visualization will plot the trajectories.
Step18: Now everything is ready
Step19: Now we perform some basic analysis. First, we look at the acceptance. Note that the shooting now has 100% acceptance; this is one of the advantages of interface constrained shooting.
Step20: Next we build a path tree. Note that there are only forward shots, and successive forward shots (without a path reversal or replica exchange between them) will always have the same shooting point.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
%matplotlib inline
import openpathsampling as paths
import numpy as np
from IPython.display import SVG
import openpathsampling.visualize as vis
old_store = paths.AnalysisStorage("mstis_bootstrap.nc")
print("PathMovers: "+ str(len(old_store.pathmovers)))
print("Samples: " + str(len(old_store.samples)))
print("Ensembles: " + str(len(old_store.ensembles)))
print("SampleSets: " + str(len(old_store.samplesets)))
print("Snapshots: " + str(len(old_store.snapshots)))
print("Networks: " + str(len(old_store.networks)))
template = old_store.snapshots[0]
engine = old_store.engines[0]
opA = old_store.cvs['opA']
opB = old_store.cvs['opB']
opC = old_store.cvs['opC']
stateA = old_store.volumes['A']
stateB = old_store.volumes['B']
stateC = old_store.volumes['C']
# we could also load the interfaces, but it takes less code to build new ones:
interfacesA = paths.VolumeInterfaceSet(opA, 0.0,[0.2, 0.3, 0.4])
interfacesB = paths.VolumeInterfaceSet(opB, 0.0,[0.2, 0.3, 0.4])
interfacesC = paths.VolumeInterfaceSet(opC, 0.0,[0.2, 0.3, 0.4])
ms_outers = paths.MSOuterTISInterface.from_lambdas(
{ifaces: 0.5
for ifaces in [interfacesA, interfacesB, interfacesC]}
)
mstis = paths.MSTISNetwork(
[(stateA, interfacesA),
(stateB, interfacesB),
(stateC, interfacesC)],
ms_outers=ms_outers
)
# load the sampleset we have saved before
old_sampleset = old_store.samplesets[0]
sset = paths.SampleSet.translate_ensembles(old_sampleset, mstis.sampling_ensembles)
sset.sanity_check()
assert(len(sset)==9)
for outer_ens in mstis.special_ensembles['ms_outer']:
# doesn't matter which we take, so we take the first
traj = next(s.trajectory for s in old_sampleset if outer_ens(s.trajectory)==True)
samp = paths.Sample(
replica=None,
ensemble=outer_ens,
trajectory=traj
)
# now we apply it and correct for the replica ID
sset.append_as_new_replica(samp)
sset.sanity_check()
assert(len(sset)==10)
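# Before building the minus interface ensembles, make sure each innermost trajectory ends in its
# initial state; if it does not, keep shooting forward from that replica until it does.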
for transition in mstis.sampling_transitions:
innermost_ensemble = transition.ensembles[0]
shooter = None
if not transition.stateA(sset[innermost_ensemble].trajectory[-1]):
shooter = paths.OneWayShootingMover(ensemble=innermost_ensemble,
selector=paths.UniformSelector(),
engine=engine)
pseudoscheme = paths.LockedMoveScheme(root_mover=shooter)
pseudosim = paths.PathSampling(storage=None,
move_scheme=pseudoscheme,
sample_set=sset,
)
while not transition.stateA(sset[innermost_ensemble].trajectory[-1]):
pseudosim.run(1)
sset = pseudosim.sample_set
minus_samples = []
for transition in mstis.sampling_transitions:
minus_samples.append(transition.minus_ensemble.extend_sample_from_trajectories(
sset[transition.ensembles[0]].trajectory,
replica=-len(minus_samples)-1,
engine=engine
))
sset = sset.apply_samples(minus_samples)
sset.sanity_check()
assert(len(sset)==13)
equil_scheme = paths.OneWayShootingMoveScheme(mstis, engine=engine)
equilibration = paths.PathSampling(
storage=None,
sample_set=sset,
move_scheme=equil_scheme
)
#! skip
# tests need the unequilibrated samples to ensure passing
equilibration.run(5)
sset = equilibration.sample_set
# logging creates ops_output.log file with details of what the calculation is doing
#import logging.config
#logging.config.fileConfig("../resources/logging.conf", disable_existing_loggers=False)
storage = paths.storage.Storage("constrained_mstis.nc", "w")
scheme = paths.DefaultScheme(mstis, engine=engine)
move_vis = vis.MoveTreeBuilder.from_scheme(scheme, hidden_ensembles=False)
#move_vis.options.analysis['only_canonical']=False
SVG(move_vis.svg())
from openpathsampling.high_level import move_strategy
scheme = paths.DefaultScheme(mstis, engine=engine)
listofselectors = [paths.InterfaceConstrainedSelector(ens.interface)
for ens in mstis.sampling_ensembles]
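# One InterfaceConstrainedSelector per sampling ensemble: the shooting point is pinned to the
# interface crossing and only forward shooting is used, which is why these shooting moves are
# always accepted (see the move summary below).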
constrained_strategy = move_strategy.ForwardShootingStrategy(selector=listofselectors,
ensembles=mstis.sampling_ensembles,
engine=engine)
scheme.append(constrained_strategy)
scheme.build_move_decision_tree()
move_vis=vis.MoveTreeBuilder.from_scheme(scheme)
SVG(move_vis.svg())
scheme.movers['shooting'][0].to_dict()
mstis_calc = paths.PathSampling(
storage=storage,
sample_set=sset,
move_scheme=scheme
)
mstis_calc.save_frequency = 50
#! skip
# skip this during testing, but leave it for demo purposes
# we use the %run magic because this isn't in a package
%run ../resources/toy_plot_helpers.py
xval = paths.FunctionCV("xval", lambda snap : snap.xyz[0][0])
yval = paths.FunctionCV("yval", lambda snap : snap.xyz[0][1])
mstis_calc.live_visualizer = paths.StepVisualizer2D(mstis, xval, yval, [-1.0, 1.0], [-1.0, 1.0])
background = ToyPlot()
background.contour_range = np.arange(-1.5, 1.0, 0.1)
background.add_pes(engine.pes)
mstis_calc.live_visualizer.background = background.plot()
mstis_calc.status_update_frequency = 1 # increasing this number speeds things up, but isn't as pretty
mstis_calc.run_until(100)
scheme.move_summary(storage.steps)
tree = vis.PathTree(
storage.steps[0:100],
vis.ReplicaEvolution(replica=0)
)
tree.options.movers['ReplicaExchangeMover']['hide'] = False
SVG(tree.svg())
len(storage.steps)
storage.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using the already cached JSON obtained from the Open Context API, we can make a second dataframe that is "wider" (has many more columns). This wide dataframe will express multiple values for "Motif", "Decorative Technique", and "Fabric Category" in different columns. We set the dictionary oc_api.multi_value_handle_keyed_attribs to do this.
Step2: The df_wide dataframe handles multiple values for some attributes by making many boolean columns, with each column noting the presence of a given attribute value on a row for an artifact. For example, True values on the column "Motif
|
<ASSISTANT_TASK:>
Python Code:
# Define a list of columns to drop.
drop_cols = [
'Fragment Noted',
'Depth Notes',
'Supplement Note',
'Fabric Description',
'Description',
'Size',
]
df.drop(columns=drop_cols, inplace=True)
# The API returns 'False' if a citation URI is not defined, it's better
# practice to make this a null.
df.loc[(df['citation uri'] == False), 'citation uri'] = np.nan
import os
# Now save the results of all of this as a CSV file.
repo_path = os.path.dirname(os.path.abspath(os.getcwd()))
csv_path = os.path.join(
repo_path,
'files',
'oc-api-murlo-objects-multivalue-as-json.csv'
)
df.to_csv(csv_path, index=False)
print('Saved this example as a CSV table at: {}'.format(csv_path))
oc_api.multi_value_handle_non_number = 'concat'
oc_api.multi_value_handle_keyed_attribs = {
'Motif': 'column_val',
'Decorative Technique': 'column_val',
'Fabric Category': 'column_val',
}
df_wide = oc_api.url_to_dataframe(url, attribs_for_records)
df_wide.drop(columns=drop_cols, inplace=True)
# The API returns 'False' if a citation URI is not defined, it's better
# practice to make this a null.
df_wide.loc[(df_wide['citation uri'] == False), 'citation uri'] = np.nan
csv_wide_path = os.path.join(
repo_path,
'files',
'oc-api-murlo-objects-multivalue-as-cols.csv'
)
df_wide.to_csv(csv_wide_path, index=False)
print('Saved this example wide as a CSV table at: {}'.format(csv_wide_path))
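# A quick sanity check of the wide layout described above: list the boolean attribute columns.
# The 'Motif' prefix is an assumption about how the generated column names begin; adjust it if
# the API names the columns differently.
motif_cols = [c for c in df_wide.columns if c.startswith('Motif')]
print('Motif columns found:', len(motif_cols))
print(motif_cols[:5])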
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Material Properties
Step2: Slab Geometry Width and Discretization
Step3: Generation of Leakage and Absorption Matrices
Step4: Boundary Conditions $(\phi(0) = \phi(L) = 0)$
Step5: Power Iteration Scheme for k-eigenvalue and Flux
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
D = 0.9
nusigf = 0.70
siga = 0.066
#Lx = np.pi*((nusigf-siga)/D)**(-0.5)
Lx = 15.0
N = 50;
h = Lx/(N-1)
x = np.zeros(N)
for i in range(N-1):
x[i+1] = x[i] + h
L = np.zeros((N,N))
A = np.zeros((N,N))
M = np.zeros((N,N))
for i in range(N):
L[i][i] = L[i][i] + (-2*(-D/(h**2)))
for i in range(1,N):
L[i][i-1] = L[i][i-1] + (1*(-D/h**2))
for i in range(N-1):
L[i][i+1] = L[i][i+1] + (1*(-D/h**2))
for i in range(N):
A[i][i] = A[i][i] + siga
M = L + A
M[0][0] = 1
M[0][1] = 0
M[N-1][N-1] = 1
M[N-1][N-2] = 0
phi0 = np.ones((N,1))
phi0[0] = 0
phi0[N-1] = 0
tol = 1e-15
k = 1.00
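# Power iteration sketch for the loop below: repeatedly solve M * psi = nuSigma_f * phi_old,
# take k as the ratio of the new to the old fission source, renormalise phi = psi / k
# (re-imposing the zero-flux boundary values), and stop once |k_new - k_old| < tol.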
for i in range(100):
kold = k
psi = np.linalg.solve(M,nusigf*phi0)
k = sum(nusigf*psi)/sum(nusigf*phi0)
phi0 = (1/k)*psi
phi0[0] = 0
phi0[N-1] = 0
residual = np.abs(k-kold)
if residual <= tol:
break
plt.plot(x,phi0)
plt.xlabel('Slab (cm)')
plt.ylabel('Neutron Flux')
plt.grid()
print("k-effective = ", k)
print(" approx alpha = ", (k-1)/k * sum(nusigf*phi0)/sum(phi0))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we should have a data/names directory which contains a number of text files, one for each year of data
Step2: Let's take a quick look at one of these files
Step3: Each file is just a comma-separated list of names, genders, and counts of babies with that name in each year.
Step4: Oops! Something went wrong. Our algorithm tried to be smart, and use the first line as index labels.
Step5: That looks better. Now we can start playing with the data a bit.
Step6: Now we can take the sum of the births for each of these
Step7: But there's an easier way to do this, using one of Pandas' very powerful features
Step8: This grouped object is now an abstract representation of the data, where the data is split on the given column.
Step9: We can do other aggregations as well
Step10: Or, if we wish, we can get a description of the grouping
Step11: Concatenating multiple data sources
Step12: Now let's load all the data into a list, and call pd.concat on that list
Step13: It looks like we've done it!
Step14: We can use the plot() method to see a quick plot of these (note that because we used the %matplotlib inline magic at the start of the notebook, the resulting plot will be shown inline within the notebook).
Step15: The so-called "baby boom" generation after the second world war is abundantly clear!
Step16: Apparently there's been a huge increase of the diversity of names with time!
Step17: Notice that the apply() function iterates over each group, and calls a function which modifies the group.
Step18: And then we could proceed as above, using groupby to group on the year.
Step19: Note that this has grouped the index by the value of year, and grouped the columns by the value of gender.
Step20: Name Evolution Over Time
Step21: We can see that prior to about 1905, all babies named Allison were male. Over the 20th century, this reversed, until the end of the century nearly all Allisons were female!
|
<ASSISTANT_TASK:>
Python Code:
# !curl -O http://www.ssa.gov/oact/babynames/names.zip
# !mkdir -p data/names
# !mv names.zip data/names/
# !cd data/names/ && unzip names.zip
!ls data/names
!head data/names/yob1880.txt
names1880 = pd.read_csv('data/names/yob1880.txt')
names1880.head()
names1880 = pd.read_csv('data/names/yob1880.txt',
names=['name', 'gender', 'births'])
names1880.head()
males = names1880[names1880.gender == 'M']
females = names1880[names1880.gender == 'F']
males.births.sum(), females.births.sum()
grouped = names1880.groupby('gender')
grouped
grouped.sum()
grouped.size()
grouped.mean()
grouped.describe()
def load_year(year):
data = pd.read_csv('data/names/yob{0}.txt'.format(year),
names=['name', 'gender', 'births'])
data['year'] = year
return data
names = pd.concat([load_year(year) for year in range(1880, 2014)])
names.head()
births = names.groupby('year').births.sum()
births.head()
births.plot();
names.groupby('year').births.count().plot();
def add_frequency(group):
group['birth_freq'] = group.births / group.births.sum()
return group
names = names.groupby(['year', 'gender']).apply(add_frequency)
names.head()
men = names[names.gender == 'M']
women = names[names.gender == 'F']
births = names.pivot_table('births',
index='year', columns='gender',
aggfunc=sum)
births.head()
births.plot(title='Total Births');
names_to_check = ['Allison', 'Alison']
# filter on just the names we're interested in
births = names[names.name.isin(names_to_check)]
# pivot table to get year vs. gender
births = births.pivot_table('births', index='year', columns='gender')
# fill all NaNs with zeros
births = births.fillna(0)
# normalize along columns
births = births.div(births.sum(1), axis=0)
births.plot(title='Fraction of babies named Allison');
pd.rolling_mean(births, 5).plot(title="Allisons: 5-year moving average");
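# Note: pd.rolling_mean was removed in later pandas releases; on a recent pandas the
# equivalent call would use the rolling() accessor, e.g.
# births.rolling(5).mean().plot(title="Allisons: 5-year moving average");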
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Data
Step2: LOVE can be used with any type of GP model, including exact GPs, multitask models and scalable approximations. Here we demonstrate LOVE in conjunction with KISS-GP, which has the amazing property of producing constant time variances.
Step3: Training the model
Step4: Computing predictive variances (KISS-GP or Exact GPs)
Step5: Using LOVE
Step6: The above cell additionally computed the caches required to get fast predictions. From this point onwards, unless we put the model back in training mode, predictions should be extremely fast. The cell below re-runs the above code, but takes full advantage of both the mean cache and the LOVE cache for variances.
Step7: Compute Error between Exact and Fast Variances
Step8: Computing posterior samples (KISS-GP only)
Step9: Using LOVE
Step10: Compute the empirical covariance matrices
|
<ASSISTANT_TASK:>
Python Code:
import math
import torch
import gpytorch
import tqdm
from matplotlib import pyplot as plt
# Make plots inline
%matplotlib inline
import urllib.request
import os
from scipy.io import loadmat
from math import floor
# this is for running the notebook in our testing framework
smoke_test = ('CI' in os.environ)
if not smoke_test and not os.path.isfile('../elevators.mat'):
print('Downloading \'elevators\' UCI dataset...')
urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1jhWL3YUHvXIaftia4qeAyDwVxo6j1alk', '../elevators.mat')
if smoke_test: # this is for running the notebook in our testing framework
X, y = torch.randn(100, 3), torch.randn(100)
else:
data = torch.Tensor(loadmat('../elevators.mat')['data'])
X = data[:, :-1]
X = X - X.min(0)[0]
X = 2 * (X / X.max(0)[0]) - 1
y = data[:, -1]
train_n = int(floor(0.8 * len(X)))
train_x = X[:train_n, :].contiguous()
train_y = y[:train_n].contiguous()
test_x = X[train_n:, :].contiguous()
test_y = y[train_n:].contiguous()
if torch.cuda.is_available():
train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()
class LargeFeatureExtractor(torch.nn.Sequential):
def __init__(self, input_dim):
super(LargeFeatureExtractor, self).__init__()
self.add_module('linear1', torch.nn.Linear(input_dim, 1000))
self.add_module('relu1', torch.nn.ReLU())
self.add_module('linear2', torch.nn.Linear(1000, 500))
self.add_module('relu2', torch.nn.ReLU())
self.add_module('linear3', torch.nn.Linear(500, 50))
self.add_module('relu3', torch.nn.ReLU())
self.add_module('linear4', torch.nn.Linear(50, 2))
class GPRegressionModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood):
super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
self.covar_module = gpytorch.kernels.GridInterpolationKernel(
gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel()),
grid_size=100, num_dims=2,
)
# Also add the deep net
self.feature_extractor = LargeFeatureExtractor(input_dim=train_x.size(-1))
def forward(self, x):
# We're first putting our data through a deep net (feature extractor)
# We're also scaling the features so that they're nice values
projected_x = self.feature_extractor(x)
projected_x = projected_x - projected_x.min(0)[0]
projected_x = 2 * (projected_x / projected_x.max(0)[0]) - 1
# The rest of this looks like what we've seen
mean_x = self.mean_module(projected_x)
covar_x = self.covar_module(projected_x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)
if torch.cuda.is_available():
model = model.cuda()
likelihood = likelihood.cuda()
training_iterations = 1 if smoke_test else 20
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
def train():
iterator = tqdm.notebook.tqdm(range(training_iterations))
for i in iterator:
optimizer.zero_grad()
output = model(train_x)
loss = -mll(output, train_y)
loss.backward()
iterator.set_postfix(loss=loss.item())
optimizer.step()
%time train()
import time
# Set into eval mode
model.eval()
likelihood.eval()
with torch.no_grad():
start_time = time.time()
preds = likelihood(model(test_x))
exact_covar = preds.covariance_matrix
exact_covar_time = time.time() - start_time
print(f"Time to compute exact mean + covariances: {exact_covar_time:.2f}s")
# Clear the cache from the previous computations
model.train()
likelihood.train()
# Set into eval mode
model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var(), gpytorch.settings.max_root_decomposition_size(100):
start_time = time.time()
preds = model(test_x)
fast_time_no_cache = time.time() - start_time
with torch.no_grad(), gpytorch.settings.fast_pred_var():
start_time = time.time()
preds = likelihood(model(test_x))
fast_covar = preds.covariance_matrix
fast_time_with_cache = time.time() - start_time
print('Time to compute mean + covariances (no cache) {:.2f}s'.format(fast_time_no_cache))
print('Time to compute mean + variances (cache): {:.2f}s'.format(fast_time_with_cache))
mae = ((exact_covar - fast_covar).abs() / exact_covar.abs()).mean()
print(f"MAE between exact covar matrix and fast covar matrix: {mae:.6f}")
import time
num_samples = 20 if smoke_test else 20000
# Set into eval mode
model.eval()
likelihood.eval()
with torch.no_grad():
start_time = time.time()
exact_samples = model(test_x).rsample(torch.Size([num_samples]))
exact_sample_time = time.time() - start_time
print(f"Time to compute exact samples: {exact_sample_time:.2f}s")
# Clear the cache from the previous computations
model.train()
likelihood.train()
# Set into eval mode
model.eval()
likelihood.eval()
with torch.no_grad(), gpytorch.settings.fast_pred_var(), gpytorch.settings.max_root_decomposition_size(200):
# NEW FLAG FOR SAMPLING
with gpytorch.settings.fast_pred_samples():
start_time = time.time()
_ = model(test_x).rsample(torch.Size([num_samples]))
fast_sample_time_no_cache = time.time() - start_time
# Repeat the timing now that the cache is computed
with torch.no_grad(), gpytorch.settings.fast_pred_var():
with gpytorch.settings.fast_pred_samples():
start_time = time.time()
love_samples = model(test_x).rsample(torch.Size([num_samples]))
fast_sample_time_cache = time.time() - start_time
print('Time to compute LOVE samples (no cache) {:.2f}s'.format(fast_sample_time_no_cache))
print('Time to compute LOVE samples (cache) {:.2f}s'.format(fast_sample_time_cache))
# Compute exact posterior covar
with torch.no_grad():
start_time = time.time()
posterior = model(test_x)
mean, covar = posterior.mean, posterior.covariance_matrix
exact_empirical_covar = ((exact_samples - mean).t() @ (exact_samples - mean)) / num_samples
love_empirical_covar = ((love_samples - mean).t() @ (love_samples - mean)) / num_samples
exact_empirical_error = ((exact_empirical_covar - covar).abs()).mean()
love_empirical_error = ((love_empirical_covar - covar).abs()).mean()
print(f"Empirical covariance MAE (Exact samples): {exact_empirical_error}")
print(f"Empirical covariance MAE (LOVE samples): {love_empirical_error}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/Mean_Variance_Image.png">
Step6: Checkpoint
Step7: Problem 2
Step8: <img src="image/Learn_Rate_Tune_Image.png">
Step9: Test
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """
    Download file from <url>
    :param url: URL to file
    :param file: Local file path
    """
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file
    :param file: The zip file to extract the data from
    """
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
# Get the the letter from the filename. This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
# TODO: Implement Min-Max scaling for grayscale image data
return 0.1+(image_data-np.min(image_data))*(0.9-0.1)/(np.max(image_data)-np.min(image_data))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
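# The scaling tested above maps the observed pixel range (0-255 here) onto [0.1, 0.9] via
# x' = 0.1 + (x - x_min) * (0.9 - 0.1) / (x_max - x_min),
# which keeps the inputs in a numerically friendly range for the network.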
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print(np.shape(test_labels))
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32, shape = [None, 784])
labels = tf.placeholder(tf.float32, shape = [None, 10])
# TODO: Set the weights and biases tensors
weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([10]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
epochs = 1
learning_rate = 0.05
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Open Dataset and Load Package
Step2: Familiarize Yourself with the Data
Step3: Selecting Relevant Variables
Step4: Basic Descriptives
Step5: Grouping Data
Step6: We can group data by more than one factor. Let's say we're interested in how levels of adhd interact with groupStatus (multitasking
Step7: Then we'll check how evenly split these groups are by using the groupby() and size() functions
Step8: Then we'll calculate some summary info about these groups
Step9: A note on piping / stringing commands together
Step10: How many trials were there per subject?
Step11: Combine summary statistics with the full dataframe
|
<ASSISTANT_TASK:>
Python Code:
# load packages we will be using for this lesson
import pandas as pd
# use pd.read_csv to open data into python
df = pd.read_csv("uncapher_2016_repeated_measures_dataset.csv")
df.head()
df.shape
df.columns
df = df[["subjNum", "groupStatus", "adhd", "hitRate", "faRate", "dprime"]]
df.head()
df.describe()
df.groupby(["groupStatus"]).mean()
df["adhdF"] = pd.cut(df["adhd"],bins=2,labels=["Low","High"])
df.groupby(["groupStatus","adhdF"]).size()
df.groupby(["groupStatus","adhdF"]).mean()
subList = df["subjNum"].unique()
nSubs = len(subList)
nSubs
nTrialsPerSubj = df.groupby(["subjNum"]).size().reset_index(name="nTrials")
nTrialsPerSubj.head()
avgHR = df.groupby(["subjNum"])["hitRate"].mean().reset_index(name="avgHR")
avgHR.head()
df = df.merge(avgHR,on="subjNum")
df.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set environment parameters
Step2: 3. Start the environment
Step3: 4. Examine the observation and state spaces
Step4: 5. Take random actions in the environment
Step5: 6. Close the environment when finished
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
from unityagents import UnityEnvironment
%matplotlib inline
env_name = "3DBall" # Name of the Unity environment binary to launch
train_mode = True # Whether to run the environment in training or inference mode
env = UnityEnvironment(file_name=env_name)
# Examine environment parameters
print(str(env))
# Set the default brain to work with
default_brain = env.brain_names[0]
brain = env.brains[default_brain]
# Reset the environment
env_info = env.reset(train_mode=train_mode)[default_brain]
# Examine the state space for the default brain
print("Agent state looks like: \n{}".format(env_info.states[0]))
# Examine the observation space for the default brain
for observation in env_info.observations:
print("Agent observations look like:")
if observation.shape[3] == 3:
plt.imshow(observation[0,:,:,:])
else:
plt.imshow(observation[0,:,:,0])
for episode in range(10):
env_info = env.reset(train_mode=train_mode)[default_brain]
done = False
episode_rewards = 0
while not done:
if brain.action_space_type == 'continuous':
env_info = env.step(np.random.randn(len(env_info.agents),
brain.action_space_size))[default_brain]
else:
env_info = env.step(np.random.randint(0, brain.action_space_size,
size=(len(env_info.agents))))[default_brain]
episode_rewards += env_info.rewards[0]
done = env_info.local_done[0]
print("Total reward this episode: {}".format(episode_rewards))
env.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read some product review data
Step2: Let's explore this data together
Step3: Build the word count vector for each review
Step4: Examining the reviews for most-sold product
Step5: Build a sentiment classifier
Step6: Define what's a positive and a negative sentiment
Step7: Let's train the sentiment classifier
Step8: Evaluate the sentiment model
Step9: Applying the learned model to understand sentiment for Giraffe
Step10: Sort the reviews based on the predicted sentiment and explore
Step11: Most positive reviews for the giraffe
Step12: Show most negative reviews for giraffe
|
<ASSISTANT_TASK:>
Python Code:
import graphlab;
graphlab.product_key.set_product_key("7348-CE53-3B3E-DBED-152B-828E-A99E-F303");
products = graphlab.SFrame('amazon_baby.gl/')
products.head()
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
products.head()
graphlab.canvas.set_target('ipynb')
products['name'].show()
giraffe_reviews = products[products['name'] == 'Vulli Sophie the Giraffe Teether']
len(giraffe_reviews)
giraffe_reviews['rating'].show(view='Categorical')
products['rating'].show(view='Categorical')
#ignore all 3* reviews
products = products[products['rating'] != 3]
#positive sentiment = 4* or 5* reviews
products['sentiment'] = products['rating'] >=4
products.head()
train_data,test_data = products.random_split(.8, seed=0)
sentiment_model = graphlab.logistic_classifier.create(train_data,
target='sentiment',
features=['word_count'],
validation_set=test_data)
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
giraffe_reviews['predicted_sentiment'] = sentiment_model.predict(giraffe_reviews, output_type='probability')
giraffe_reviews.head()
giraffe_reviews = giraffe_reviews.sort('predicted_sentiment', ascending=False)
giraffe_reviews.head()
giraffe_reviews[0]['review']
giraffe_reviews[1]['review']
giraffe_reviews[-1]['review']
giraffe_reviews[-2]['review']
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
def awesome_count(cell):
    # despite the generic name, this helper returns the count of the word 'wow'
    # in the word_count dictionary (or 0 when the word is absent)
    if 'wow' in cell:
        return cell['wow']
    else:
        return 0
products['wow'] = products['word_count'].apply(awesome_count)
products.head()
train_data,test_data = products.random_split(.8, seed=0)
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
selected_words_model = graphlab.logistic_classifier.create(train_data,target='sentiment',features=selected_words,validation_set=test_data, )
selected_words_model['coefficients'].sort('value', ascending = True)
selected_words_model.evaluate(test_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MCD functions as found in
Step2: Inputs
Step3: Scalar MCD calculations following Nano Lett. 2013, 13, 4785–4789
Step4: Full Transfer Matrix Calculations
Step5: Plot
|
<ASSISTANT_TASK:>
Python Code:
# libraries
import numpy as np # numpy
import scipy as sp # scipy
import scipy.constants as sp_c # scientific constants
import sys # sys to add py_matrix to the path
# matplotlib inline plots
import matplotlib.pylab as plt
%matplotlib inline
# adding py_matrix parent folder to python path
sys.path.append('../../')
import py_matrix as pm # importing py_matrix
# useful parameters
f_size=20;
h_bar=sp_c.hbar/sp_c.eV # h_bar in eV
# size corrected damping constant for plasma oscillation
def gamma_r(gamma_inf,v_f,r):
return gamma_inf + h_bar*v_f/(r*1e-9)
# coupling function to external magnetic field
def f_w(wl,w_p,gamma):
w=1240/wl # nm to eV
f_out=-h_bar*(sp_c.e/sp_c.m_e)*(w_p**2/w)*((gamma-1j*w)*(gamma-1j*w))/((gamma**2+w**2)**2)
return f_out
# magnetic field dependent polarizability
def alpha_mattei(wl,eps,eps_m,f_w,f_m,D,B):
norm=-np.pi*(D**3)/2.0
num=eps-eps_m+(f_w-f_m)*B
den=eps+2.0*eps_m+(f_w-f_m)*B
alpha=norm*num/den
return alpha
# scattering cross section
def sigma_mattei(wl,eps_m,alpha):
return (2.0*np.pi/wl)*np.sqrt(eps_m)*np.imag(alpha)
# building the optical constant database, point the folder below to the "materials" py_matrix folder
eps_db_out=pm.mat.generate_eps_db('../materials/',ext='*.edb')
eps_files,eps_names,eps_db=eps_db_out['eps_files'],eps_db_out['eps_names'],eps_db_out['eps_db']
# multilayer and computation inputs
D=13.0; # nanocluster diameter
f=0.01; # filling factor for the effective medium containing the Au nanoclusters
stack=['e_hexane','e_au','e_hexane'] # materials composing the stack, as taken from eps_db
d_list=[0.0,D,0.0] # multilayer thicknesses: incident medium and substrate have zero thickness
# wavelength
wl_min=400
wl_max=800
wl_step=100;
v_wl=np.linspace(wl_min,wl_max,wl_step);
# polar and azimuthal angles in radians
theta_0=0.0
phi_0=0.0
# drude metal parameters
w_p=8.95 # plasma frequency
gamma=0.02 # damping constant
f_m=1.06e-6 # medium magnetic coupling
v_f=1.4e6; # Fermi velocity
gamma_inf=0.069; # bulk damping constant
# B field in Tesla
B=1.0
# Scalar MCD calculations following Nano Lett. 2013, 13, 4785–4789
v_sigma_plus=np.zeros_like(v_wl)
v_sigma_minus=np.zeros_like(v_wl)
v_sigma_zero=np.zeros_like(v_wl)
for i_wl,wl in enumerate(v_wl):
# Magnetic field corrections
gamma_r0=gamma_r(gamma_inf,v_f,D/2)
f_w0=f_w(wl,w_p,gamma_r0)
# optical constants
eps=pm.mat.db_to_eps(wl,eps_db,['e_au'])[0];
e_corr=pm.mat.eps_corr_drude(wl,w_p,gamma_inf,v_f,D/2) # mean free path correction
eps=eps+e_corr;
eps=np.real(eps)-1j*np.imag(eps); # flipping imaginary part size due to different conventions
eps_m=np.real(pm.mat.db_to_eps(wl,eps_db,['e_hexane'])[0])
# polarizability and
alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,B);v_sigma_plus[i_wl]=sigma_mattei(wl,eps_m,alpha) # Left pol
alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,-B);v_sigma_minus[i_wl]=sigma_mattei(wl,eps_m,alpha) # Right pol
alpha=alpha_mattei(wl,eps,eps_m,f_w0,f_m,D,0.0);v_sigma_zero[i_wl]=sigma_mattei(wl,eps_m,alpha) # B=0
v_mcd=(v_sigma_minus-v_sigma_plus)/v_sigma_zero.max()
# polarization correction factors for gold nanoclusters
a_x=D/2;a_y=D/2;a_z=D/2;
m_L=pm.moe.m_L(a_x,a_y,a_z);
m_D=pm.moe.m_D(a_x,a_y,a_z);
V=pm.moe.f_V(a_x,a_y,a_z)
# Full transfer matrix calculations
v_A_r=np.zeros_like(v_wl);v_A_l=np.zeros_like(v_wl);
v_A_p=np.zeros_like(v_wl);v_A_s=np.zeros_like(v_wl);
m_eps=np.zeros((len(stack),3,3),dtype=np.complex128)
for i_wl,wl in enumerate(v_wl):
# retrieving optical constants at wl from the database
e_list=pm.mat.db_to_eps(wl,eps_db,stack);
e_corr=pm.mat.eps_corr_drude(wl,w_p,gamma_inf,v_f,D/2)
#print e_corr
e_list[1]=e_list[1]+e_corr
e_xy=pm.mat.eps_xy_drude(wl, w_p, gamma_r0, B)
# calculating the MG effective medium
em=e_list[0]
m_e1=np.identity(3)*em;
m_e2=np.identity(3)*e_list[1];
m_e2[1,0]=e_xy;m_e2[0,1]=-e_xy;
m_eff=pm.mat.m_eff_MG(m_L,m_D,V,m_e1,m_e2,wl,f)
# filling dielectric tensor
m_eps[:,0,0]=e_list
m_eps[:,1,1]=e_list
m_eps[:,2,2]=e_list
m_eps[1,:,:]=m_eff
# computing transmission matrix
m_t_ps=pm.core.rt(wl,theta_0,phi_0,m_eps,d_list)['m_t_ps']
# extracting the mcd information from the transmission matrix
n_0=np.sqrt(e_list[0]);n_s=np.sqrt(e_list[-1]);
out=pm.utils.T_ps_rl(m_t_ps,theta_0,n_0,n_s)
v_A_r[i_wl]=out['A_r']
v_A_l[i_wl]=out['A_l']
v_A_p[i_wl]=out['A_p']
v_A_s[i_wl]=out['A_s']
# cmd plot
fg2=plt.figure(num=2,figsize=(15,10))
plt.plot(v_wl,1e4*(v_A_r-v_A_l)/(v_A_r).max(),'r-',
v_wl,1e4*v_mcd,'ko',
markersize=10,linewidth=3);
#-----ticks------
fsize=15;
plt.xticks(fontsize=fsize+10);plt.yticks(fontsize=fsize+10);
#------axis labels------
plt.xlabel(r'Wavelength (nm)',fontsize=fsize+15);
plt.ylabel(r'$\Delta$A/(A$_{\mathrm{max}}*$B)(T$^{-1}*10^{4}$)',fontsize=fsize+15);
#------plot legend------
plt.legend(('Tensorial','Scalar'), fontsize=fsize+15,loc='upper right',frameon=False);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the data to be generated
Step2: Create two queues, an input and output. Call them CSP (in) and TM (out).
Step3: Now populate the CSP queue with 3 "Measurements".
Step4: Can view the queue contents, if desired. Essentially an SDP Jobs List.
Step5: Can iterate over the queue, to process those jobs (non-prioritised).
|
<ASSISTANT_TASK:>
Python Code:
import queue
import numpy
from matplotlib import pylab
# %matplotlib inline
pylab.rcParams['figure.figsize'] = (8.0, 8.0)
pylab.rcParams['image.cmap'] = 'rainbow'
import matplotlib.pyplot as plt
from astropy.coordinates import SkyCoord
from astropy import units as u
from data_models.polarisation import PolarisationFrame
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.simulation.testing_support import create_blockvisibility_iterator
from wrappers.serial.simulation.configurations import create_named_configuration
from wrappers.serial.calibration.operations import qa_gaintable
from wrappers.serial.calibration.rcal import rcal
lowcore = create_named_configuration('LOWBD2-CORE')
times = numpy.linspace(-3.0, +3.0, 7) * numpy.pi / 12.0
frequency = numpy.linspace(1.0e8, 1.50e8, 3)
channel_bandwidth = numpy.array([5e7, 5e7, 5e7])
# Define the component and give it some polarisation and spectral behaviour
f = numpy.array([100.0, 20.0, -10.0, 1.0])
flux = numpy.array([f, 0.8 * f, 0.6 * f])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
compdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000')
comp = create_skycomponent(flux=flux, frequency=frequency, direction=compdirection)
def plotgain(gt, title=''):
plt.clf()
plt.plot(numpy.real(gt.gain[...,0,0]).flat, numpy.imag(gt.gain[...,0,0]).flat, '.')
plt.plot(numpy.real(gt.gain[...,1,1]).flat, numpy.imag(gt.gain[...,1,1]).flat, '.')
plt.title(title)
plt.xlabel('Real part of gain')
plt.ylabel('Imaginary part of gain')
plt.show()
csp_queue = queue.Queue()
tm_queue = queue.Queue()
num_tasks_to_queue = 3
for i in range(num_tasks_to_queue):
sb_id = i+100000
data = [sb_id, create_blockvisibility_iterator(lowcore, times=times,
frequency=frequency,
channel_bandwidth=channel_bandwidth, phasecentre=phasecentre,
weight=1, polarisation_frame=PolarisationFrame('linear'),
integration_time=1.0, number_integrations=1,
components=comp, phase_error=0.1, amplitude_error=0.01)]
csp_queue.put(data)
for elem in list(csp_queue.queue):
print(elem)
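# Note on the loop below: iter(csp_queue.get, None) keeps calling csp_queue.get() until it
# returns the sentinel value None. Nothing in this demo ever puts None on the queue, so the
# loop blocks once the three scheduling-block entries are consumed; a production version
# would push a sentinel (csp_queue.put(None)) so the loop can terminate cleanly.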
for ingest in iter(csp_queue.get, None):
sb_id = ingest[0]
print("Processing SB_ID:", sb_id, "(", csp_queue.qsize(), "items left in CSP queue )")
rcal_pipeline = rcal(vis=ingest[1], components=comp, phase_only=False)
print("Starting pipeline")
for igt, gt in enumerate(rcal_pipeline):
plotgain(gt, title="Chunk %d, time %s, residual %.3g (Jy)" % (igt, numpy.unique(gt.time),
numpy.average(gt.residual)))
tm_queue.put( [sb_id, qa_gaintable(gt)] )
print("Ingest and RCAL pipelines are empty, stopping")
print("CSP Queue is empty, stopping")
csp_queue.join()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filter
Step2: We can then select attributes based on how strongly each attribute depends on the predicted value
Step3: Various scoring metrics can be used
Step4: We can extract the list of the best features and the model's performance when using them
Step5: Embedded
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
plt.rcParams['figure.figsize'] = 9, 6
from sklearn.feature_selection import VarianceThreshold
X = np.array([[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]])
X
selector = VarianceThreshold(threshold=0.0)
selector.fit_transform(X)
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2 # other scoring metrics can be used as well
iris = load_iris()
X, y = iris.data, iris.target
X.shape
X_new = SelectKBest(chi2, k=2).fit_transform(X, y)
X_new.shape
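# Optional check (refits the same selector): inspect the per-feature chi2 scores that drive
# the choice of the two retained features; feature_names comes from the iris Bunch above.
skb = SelectKBest(chi2, k=2).fit(X, y)
print(dict(zip(iris.feature_names, skb.scores_)))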
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
knn = KNeighborsClassifier(n_neighbors=4)
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
sfs1 = SFS(knn, k_features=3, forward=True, floating=False, verbose=2, scoring='accuracy', cv=0)
# this class can perform SFS, SFFS, SBS and SFBS, and cross-validation can be added as well
sfs1 = sfs1.fit(X, y)
sfs1.subsets_
sfs1.k_feature_idx_
sfs1.k_score_
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris
from sklearn.feature_selection import SelectFromModel
iris = load_iris()
X, y = iris.data, iris.target
X.shape
clf = RandomForestClassifier()
clf = clf.fit(X, y)
clf.feature_importances_
model = SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
X_new.shape
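# Small follow-up sketch: map the boolean support mask back to the iris feature names to
# see which features the random-forest-based selector actually kept.
mask = model.get_support()
print([name for name, keep in zip(iris.feature_names, mask) if keep])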
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 2
Step3: Step 3
|
<ASSISTANT_TASK:>
Python Code:
import bs4
import requests
jbindexurl = lambda page: "http://www.amnesty.de/laenderbericht/australien?page=%d&country=&topic=&node_type=ai_annual_report&from_month=0&from_year=&to_month=0&to_year=&submit_x=103&submit_y=13&submit=Auswahl+anzeigen&result_limit=50&form_id=ai_core_search_form" % page
jbindices = [bs4.BeautifulSoup(requests.get(jbindexurl(i)).text) for i in range(4)]
import re
ar2015 = re.compile("Amnesty Report 2015")
reports = {}
for jbindex in jbindices:
a_reports = jbindex.find_all("a", text=ar2015)
for a in a_reports:
country = ' '.join(a.contents[0].split()[3:])
reports[country] = requests.get("http://www.amnesty.de" + a.get("href")).text
print(country, end=", ")
TMPL = """\
<!DOCTYPE html>
<html>
<head>
<title>Amnesty Report 2015 {country}</title>
</head>
<body>
{content}
</body>
</html>
"""
from codecs import open
for country, report in reports.items():
bs = bs4.BeautifulSoup(report)
h3 = bs.find("h3", text=ar2015)
# parent of parent contains the main content
content = h3.parent.parent
# changing the h3 header to a proper h1 header
h3.name = "h1"
    # we neither want the top bar nor the bar at the bottom for "zurück"
# (extract() removes it from the DOM)
for bar in content.find_all("ul", class_ = "ai_core_service_bar"):
bar.extract()
# writing to html file
with open(country.lower().replace(" ", "_") + ".html", "w", "utf8") as f:
f.write(TMPL.format(country = country, content = str(content)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Retrieving training and test data
Step2: Visualize the training data
Step3: Building the network
Step4: Training the network
Step5: Testing
|
<ASSISTANT_TASK:>
Python Code:
# Import Numpy, TensorFlow, TFLearn, and MNIST data
import numpy as np
import tensorflow as tf
import tflearn
import tflearn.datasets.mnist as mnist
# mnist fails to load, so got this patch from the nd101 slack
def patched_read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
mnist._read32 = patched_read32
# Retrieve the training and test data
trainX, trainY, testX, testY = mnist.load_data(one_hot=True)
# Visualizing the data
import matplotlib.pyplot as plt
%matplotlib inline
# Function for displaying a training image by it's index in the MNIST set
def show_digit(index):
label = trainY[index].argmax(axis=0)
# Reshape 784 array into 28x28 image
image = trainX[index].reshape([28,28])
plt.title('Training data, index: %d, Label: %d' % (index, label))
plt.imshow(image, cmap='gray_r')
plt.show()
# Display the first (index 0) training image
show_digit(0)
# Define the neural network
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
# Include the input layer, hidden layer(s), and set how you want to train the model
#input layer
net = tflearn.input_data([None, 784])
#hidden layer 1
net = tflearn.fully_connected(net, 196, activation='ReLU')
# hidden layer 2
net = tflearn.fully_connected(net, 49, activation='ReLU')
# output layer
net = tflearn.fully_connected(net, 10, activation='softmax')
# how does it learn?
net = tflearn.regression(net, optimizer='sgd', learning_rate=0.05, loss='categorical_crossentropy')
# This model assumes that your network is named "net"
model = tflearn.DNN(net)
return model
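# A note on the architecture above: the 784 inputs feed two ReLU hidden layers of 196 and
# 49 units (each roughly a quarter of the previous layer) before the 10-way softmax output,
# and categorical cross-entropy with plain SGD is the usual pairing for this kind of setup.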
# Build the model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=100, n_epoch=20)
# Compare the labels that our model predicts with the actual labels
# Find the indices of the most confident prediction for each item. That tells us the predicted digit for that sample.
predictions = np.array(model.predict(testX)).argmax(axis=1)
# Calculate the accuracy, which is the percentage of times the predicated labels matched the actual labels
actual = testY.argmax(axis=1)
test_accuracy = np.mean(predictions == actual, axis=0)
# Print out the result
print("Test accuracy: ", test_accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: My niece is always complaining there was no service between 11
Step2: That's actually not so bad. There is a lull in service at 1
Step3: Now that the timestamps are sorted, we can check how long a person that just missed a bus will wait for the next one.
Step4: Now we can compute the average wait time per hour blocks.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import csv
from datetime import datetime, timedelta
def timeparse(ds):
timestamp = datetime.strptime(ds, "%Y%m%d%H%M%S")
#return "{0}-{1}-{2}T{3}:{4}:{5}Z".format(year, month, day, hh, mm, ss)
return timestamp
counts = [0] * 24
trips = []
times = []
last = datetime.fromordinal(1)
epsilon = timedelta(minutes=15) # it takes around 20 minutes for the bus to come back
lastbus = 0
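# De-duplication logic for the loop below: consecutive records from the same vehicle within
# the 15-minute epsilon window are treated as one stop event (a bus idling in traffic is not
# counted repeatedly), while a different vehicle id always starts a new event.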
with open('parana-big.txt', 'rb') as csvfile:
amareader = csv.reader(csvfile)
for row in amareader:
timestamp = timeparse(row[1])
bus = row[0]
tdiff = abs(timestamp - last)
# make sure it's a different bus, not the same stuck in traffic
# the data is sorted by vehicle number, then date
if tdiff > epsilon or bus != lastbus:
# print timestamp, last, row
lastbus = bus
last = timestamp
hour = timestamp.hour
counts[hour] += 1
trips.append(timestamp)
midnight = timestamp.replace(hour=0, minute=0, second=0)
diff = timestamp - midnight
times.append(diff.total_seconds())
counts
from pylab import *
bar(range(24), counts)
xlim(0,24)
trips[:10]
trips.sort() # sort by datetime
trips[:10]
intervals = [0] * 24
counts = [0] * 24
for i in range(1, len(trips)):
wait = trips[i] - trips[i-1] # how long since the last bus?
hour = trips[i].hour
# count how many hours you waited if you missed the last bus
intervals[hour] += wait.seconds / 60.0**2
counts[hour] += 1
for i in range(24):
if counts[i]: # don't divide by zero
intervals[i] = intervals[i] / counts[i]
intervals
bar(range(24), intervals)
xlim(0,24)
len(trips)
import numpy as np
day = 24 * 60 * 60
bins = np.arange(day / (6 * 60) ,dtype = np.float_) * 6 * 60
plt = hist(times, bins)
xlim(0, 240 * 6* 60)
trips[:20]
trips[15:15+30]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's plot the smith chart for it
Step2: Now let's calculate the source and load stability curves.
Step3: So we can see that we need to avoid inductive loads near short circuit in the input matching network and high impedance inductive loads on the output.
Step4: So we can see from the chart that just leaving the input at 50 ohms gets us under 0.1 dB of extra noise, which seems pretty good. I'm actually not sure that these actually correspond to the noise figure level increments I have listed up there, but the circles should at least correspond to increasing noise figures
Step5: This looks like it may be kind of close to the load instability circles, so it might make sense to pick a load point with less gain for more stability, or to pick a different source impedance with more noise.
Step6: Let's calculate what the component values are
Step7: The capacitance is kind of low but the inductance seems reasonable. Let's test it out
Step8: That looks pretty reasonable; let's take a look at the S21 to see what we got
Step9: So about 18 dB gain; let's see what our noise figure is
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [10, 10]
import skrf as rf
from skrf.media import DistributedCircuit
f = rf.Frequency(0.4, 2, 101)
tem = DistributedCircuit(f, z0=50)
# import the scattering parameters/noise data for the transistor
bjt = rf.Network('BFU520_05V0_010mA_NF_SP.s2p').interpolate(f)
print(bjt)
bjt.plot_s_smith(lw=2)
sqabs = lambda x: np.square(np.absolute(x))
delta = bjt.s11.s*bjt.s22.s - bjt.s12.s*bjt.s21.s
rl = np.absolute((bjt.s12.s * bjt.s21.s)/(sqabs(bjt.s22.s) - sqabs(delta)))
cl = np.conj(bjt.s22.s - delta*np.conj(bjt.s11.s))/(sqabs(bjt.s22.s) - sqabs(delta))
rs = np.absolute((bjt.s12.s * bjt.s21.s)/(sqabs(bjt.s11.s) - sqabs(delta)))
cs = np.conj(bjt.s11.s - delta*np.conj(bjt.s22.s))/(sqabs(bjt.s11.s) - sqabs(delta))
def calc_circle(c, r):
theta = np.linspace(0, 2*np.pi, 1000)
return c + r*np.exp(1.0j*theta)
for i, f in enumerate(bjt.f):
# decimate it a little
if i % 100 != 0:
continue
n = rf.Network(name=str(f/1.e+9), s=calc_circle(cs[i][0, 0], rs[i][0, 0]))
n.plot_s_smith()
for i, f in enumerate(bjt.f):
# decimate it a little
if i % 100 != 0:
continue
n = rf.Network(name=str(f/1.e+9), s=calc_circle(cl[i][0, 0], rl[i][0, 0]))
n.plot_s_smith()
idx_915mhz = rf.util.find_nearest_index(bjt.f, 915.e+6)
# we need the normalized equivalent noise and optimum source coefficient to calculate the constant noise circles
rn = bjt.rn[idx_915mhz]/50
gamma_opt = bjt.g_opt[idx_915mhz]
fmin = bjt.nfmin[idx_915mhz]
for nf_added in [0, 0.1, 0.2, 0.5]:
nf = 10**(nf_added/10) * fmin
N = (nf - fmin)*abs(1+gamma_opt)**2/(4*rn)
c_n = gamma_opt/(1+N)
r_n = 1/(1-N)*np.sqrt(N**2 + N*(1-abs(gamma_opt)**2))
n = rf.Network(name=str(nf_added), s=calc_circle(c_n, r_n))
n.plot_s_smith()
print("the optimum source reflection coefficient is ", gamma_opt)
gamma_s = 0.0
gamma_l = np.conj(bjt.s22.s - bjt.s21.s*gamma_s*bjt.s12.s/(1-bjt.s11.s*gamma_s))
gamma_l = gamma_l[idx_915mhz, 0, 0]
is_gamma_l_stable = np.absolute(gamma_l - cl[idx_915mhz]) > rl[idx_915mhz]
gamma_l, is_gamma_l_stable
def calc_matching_network_vals(z1, z2):
flipped = np.real(z1) < np.real(z2)
if flipped:
z2, z1 = z1, z2
# cancel out the imaginary parts of both input and output impedances
z1_par = 0.0
if abs(np.imag(z1)) > 1e-6:
# parallel something to cancel out the imaginary part of
# z1's impedance
z1_par = 1/(-1j*np.imag(1/z1))
z1 = 1/(1./z1 + 1/z1_par)
z2_ser = 0.0
if abs(np.imag(z2)) > 1e-6:
z2_ser = -1j*np.imag(z2)
z2 = z2 + z2_ser
Q = np.sqrt((np.real(z1) - np.real(z2))/np.real(z2))
x1 = -1.j * np.real(z1)/Q
x2 = 1.j * np.real(z2)*Q
x1_tot = 1/(1/z1_par + 1/x1)
x2_tot = z2_ser + x2
if flipped:
return x2_tot, x1_tot
else:
return x1_tot, x2_tot
z_l = rf.s2z(np.array([[[gamma_l]]]))[0,0,0]
# note that we're matching against the conjugate;
# this is because we want to see z_l from the BJT side
# if we plugged in z the matching network would make
# the 50 ohms look like np.conj(z) to match against it, so
# we use np.conj(z_l) so that it'll look like z_l from the BJT's side
z_par, z_ser = calc_matching_network_vals(np.conj(z_l), 50)
z_l, z_par, z_ser
c_par = np.real(1/(2j*np.pi*915e+6*z_par))
l_ser = np.real(z_ser/(2j*np.pi*915e+6))
print(c_par, l_ser)
output_network = tem.shunt_capacitor(c_par) ** tem.inductor(l_ser)
amplifier = bjt ** output_network
amplifier.plot_s_smith()
amplifier.s21.plot_s_db()
10*np.log10(amplifier.nf(50.)[idx_915mhz])
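# Quick numeric cross-check of the gain plotted earlier, using the same 915 MHz index
# defined above (nothing new is assumed beyond the networks already built):
print("S21 at 915 MHz: %.1f dB" % amplifier.s21.s_db[idx_915mhz, 0, 0])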
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's first download an example file with some CTD data
Step2: The profile dPIRX003.cnv.OK was loaded with the default rule cnv.yaml
Step3: We have latitude in the header, and pressure in the data.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from seabird.cnv import fCNV
from gsw import z_from_p
!wget https://raw.githubusercontent.com/castelao/seabird/master/sampledata/CTD/dPIRX003.cnv
profile = fCNV('dPIRX003.cnv')
print("Header: %s" % profile.attributes.keys())
print("Data: %s" % profile.keys())
z = z_from_p(profile['PRES'], profile.attributes['LATITUDE'])
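# Note: gsw.z_from_p returns height relative to the sea surface, so z is negative below the
# surface; plotting against it puts the surface (z = 0) at the top of the depth axis.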
from matplotlib import pyplot as plt
plt.plot(profile['TEMP'], z,'b')
plt.plot(profile['TEMP2'], z,'g')
plt.xlabel('temperature')
plt.ylabel('depth')
plt.title(profile.attributes['filename'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authorization
Step2: Find and load an object
Step3: Get the contigs for the assembly
Step4: View the contigs
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import qgrid
qgrid.nbinstall()
from biokbase import data_api
from biokbase.data_api import display
display.nbviewer_mode(True)
import os
os.environ['KB_AUTH_TOKEN'] = open('/tmp/kb_auth_token.txt').read().strip()
b = data_api.browse(1019)
x = b[0].object # Assembly object
cid_strings = x.get_contig_ids() # 1 min
cids = display.Contigs(cid_strings)
from biokbase import data_api
from biokbase.data_api import display
list(b)
rg = b[0]
rgo = rg.object
type(rgo)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Linear regression
Step3: We could also implement it with the numpy covariance function. The diagonal terms represent the variance.
Step5: Coding as a least square problem
Step6: The simple ways
Step7: scipy
Step8: scikit-learn
Step9: Efficiency
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
x = np.arange(10.)
y = 5*x+3
np.random.seed(3)
y+= np.random.normal(scale=10,size=x.size)
plt.scatter(x,y);
def lin_reg(x,y):
    """Perform a linear regression of x vs y.
    x, y are 1 dimensional numpy arrays
    returns alpha and beta for the model y = alpha + beta*x
    """
    # one possible completion: beta = cov(x, y)/var(x), alpha = mean(y) - beta*mean(x)
    beta = (np.mean(x*y) - np.mean(x)*np.mean(y))/(np.mean(x**2) - np.mean(x)**2)
    alpha = np.mean(y) - beta*np.mean(x)
    return alpha, beta
lin_reg(x,y)
def lin_reg2(x,y):
    """Perform a linear regression of x vs y. Uses covariances.
    x, y are 1 dimensional numpy arrays
    returns alpha and beta for the model y = alpha + beta*x
    """
    c = np.cov(x, y)
    # one possible completion: c[0, 0] is the variance of x, c[0, 1] the covariance of x and y
    beta = c[0, 1]/c[0, 0]
    alpha = np.mean(y) - beta*np.mean(x)
    return alpha, beta
lin_reg2(x,y)
def lin_reg3(x,y):
    """Perform a linear regression of x vs y. Uses least squares.
    x, y are 1 dimensional numpy arrays
    returns alpha and beta for the model y = alpha + beta*x
    """
    # one possible completion: solve the overdetermined system [1, x] @ [alpha, beta] = y
    A = np.vstack([np.ones_like(x), x]).T
    alpha, beta = np.linalg.lstsq(A, y, rcond=None)[0]
    return alpha, beta
lin_reg3(x,y)
# one simple way: np.polyfit returns the coefficients with the highest degree first
beta, alpha = np.polyfit(x, y, 1)
alpha, beta
import scipy.stats as stats
# stats.linregress returns slope, intercept, r-value, p-value and the standard error
stats.linregress(x, y)
from sklearn import linear_model
# scikit-learn expects a 2D feature array, hence the reshape
reg = linear_model.LinearRegression()
reg.fit(x.reshape(-1, 1), y)
reg.intercept_, reg.coef_[0]
x = np.arange(10.)
y = 5*x+3
np.random.seed(3)
y+= np.random.normal(scale=10,size=x.size)
plt.scatter(x,y);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: 1.1 Get the Pageviews Data, desktop
Step3: 1.2 Get the Pageviews Data, mobile-web
Step4: 1.3 Get the Pageviews Data, mobile-app
Step5: 1.4 Get the Pagecounts Data, desktop-site
Step6: 1.5 Get the Pagecounts Data, mobile-site
Step8: Step 2
Step9: Convert all JSON datasets to data frames by calling the function above.
Step10: Merge the monthly values for mobile-app and mobile-web on the year and month attributes for the pageview data sets. Add a new column called pageview_mobile_views by summing the monthly values for mobile-app and mobile-web. Drop the pageview_mobile-web_views and pageview_mobile-app_views columns afterward.
Step11: Merge the monthly values for mobile and desktop on the year and month attributes for the pageview dataset. Add a new column called pageview_all_views by summing pageview_mobile_views and pageview_desktop_views.
Step12: Merge the monthly values for desktop-site and mobile-site on the year and month attributes. Replace all NaN values with 0. Add a new column called pagecount_all_views by summing pagecount_mobile_views and pagecount_desktop_views.
Step13: Merge the pagecount and pageview datasets on the year and month attributes. Replace all NaN values with 0 and reorder the columns.
Step14: Last step, output the data frame to a CSV file
Step15: Step 3
|
<ASSISTANT_TASK:>
Python Code:
import json
import requests
import pandas as pd
import matplotlib.pyplot as plt

def get_data(url, access, file_name):
    """This function takes a url and a value for the key
    'access'/'access-site', depending on whether the pageviews or
    the pagecounts dataset is requested. It then saves the data as a
    JSON file under the given file_name in your directory.

    Args:
        param1 (str): a url for the API
        param2 (str): value for the access parameter
        param3 (str): the output file name
    """
endpoint = url
if 'pageviews' in url:
params = {'project' : 'en.wikipedia.org',
'access' : access,
'agent' : 'user',
'granularity' : 'monthly',
'start' : '2008080100',
'end' : '2017100100'}
elif 'pagecounts' in url:
params = {'project' : 'en.wikipedia.org',
'access-site' : access,
'granularity' : 'monthly',
'start' : '2008010100',
'end' : '2016080100'}
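    # Note: Wikimedia asks API clients to identify themselves; if desired, a descriptive
    # User-Agent header can be passed via requests.get(..., headers={'User-Agent': '...'}).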
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
with open(file_name, 'w') as f:
json.dump(response, f)
url = 'http://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
file_name = 'pageviews_desktop_201507-201709.json'
get_data(url, 'desktop', file_name)
url = 'http://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
file_name = 'pageviews_mobile-web_201507-201709.json'
get_data(url, 'mobile-web', file_name)
url = 'http://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{access}/{agent}/{granularity}/{start}/{end}'
file_name = 'pageviews_mobile-app_201507-201709.json'
get_data(url, 'mobile-app', file_name)
url = 'http://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'
file_name = 'pagecounts_desktop-site_200801-201607.json'
get_data(url, 'desktop-site', file_name)
url = 'http://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{access-site}/{granularity}/{start}/{end}'
file_name = 'pagecounts_mobile-site_200801-201607.json'
get_data(url, 'mobile-site', file_name)
def convert_json_to_dataframe(file_name):
    """This function takes an input JSON file, converts it to a
    data frame and performs some manipulations.

    Args:
        param1 (str): input file name
    Returns:
        a data frame associated with the given input file
    """
with open(file_name) as json_file:
json_data = json.load(json_file)
col_name = file_name.split('_')[1]
data_frame = pd.DataFrame(json_data['items'])
if 'pageviews' in file_name:
data_frame.drop(data_frame.columns[[0, 1, 2, 3]], axis=1, inplace=True)
data_frame.rename(columns={'views':'pageview_' + col_name + '_views'}, inplace=True)
elif 'pagecounts' in file_name:
data_frame.drop(data_frame.columns[[0, 2, 3]], axis=1, inplace=True)
data_frame.rename(columns={'count':'pagecount_' + col_name + '_views'}, inplace=True)
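    # timestamps from the API look like 'YYYYMMDDHH...', so slice out the year and month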
data_frame['year'] = data_frame['timestamp'].apply(lambda x: x[:4])
data_frame['month'] = data_frame['timestamp'].apply(lambda x: x[4:6])
data_frame.drop('timestamp', axis=1, inplace=True)
return data_frame
mobile_web = convert_json_to_dataframe('pageviews_mobile-web_201507-201709.json')
mobile_app = convert_json_to_dataframe('pageviews_mobile-app_201507-201709.json')
desktop = convert_json_to_dataframe('pageviews_desktop_201507-201709.json')
desktop_site = convert_json_to_dataframe('pagecounts_desktop-site_200801-201607.json')
mobile_site = convert_json_to_dataframe('pagecounts_mobile-site_200801-201607.json')
total_mobile = pd.merge(mobile_web, mobile_app, how='outer', on=['year', 'month'])
total_mobile['pageview_mobile_views'] = total_mobile['pageview_mobile-web_views'] + total_mobile['pageview_mobile-app_views']
total_mobile.drop(total_mobile.columns[[0, 3]], axis=1, inplace=True)
total_mobile.head()
pageview = pd.merge(total_mobile, desktop, how='outer', on=['year', 'month'])
pageview['pageview_all_views'] = pageview['pageview_mobile_views'] + pageview['pageview_desktop_views']
pageview.head()
pagecount = pd.merge(desktop_site, mobile_site, how='outer', on=['year', 'month'])
pagecount.rename(columns={'pagecount_mobile-site_views':'pagecount_mobile_views'}, inplace=True)
pagecount.rename(columns={'pagecount_desktop-site_views':'pagecount_desktop_views'}, inplace=True)
pagecount.fillna(0, inplace=True)
pagecount['pagecount_all_views'] = pagecount['pagecount_mobile_views'] + pagecount['pagecount_desktop_views']
pagecount.head()
combine = pd.merge(pagecount, pageview, how='outer', on=['year', 'month'])
combine = combine.fillna(0).astype(int)
combine = combine[["year", "month", "pagecount_all_views", "pagecount_desktop_views", "pagecount_mobile_views",
"pageview_all_views", "pageview_desktop_views", "pageview_mobile_views"]]
combine.head()
combine.to_csv('en-wikipedia_traffic_200801-201709.csv', encoding='utf-8', index=False)
scale = 1000000
x = pd.date_range(start='2008-01', end='2017-10', freq='M')
y_1 = combine["pageview_desktop_views"] / scale
y_2 = combine["pageview_mobile_views"] / scale
y_3 = combine["pageview_all_views"] / scale
y_4 = combine["pagecount_desktop_views"] / scale
y_5 = combine["pagecount_mobile_views"] / scale
y_6 = combine["pagecount_all_views"] / scale
fig = plt.figure(figsize=(18, 12))
# plot pageviews data
plt.plot(x, y_1, label="pageviews main site", color="black")
plt.plot(x, y_2, label="pageviews mobile site", color="green")
plt.plot(x, y_3, label="pageviews total", color="blue")
# plot pagecounts data
plt.plot(x, y_4, linestyle="--", label="pagecounts main site", color="black")
plt.plot(x, y_5, linestyle="--", label="pagecounts mobile site", color="green")
plt.plot(x, y_6, linestyle="--", label="pagecounts total", color="blue")
plt.legend(loc='upper left', prop={'size': 18})
plt.xlabel("Year", fontsize=24)
plt.xticks(fontsize=18)
plt.ylabel("Wikipedia Page Views (x 1,000,000)", fontsize=24)
plt.yticks(fontsize=18)
plt.title("Page views Traffic on English Wikipedia (x 1,000,000)", fontsize=24)
# save the plot
fig.savefig("en-wikipedia_traffic.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading directly from the csv
Step2: Excluding columns that are not needed
Step3: Now, we need to start using MLlib - the Spark ML library
Step4: Separating features from target
Step5: Inserting the necessary fields inside the features vector
Step6: Split our data into training set and test set
Step7: Apply Logistic Regression
|
<ASSISTANT_TASK:>
Python Code:
# Import findspark
import findspark
# Initialize and provide path
findspark.init("/home/henrique/Downloads/spark")
# Or use this alternative
#findspark.init()
# Import SparkSession
from pyspark.sql import SparkSession
# Build the SparkSession
spark = SparkSession.builder \
.master("local") \
.appName("Linear Regression Model") \
.config("spark.executor.memory", "1gb") \
.getOrCreate()
sc = spark.sparkContext
import pyspark
sql = pyspark.sql.SQLContext(sc)
df = (sql.read
.format("com.databricks.spark.csv") # Choose the bib to oad csv
.option("header", "true") # Use the first line as header
.option("inferSchema", "true") # Try to infer data type - if this is not set all the typer will be str
.load("games.csv")) # File name
df
excludes = [
't1_ban1',
't1_ban2',
't1_ban3',
't1_ban4',
't1_ban5',
't1_champ1_sum1',
't1_champ1_sum2',
't1_champ1id',
't1_champ2_sum1',
't1_champ2_sum2',
't1_champ2id',
't1_champ3_sum1',
't1_champ3_sum2',
't1_champ3id',
't1_champ4_sum1',
't1_champ4_sum2',
't1_champ4id',
't1_champ5_sum1',
't1_champ5_sum2',
't1_champ5id',
't2_ban1',
't2_ban2',
't2_ban3',
't2_ban4',
't2_ban5',
't2_champ1_sum1',
't2_champ1_sum2',
't2_champ1id',
't2_champ2_sum1',
't2_champ2_sum2',
't2_champ2id',
't2_champ3_sum1',
't2_champ3_sum2',
't2_champ3id',
't2_champ4_sum1',
't2_champ4_sum2',
't2_champ4id',
't2_champ5_sum1',
't2_champ5_sum2',
't2_champ5id']
for exclude in excludes:
df = df.drop(exclude)
print(df.columns)
df.printSchema()
df.dtypes
df.select('gameId','t1_inhibitorKills','t2_towerKills','winner').show(15)
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import StandardScaler
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import PCA
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.clustering import GaussianMixture
from pyspark.mllib.classification import LogisticRegressionWithLBFGS, LogisticRegressionModel
# Renaming winner to label
df = df.withColumnRenamed("winner","label")
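# Note (assumption): in this dataset the winner/label column holds 1 or 2, while Spark's
# binomial logistic regression expects labels in {0, 1}; if fitting complains about the
# labels, one option is to remap them first, e.g. df.withColumn("label", df["label"] - 1).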
df.printSchema()
feat_fields = ['gameDuration',
'seasonId',
'firstBlood',
'firstTower',
'firstInhibitor',
'firstBaron',
'firstDragon',
'firstRiftHerald',
't1_towerKills',
't1_inhibitorKills',
    't1_baronKills',
't1_dragonKills',
't1_riftHeraldKills',
't2_towerKills',
't2_inhibitorKills',
't2_baronKills',
't2_dragonKills',
't2_riftHeraldKills']
assembler = VectorAssembler(inputCols=feat_fields, outputCol="features")
output = assembler.transform(df)
# The df will contain all the old Coluns and a new one features
# which will contain features we want
output.select('gameDuration','seasonId', 'features').show(20)
(trainingData, testData) = output.randomSplit([0.8, 0.2], seed = 1234)
print("Training Dataset Count: " + str(trainingData.count()))
print("Test Dataset Count: " + str(testData.count()))
lr = LogisticRegression(maxIter=20, regParam=0.3, elasticNetParam=0.8, family = "binomial")
lrModel = lr.fit(trainingData)
import matplotlib.pyplot as plt
import numpy as np
beta = np.sort(lrModel.coefficients)
plt.plot(beta)
plt.ylabel('Beta Coefficients')
plt.show()
trainingSummary = lrModel.summary
# Obtain the objective per iteration
objectiveHistory = trainingSummary.objectiveHistory
plt.plot(objectiveHistory)
plt.ylabel('Objective Function')
plt.xlabel('Iteration')
plt.show()
# Obtain the receiver-operating characteristic as a dataframe and areaUnderROC.
print("areaUnderROC: " + str(trainingSummary.areaUnderROC))
#trainingSummary.roc.show(n=10, truncate=15)
roc = trainingSummary.roc.toPandas()
plt.plot(roc['FPR'],roc['TPR'])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.title('ROC Curve')
plt.show()
pr = trainingSummary.pr.toPandas()
plt.plot(pr['recall'],pr['precision'])
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.show()
predictions = lrModel.transform(testData)
predictions.select("label","prediction","probability")\
.show(n=10, truncate=40)
print("Number of correct prediction: " + str(predictions.filter(predictions['prediction'] == predictions['label']).count()))
print("Total of elements: " + str(testData.count()))
print(str(predictions.filter(predictions['prediction'] == predictions['label']).count()/testData.count()*100) + '%')
predictions.filter(predictions['prediction'] == predictions['label'])\
.select("gameId","probability","label","prediction").show(20)
from pyspark.ml.evaluation import BinaryClassificationEvaluator
print("Training: Area Under ROC: " + str(trainingSummary.areaUnderROC))
# Evaluate model
evaluator = BinaryClassificationEvaluator(rawPredictionCol="rawPrediction")
print("Test: Area Under ROC: " + str(evaluator.evaluate(predictions, {evaluator.metricName: "areaUnderROC"})))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Definitions
Step2: Problem Statement
Step3: Summarizing the states,
Step4:
Step5:
Step6:
|
<ASSISTANT_TASK:>
Python Code:
from thermostate import State, Q_, units
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
substance = 'air'
p_1 = Q_(1.0, 'bar')
T_1 = Q_(300.0, 'K')
mdot = Q_(6.0, 'kg/s')
T_3 = Q_(1400.0, 'K')
p2_p1 = Q_(10.0, 'dimensionless')
T_3_low = Q_(1000.0, 'K')
T_3_high = Q_(1800.0, 'K')
st_amb = State(substance, T=T_1, p=p_1)
c_v = st_amb.cv
c_p = st_amb.cp
k = c_p/c_v
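# cold air-standard analysis: with constant specific heats, the isentropic compression
# and expansion follow T2/T1 = (p2/p1)**((k - 1)/k)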
T_2 = T_1*p2_p1**((k - 1)/k)
p_2 = p2_p1*p_1
p_3 = p_2
p_4 = p_1
T_4 = T_3*(p_4/p_3)**((k - 1)/k)
Wdot_c = (mdot*c_p*(T_1 - T_2)).to('kW')
Wdot_t = (mdot*c_p*(T_3 - T_4)).to('kW')
bwr = abs(Wdot_c)/Wdot_t
Wdot_net = Wdot_c + Wdot_t
Qdot_23 = (mdot*c_p*(T_3 - T_2)).to('kW')
eta = Wdot_net/Qdot_23
T_range = np.linspace(T_3_low, T_3_high, 200)
Wdot_net_l = np.zeros(T_range.shape) * units.kW
eta_l = np.zeros(T_range.shape) * units.dimensionless
for i, T_3 in enumerate(T_range):
T_4 = T_3*(p_4/p_3)**((k - 1)/k)
Wdot_t = (mdot*c_p*(T_3 - T_4)).to('kW')
Wdot_net = Wdot_c + Wdot_t
Wdot_net_l[i] = Wdot_net
Qdot_23 = (mdot*c_p*(T_3 - T_2)).to('kW')
eta = Wdot_net/Qdot_23
eta_l[i] = eta
fig, power_ax = plt.subplots()
power_ax.plot(T_range, Wdot_net_l, label='Net power output', color='C0')
eta_ax = power_ax.twinx()
eta_ax.plot(T_range, eta_l, label='Thermal efficiency', color='C1')
power_ax.set_xlabel('Turbine Inlet Temperature (K)')
power_ax.set_ylabel('Net power output (kW)')
eta_ax.set_ylabel('Thermal efficiency')
lines, labels = power_ax.get_legend_handles_labels()
lines2, labels2 = eta_ax.get_legend_handles_labels()
power_ax.legend(lines + lines2, labels + labels2, loc='best');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: However pickle cannot serialise all objects ... "Use dill!" you say; still the pickle/dill method of serializing is rather indiscriminate. Some of our objects may contain runtime data we can't or don't want to store, coroutines, threads, locks, open files, you name it. We work with a Sqlite3 database to store our data. An application might store gigabytes of numerical data. We don't want those binary blobs in our database, rather to store them externally in a HDF5 file.
Step2: Let's see what is made of our objects!
Step3: Great! JSON compatible data stays the same. Now try an object that JSON doesn't know about.
Step5: Objects are encoded as a dictionary containing a '_noodles' key. So what will happen if we serialise an object the registry cannot possibly know about? Next we define a little astronomical class describing a star in the Morgan-Keenan classification scheme.
Step8: The registry obviously doesn't know about Stars, so it falls back to serialisation using pickle. The pickled data is further encoded using base64. This solution won't work if some of your data cannot be pickled. Also, if you're sensitive to aesthetics, the pickled output doesn't look very nice.
Step9: The class became quite a bit bigger. However, the __str__, __repr__ and from_string methods are part of an interface you'd normally implement to make your class more useful.
Step10: The __serialize__ method takes one argument (besides self). The argument pack is a function that creates the data record with all handles attached. The reason for this construct is that it takes keyword arguments for special cases.
Step13: Data classes
Step14: Data classes are recognised by Noodles and will be automatically serialised.
Step16: Writing a Serialiser class (example with large data)
Step18: Is this useable for large data? Let's see how this scales (code to generate this plot is below)
Step20: And put it all together in a class SerArray derived from Serialiser.
Step21: We have to insert the serialiser into a new registry.
Step22: Now we can serialise our first Numpy array!
Step23: Now, we should be able to read back the data directly from the HDF5.
Step24: Since we have set the ref property to True, we can now read back the serialised object without dereferencing. This will result in a placeholder object containing only the encoded data.
Step25: If we want to retrieve the data we should run from_json with deref=True
Step26: Appendix A
Step27: The following code will parse the stellar types we used before
Step29: Appendix B
|
<ASSISTANT_TASK:>
Python Code:
from noodles.tutorial import display_text
import pickle
function = pickle.dumps(str.upper)
message = pickle.dumps("Hello, Wold!")
display_text("function: " + str(function))
display_text("message: " + str(message))
pickle.loads(function)(pickle.loads(message))
import noodles
def registry():
return noodles.serial.pickle() \
+ noodles.serial.base()
reg = registry()
display_text(reg.to_json([
"These data are JSON compatible!", 0, 1.3, None,
{"dictionaries": "too!"}], indent=2))
display_text(reg.to_json({1, 2, 3}, indent=2), [1])
class Star(object):
    """Morgan-Keenan stellar classification."""
def __init__(self, spectral_type, number, luminocity_class):
assert spectral_type in "OBAFGKM"
assert number in range(10)
self.spectral_type = spectral_type
self.number = number
self.luminocity_class = luminocity_class
rigel = Star('B', 8, 'Ia')
display_text(reg.to_json(rigel, indent=2), [4], max_width=60)
class Star(object):
    """Morgan-Keenan stellar classification."""
def __init__(self, spectral_type, number, luminocity_class):
assert spectral_type in "OBAFGKM"
assert number in range(10)
self.spectral_type = spectral_type
self.number = number
self.luminocity_class = luminocity_class
def __str__(self):
return f'{self.spectral_type}{self.number}{self.luminocity_class}'
def __repr__(self):
return f'Star.from_string(\'{str(self)}\')'
@staticmethod
def from_string(string):
        """Construct a new Star from a string describing the stellar type."""
return Star(string[0], int(string[1]), string[2:])
def __serialize__(self, pack):
return pack(str(self))
@classmethod
def __construct__(cls, data):
return Star.from_string(data)
sun = Star('G', 2, 'V')
print("The Sun is a", sun, "type star.")
encoded_star = reg.to_json(sun, indent=2)
display_text(encoded_star, [4])
decoded_star = reg.from_json(encoded_star)
display_text(repr(decoded_star))
from dataclasses import dataclass, is_dataclass
@dataclass
class Star:
    """Morgan-Keenan stellar classification."""
spectral_type: str
number: int
luminocity_class: str
def __str__(self):
return f'{self.spectral_type}{self.number}{self.luminocity_class}'
@staticmethod
def from_string(string):
        """Construct a new Star from a string describing the stellar type."""
return Star(string[0], int(string[1]), string[2:])
altair = Star.from_string("A7V")
encoded_star = reg.to_json(altair, indent=2)
display_text(encoded_star, [2])
import numpy
import hashlib
import base64
def array_sha256(a):
    """Create a SHA256 hash from a Numpy array."""
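    # the dtype and shape are hashed along with the raw bytes, so arrays with identical
    # bytes but a different layout end up under different keys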
dtype = str(a.dtype).encode()
shape = numpy.array(a.shape)
sha = hashlib.sha256()
sha.update(dtype)
sha.update(shape)
sha.update(a.tobytes())
return base64.urlsafe_b64encode(sha.digest()).decode()
import h5py
def save_array_to_hdf5(filename, lock, array):
    """Save an array to a HDF5 file, using the SHA-256 of the array
    data as path within the HDF5. The `lock` is needed to prevent
    simultaneous access from multiple threads.
    """
hdf5_path = array_sha256(array)
with lock, h5py.File(filename) as hdf5_file:
if not hdf5_path in hdf5_file:
dataset = hdf5_file.create_dataset(
hdf5_path, shape=array.shape, dtype=array.dtype)
dataset[...] = array
hdf5_file.close()
return hdf5_path
import filelock
from noodles.serial import Serialiser, Registry
class SerArray(Serialiser):
    """Serialises Numpy array to HDF5 file."""
def __init__(self, filename, lockfile):
super().__init__(numpy.ndarray)
self.filename = filename
self.lock = filelock.FileLock(lockfile)
def encode(self, obj, pack):
key = save_array_to_hdf5(self.filename, self.lock, obj)
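        # ref=True stores the record as an opaque reference: from_json() gives back a
        # placeholder unless it is called with deref=True; files=[...] lists the files
        # that hold the actual data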
return pack({
"filename": self.filename,
"hdf5_path": key,
}, files=[self.filename], ref=True)
def decode(self, cls, data):
with self.lock, h5py.File(self.filename) as hdf5_file:
return hdf5_file[data["hdf5_path"]].value
!rm -f tutorial.h5 # remove from previous run
import noodles
from noodles.tutorial import display_text
def registry():
return Registry(
parent=noodles.serial.base(),
types={
numpy.ndarray: SerArray('tutorial.h5', 'tutorial.lock')
})
reg = registry()
encoded_array = reg.to_json(numpy.arange(10), host='localhost', indent=2)
display_text(encoded_array, [6])
with h5py.File('tutorial.h5') as f:
result = f['4Z8kdMg-CbjgTKKYlz6b-_-Tsda5VAJL44OheRB10mU='][()]
print(result)
ref = reg.from_json(encoded_array)
display_text(ref)
display_text(vars(ref), max_width=60)
display_text(reg.from_json(encoded_array, deref=True))
!pip install pyparsing
from pyparsing import Literal, replaceWith, OneOrMore, Word, nums, oneOf
def roman_numeral_literal(string, value):
return Literal(string).setParseAction(replaceWith(value))
one = roman_numeral_literal("I", 1)
four = roman_numeral_literal("IV", 4)
five = roman_numeral_literal("V", 5)
roman_numeral = OneOrMore(
(five | four | one).leaveWhitespace()) \
.setName("roman") \
.setParseAction(lambda s, l, t: sum(t))
integer = Word(nums) \
.setName("integer") \
.setParseAction(lambda t:int(t[0]))
mkStar = oneOf(list("OBAFGKM")) + integer + roman_numeral
list(mkStar.parseString('B2IV'))
roman_class = {
'I': 'supergiant',
'II': 'bright giant',
'III': 'regular giant',
'IV': 'sub-giants',
'V': 'main-sequence',
'VI': 'sub-dwarfs',
'VII': 'white dwarfs'
}
import timeit
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = "serif"
from scipy import stats
def benchmark(size, number=10):
    """Measure performance of SHA-256 hashing large arrays."""
data = numpy.random.uniform(size=size)
return timeit.timeit(
stmt=lambda: array_sha256(data),
number=number) / number
sizes = numpy.logspace(10, 25, 16, base=2, dtype=int)
timings = numpy.array([[benchmark(size, 1) for size in sizes]
for i in range(10)])
sizes_MB = sizes * 8 / 1e6
timings_ms = timings.mean(axis=0) * 1000
timings_err = timings.std(axis=0) * 1000
slope, intercept, _, _, _ = stats.linregress(
numpy.log(sizes_MB[5:]),
numpy.log(timings_ms[5:]))
print("scaling:", slope, "(should be ~1)")
print("speed:", numpy.exp(-intercept), "GB/s")
ax = plt.subplot(111)
ax.set_xscale('log', nonposx='clip')
ax.set_yscale('log', nonposy='clip')
ax.plot(sizes_MB, numpy.exp(intercept) * sizes_MB,
label='{:.03} GB/s'.format(numpy.exp(-intercept)))
ax.errorbar(sizes_MB, timings_ms, yerr=timings_err,
marker='.', ls=':', c='k', label='data')
ax.set_xlabel('size ($MB$)')
ax.set_ylabel('time ($ms$)')
ax.set_title('SHA-256 performance', fontsize=10)
ax.legend()
plt.savefig('sha256-performance.svg')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input data
Step2: Show a slice of the dataset
Step3: Show the shape of the raw input data
Step4: Create a plot function for the temperature for a visual comparison of the time series
Step5: Plot an example series - in this case the temperature
Step6: Simple k-means aggregation
Step7: Create the typical periods
Step8: Show shape of typical periods
Step9: Save typical periods to .csv file
Step10: Repredict the original time series based on the typical periods
Step11: Plot the repredicted data
Step12: As seen, the days with the minimal temperature are excluded. In case they are required, they can be added to the aggregation as follows.
Step13: Create the typical periods
Step14: The aggregation can also be evaluated by indicators
Step15: Repredict the original time series based on the typical periods
Step16: Plot repredicted data
Step17: Here, larger maximum values and smaller minimum values can be observed compared to k-means clustering.
Step18: Or as unsorted time series for an example week
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import copy
import os
import pandas as pd
import matplotlib.pyplot as plt
import tsam.timeseriesaggregation as tsam
%matplotlib inline
raw = pd.read_csv('testdata.csv', index_col = 0)
raw.head()
raw.shape
def plotTS(data, periodlength, vmin, vmax):
fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
stacked, timeindex = tsam.unstackToPeriods(copy.deepcopy(data), periodlength)
cax = axes.imshow(stacked.values.T, interpolation = 'nearest', vmin = vmin, vmax = vmax)
axes.set_aspect('auto')
axes.set_ylabel('Hour')
plt.xlabel('Day')
fig.subplots_adjust(right = 1.2)
cbar=plt.colorbar(cax)
cbar.set_label('T [ยฐC]')
plotTS(raw['T'], 24, vmin = raw['T'].min(), vmax = raw['T'].max())
aggregation = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 8, hoursPerPeriod = 24,
clusterMethod = 'k_means')
typPeriods = aggregation.createTypicalPeriods()
typPeriods.shape
typPeriods.to_csv(os.path.join('results','testperiods_kmeans.csv'))
predictedPeriods = aggregation.predictOriginalData()
plotTS(predictedPeriods['T'], 24, vmin = raw['T'].min(), vmax = raw['T'].max())
aggregationMax = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 8, hoursPerPeriod = 24,
clusterMethod = 'k_maxoids', rescaleClusterPeriods=False)
typPeriodsMax = aggregationMax.createTypicalPeriods()
aggregationMax.accuracyIndicators()
predictedPeriodsMax = aggregationMax.predictOriginalData()
plotTS(predictedPeriodsMax['T'], 24, vmin = raw['T'].min(), vmax = raw['T'].max())
fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
raw['Load'].sort_values(ascending=False).reset_index(drop=True).plot(label = 'Original')
predictedPeriods['Load'].sort_values(ascending=False).reset_index(drop=True).plot(label = '8 typ days \n + (Centroids)')
predictedPeriodsMax['Load'].sort_values(
ascending=False).reset_index(drop=True).plot(label = '8 typ days \n + (Maxoids)')
plt.legend()
plt.xlabel('Hours [h]')
plt.ylabel('Duration Load [MW]')
fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
raw['Load']['20100210':'20100218'].plot(label = 'Original')
predictedPeriods['Load']['20100210':'20100218'].plot(label = '8 typ days \n + (Centroids)')
predictedPeriodsMax['Load']['20100210':'20100218'].plot(label = '8 typ days \n + (Maxoids)')
plt.legend()
plt.ylabel('Load [MW]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: View location of significantly active sensors
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import numpy as np
import mne
from mne import io
from mne.stats import permutation_t_test
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# pick MEG Gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data()
times = epochs.times
temporal_mask = np.logical_and(0.04 <= times, times <= 0.06)
data = np.mean(data[:, :, temporal_mask], axis=2)
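# average each epoch over the 40-60 ms window, giving one value per sensor and epoch
# for the permutation t-test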
n_permutations = 50000
T0, p_values, H0 = permutation_t_test(data, n_permutations, n_jobs=1)
significant_sensors = picks[p_values <= 0.05]
significant_sensors_names = [raw.ch_names[k] for k in significant_sensors]
print("Number of significant sensors : %d" % len(significant_sensors))
print("Sensors names : %s" % significant_sensors_names)
evoked = mne.EvokedArray(-np.log10(p_values)[:, np.newaxis],
epochs.info, tmin=0.)
# Extract mask and indices of active sensors in the layout
stats_picks = mne.pick_channels(evoked.ch_names, significant_sensors_names)
mask = p_values[:, np.newaxis] <= 0.05
evoked.plot_topomap(ch_type='grad', times=[0], scalings=1,
time_format=None, cmap='Reds', vmin=0., vmax=np.max,
units='-log10(p)', cbar_fmt='-%0.1f', mask=mask,
size=3, show_names=lambda x: x[4:] + ' ' * 20,
time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run notebook ssvm.ipynb.
Step2: Step 1 - Generate new dataset
Step3: Compute feature scaling parameters
Step4: Generating trajectories
Step5: Step 2 - Train SSVM on generated dataset
Step6: Training on generated data
Step7: Plot the primal dual objective value curve
Step8: Make prediction
Step9: Compute evaluation metrics
Step10: Train on generated dataset
Step11: Evaluate on training set
Step12: Step 3 - Leave-one-out evaluation on generated dataset
Step13: Choose hyper-parameter C
Step14: Leave-one-out cross validation
Step15: Step 4 - Check the informativeness of duration-related features
Step 5 - Compute POI and transition features on the generated data
Step16: Plot the transition matrix
Step17: Plot the histograms of the number of visit
Step18: Choose hyper-parameter C
Step19: Leave-one-out cross validation
Step20: Performance of RankSVM
Step21: Tune hyper-parameter
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import os, pickle, random
import pandas as pd
import numpy as np
import cvxopt
import seaborn as sns
random.seed(1234554321)
np.random.seed(123456789)
cvxopt.base.setseed(123456789)
%run 'ssvm_ml.ipynb'
#dump_vars = True
#fname = os.path.join(data_dir, 'ssvm-listViterbi-Glas.pkl')
fname = os.path.join(data_dir, 'ssvm-listViterbi-Osak.3.pkl')
ssvm_lv = pickle.load(open(fname, 'rb')) # a dict: query -> {'PRED': trajectory, 'C': ssvm-c, 'W': model_params}
query = (2, 5)
#query = (5, 4)
W = ssvm_lv[query]['W']
ssvm_lv[query]
#W = np.random.randn(W.shape[0]) # Use a random weight vector to generate trajectories
#W
#%%script false
trajid_set = set(trajid_set_all) - TRAJ_GROUP_DICT[query]
poi_set = {p for tid in trajid_set for p in traj_dict[tid] if len(traj_dict[tid]) >= 2}
poi_list = sorted(poi_set)
n_states = len(poi_set)
n_edge_features = 5
n_node_features = (len(W) - n_states * n_states * n_edge_features) // n_states
#print(len(W), n_states, n_node_features)
#unary_params = W[:n_states * n_node_features].reshape(n_states, n_node_features)
#pw_params = W[n_states * n_node_features:].reshape((n_states, n_states, n_edge_features))
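# here the model uses shared parameters: one node-weight block plus the last
# n_edge_features entries of W as the shared edge weights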
unary_params = W[:-n_edge_features]
pw_params = W[-n_edge_features:].reshape(n_edge_features)
# duplicate params so that inference procedures work the same way no matter params shared or not
unary_params = np.tile(unary_params, (n_states, 1))
pw_params = np.tile(pw_params, (n_states, n_states, 1))
poi_id_dict, poi_id_rdict = dict(), dict()
for idx, poi in enumerate(poi_list):
poi_id_dict[poi] = idx
poi_id_rdict[idx] = poi
print('Finished.')
len(poi_list)
#%%script false
poi_info = calc_poi_info(sorted(trajid_set), traj_all, poi_all)
traj_list = [traj_dict[k] for k in sorted(trajid_set) if len(traj_dict[k]) >= 2]
node_features_list = Parallel(n_jobs=N_JOBS)\
(delayed(calc_node_features)\
(tr[0], len(tr), poi_list, poi_info.copy(), poi_clusters=POI_CLUSTERS, \
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST) for tr in traj_list)
edge_features = calc_edge_features(list(trajid_set), poi_list, traj_dict, poi_info.copy())
fdim = node_features_list[0].shape
X_node_all = np.vstack(node_features_list)
#scaler = MaxAbsScaler(copy=False)
scaler = MinMaxScaler(feature_range=(-1,1), copy=False)
scaler.fit(X_node_all)
# edge feature scaling
scaler_edge = MinMaxScaler(feature_range=(-1,1), copy=False)
fdim_edge = edge_features.shape
edge_features = scaler_edge.fit_transform(edge_features.reshape(fdim_edge[0]*fdim_edge[1], -1))
edge_features = edge_features.reshape(fdim_edge)
print('Finished.')
print(poi_info.shape)
print(edge_features.shape)
poi, L = query
X_node_test = calc_node_features(poi, L, poi_list, poi_info.copy(), poi_clusters=POI_CLUSTERS,
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
X_node_test = scaler.transform(X_node_test) # feature scaling
unary_features = X_node_test
pw_features = edge_features.copy()
y_pred = do_inference_listViterbi(poi_id_dict[poi], L, len(poi_set),
unary_params, pw_params, unary_features, pw_features)
print([poi_id_rdict[p] for p in y_pred])
y_hat = [2, 1, 6, 21, 20]
y_hat = [2, 1, 6, 20, 21]
#y_hat = [5, 7, 8, 6]
#y_hat = [5, 8, 7, 6]
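# manually score a candidate trajectory under the learned weights: for every transition,
# add the edge potential w_pw . psi(p_j, p_j+1) and the node potential w_unary . phi(p_j+1)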
score = 0
y = [poi_id_dict[x] for x in y_hat]
for j in range(len(y)-1):
ss = y[j]
tt = y[j+1]
score += np.dot(pw_params[ss, tt], pw_features[ss, tt])
score += np.dot(unary_params[tt], unary_features[tt])
print(score)
[traj_dict[x] for x in TRAJ_GROUP_DICT[query]]
#%%script false
lengthes = [3, 4, 5, 6]#, 7]
fake_labels = []
for poi in sorted(poi_list):
for L in lengthes:
X_node_test = calc_node_features(poi, L, poi_list, poi_info.copy(), poi_clusters=POI_CLUSTERS, \
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
X_node_test = scaler.transform(X_node_test) # feature scaling
unary_features = X_node_test
pw_features = edge_features.copy()
y_pred = do_inference_listViterbi(poi_id_dict[poi], L, len(poi_set),
unary_params, pw_params, unary_features, pw_features)
fake_labels.append([poi_id_rdict[p] for p in y_pred])
print('Finished.')
len(fake_labels)
#fname = 'fake_labels.pkl'
#if dump_vars == True: pickle.dump(fake_labels, open(fname, 'wb'))
#vars_equal(pickle.load(open(fname, 'rb')), fake_labels)
def calc_train_data(train_labels, poi_list, poi_info, edge_features, poi_id_dict):
node_features_all = Parallel(n_jobs=N_JOBS)\
(delayed(calc_node_features)\
(tr[0], len(tr), poi_list, poi_info, poi_clusters=POI_CLUSTERS, \
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST) for tr in train_labels)
fdim_train = node_features_all[0].shape
X_node_train = np.vstack(node_features_all)
scaler_node = MinMaxScaler(feature_range=(-1,1), copy=False)
X_node_train = scaler_node.fit_transform(X_node_train)
X_node_train = X_node_train.reshape(-1, fdim_train[0], fdim_train[1])
assert(len(train_labels) == X_node_train.shape[0])
X_train = [(X_node_train[k, :, :], edge_features.copy(),
(poi_id_dict[train_labels[k][0]], len(train_labels[k]))) for k in range(len(train_labels))]
y_train = [np.array([poi_id_dict[k] for k in tr]) for tr in train_labels]
assert(len(X_train) == len(y_train))
return X_train, y_train, scaler_node
def train_ssvm(X_train, y_train, C):
sm = MyModel(inference_fun=do_inference_listViterbi)
osssvm = OneSlackSSVM(model=sm, C=C, n_jobs=N_JOBS, verbose=0)
try:
osssvm.fit(X_train, y_train, initialize=True)
print('SSVM training finished.')
except:
sys.stderr.write('SSVM training FAILED.\n')
return osssvm
def plot_obj_curve(ssvm):
plt.plot(ssvm.objective_curve_, label='dual')
plt.plot(ssvm.primal_objective_curve_, label='primal')
plt.legend()
def predict(ssvm, ps, L, poi_list, poi_info, edge_features, scaler_node, poi_id_dict, poi_id_rdict):
X_node_test = calc_node_features(ps, L, poi_list, poi_info, poi_clusters=POI_CLUSTERS,
cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
X_node_test = scaler_node.transform(X_node_test)
X_test = [(X_node_test, edge_features, (poi_id_dict[ps], L))]
y_hat = ssvm.predict(X_test)
return np.array([poi_id_rdict[p] for p in y_hat[0]])
def evaluation(predictions):
F1_ssvm = []; pF1_ssvm = []; tau_ssvm = []
for key in sorted(predictions.keys()):
F1 = calc_F1(predictions[key]['REAL'], predictions[key]['PRED'])
pF1 = calc_pairsF1(predictions[key]['REAL'], predictions[key]['PRED'])
tau = calc_kendalltau(predictions[key]['REAL'], predictions[key]['PRED'])
F1_ssvm.append(F1); pF1_ssvm.append(pF1); tau_ssvm.append(tau)
F1_mean = np.mean(F1_ssvm); pF1_mean = np.mean(pF1_ssvm); tau_mean = np.mean(tau_ssvm)
print('F1 (%.3f, %.3f), pairsF1 (%.3f, %.3f), Tau (%.3f, %.3f)' % \
(F1_mean, np.std(F1_ssvm)/np.sqrt(len(F1_ssvm)), \
pF1_mean, np.std(pF1_ssvm)/np.sqrt(len(pF1_ssvm)), \
tau_mean, np.std(tau_ssvm)/np.sqrt(len(tau_ssvm))))
return F1_mean, pF1_mean, tau_mean
#C = 0.3
%%script false
train_labels = fake_labels.copy()
X_train, y_train, scaler_node = calc_train_data(train_labels, poi_list, poi_info.copy(),
edge_features.copy(), poi_id_dict.copy())
ssvm = train_ssvm(X_train, y_train, C)
#plot_obj_curve(ssvm)
%%script false
predictions = dict()
for label in train_labels:
y_pred = predict(ssvm, label[0], len(label), poi_list, poi_info.copy(), edge_features.copy(),
scaler_node, poi_id_dict, poi_id_rdict)
predictions[(label[0], len(label))] = {'PRED': y_pred, 'REAL': label}
%%script false
ret = evaluation(predictions)
#poi_info['avgDuration'] = 0.0
# transition features: [poiCat, popularity, nVisit, avgDuration, clusterID]
#edge_features = edge_features[:, :, [0,1,2,4]]
%%script false
num_test = int(len(fake_labels) * MC_PORTION)
best_tau = 0; best_C = 0
#edge_features = np.zeros_like(edge_features) # Turn off transition features
%%script false
np.random.seed(0)
for C in C_SET:
print('\n--------------- try_C: %f ---------------\n' % C); sys.stdout.flush()
F1_test = []; pF1_test = []; tau_test = []
for t in range(MC_NITER):
while True:
indices = np.arange(len(fake_labels))
np.random.shuffle(indices)
test_ix = indices[:num_test]
train_ix = indices[num_test:]
train_labels = [fake_labels[ix] for ix in train_ix]
test_labels = [fake_labels[ix] for ix in test_ix]
poi_set_ = {p for x in train_labels for p in x}
if len(poi_set_) == len(poi_list): break
X_train, y_train, scaler_node = calc_train_data(train_labels, poi_list, poi_info.copy(),
edge_features.copy(), poi_id_dict.copy())
ssvm = train_ssvm(X_train, y_train, C)
predictions = dict()
for label in test_labels:
y_pred = predict(ssvm, label[0], len(label), poi_list, poi_info.copy(), edge_features.copy(),
scaler_node, poi_id_dict, poi_id_rdict)
predictions[(label[0], len(label))] = {'PRED': y_pred, 'REAL': label}
F1, pF1, tau = evaluation(predictions)
F1_test.append(F1); pF1_test.append(pF1); tau_test.append(tau)
mean_tau = np.mean(tau_test)
print('mean_tau: %.3f' % mean_tau)
if mean_tau > best_tau:
best_tau = mean_tau
best_C = C
print('\nbest_tau: %.3f, best_C: %.3f' % (best_tau, best_C))
%%script false
predictions = dict()
%%script false
for i in range(len(fake_labels)):
sys.stdout.write('%s ' % str(i+1))
train_labels = fake_labels[:i] + fake_labels[i+1:]
X_train, y_train, scaler_node = calc_train_data(train_labels, poi_list, poi_info.copy(),
edge_features.copy(), poi_id_dict.copy())
ssvm = train_ssvm(X_train, y_train, best_C)
test_label = fake_labels[i]
y_pred = predict(ssvm, test_label[0], len(test_label), poi_list, poi_info.copy(), edge_features.copy(),
scaler_node, poi_id_dict, poi_id_rdict)
predictions[(test_label[0], len(test_label))] = {'PRED': y_pred, 'REAL': test_label}
%%script false
ret = evaluation(predictions)
%%script false
fname = 'ssvm-orig-feature.pkl'
if dump_vars == True: pickle.dump(predictions, open(fname, 'wb'))
%%script false
vars_equal(pickle.load(open(fname, 'rb')), predictions)
#transmat_visit0, logbins_visit0 = gen_transmat_visit(trajid_set, traj_dict, poi_info)
%%script false
poi_info_new = calc_poi_info(sorted(trajid_set), traj_all, poi_all)
edge_features_new = calc_edge_features(list(trajid_set), poi_list, traj_dict, poi_info_new.copy())
# set POI popularity and nvisit
poi_info_new['avgDuration'] = 0.0
poi_info_new['popularity'] = 1 # only a single user
poi_info_new['nVisit'] = 0
for label in fake_labels:
for p in label: poi_info_new.loc[p, 'nVisit'] += 1
# set popularity (drop it) and nvisit based transition features
# compute binning boundaries
poi_visits = poi_info_new.loc[poi_list, 'nVisit']
expo_visit1 = np.log10(max(1, min(poi_visits)))
expo_visit2 = np.log10(max(poi_visits))
nbins_visit = BIN_CLUSTER
logbins_visit = np.logspace(np.floor(expo_visit1), np.ceil(expo_visit2), nbins_visit+1)
logbins_visit[0] = 0 # deal with underflow
if not (logbins_visit[-1] > poi_info_new['nVisit'].max()):
logbins_visit[-1] = poi_info_new['nVisit'].max() + 1
# compute transition matrix between different nVist buckets
nbins = len(logbins_visit) - 1
transmat_visit_cnt = pd.DataFrame(data=np.zeros((nbins, nbins), dtype=np.float),
columns=np.arange(1, nbins+1), index=np.arange(1, nbins+1))
for t in fake_labels:
for pi in range(len(t)-1):
p1, p2 = t[pi], t[pi+1]
assert(p1 in poi_info_new.index and p2 in poi_info_new.index)
visit1 = poi_info_new.loc[p1, 'nVisit']
visit2 = poi_info_new.loc[p2, 'nVisit']
vc1, vc2 = np.digitize([visit1, visit2], logbins_visit)
if vc1 > 5 or vc2 > 5: print(p1, visit1, p2, visit2)
transmat_visit_cnt.loc[vc1, vc2] += 1
transmat_visit = normalise_transmat(transmat_visit_cnt)
# compute nvisit based transition features
poi_features = pd.DataFrame(data=np.zeros((len(poi_list), 1)), columns=['nVisit'], index=poi_list)
poi_features.index.name = 'poiID'
poi_features['nVisit'] = np.digitize(poi_info_new.loc[poi_list, 'nVisit'], logbins_visit)
for j in range(len(poi_list)): # NOTE: POI order
pj = poi_list[j]
visit = poi_features.loc[pj, 'nVisit']
for k in range(len(poi_list)): # NOTE: POI order
pk = poi_list[k]
#edge_features_new[j, k, 2] = np.log10(transmat_visit.loc[visit, poi_features.loc[pk, 'nVisit']])
edge_features_new[j, k, 2] = transmat_visit.loc[visit, poi_features.loc[pk, 'nVisit']]
# transition features: [poiCat, popularity, nVisit, avgDuration, clusterID]
edge_features_new = edge_features_new[:, :, [0, 1, 2, 4]]
# edge feature scaling
scaler_edge = MinMaxScaler(feature_range=(-1,1), copy=False)
fdim_edge = edge_features_new.shape
edge_features_new = scaler_edge.fit_transform(edge_features_new.reshape(fdim_edge[0]*fdim_edge[1], -1))
edge_features_new = edge_features_new.reshape(fdim_edge)
print('Finished.')
#sns.heatmap(np.log10(transmat_visit0), cmap='BuGn')#, vmin=0, vmax=1)
#sns.heatmap(np.log10(transmat_visit), cmap='BuGn')#, vmin=0, vmax=1)
#print(logbins_visit0)
#poi_info['nVisit'].hist()
#print(logbins_visit)
#poi_info_new['nVisit'].hist()
%%script false
edge_features_new = np.zeros_like(edge_features_new) # Turn off transition features
%%script false
num_test = int(len(fake_labels) * MC_PORTION)
best_tau = 0; best_C = 0
%%script false
np.random.seed(0)
for C in C_SET:
print('\n--------------- try_C: %f ---------------\n' % C); sys.stdout.flush()
F1_test = []; pF1_test = []; tau_test = []
for t in range(MC_NITER):
while True:
indices = np.arange(len(fake_labels))
np.random.shuffle(indices)
test_ix = indices[:num_test]
train_ix = indices[num_test:]
train_labels = [fake_labels[ix] for ix in train_ix]
test_labels = [fake_labels[ix] for ix in test_ix]
poi_set_ = {p for x in train_labels for p in x}
if len(poi_set_) == len(poi_list): break
X_train, y_train, scaler_node = calc_train_data(train_labels, poi_list, poi_info_new.copy(),
edge_features_new.copy(), poi_id_dict.copy())
ssvm = train_ssvm(X_train, y_train, C)
predictions = dict()
for label in test_labels:
y_pred = predict(ssvm, label[0], len(label), poi_list, poi_info_new.copy(), edge_features_new.copy(),
scaler_node, poi_id_dict, poi_id_rdict)
predictions[(label[0], len(label))] = {'PRED': y_pred, 'REAL': label}
F1, pF1, tau = evaluation(predictions)
F1_test.append(F1); pF1_test.append(pF1); tau_test.append(tau)
tau_mean = np.mean(tau_test)
print('mean_tau: %.3f' % tau_mean)
if tau_mean > best_tau:
best_tau = tau_mean
best_C = C
print('\nbest_tau: %.3f, best_C: %.3f' % (best_tau, best_C))
%%script false
predictions = dict()
%%script false
for i in range(len(fake_labels)):
sys.stdout.write('%s ' % str(i+1))
train_labels = fake_labels[:i] + fake_labels[i+1:]
X_train, y_train, scaler_node = calc_train_data(train_labels, poi_list, poi_info_new.copy(),
edge_features_new.copy(), poi_id_dict.copy())
ssvm = train_ssvm(X_train, y_train, best_C)
test_label = fake_labels[i]
y_pred = predict(ssvm, test_label[0], len(test_label), poi_list, poi_info_new.copy(), edge_features_new.copy(),
scaler_node, poi_id_dict, poi_id_rdict)
predictions[(test_label[0], len(test_label))] = {'PRED': y_pred, 'REAL': test_label}
%%script false
ret = evaluation(predictions)
%run 'baseline.ipynb'
def gen_train_df_new(train_labels, poi_list, poi_info, query_id_dict, poi_clusters, cats, clusters, n_jobs=-1):
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
train_trajs = train_labels
qid_set = sorted(set(query_id_dict.values()))
query_id_rdict = dict()
for k, v in query_id_dict.items():
query_id_rdict[v] = k # qid --> (start, length)
train_df_list = Parallel(n_jobs=n_jobs)\
(delayed(gen_train_subdf_new)(poi, qid_set, poi_info, poi_clusters,
cats,clusters,query_id_rdict) for poi in poi_list)
assert(len(train_df_list) > 0)
df_ = train_df_list[0]
for j in range(1, len(train_df_list)):
df_ = df_.append(train_df_list[j], ignore_index=True)
# set label
df_.set_index(['queryID', 'poiID'], inplace=True)
df_['label'] = 0
for t in train_trajs:
qid = query_id_dict[(t[0], len(t))]
for poi in t[1:]: # do NOT count if the POI is startPOI/endPOI
df_.loc[(qid, poi), 'label'] += 1
df_.reset_index(inplace=True)
return df_
def gen_train_subdf_new(poi_id, query_id_set, poi_info, poi_clusters, cats, clusters, query_id_rdict):
assert(isinstance(cats, list))
assert(isinstance(clusters, list))
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
df_ = pd.DataFrame(index=np.arange(len(query_id_set)), columns=columns)
pop, nvisit = poi_info.loc[poi_id, 'popularity'], poi_info.loc[poi_id, 'nVisit']
cat, cluster = poi_info.loc[poi_id, 'poiCat'], poi_clusters.loc[poi_id, 'clusterID']
duration = poi_info.loc[poi_id, 'avgDuration']
for j in range(len(query_id_set)):
qid = query_id_set[j]
assert(qid in query_id_rdict) # qid --> (start, end, length)
(p0, trajLen) = query_id_rdict[qid]
idx = df_.index[j]
df_.loc[idx, 'poiID'] = poi_id
df_.loc[idx, 'queryID'] = qid
df_.set_value(idx, 'category', tuple((cat == np.array(cats)).astype(np.int) * 2 - 1))
df_.set_value(idx, 'neighbourhood', tuple((cluster == np.array(clusters)).astype(np.int) * 2 - 1))
df_.loc[idx, 'popularity'] = LOG_SMALL if pop < 1 else np.log10(pop)
df_.loc[idx, 'nVisit'] = LOG_SMALL if nvisit < 1 else np.log10(nvisit)
df_.loc[idx, 'avgDuration'] = LOG_SMALL if duration < 1 else np.log10(duration)
df_.loc[idx, 'trajLen'] = trajLen
df_.loc[idx, 'sameCatStart'] = 1 if cat == poi_info.loc[p0, 'poiCat'] else -1
df_.loc[idx, 'distStart'] = poi_distmat.loc[poi_id, p0]
df_.loc[idx, 'diffPopStart'] = pop - poi_info.loc[p0, 'popularity']
df_.loc[idx, 'diffNVisitStart'] = nvisit - poi_info.loc[p0, 'nVisit']
df_.loc[idx, 'diffDurationStart'] = duration - poi_info.loc[p0, 'avgDuration']
df_.loc[idx, 'sameNeighbourhoodStart'] = 1 if cluster == poi_clusters.loc[p0, 'clusterID'] else -1
return df_
def gen_test_df_new(startPOI, nPOI, poi_info, query_id_dict, poi_clusters, cats, clusters):
assert(isinstance(cats, list))
assert(isinstance(clusters, list))
columns = DF_COLUMNS
poi_distmat = POI_DISTMAT
key = (p0, trajLen) = (startPOI, nPOI)
assert(key in query_id_dict)
assert(p0 in poi_info.index)
df_ = pd.DataFrame(index=np.arange(poi_info.shape[0]), columns=columns)
poi_list = sorted(poi_info.index)
qid = query_id_dict[key]
df_['queryID'] = qid
df_['label'] = np.random.rand(df_.shape[0]) # label for test data is arbitrary according to libsvm FAQ
for i in range(df_.index.shape[0]):
poi = poi_list[i]
lon, lat = poi_info.loc[poi, 'poiLon'], poi_info.loc[poi, 'poiLat']
pop, nvisit = poi_info.loc[poi, 'popularity'], poi_info.loc[poi, 'nVisit']
cat, cluster = poi_info.loc[poi, 'poiCat'], poi_clusters.loc[poi, 'clusterID']
duration = poi_info.loc[poi, 'avgDuration']
idx = df_.index[i]
df_.loc[idx, 'poiID'] = poi
df_.set_value(idx, 'category', tuple((cat == np.array(cats)).astype(np.int) * 2 - 1))
df_.set_value(idx, 'neighbourhood', tuple((cluster == np.array(clusters)).astype(np.int) * 2 - 1))
df_.loc[idx, 'popularity'] = LOG_SMALL if pop < 1 else np.log10(pop)
df_.loc[idx, 'nVisit'] = LOG_SMALL if nvisit < 1 else np.log10(nvisit)
df_.loc[idx, 'avgDuration'] = LOG_SMALL if duration < 1 else np.log10(duration)
df_.loc[idx, 'trajLen'] = trajLen
        df_.loc[idx, 'sameCatStart'] = 1 if cat == poi_info.loc[p0, 'poiCat'] else -1
df_.loc[idx, 'distStart'] = poi_distmat.loc[poi, p0]
df_.loc[idx, 'diffPopStart'] = pop - poi_info.loc[p0, 'popularity']
df_.loc[idx, 'diffNVisitStart'] = nvisit - poi_info.loc[p0, 'nVisit']
df_.loc[idx, 'diffDurationStart'] = duration - poi_info.loc[p0, 'avgDuration']
df_.loc[idx, 'sameNeighbourhoodStart'] = 1 if cluster == poi_clusters.loc[p0, 'clusterID'] else -1
return df_
%%script false
num_test = int(len(fake_labels) * 0.2)
best_tau = 0; best_C = 0
query_id_dict = {(tr[0], len(tr)): ix for ix, tr in enumerate(fake_labels)}
#poi_info_new = calc_poi_info(sorted(trajid_set), traj_all, poi_all) # Compute features on the original dataset
%%script false
np.random.seed(0)
for C in C_SET:
print('\n--------------- try_C: %f ---------------\n' % C); sys.stdout.flush()
F1_test = []; pF1_test = []; tau_test = []
for t in range(MC_NITER):
while True:
indices = np.arange(len(fake_labels))
np.random.shuffle(indices)
test_ix = indices[:num_test]
train_ix = indices[num_test:]
train_labels = [fake_labels[ix] for ix in train_ix]
test_labels = [fake_labels[ix] for ix in test_ix]
poi_set_ = {p for x in train_labels for p in x}
if len(poi_set_) == len(poi_list): break
train_df = gen_train_df_new(train_labels, poi_list, poi_info_new.copy(), query_id_dict.copy(),
poi_clusters=POI_CLUSTERS,cats=POI_CAT_LIST,clusters=POI_CLUSTER_LIST,n_jobs=N_JOBS)
ranksvm = RankSVM(ranksvm_dir, useLinear=True)
ranksvm.train(train_df, cost=C)
predictions = dict()
for label in test_labels:
test_df = gen_test_df_new(label[0], len(label), poi_info_new.copy(), query_id_dict.copy(),
poi_clusters=POI_CLUSTERS, cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
rank_df = ranksvm.predict(test_df)
rank_df.sort_values(by='rank', ascending=False, inplace=True)
y_pred = [label[0]] + [p for p in rank_df.index.tolist() if p != label[0]][:len(label)-1]
predictions[(label[0], len(label))] = {'PRED': y_pred, 'REAL': label}
F1, pF1, tau = evaluation(predictions)
F1_test.append(F1); pF1_test.append(pF1); tau_test.append(tau)
tau_mean = np.mean(tau_test)
print('mean_tau: %.3f' % tau_mean)
if tau_mean > best_tau:
best_tau = tau_mean
best_C = C
print('\nbest_tau: %.3f, best_C: %.3f' % (best_tau, best_C))
#predictions = dict()
%%script false
for i in range(len(fake_labels)):
sys.stdout.write('%s ' % str(i+1))
train_labels = fake_labels[:i] + fake_labels[i+1:]
train_df = gen_train_df_new(train_labels, poi_list, poi_info_new.copy(), query_id_dict.copy(),
poi_clusters=POI_CLUSTERS, cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST, n_jobs=N_JOBS)
ranksvm = RankSVM(ranksvm_dir, useLinear=True)
ranksvm.train(train_df, cost=best_C)
test_label = fake_labels[i]
test_df = gen_test_df_new(test_label[0], len(test_label), poi_info_new.copy(), query_id_dict.copy(),
poi_clusters=POI_CLUSTERS, cats=POI_CAT_LIST, clusters=POI_CLUSTER_LIST)
rank_df = ranksvm.predict(test_df)
rank_df.sort_values(by='rank', ascending=False, inplace=True)
y_pred = [test_label[0]] + [p for p in rank_df.index.tolist() if p != test_label[0]][:len(test_label)-1]
predictions[(test_label[0], len(test_label))] = {'PRED': y_pred, 'REAL': test_label}
#ret = evaluation(predictions)
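# Illustrative helper (an assumption about what `evaluation` aggregates): the standard
# points-F1 for a recommended trajectory, i.e. precision/recall on the set of POIs
# visited, ignoring the visiting order.
def f1_score_traj(y_real, y_pred):
    match = len(set(y_real) & set(y_pred))
    if match == 0:
        return 0.0
    precision = match / float(len(y_pred))
    recall = match / float(len(y_real))
    return 2. * precision * recall / (precision + recall)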
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For ordinary data scraping, the urllib2 and BeautifulSoup packages are enough.
Step2: http
Step3: Crawl the list of PX threads on the Tianya forum
Step4: Crawl author information
Step5: http
Step6: http
Step7: Author: ๆ ๆชฌๅจ่ฟฝ้, time: 2012-10-28 21
Step8: How to page through the results
Step9: Test
Step10: Run the full crawl
Step11: Read the data back
Step12: How many posts are there in total?
|
<ASSISTANT_TASK:>
Python Code:
import requests
from bs4 import BeautifulSoup
from IPython.display import display_html, HTML
HTML('<iframe src= http://bbs.tianya.cn/list.jsp?item=free&nextid=%d&order=8&k=PX width=1000 height=500></iframe>')
# the webpage we would like to crawl
page_num = 0
url = "http://bbs.tianya.cn/list.jsp?item=free&nextid=%d&order=8&k=PX"% page_num
content = requests.get(url).text
soup = BeautifulSoup(content, "lxml")
articles = soup.find_all('tr')
#td[0].find('a', {'class', ""})
articles[0]
articles[1]
len(articles[1:])
for t in articles[1].find_all('td'):
print(t)
td = articles[1].find_all('td')
td
td[0]
td[0].text
td[0].text.strip()
td[0].a['href']
td[1]
td[1].find('a', {'class', "author"})['href']
td[2]
td[3]
td[4]
records = []
for k, i in enumerate(articles[1:]):
td = i.find_all('td')
title = td[0].text.strip()
try:
title_url = td[0].find('a', {'class', "author"})['href']
except:
title_url = td[0].a['href']
author = td[1].text
author_url = td[1].a['href']
views = td[2].text
replies = td[3].text
date = td[4]['title']
record = '\t'.join([title, title_url, author, author_url, views, replies, date])
# record = title + '\t' + title_url+ '\t' + author +
# '\t'+ author_url + '\t' + views+ '\t' + replies+ '\t'+ date
records.append(record)
records[:3]
print(records[2])
articles[2]
def crawler(page_num, file_name):
try:
# open the browser
url = "http://bbs.tianya.cn/list.jsp?item=free&nextid=%d&order=8&k=PX" % page_num
        content = requests.get(url).text # fetch the HTML text of the page
soup = BeautifulSoup(content, "lxml")
articles = soup.find_all('tr')
# write down info
for i in articles[1:]:
td = i.find_all('td')
title = td[0].text.strip()
title_url = td[0].a['href']
author = td[1].text
author_url = td[1].a['href']
views = td[2].text
replies = td[3].text
date = td[4]['title']
record = title + '\t' + title_url+ '\t' + author + '\t'+ \
author_url + '\t' + views+ '\t' + replies+ '\t'+ date
            with open(file_name,'a') as p: # Note: append mode, run only once!
p.write(record+"\n") ##!!encode here to utf-8 to avoid encoding
except Exception as e:
print(e)
pass
# crawl all pages
for page_num in range(10):
print(page_num)
crawler(page_num, '../data/tianya_bbs_threads_list2018.txt')
import pandas as pd
df = pd.read_csv('../data/tianya_bbs_threads_list2018.txt',
sep = "\t", names = ['title', 'link', 'author', \
'author_page', 'click', 'reply', 'time'])
df[:3]
len(df)
len(df.link)
df.author_page[:5]
# user_info
url = df.author_page[10]
content = requests.get(url).text # fetch the HTML text of the page
soup = BeautifulSoup(content, "lxml")
soup
print(url)
user_info = soup.find('div', {'class', 'userinfo'})('p')
score, reg_time = [i.get_text()[4:] for i in user_info]
print(score, reg_time )
link_info = soup.find_all('div', {'class', 'link-box'})
followed_num, fans_num = [i.a.text for i in link_info]
print(followed_num, fans_num)
#activity = soup.find_all('span', {'class', 'subtitle'})
#post_num, reply_num = [j.text[2:] for i in activity[:1] for j in i('a')]
#print(post_num, reply_num)
#activity
#activity[0]
link_info = soup.find_all('div', {'class', 'link-box'})
followed_num, fans_num = [i.a.text for i in link_info]
print(followed_num, fans_num)
link_info[0].a.text
# user_info = soup.find('div', {'class', 'userinfo'})('p')
# user_infos = [i.get_text()[4:] for i in user_info]
def author_crawler(url, file_name):
try:
        content = requests.get(url).text # fetch the HTML text of the page
soup = BeautifulSoup(content, "lxml")
link_info = soup.find_all('div', {'class', 'link-box'})
followed_num, fans_num = [i.a.text for i in link_info]
try:
activity = soup.find_all('span', {'class', 'subtitle'})
post_num, reply_num = [j.text[2:] for i in activity[:1] for j in i('a')]
except:
post_num, reply_num = '1', '0'
record = '\t'.join([url, followed_num, fans_num, post_num, reply_num])
        with open(file_name,'a') as p: # Note: append mode, run only once!
p.write(record+"\n") ##!!encode here to utf-8 to avoid encoding
except Exception as e:
print(e, url)
record = '\t'.join([url, 'na', 'na', 'na', 'na'])
        with open(file_name,'a') as p: # Note: append mode, run only once!
p.write(record+"\n") ##!!encode here to utf-8 to avoid encoding
pass
#soup
url = df.author_page[10]
content = requests.get(url).text # fetch the HTML text of the page
soup = BeautifulSoup(content, "lxml")
link_info = soup.find_all('div', {'class', 'link-box'})
followed_num, fans_num = [i.a.text for i in link_info]
try:
activity = soup.find_all('span', {'class', 'subtitle'})
post_num, reply_num = [j.text[2:] for i in activity[:1] for j in i('a')]
except:
post_num, reply_num = '1', '0'
record = '\t'.join([url, followed_num, fans_num, post_num, reply_num])
import sys
def flushPrint(s):
sys.stdout.write('\r')
sys.stdout.write('%s' % s)
sys.stdout.flush()
import time, random
for k, url in enumerate(df.author_page[:15]):
    time.sleep(random.random()) # Tianya has anti-scraping measures, so slow the crawl down!
#flushPrint(k)
author_crawler(url, '../data/tianya_bbs_threads_author_info2018.txt')
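# Optional helper (illustrative sketch): wrap requests.get with simple retries and a
# timeout, useful when the site throttles or drops connections during a long crawl.
def get_with_retry(url, retries=3, timeout=10):
    for attempt in range(retries):
        try:
            return requests.get(url, timeout=timeout)
        except requests.RequestException:
            time.sleep(2 ** attempt + random.random())
    raise IOError('failed to fetch %s' % url)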
df.link[2]
url = 'http://bbs.tianya.cn' + df.link[2]
print(url)
from IPython.display import display_html, HTML
HTML('<iframe src=http://bbs.tianya.cn/post-free-2849477-1.shtml width=1000 height=500></iframe>')
# the webpage we would like to crawl
post = requests.get(url).text # fetch the HTML text of the page
post_soup = BeautifulSoup(post, "lxml")
#articles = soup.find_all('tr')
print(post_soup.prettify()[:5000])
pa = post_soup.find_all('div', {'class', 'atl-item'})
len(pa)
print(pa[0])
pa[1]
pa[70]
pa[0].find('div', {'class', 'bbs-content'}).text.strip()
print(pa[0].text.strip())#.find('div', {'class', 'marg'}).text.strip()
pa[67].find('div', {'class', 'bbs-content'}).text.strip()
pa[1].a
pa[0].find('a', class_ = 'reportme a-link')
pa[0].find('a', class_ = 'reportme a-link')['replytime']
pa[0].find('a', class_ = 'reportme a-link')['author']
for i in pa[:10]:
p_info = i.find('a', class_ = 'reportme a-link')
p_time = p_info['replytime']
p_author_id = p_info['authorid']
p_author_name = p_info['author']
p_content = i.find('div', {'class', 'bbs-content'}).text.strip()
p_content = p_content.replace('\t', '')
print(p_time, '--->', p_author_id, '--->', p_author_name,'--->', p_content, '\n')
post_soup.find('div', {'class', 'atl-pages'})#.['onsubmit']
post_pages = post_soup.find('div', {'class', 'atl-pages'})
post_pages = post_pages.form['onsubmit'].split(',')[-1].split(')')[0]
post_pages
#post_soup.select('.atl-pages')[0].select('form')[0].select('onsubmit')
url = 'http://bbs.tianya.cn' + df.link[2]
url_base = ''.join(url.split('-')[:-1]) + '-%d.shtml'
url_base
def parsePage(pa):
records = []
for i in pa:
p_info = i.find('a', class_ = 'reportme a-link')
p_time = p_info['replytime']
p_author_id = p_info['authorid']
p_author_name = p_info['author']
p_content = i.find('div', {'class', 'bbs-content'}).text.strip()
p_content = p_content.replace('\t', '').replace('\n', '')#.replace(' ', '')
record = p_time + '\t' + p_author_id+ '\t' + p_author_name + '\t'+ p_content
records.append(record)
return records
import sys
def flushPrint(s):
sys.stdout.write('\r')
sys.stdout.write('%s' % s)
sys.stdout.flush()
url_1 = 'http://bbs.tianya.cn' + df.link[10]
content = requests.get(url_1).text # fetch the HTML text of the page
post_soup = BeautifulSoup(content, "lxml")
pa = post_soup.find_all('div', {'class', 'atl-item'})
b = post_soup.find('div', class_= 'atl-pages')
b
url_0 = 'http://bbs.tianya.cn' + df.link[2]
content = requests.get(url_0).text # fetch the HTML text of the page
post_soup = BeautifulSoup(content, "lxml")
pa = post_soup.find_all('div', {'class', 'atl-item'})
a = post_soup.find('div', class_= 'atl-pages')
a
a.form
if b.form:
print('true')
else:
print('false')
import random
import time
def crawler(url, file_name):
try:
# open the browser
url_1 = 'http://bbs.tianya.cn' + url
        content = requests.get(url_1).text # fetch the HTML text of the page
post_soup = BeautifulSoup(content, "lxml")
# how many pages in a post
post_form = post_soup.find('div', {'class', 'atl-pages'})
if post_form.form:
post_pages = post_form.form['onsubmit'].split(',')[-1].split(')')[0]
post_pages = int(post_pages)
url_base = '-'.join(url_1.split('-')[:-1]) + '-%d.shtml'
else:
post_pages = 1
# for the first page
pa = post_soup.find_all('div', {'class', 'atl-item'})
records = parsePage(pa)
        with open(file_name,'a') as p: # Note: append mode, run only once!
for record in records:
p.write('1'+ '\t' + url + '\t' + record+"\n")
# for the 2nd+ pages
if post_pages > 1:
for page_num in range(2, post_pages+1):
time.sleep(random.random())
flushPrint(page_num)
url2 =url_base % page_num
                content = requests.get(url2).text # fetch the HTML text of the page
post_soup = BeautifulSoup(content, "lxml")
pa = post_soup.find_all('div', {'class', 'atl-item'})
records = parsePage(pa)
                with open(file_name,'a') as p: # Note: append mode, run only once!
for record in records:
p.write(str(page_num) + '\t' +url + '\t' + record+"\n")
else:
pass
except Exception as e:
print(e)
pass
url = df.link[2]
file_name = '../data/tianya_bbs_threads_2018test.txt'
crawler(url, file_name)
for k, link in enumerate(df.link):
flushPrint(link)
if k % 10== 0:
print('This it the post of : ' + str(k))
file_name = '../data/tianya_bbs_threads_network_2018.txt'
crawler(link, file_name)
dtt = []
with open('../data/tianya_bbs_threads_network_2018.txt', 'r') as f:
for line in f:
pnum, link, time, author_id, author, content = line.replace('\n', '').split('\t')
dtt.append([pnum, link, time, author_id, author, content])
len(dtt)
dt = pd.DataFrame(dtt)
dt[:5]
dt=dt.rename(columns = {0:'page_num', 1:'link', 2:'time', 3:'author',4:'author_name', 5:'reply'})
dt[:5]
dt.reply[:100]
18459/50
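# Quick checks on the scraped replies (illustrative): replies captured per thread,
# and a rough page count at 50 replies per page.
print(dt.groupby('link').size().describe())
print(len(dt) / 50)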
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replication of 'csv_to_hdf5.py'
Step2: The array of long/lat coordinates per trip (row) is read in as a string. The function ast.literal_eval(x) evaluates the string into the expression it represents (safely). This happens below
Step3: Split into latitude/longitude
Step4: Further Feature Engineering
Step5: The paper discusses how many categorical variables there are per category. The following all check out
Step6: Self-explanatory
Step7: Quarter hour of the day, i.e. 1 of the 4*24 = 96 quarter hours of the day
Step8: Self-explanatory
Step9: Target coords are the last in the sequence (final position). If there are no positions, or only 1, then mark as invalid w/ nan in order to drop later
Step10: This function creates the continuous inputs, which are the concatened k first and k last coords in a sequence, as discussed in the paper.
Step11: Drop na's
Step12: End to end feature transformation
Step13: Pre-calculated below on train set
Step14: MEANSHIFT
Step15: Clustering performed on the targets
Step16: Can use the commented-out code for an estimate of bandwidth, which causes clustering to converge much quicker.
Step17: This takes some time
Step18: This is very close to the number of clusters mentioned in the paper
Step19: Formatting Features for Bcolz iterator / garbage
Step20: MODEL
Step21: Validation cuts
Step22: The equirectangular loss function mentioned in the paper.
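The distance it approximates is R * sqrt(((lon1 - lon2) * cos((lat1 + lat2) / 2))^2 + (lat1 - lat2)^2), with the coordinates in radians and R about 6371 km, matching the Keras implementation below.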
Step23: The following returns a fully-connected model as mentioned in the paper. Takes as input k as defined before, and the cluster centers.
Step24: As mentioned, construction of repeated cluster longs/lats for input
Step25: Of course, k in the model needs to match k from feature construction. We again use 5 as they did in the paper
Step26: Paper used SGD opt w/ following parameters
Step27: original
Step28: new valid
Step29: It works, but it seems to converge unrealistically quickly and the loss values are not the same. The paper does not mention what it's using as "error" in its results. I assume the same equirectangular? Not very clear. The difference in values could be due to the missing Earth-radius factor
Step30: To-do
Step31: hdf5 files
|
<ASSISTANT_TASK:>
Python Code:
data_path = "/data/datasets/taxi/"
meta = pd.read_csv(data_path+'metaData_taxistandsID_name_GPSlocation.csv', header=0)
meta.head()
train = pd.read_csv(data_path+'train/train.csv', header=0)
train.head()
train['ORIGIN_CALL'] = pd.Series(pd.factorize(train['ORIGIN_CALL'])[0]) + 1
train['ORIGIN_STAND']=pd.Series([0 if pd.isnull(x) or x=='' else int(x) for x in train["ORIGIN_STAND"]])
train['TAXI_ID'] = pd.Series(pd.factorize(train['TAXI_ID'])[0]) + 1
train['DAY_TYPE'] = pd.Series([ord(x[0]) - ord('A') for x in train['DAY_TYPE']])
polyline = pd.Series([ast.literal_eval(x) for x in train['POLYLINE']])
train['LATITUDE'] = pd.Series([np.array([point[1] for point in poly],dtype=np.float32) for poly in polyline])
train['LONGITUDE'] = pd.Series([np.array([point[0] for point in poly],dtype=np.float32) for poly in polyline])
utils.save_array(data_path+'train/train.bc', train.as_matrix())
utils.save_array(data_path+'train/meta_train.bc', meta.as_matrix())
train = pd.DataFrame(utils.load_array(data_path+'train/train.bc'), columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE'])
train.head()
train['ORIGIN_CALL'].max()
train['ORIGIN_STAND'].max()
train['TAXI_ID'].max()
train['DAY_OF_WEEK'] = pd.Series([datetime.datetime.fromtimestamp(t).weekday() for t in train['TIMESTAMP']])
train['QUARTER_HOUR'] = pd.Series([int((datetime.datetime.fromtimestamp(t).hour*60 + datetime.datetime.fromtimestamp(t).minute)/15)
for t in train['TIMESTAMP']])
train['WEEK_OF_YEAR'] = pd.Series([datetime.datetime.fromtimestamp(t).isocalendar()[1] for t in train['TIMESTAMP']])
train['TARGET'] = pd.Series([[l[1][0][-1], l[1][1][-1]] if len(l[1][0]) > 1 else numpy.nan for l in train[['LONGITUDE','LATITUDE']].iterrows()])
def start_stop_inputs(k):
result = []
for l in train[['LONGITUDE','LATITUDE']].iterrows():
if len(l[1][0]) < 2 or len(l[1][1]) < 2:
result.append(numpy.nan)
elif len(l[1][0][:-1]) >= 2*k:
result.append(numpy.concatenate([l[1][0][0:k],l[1][0][-(k+1):-1],l[1][1][0:k],l[1][1][-(k+1):-1]]).flatten())
else:
l1 = numpy.lib.pad(l[1][0][:-1], (0,20-len(l[1][0][:-1])), mode='edge')
l2 = numpy.lib.pad(l[1][1][:-1], (0,20-len(l[1][1][:-1])), mode='edge')
result.append(numpy.concatenate([l1[0:k],l1[-k:],l2[0:k],l2[-k:]]).flatten())
return pd.Series(result)
train['COORD_FEATURES'] = start_stop_inputs(5)
train.shape
train.dropna().shape
train = train.dropna()
utils.save_array(data_path+'train/train_features.bc', train.as_matrix())
train = pd.read_csv(data_path+'train/train.csv', header=0)
test = pd.read_csv(data_path+'test/test.csv', header=0)
def start_stop_inputs(k, data, test):
result = []
for l in data[['LONGITUDE','LATITUDE']].iterrows():
if not test:
if len(l[1][0]) < 2 or len(l[1][1]) < 2:
result.append(np.nan)
elif len(l[1][0][:-1]) >= 2*k:
result.append(np.concatenate([l[1][0][0:k],l[1][0][-(k+1):-1],l[1][1][0:k],l[1][1][-(k+1):-1]]).flatten())
else:
l1 = np.lib.pad(l[1][0][:-1], (0,4*k-len(l[1][0][:-1])), mode='edge')
l2 = np.lib.pad(l[1][1][:-1], (0,4*k-len(l[1][1][:-1])), mode='edge')
result.append(np.concatenate([l1[0:k],l1[-k:],l2[0:k],l2[-k:]]).flatten())
else:
if len(l[1][0]) < 1 or len(l[1][1]) < 1:
result.append(np.nan)
elif len(l[1][0]) >= 2*k:
result.append(np.concatenate([l[1][0][0:k],l[1][0][-k:],l[1][1][0:k],l[1][1][-k:]]).flatten())
else:
l1 = np.lib.pad(l[1][0], (0,4*k-len(l[1][0])), mode='edge')
l2 = np.lib.pad(l[1][1], (0,4*k-len(l[1][1])), mode='edge')
result.append(np.concatenate([l1[0:k],l1[-k:],l2[0:k],l2[-k:]]).flatten())
return pd.Series(result)
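# Minimal illustration (hypothetical 3-point trip) of the edge-padding behaviour above:
# short trajectories are padded so the feature vector always has 4*k = 20 values for k=5.
demo = pd.DataFrame({'LONGITUDE': [np.array([-8.61, -8.62, -8.63], dtype=np.float32)],
                     'LATITUDE': [np.array([41.14, 41.15, 41.16], dtype=np.float32)]})
print(len(start_stop_inputs(5, demo, test=True)[0]))  # -> 20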
lat_mean = 41.15731
lat_std = 0.074120656
long_mean = -8.6161413
long_std = 0.057200309
def feature_ext(data, test=False):
data['ORIGIN_CALL'] = pd.Series(pd.factorize(data['ORIGIN_CALL'])[0]) + 1
data['ORIGIN_STAND']=pd.Series([0 if pd.isnull(x) or x=='' else int(x) for x in data["ORIGIN_STAND"]])
data['TAXI_ID'] = pd.Series(pd.factorize(data['TAXI_ID'])[0]) + 1
data['DAY_TYPE'] = pd.Series([ord(x[0]) - ord('A') for x in data['DAY_TYPE']])
polyline = pd.Series([ast.literal_eval(x) for x in data['POLYLINE']])
data['LATITUDE'] = pd.Series([np.array([point[1] for point in poly],dtype=np.float32) for poly in polyline])
data['LONGITUDE'] = pd.Series([np.array([point[0] for point in poly],dtype=np.float32) for poly in polyline])
if not test:
data['TARGET'] = pd.Series([[l[1][0][-1], l[1][1][-1]] if len(l[1][0]) > 1 else np.nan for l in data[['LONGITUDE','LATITUDE']].iterrows()])
data['LATITUDE'] = pd.Series([(t-lat_mean)/lat_std for t in data['LATITUDE']])
data['LONGITUDE'] = pd.Series([(t-long_mean)/long_std for t in data['LONGITUDE']])
data['COORD_FEATURES'] = start_stop_inputs(5, data, test)
data['DAY_OF_WEEK'] = pd.Series([datetime.datetime.fromtimestamp(t).weekday() for t in data['TIMESTAMP']])
data['QUARTER_HOUR'] = pd.Series([int((datetime.datetime.fromtimestamp(t).hour*60 + datetime.datetime.fromtimestamp(t).minute)/15)
for t in data['TIMESTAMP']])
data['WEEK_OF_YEAR'] = pd.Series([datetime.datetime.fromtimestamp(t).isocalendar()[1] for t in data['TIMESTAMP']])
data = data.dropna()
return data
train = feature_ext(train)
test = feature_ext(test, test=True)
test.head()
utils.save_array(data_path+'train/train_features.bc', train.as_matrix())
utils.save_array(data_path+'test/test_features.bc', test.as_matrix())
train.head()
train = pd.DataFrame(utils.load_array(data_path+'train/train_features.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE', 'DAY_OF_WEEK',
'QUARTER_HOUR', "WEEK_OF_YEAR", "TARGET", "COORD_FEATURES"])
y_targ = np.vstack(train["TARGET"].as_matrix())
from sklearn.cluster import MeanShift, estimate_bandwidth
#bw = estimate_bandwidth(y_targ, quantile=.1, n_samples=1000)
bw = 0.001
ms = MeanShift(bandwidth=bw, bin_seeding=True, min_bin_freq=5)
ms.fit(y_targ)
cluster_centers = ms.cluster_centers_
cluster_centers.shape
utils.save_array(data_path+"cluster_centers_bw_001.bc", cluster_centers)
train = pd.DataFrame(utils.load_array(data_path+'train/train_features.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE', 'TARGET',
'COORD_FEATURES', "DAY_OF_WEEK", "QUARTER_HOUR", "WEEK_OF_YEAR"])
cluster_centers = utils.load_array(data_path+"cluster_centers_bw_001.bc")
long = np.array([c[0] for c in cluster_centers])
lat = np.array([c[1] for c in cluster_centers])
X_train, X_val = train_test_split(train, test_size=0.2, random_state=42)
def get_features(data):
return [np.vstack(data['COORD_FEATURES'].as_matrix()), np.vstack(data['ORIGIN_CALL'].as_matrix()),
np.vstack(data['TAXI_ID'].as_matrix()), np.vstack(data['ORIGIN_STAND'].as_matrix()),
np.vstack(data['QUARTER_HOUR'].as_matrix()), np.vstack(data['DAY_OF_WEEK'].as_matrix()),
np.vstack(data['WEEK_OF_YEAR'].as_matrix()), np.array([long for i in range(0,data.shape[0])]),
np.array([lat for i in range(0,data.shape[0])])]
def get_target(data):
return np.vstack(data["TARGET"].as_matrix())
X_train_features = get_features(X_train)
X_train_target = get_target(X_train)
utils.save_array(data_path+'train/X_train_features.bc', get_features(X_train))
train = pd.DataFrame(utils.load_array(data_path+'train/train_features.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE', 'TARGET',
'COORD_FEATURES', "DAY_OF_WEEK", "QUARTER_HOUR", "WEEK_OF_YEAR"])
cuts = [
1376503200, # 2013-08-14 18:00
1380616200, # 2013-10-01 08:30
1381167900, # 2013-10-07 17:45
1383364800, # 2013-11-02 04:00
1387722600 # 2013-12-22 14:30
]
print(datetime.datetime.fromtimestamp(1376503200))
train.shape
val_indices = []
index = 0
for index, row in train.iterrows():
time = row['TIMESTAMP']
latitude = row['LATITUDE']
for ts in cuts:
if time <= ts and time + 15 * (len(latitude) - 1) >= ts:
val_indices.append(index)
break
index += 1
X_valid = train.iloc[val_indices]
X_valid.head()
for d in X_valid['TIMESTAMP']:
print(datetime.datetime.fromtimestamp(d))
X_train = train.drop(train.index[[val_indices]])
cluster_centers = utils.load_array(data_path+"/data/cluster_centers_bw_001.bc")
long = np.array([c[0] for c in cluster_centers])
lat = np.array([c[1] for c in cluster_centers])
utils.save_array(data_path+'train/X_train.bc', X_train.as_matrix())
utils.save_array(data_path+'valid/X_val.bc', X_valid.as_matrix())
X_train = pd.DataFrame(utils.load_array(data_path+'train/X_train.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE', 'TARGET',
'COORD_FEATURES', "DAY_OF_WEEK", "QUARTER_HOUR", "WEEK_OF_YEAR"])
X_val = pd.DataFrame(utils.load_array(data_path+'valid/X_val.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE', 'TARGET',
'COORD_FEATURES', "DAY_OF_WEEK", "QUARTER_HOUR", "WEEK_OF_YEAR"])
def equirectangular_loss(y_true, y_pred):
deg2rad = 3.141592653589793 / 180
long_1 = y_true[:,0]*deg2rad
long_2 = y_pred[:,0]*deg2rad
lat_1 = y_true[:,1]*deg2rad
lat_2 = y_pred[:,1]*deg2rad
return 6371*K.sqrt(K.square((long_1 - long_2)*K.cos((lat_1 + lat_2)/2.))
+K.square(lat_1 - lat_2))
def embedding_input(name, n_in, n_out, reg):
inp = Input(shape=(1,), dtype='int64', name=name)
return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp)
def taxi_mlp(k, cluster_centers):
shp = cluster_centers.shape[0]
nums = Input(shape=(4*k,))
center_longs = Input(shape=(shp,))
center_lats = Input(shape=(shp,))
emb_names = ['client_ID', 'taxi_ID', "stand_ID", "quarter_hour", "day_of_week", "week_of_year"]
emb_ins = [57106, 448, 64, 96, 7, 52]
emb_outs = [10 for i in range(0,6)]
regs = [0 for i in range(0,6)]
embs = [embedding_input(e[0], e[1]+1, e[2], e[3]) for e in zip(emb_names, emb_ins, emb_outs, regs)]
x = merge([nums] + [Flatten()(e[1]) for e in embs], mode='concat')
x = Dense(500, activation='relu')(x)
x = Dense(shp, activation='softmax')(x)
y = merge([merge([x, center_longs], mode='dot'), merge([x, center_lats], mode='dot')], mode='concat')
return Model(input = [nums]+[e[0] for e in embs] + [center_longs, center_lats], output = y)
def data_iter(data, batch_size, cluster_centers):
long = [c[0] for c in cluster_centers]
lat = [c[1] for c in cluster_centers]
i = 0
N = data.shape[0]
while True:
yield ([np.vstack(data['COORD_FEATURES'][i:i+batch_size].as_matrix()), np.vstack(data['ORIGIN_CALL'][i:i+batch_size].as_matrix()),
np.vstack(data['TAXI_ID'][i:i+batch_size].as_matrix()), np.vstack(data['ORIGIN_STAND'][i:i+batch_size].as_matrix()),
np.vstack(data['QUARTER_HOUR'][i:i+batch_size].as_matrix()), np.vstack(data['DAY_OF_WEEK'][i:i+batch_size].as_matrix()),
np.vstack(data['WEEK_OF_YEAR'][i:i+batch_size].as_matrix()), np.array([long for i in range(0,batch_size)]),
np.array([lat for i in range(0,batch_size)])], np.vstack(data["TARGET"][i:i+batch_size].as_matrix()))
i += batch_size
# x = Lambda(thing)([x, long, lat])  # stray experiment; 'thing' is never defined in this notebook
model = taxi_mlp(5, cluster_centers)
model.compile(optimizer=SGD(0.01, momentum=0.9), loss=equirectangular_loss, metrics=['mse'])
X_train_feat = get_features(X_train)
X_train_target = get_target(X_train)
X_val_feat = get_features(X_valid)
X_val_target = get_target(X_valid)
tqdm = TQDMNotebookCallback()
checkpoint = ModelCheckpoint(filepath=data_path+'models/tmp/weights.{epoch:03d}.{val_loss:.8f}.hdf5', save_best_only=True)
batch_size=256
model.fit(X_train_feat, X_train_target, nb_epoch=1, batch_size=batch_size, validation_data=(X_val_feat, X_val_target), callbacks=[tqdm, checkpoint], verbose=0)
model.fit(X_train_feat, X_train_target, nb_epoch=30, batch_size=batch_size, validation_data=(X_val_feat, X_val_target), callbacks=[tqdm, checkpoint], verbose=0)
model = load_model(data_path+'models/weights.0.0799.hdf5', custom_objects={'equirectangular_loss':equirectangular_loss})
model.fit(X_train_feat, X_train_target, nb_epoch=100, batch_size=batch_size, validation_data=(X_val_feat, X_val_target), callbacks=[tqdm, checkpoint], verbose=0)
model.save(data_path+'models/current_model.hdf5')
model.fit(X_train_feat, X_train_target, nb_epoch=1, batch_size=batch_size, validation_data=(X_val_feat, X_val_target), callbacks=[tqdm, checkpoint], verbose=0)
model.fit(X_train_feat, X_train_target, nb_epoch=400, batch_size=batch_size, validation_data=(X_val_feat, X_val_target), callbacks=[tqdm, checkpoint], verbose=0)
model.save(data_path+'/models/current_model.hdf5')
len(X_val_feat[0])
best_model = load_model(data_path+'models/weights.308.0.03373993.hdf5', custom_objects={'equirectangular_loss':equirectangular_loss})
best_model.evaluate(X_val_feat, X_val_target)
test = pd.DataFrame(utils.load_array(data_path+'test/test_features.bc'),columns=['TRIP_ID', 'CALL_TYPE', 'ORIGIN_CALL', 'ORIGIN_STAND', 'TAXI_ID',
'TIMESTAMP', 'DAY_TYPE', 'MISSING_DATA', 'POLYLINE', 'LATITUDE', 'LONGITUDE',
'COORD_FEATURES', "DAY_OF_WEEK", "QUARTER_HOUR", "WEEK_OF_YEAR"])
test['ORIGIN_CALL'] = pd.read_csv(data_path+'real_origin_call.csv', header=None)
test['TAXI_ID'] = pd.read_csv(data_path+'real_taxi_id.csv',header=None)
X_test = get_features(test)
b = np.sort(X_test[1],axis=None)
test_preds = np.round(best_model.predict(X_test), decimals=6)
d = {0:test['TRIP_ID'], 1:test_preds[:,1], 2:test_preds[:,0]}
kaggle_out = pd.DataFrame(data=d)
kaggle_out.to_csv(data_path+'submission.csv', header=['TRIP_ID','LATITUDE', 'LONGITUDE'], index=False)
def hdist(a, b):
deg2rad = 3.141592653589793 / 180
lat1 = a[:, 1] * deg2rad
lon1 = a[:, 0] * deg2rad
lat2 = b[:, 1] * deg2rad
lon2 = b[:, 0] * deg2rad
dlat = abs(lat1-lat2)
dlon = abs(lon1-lon2)
al = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2)**2)
d = np.arctan2(np.sqrt(al), np.sqrt(1-al))
hd = 2 * 6371 * d
return hd
val_preds = best_model.predict(X_val_feat)
trn_preds = model.predict(X_train_feat)
er = hdist(val_preds, X_val_target)
er.mean()
# K.equal()  # stray incomplete call left over from experimentation
cuts = [
1376503200, # 2013-08-14 18:00
1380616200, # 2013-10-01 08:30
1381167900, # 2013-10-07 17:45
1383364800, # 2013-11-02 04:00
1387722600 # 2013-12-22 14:30
]
np.any([train['TIMESTAMP'].map(lambda x: x in cuts)])
train['TIMESTAMP']
np.any(train['TIMESTAMP']==1381167900)
times = train['TIMESTAMP'].as_matrix()
X_train.columns
times
count = 0
for index, row in X_val.iterrows():
for ts in cuts:
time = row['TIMESTAMP']
latitude = row['LATITUDE']
if time <= ts and time + 15 * (len(latitude) - 1) >= ts:
count += 1
one = count
count + one
import h5py
h = h5py.File(data_path+'original/data.hdf5', 'r')
evrData=h['/Configure:0000/Run:0000/CalibCycle:0000/EvrData::DataV3/NoDetector.0:Evr.0/data']
c = np.load(data_path+'original/arrival-clusters.pkl')
from fuel.utils import find_in_data_path
from fuel.datasets import H5PYDataset
original_path = '/data/bckenstler/data/taxi/original/'
train_set = H5PYDataset(original_path+'data.hdf5', which_sets=('train',),load_in_memory=True)
valid_set = H5PYDataset(original_path+'valid.hdf5', which_sets=('cuts/test_times_0',),load_in_memory=True)
print(train_set.num_examples)
print(valid_set.num_examples)
data = train_set.data_sources
data[0]
valid_data = valid_set.data_sources
valid_data[4][0]
stamps = valid_data[-3]
stamps[0]
for i in range(0,304):
print(np.any([t==int(stamps[i]) for t in X_val['TIMESTAMP']]))
type(X_train['TIMESTAMP'][0])
type(stamps[0])
check = [s in stamps for s in X_val['TIMESTAMP']]
for s in X_val['TIMESTAMP']:
print(datetime.datetime.fromtimestamp(s))
for s in stamps:
print(datetime.datetime.fromtimestamp(s))
ids = valid_data[-1]
type(ids[0])
ids
X_val
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First off, the Harris detector computes a square matrix M built from the derivatives of the image pixels along both the x and y axes.
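Concretely, the code below forms M = [[Sxx, Sxy], [Sxy, Syy]] from Gaussian-smoothed products of the derivatives Ix and Iy, and uses det(M)/trace(M) as the corner response.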
Step3: Afterwards, Harris detector proceeds with non-maximal suppression assuming a certain threshold (0.1 in this case)
Step4: Compute Harris for ten levels of threshold
Step5: Quiz 3b - Implementation of the SIFT Feature Detector
Step6: Execute custom SIFT for every given image in the current path then show keypoints as circles in the corresponding images.
Step7: SIFT Feature Detector - Opencv Implementation
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
import glob
import random as rnd
from scipy.ndimage import filters
from PIL import Image
from numpy import *
from pylab import *
from pandas import *
np.seterr(divide='ignore', invalid='ignore')
#Compute the Algorithm Harris corner detecion for implementation in grayscale image
def compute_harris_points(img, sigma=3):
#compute derivates in the image
imx = np.zeros(img.size)
imy = np.zeros(img.size)
imx = filters.gaussian_filter(img, (sigma,sigma), (0,1))
imy = filters.gaussian_filter(img, (sigma,sigma), (1,0))
# compute the products of derivatives at every pixel
Sxx = filters.gaussian_filter(imx*imx,sigma)
Sxy = filters.gaussian_filter(imx*imy,sigma)
Syy = filters.gaussian_filter(imy*imy,sigma)
# determinant and trace
Mdet = Sxx*Syy - Sxy**2
Mtr = Sxx + Syy
harris = np.divide(Mdet, Mtr)
harris[np.isposinf(harris)] = 0
harris[np.isnan(harris)] = 0
return harris
def doHarrisNonMaxSupression(harrisim,min_dist=10,threshold=0.1):
#Return corners from a Harris response image
#min_dist is the minimum number of pixels separating
#corners and image boundary.
global t
global dist
dist=min_dist
t=threshold
#print(t)
# find top corner candidates above a threshold
corner_threshold = harrisim.max() * threshold
harrisim_t = (harrisim > corner_threshold) * 1
# get coordinates of candidates
coords = array(harrisim_t.nonzero()).T
# ...and their values
candidate_values = [harrisim[c[0],c[1]] for c in coords]
# sort candidates
index = argsort(candidate_values)
# store allowed point locations in array
allowed_locations = zeros(harrisim.shape)
allowed_locations[min_dist:-min_dist,min_dist:-min_dist] = 1
# select the best points taking min_distance into account
filtered_coords = []
for i in index:
if allowed_locations[coords[i,0],coords[i,1]] == 1:
filtered_coords.append(coords[i])
allowed_locations[(coords[i,0]-min_dist):(coords[i,0]+min_dist),
(coords[i,1]-min_dist):(coords[i,1]+min_dist)] = 0
return filtered_coords
def plot_harris_points(image,filtered_coords):
# Plots corners found in image.
plt.figure(figsize=(20,12))
gray()
plt.imshow(image)
plt.title('Harris corner detection, dist=%s and threshold=%s'%(dist,t))
plt.plot([p[1] for p in filtered_coords],[p[0] for p in filtered_coords],'*',color = 'r')
plt.axis('off')
plt.show()
im = Image.open('boat_images/img1.pgm')
plt.figure(figsize=(20,12))
gray()
plt.imshow(im, cmap = 'gray')
harrisim = compute_harris_points(im)
for i in range(1, 11,1):
xx=i* 0.01
j=10
filtered_coords = doHarrisNonMaxSupression(harrisim,j,xx)
plot_harris_points(im, filtered_coords)
def getGaussianKernel(sigma, kernelHeight=51, kernelWidth=51):
assert(kernelHeight % 2 == 1 and kernelWidth % 2 == 1)
    yOffset = (kernelHeight - 1) // 2
    xOffset = (kernelWidth - 1) // 2
kernel = np.ndarray((kernelHeight, kernelWidth), np.float64)
for y in range(-yOffset, yOffset+1, 1):
for x in range(-xOffset, xOffset+1, 1):
kernel[y+yOffset][x+xOffset] = (1. / (2.*np.pi*sigma**2)) * np.exp(-(x**2 + y**2) / (2 * sigma**2))
kernel /= kernel.sum()
return kernel
def calcGaussianPyramid(org_img):
img = org_img.copy()
bluredImg = img.copy()
sigma = 1.6
octaveCount = 7
sigmaCount = 4
gp = np.ndarray(shape=(octaveCount,), dtype=np.ndarray)
for o in range(0, octaveCount):
gp[o] = np.ndarray(shape=(sigmaCount+1, img.shape[0], img.shape[1]), dtype=np.float64)
gp[o][0] = bluredImg.copy()
for s in range(1, sigmaCount + 1):
#k = 2**(float(s)/float(sigmaCount))
k = np.sqrt(2.0)**s
kernel = getGaussianKernel(k*sigma)
bluredImg = cv2.filter2D(img, -1, kernel)
gp[o][s] = bluredImg.copy()
if (o < octaveCount-1):
img = downscale(img)
bluredImg = downscale(bluredImg)
return gp
def calcDifference(img0, img1, threshold = 0):
assert(img0.shape == img1.shape)
diffImg = np.ndarray(img0.shape, np.float64)
for y in range(diffImg.shape[0]):
for x in range(diffImg.shape[1]):
difference = abs(img1[y][x] - img0[y][x])
if difference > threshold:
diffImg[y][x] = difference
else:
diffImg[y][x] = 0
return diffImg
def calcDoG(gp):
DoG = np.ndarray(shape=gp.shape, dtype=np.ndarray)
for o in range(DoG.shape[0]):
DoG[o] = np.ndarray(shape=(gp[o].shape[0]-1, gp[o].shape[1], gp[o].shape[2]), dtype=np.float64)
for s in range(DoG[o].shape[0]):
DoG[o][s] = calcDifference(gp[o][s], gp[o][s+1])
return DoG
def getNeighbourhood(octave, s, y, x, radius=1):
neighbourhood = octave[s-radius:s+radius+1, y-radius:y+radius+1, x-radius:x+radius+1]
return neighbourhood
def calcExtrema(DoG, threshold=0.3, radius=1):
keypoints = np.ndarray(shape=DoG.shape, dtype=np.ndarray)
sigma = 1.6
sigmaCount = DoG[0].shape[0]
for o in range(DoG.shape[0]):
keypoints[o] = np.ndarray(shape=(DoG[o].shape[0]-(2*radius),), dtype=list)
for s in range(radius, DoG[o].shape[0]-radius):
keypoints[o][s-radius] = []
k = 2**(float(s)/float(sigmaCount))
for y in range(radius, DoG[o].shape[1]-radius):
for x in range(radius, DoG[o].shape[2]-radius):
value = DoG[o][s, y, x]
neighbourhood = getNeighbourhood(DoG[o], s, y, x, radius=radius).flatten()
neighbourhood.sort()
min2 = neighbourhood[1]
max2 = neighbourhood[-2]
if value < min2 or (value > threshold and value > max2):
scale = 2**o
keypoints[o][s-radius].append((scale * y + scale/2, scale * x + scale/2, scale * k*sigma))
return keypoints
def normalize(img):
normImg = np.ndarray(shape=img.shape, dtype=np.float64)
max_val = img.max()
if max > 0:
normImg = img/float(max_val)
normImg *= 255.
else:
return img.copy()
return normImg.astype(np.uint8)
def scale(img, factor=2):
assert(len(img.shape) == 2)
rows, cols = img.shape
scaledImg = np.ndarray((rows*factor, cols*factor), np.float64)
for y in range(0, scaledImg.shape[0]):
for x in range(0, scaledImg.shape[1]):
            scaledImg[y][x] = img[y//factor][x//factor]
return scaledImg
def downscale(img):
assert(len(img.shape) == 2)
rows, cols = img.shape
    scaledImg = np.ndarray((rows//2, cols//2), np.float64)
for y in range(0, scaledImg.shape[0]):
for x in range(0, scaledImg.shape[1]):
scaledImg[y][x] = img[2*y][2*x]
return scaledImg
def drawKeypoints(img, kp):
if (len(img.shape) < 3 or img.shape[2] == 1):
kpImg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
else:
kpImg = img.copy()
for y, x, scale in kp:
r = rnd.randrange(0,255)
g = rnd.randrange(0,255)
b = rnd.randrange(0,255)
cv2.circle(kpImg, (int(x), int(y)), int(scale), (r, g, b))
return kpImg
def plotImage(title,image):
plt.figure(figsize=(20,12))
gray()
plt.imshow(image)
plt.title(title)
plt.axis('off')
plt.show()
images = glob.glob('boat_images/img_*.png')
print('Images Loaded!')
for filename in images:
img = cv2.imread(filename, 0)
gp = calcGaussianPyramid(img)
DoG = calcDoG(gp)
radius = 1
keypoints = calcExtrema(DoG, radius=radius)
kpImg = img.copy()
for o in range(keypoints.shape[0]):
for s in range(radius, DoG[o].shape[0]-radius):
kp = keypoints[o][s-radius]
kpImg = drawKeypoints(kpImg, kp)
plotImage("Custom SIFT "+ filename, kpImg)
#img = (Image.open('boat_images/img1.pgm').convert('L'))
#img.save('boat_images/img_1.png')
#img = (Image.open('boat_images/img2.pgm').convert('L'))
#img.save('boat_images/img_2.png')
#img = (Image.open('boat_images/img3.pgm').convert('L'))
#img.save('boat_images/img_3.png')
#img = (Image.open('boat_images/img4.pgm').convert('L'))
#img.save('boat_images/img_4.png')
#img = (Image.open('boat_images/img5.pgm').convert('L'))
#img.save('boat_images/img_5.png')
#img = (Image.open('boat_images/img6.pgm').convert('L'))
#img.save('boat_images/img_6.png')
images = glob.glob('boat_images//img*.pgm')
for filename in images:
img = cv2.imread(filename, 0)
sift = cv2.SIFT()
kp, desc = sift.detectAndCompute(img, None)
imgfinal=cv2.drawKeypoints(img,kp,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
plotImage("SIFT Opencv "+filename, imgfinal)
## matching
sift = cv2.SIFT()
imgA = cv2.imread('boat_images//img1.pgm', 0)
imgB = cv2.imread('boat_images//img2.pgm', 0)
kpA, desA = sift.detectAndCompute(imgA,None)
kpB, desB = sift.detectAndCompute(imgB,None)
bf = cv2.BFMatcher()
matches = bf.knnMatch(desA,desB, k=2)
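# Lowe's ratio test (illustrative sketch) to build the `good` match list expected by the
# commented-out drawMatchesKnn call below: keep a match only when it is clearly better
# than the second-best candidate.
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])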
#img3 = cv2.drawMatchesKnn(imgA,kpA,imgB,kpB,good,flags=2)
#plt.imshow(img3)
#plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read and clean the data
Step2: Most of the plots below depend on the DateAcquired field being valid, so I'm going to cheat and dump all the records where it is not.
Step3: Classifications and departments
Step4: Now we can do the same thing by Department.
Step5: For obvious reasons, there are many more prints, photographs and books than any other class of work. If you're only interested in paintings, sculptures and installations then records where Department is Paintings & Sculpture provides a way to select those out.
Step6: Lots of photographers! What if we only look at the Painting & Sculpture Department?
Step7: Lots of men! (I'll revisit that in a future post.)
Step8: Lots of acquisitions in 1964, 1968 and 2008. More acquisitions in October than any other month. And Tuesdays are busy!
Step9: It turns out over 11,000 items were added to the catalog with an acquisition date of 6 October, 1964.
Step10: Let's look at trends in the acquisition of the top few artists in the collection of the Painting & Sculpture department, i.e. the people who make paintings, sculptures and installations. First we create a list of who these people are.
Step11: Then we use the isin() method of a series to construct a boolean Series to filter out people who are not in that list.
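For example, moma['Artist'].isin(top) is True only for rows whose Artist appears in the top list, so it can be used directly to index the DataFrame.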
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_context('poster')
# Use MOMA's ID as index
# Parse `DateAcquired` column as a datetime
moma = pd.read_csv('Artworks.csv', index_col=12, parse_dates=[10])
# Show the first record to check everything is OK
moma.iloc[0]
# Convert the categorical columns to pandas `category` dtypes.
# See, e.g. http://blog.dominodatalab.com/pandas-categoricals/ for why this is desirable.
categorical_columns = ['Classification', 'Department', 'CuratorApproved']
for c in categorical_columns:
moma[c] = moma[c].astype('category')
print(c, '\n', moma[c].cat.categories)
print(sum(moma['DateAcquired'].isnull()), 'invalid DateAcquired records. Removing.')
moma = moma.dropna(subset=['DateAcquired'])
ax = (moma.groupby('Classification')
.size()
.sort_values()
.plot(kind='barh'))
ax.set_title('All items')
ax.set_xlabel('Count');
ax = (moma.groupby('Department')
.size()
.sort_values()
.plot(kind='barh'))
ax.set_title('All items')
ax.set_xlabel('Count');
ax = (moma.groupby('Artist')
.size()
.sort_values()
.tail(20)
.plot(kind='barh'))
ax.set_title('Artists with the most items in the MOMA collection (top 20)')
ax.set_xlabel('Count');
ax = (moma[moma['Department'] == 'Painting & Sculpture']
.groupby('Artist')
.size()
.sort_values()
.tail(20)
.plot(kind='barh'))
ax.set_title('Artists with the most items in the MOMA Painting & Sculpture department (top 20)')
ax.set_xlabel('Count');
fig, ax = plt.subplots(3, 1);
ylabel = 'Acquisitions'
(moma.groupby(pd.Grouper(freq='A', key='DateAcquired'))
.size()
.plot(ax=ax[0]))
(moma
.groupby(moma['DateAcquired'].dt.month)
.size()
.plot(ax=ax[1]))
(moma.
groupby(moma['DateAcquired'].dt.weekday)
.size()
.plot(ax=ax[2]))
months = {0: '_', 1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug', 9: 'Sep',
10: 'Oct', 11: 'Nov', 12: 'Dec'}
days = {0: 'Mon', 1: 'Tue', 2: 'Wed', 3: 'Thu', 4: 'Fri', 5: 'Sat', 6: 'Sun'}
ax[0].set_title('MOMA acquisition trends with time')
ax[1].set_xticklabels([months[i] for i in ax[1].get_xticks()]);
ax[2].set_xticklabels([days[i] for i in ax[2].get_xticks()]);
for a in ax:
a.set_xlabel('');
a.set_ylabel(ylabel);
(moma[(moma['DateAcquired'] > '1964-01-01') &
(moma['DateAcquired'] < '1964-12-31')]
.groupby([pd.Grouper(freq='D', key='DateAcquired')])
.size())
# This is a DataFrame where all items by an artist except their first acquisition are removed
firsts = moma.drop_duplicates('Artist')
fig, ax = plt.subplots(figsize=(14, 3))
(firsts.groupby(pd.Grouper(key='DateAcquired', freq='A'))
.size()
.plot())
ax.set_xlabel('');
ax.set_ylabel('Number of new artists');
top = list(moma[moma['Department'] == 'Painting & Sculpture']
.groupby('Artist')
.size()
.sort_values()
.tail(8)
.index)
with sns.color_palette(palette='husl', n_colors=8): # more than 6 colors
fig, ax = plt.subplots()
(moma[moma['Artist'].isin(top) &
(moma['Department'] == 'Painting & Sculpture')]
.groupby([pd.Grouper(freq='10A', key='DateAcquired'), 'Artist'])
.size()
.unstack()
.plot(ax=ax))
ax.set_xlabel('')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this exercise, you will work with data from the Housing Prices Competition for Kaggle Learn Users.
Step2: Use the next code cell to print the first five rows of the data.
Step3: You can already see a few missing values in the first several rows. In the next step, you'll obtain a more comprehensive understanding of the missing values in the dataset.
Step 1
Step4: Part A
Step5: Part B
Step6: To compare different approaches to dealing with missing values, you'll use the same score_dataset() function from the tutorial. This function reports the mean absolute error (MAE) from a random forest model.
Step7: Step 2
Step8: Run the next code cell without changes to obtain the MAE for this approach.
Step9: Step 3
Step10: Run the next code cell without changes to obtain the MAE for this approach.
Step11: Part B
Step12: Step 4
Step13: Run the next code cell to train and evaluate a random forest model. (Note that we don't use the score_dataset() function above, because we will soon use the trained model to generate test predictions!)
Step14: Part B
Step15: Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition.
|
<ASSISTANT_TASK:>
Python Code:
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex2 import *
print("Setup Complete")
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X_full = pd.read_csv('../input/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/test.csv', index_col='Id')
# Remove rows with missing target, separate target from predictors
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
# To keep things simple, we'll use only numerical predictors
X = X_full.select_dtypes(exclude=['object'])
X_test = X_test_full.select_dtypes(exclude=['object'])
# Break off validation set from training data
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,
random_state=0)
X_train.head()
# Shape of training data (num_rows, num_columns)
print(X_train.shape)
# Number of missing values in each column of training data
missing_val_count_by_column = (X_train.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column > 0])
# Fill in the line below: How many rows are in the training data?
num_rows = ____
# Fill in the line below: How many columns in the training data
# have missing values?
num_cols_with_missing = ____
# Fill in the line below: How many missing entries are contained in
# all of the training data?
tot_missing = ____
# Check your answers
step_1.a.check()
#%%RM_IF(PROD)%%
num_rows = 1168
num_cols_with_missing = 3
tot_missing = 212 + 6 + 58
step_1.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_1.a.hint()
#_COMMENT_IF(PROD)_
step_1.a.solution()
# Check your answer (Run this code cell to receive credit!)
step_1.b.check()
#_COMMENT_IF(PROD)_
step_1.b.hint()
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
# Fill in the line below: get names of columns with missing values
____ # Your code here
# Fill in the lines below: drop columns in training and validation data
reduced_X_train = ____
reduced_X_valid = ____
# Check your answers
step_2.check()
#%%RM_IF(PROD)%%
# Get names of columns with missing values
cols_with_missing = [col for col in X_train.columns
if X_train[col].isnull().any()]
# Drop columns in training and validation data
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
step_2.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
print("MAE (Drop columns with missing values):")
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))
from sklearn.impute import SimpleImputer
# Fill in the lines below: imputation
____ # Your code here
imputed_X_train = ____
imputed_X_valid = ____
# Fill in the lines below: imputation removed column names; put them back
imputed_X_train.columns = ____
imputed_X_valid.columns = ____
# Check your answers
step_3.a.check()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
step_3.a.assert_check_failed()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.fit_transform(X_valid))
# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
step_3.a.assert_check_failed()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
step_3.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.a.hint()
#_COMMENT_IF(PROD)_
step_3.a.solution()
print("MAE (Imputation):")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))
# Check your answer (Run this code cell to receive credit!)
step_3.b.check()
#_COMMENT_IF(PROD)_
step_3.b.hint()
# Preprocessed training and validation features
final_X_train = ____
final_X_valid = ____
# Check your answers
step_4.a.check()
#%%RM_IF(PROD)%%
# Imputation
final_imputer = SimpleImputer(strategy='median')
final_X_train = pd.DataFrame(final_imputer.fit_transform(X_train))
final_X_valid = pd.DataFrame(final_imputer.transform(X_valid))
# Imputation removed column names; put them back
final_X_train.columns = X_train.columns
final_X_valid.columns = X_valid.columns
step_4.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.a.hint()
#_COMMENT_IF(PROD)_
step_4.a.solution()
# Define and fit model
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(final_X_train, y_train)
# Get validation predictions and MAE
preds_valid = model.predict(final_X_valid)
print("MAE (Your approach):")
print(mean_absolute_error(y_valid, preds_valid))
# Fill in the line below: preprocess test data
final_X_test = ____
# Fill in the line below: get test predictions
preds_test = ____
# Check your answers
step_4.b.check()
#%%RM_IF(PROD)%%
# Preprocess test data
final_X_test = pd.DataFrame(final_imputer.transform(X_test))
# Get test predictions
preds_test = model.predict(final_X_test)
step_4.b.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.b.hint()
#_COMMENT_IF(PROD)_
step_4.b.solution()
# Save test predictions to file
output = pd.DataFrame({'Id': X_test.index,
'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can see that the grid search here runs 10-fold cross-validation for each parameter value, and repeats the same cross-validation process for all 30 parameter values
Step2: 3. Searching over multiple parameters at the same time
Step3: 4. Making predictions with the best parameters
Step4: Here we retrain the model with the best parameters found above; at training time all of the data can be fed into the model as training data, so no individual records are wasted.
Step5: 5. Using RandomizedSearchCV to reduce the computational cost
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.grid_search import GridSearchCV
# read in the iris data
iris = load_iris()
# create X (features) and y (response)
X = iris.data
y = iris.target
# define the parameter values that should be searched
k_range = range(1, 31)
print k_range
# create a parameter grid: map the parameter names to the values that should be searched
# the parameter grid is a dict: keys are parameter names, values are the lists of candidate values to search
param_grid = dict(n_neighbors=k_range)
print param_grid
knn = KNeighborsClassifier(n_neighbors=5)
# instantiate the grid
# the arguments to GridSearchCV look much like those of cross_val_score, with param_grid holding the parameter grid defined above
# setting n_jobs=-1 in GridSearchCV enables parallel computation (if your machine supports it)
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
# view the complete results (list of named tuples)
grid.grid_scores_
# examine the first tuple
print grid.grid_scores_[0].parameters
print grid.grid_scores_[0].cv_validation_scores
print grid.grid_scores_[0].mean_validation_score
# create a list of the mean scores only
grid_mean_scores = [result.mean_validation_score for result in grid.grid_scores_]
print grid_mean_scores
# plot the results
plt.plot(k_range, grid_mean_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')
# examine the best model
print grid.best_score_
print grid.best_params_
print grid.best_estimator_
# define the parameter values that should be searched
k_range = range(1, 31)
weight_options = ['uniform', 'distance']
# create a parameter grid: map the parameter names to the values that should be searched
param_grid = dict(n_neighbors=k_range, weights=weight_options)
print param_grid
# instantiate and fit the grid
grid = GridSearchCV(knn, param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
# view the complete results
grid.grid_scores_
# examine the best model
print grid.best_score_
print grid.best_params_
# train your model using all data and the best known parameters
knn = KNeighborsClassifier(n_neighbors=13, weights='uniform')
knn.fit(X, y)
# make a prediction on out-of-sample data
knn.predict([3, 5, 4, 2])
# shortcut: GridSearchCV automatically refits the best model using all of the data
grid.predict([3, 5, 4, 2])
from sklearn.grid_search import RandomizedSearchCV
# specify "parameter distributions" rather than a "parameter grid"
param_dist = dict(n_neighbors=k_range, weights=weight_options)
# n_iter controls the number of searches
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10, random_state=5)
rand.fit(X, y)
rand.grid_scores_
# examine the best model
print rand.best_score_
print rand.best_params_
# run RandomizedSearchCV 20 times (with n_iter=10) and record the best score
best_scores = []
for _ in range(20):
rand = RandomizedSearchCV(knn, param_dist, cv=10, scoring='accuracy', n_iter=10)
rand.fit(X, y)
best_scores.append(round(rand.best_score_, 3))
print best_scores
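# A small addition (not in the original notebook): summarize how stable RandomizedSearchCV
# is across the 20 repetitions by looking at the spread of its best scores.
print 'min / mean / max of best scores:', min(best_scores), sum(best_scores) / len(best_scores), max(best_scores)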
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def getSubSeq(s, n):
    # Build the lexicographically largest subsequence: repeatedly append every occurrence
    # of the maximum character in the remaining suffix, then continue after its last occurrence.
    res = ""
    cr = 0
    while cr < n:
        mx = s[cr]
        for i in range(cr + 1, n):
            mx = max(mx, s[i])
        lst = cr
        for i in range(cr, n):
            if s[i] == mx:
                res += s[i]
                lst = i
        cr = lst + 1
    return res

if __name__ == '__main__':
    s = "geeksforgeeks"
    n = len(s)
    print(getSubSeq(s, n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can also list and download other datasets interactively just typing
Step2: The fileids method provided by all the datasets in nltk.corpus gives access to a list of all the files available.
Step3: fileids can also filter the available files based on their category, which is the name of the subfolders they are located in. Therefore we can have lists of positive and negative reviews separately.
Step4: We can inspect one of the reviews using the raw method of movie_reviews. Each file is split into sentences, and the curators of this dataset also removed from each review any direct mention of the rating of the movie.
Step6: Tokenize Text into Words
Step7: The first step in natural language processing is generally to split the text into words. This might look simple, but it is very tedious to handle all the corner cases; see, for example, all the punctuation issues we would have to solve if we just split on whitespace
Step8: nltk has a sophisticated word tokenizer trained on English, named punkt; we first have to download its parameters
Step9: Then we can use the word_tokenize function to properly tokenize this text; compare it to the whitespace splitting we used above
Step10: Good news is that the movie_reviews corpus already has direct access to tokenized text with the words method
Step11: Build a bag-of-words model
Step12: This is what we wanted, but we notice that punctuation like "!" and words that are useless for classification, like "of" or "that", are also included.
Step13: Using the Python string.punctuation list and the English stopwords we can build better features by filtering out those words that would not help in the classification
Step14: Plotting Frequencies of Words
Step15: First we want to filter out useless_words as defined in the previous section; this will reduce the length of the dataset by more than a factor of 2
Step16: The collections package of the standard library contains a Counter class that is handy for counting frequencies of words in our list
Step17: It also has a most_common() method to access the words with the highest counts
Step18: Then we would like to have a visualization of this using matplotlib.
Step19: We can sort the word counts and plot their values on logarithmic axes to check the shape of the distribution. This visualization is particularly useful when comparing two or more datasets: a flatter distribution indicates a large vocabulary, while a peaked distribution indicates a restricted vocabulary, often due to a focused topic or specialized language.
Step20: Another related plot is the histogram of sorted_word_counts, which displays how many words have a count in a specific range.
Step21: Train a Classifier for Sentiment Analysis
Step22: One of the simplest supervised machine learning classifiers is the Naive Bayes classifier; it can be trained on 80% of the data to learn what words are generally associated with positive or with negative reviews.
Step23: After training we can check the accuracy on the training set, i.e. the same data used for training; we expect this to be a very high number because the algorithm already "saw" those data. Accuracy is the fraction of the data that is classified correctly; we can turn it into a percentage
Step24: The accuracy above is mostly a check that nothing went very wrong in the training; the real measure of accuracy is on the remaining 20% of the data that wasn't used in training, the test data
Step25: Accuracy here is around 70%, which is pretty good for such a simple model if we consider that the estimated accuracy for a person is about 80%.
|
<ASSISTANT_TASK:>
Python Code:
import nltk
nltk.download("movie_reviews")
nltk.download()
from nltk.corpus import movie_reviews
len(movie_reviews.fileids())
movie_reviews.fileids()[:5]
movie_reviews.fileids()[-5:]
negative_fileids = movie_reviews.fileids('neg')
positive_fileids = movie_reviews.fileids('pos')
len(negative_fileids), len(positive_fileids)
print(movie_reviews.raw(fileids=positive_fileids[0]))
romeo_text = """Why then, O brawling love! O loving hate!
O any thing, of nothing first create!
O heavy lightness, serious vanity,
Misshapen chaos of well-seeming forms,
Feather of lead, bright smoke, cold fire, sick health,
Still-waking sleep, that is not what it is!
This love feel I, that feel no love in this."""
romeo_text.split()
nltk.download("punkt")
romeo_words = nltk.word_tokenize(romeo_text)
romeo_words
movie_reviews.words(fileids=positive_fileids[0])
{word:True for word in romeo_words}
type(_)
def build_bag_of_words_features(words):
return {word:True for word in words}
build_bag_of_words_features(romeo_words)
nltk.download("stopwords")
import string
string.punctuation
useless_words = nltk.corpus.stopwords.words("english") + list(string.punctuation)
useless_words
#type(useless_words)
def build_bag_of_words_features_filtered(words):
return {
word:1 for word in words \
if not word in useless_words}
build_bag_of_words_features_filtered(romeo_words)
all_words = movie_reviews.words()
len(all_words)/1e6
filtered_words = [word for word in movie_reviews.words() if not word in useless_words]
type(filtered_words)
len(filtered_words)/1e6
from collections import Counter
word_counter = Counter(filtered_words)
most_common_words = word_counter.most_common()[:10]
most_common_words
%matplotlib inline
import matplotlib.pyplot as plt
sorted_word_counts = sorted(list(word_counter.values()), reverse=True)
plt.loglog(sorted_word_counts)
plt.ylabel("Freq")
plt.xlabel("Word Rank");
plt.hist(sorted_word_counts, bins=50);
plt.hist(sorted_word_counts, bins=50, log=True);
negative_features = [
(build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'neg') \
for f in negative_fileids
]
print(negative_features[3])
positive_features = [
(build_bag_of_words_features_filtered(movie_reviews.words(fileids=[f])), 'pos') \
for f in positive_fileids
]
print(positive_features[6])
from nltk.classify import NaiveBayesClassifier
split = 800
sentiment_classifier = NaiveBayesClassifier.train(positive_features[:split]+negative_features[:split])
nltk.classify.util.accuracy(sentiment_classifier, positive_features[:split]+negative_features[:split])*100
nltk.classify.util.accuracy(sentiment_classifier, positive_features[split:]+negative_features[split:])*100
sentiment_classifier.show_most_informative_features()
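# A minimal usage sketch (an addition, not part of the original notebook): classify a new
# piece of text with the trained classifier by reusing the same feature builder.
example_review = "A wonderful film with brilliant acting and a moving story"
example_features = build_bag_of_words_features_filtered(nltk.word_tokenize(example_review))
print(sentiment_classifier.classify(example_features))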
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: According to the S-2 data product specifications, band 4 and band 5 are represented with rasters of different sizes, which can easily be verified
Step2: The obtained results confirm that the two bands come at different resolutions, so if the user wants to implement processing that involves both bands, the S-2 product must first be RESAMPLED to a selected pixel resolution. The resampling operation can be executed directly in SNAP because it is included in the SNAP Graph Processing Framework (GPF), a wide collection of data processors that can be applied to a Sentinel data product. Each data processor is called a GPF operator and it can be invoked in the desktop version of SNAP, in Python with the snappy module, or directly on the Windows/Linux command line.
Step3: It is then possible to construct an empty HashMap object and include the selected parameters with their values in it. In this simple case only the resolution parameter will be set.
Step4: So the resolution in this specific case will be 20 meters per pixel. After the parameter definition it is possible to invoke the resampling operator using a syntax that is the same for all the GPF operators
Step5: The output variable is a new data product and all its bands are now represented with the same resolution. As a test to confirm the successful operation it is possible to look again at band 4 and 5 to see what happened with them
|
<ASSISTANT_TASK:>
Python Code:
import snappy
from snappy import ProductIO
file_path = 'C:\Program Files\snap\S2A_MSIL1C_20170202T090201_N0204_R007_T35SNA_20170202T090155.SAFE\MTD_MSIL1C.xml'
product = ProductIO.readProduct(file_path)
list(product.getBandNames())
B4 = product.getBand('B4')
B5 = product.getBand('B5')
Width_4 = B4.getRasterWidth()
Height_4 = B4.getRasterHeight()
print("Band 4 Size: " + str(Width_4) +','+ str(Height_4))
Width_5 = B5.getRasterWidth()
Height_5 = B5.getRasterHeight()
print("Band 5 Size: " + str(Width_5) +','+ str(Height_5))
from snappy import jpy
HashMap = snappy.jpy.get_type('java.util.HashMap')
parameters = HashMap()
parameters.put('targetResolution',20)
result = snappy.GPF.createProduct('Resample',parameters,product)
B4 = result.getBand('B4')
B5 = result.getBand('B5')
Width_4 = B4.getRasterWidth()
Height_4 = B4.getRasterHeight()
print("Band 4 Size: " + str(Width_4) +','+ str(Height_4))
Width_5 = B5.getRasterWidth()
Height_5 = B5.getRasterHeight()
print("Band 5 Size: " + str(Width_5) +','+ str(Height_5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's get started with some basic imports
Step2: If running in IPython notebooks, you may see a "ShimWarning" depending on the version of Jupyter you are using - this is safe to ignore.
Step3: All of these arguments are optional and will default to clevel='WARNING' if not provided. There is therefore no need to provide a filename if you don't provide a value for flevel.
Step4: This object holds all the parameters and their respective values. We'll see in this tutorial and the next tutorial on constraints how to search through these parameters and set their values.
Step5: Next, we need to define our datasets via b.add_dataset. This will be the topic of the following tutorial on datasets. Datasets store observations to compare against the model, but also tell PHOEBE at what times to compute the forward model and store passband-dependent options.
Step6: We'll then want to run our forward model to create a synthetic model of the observables defined by these datasets using b.run_compute, which will be the topic of the computing observables tutorial. The compute options tell PHOEBE how to create a synthetic model from the system parameters for the added datasets.
Step7: We can access the value of any parameter, including the arrays in the synthetic model just generated. To export arrays to a file, we could call b.export_arrays
Step8: We can then plot the resulting model with b.plot, which will be covered in the plotting tutorial.
Step9: And then lastly, if we wanted to solve the inverse problem and "fit" parameters to observational data, we may want to add distributions to our system so that we can run estimators, optimizers, or samplers.
Step10: The Bundle is just a collection of Parameter objects along with some callable methods. Here we can see that the default binary Bundle consists of over 100 individual parameters.
Step11: If we want to view or edit a Parameter in the Bundle, we first need to know how to access it. Each Parameter object has a number of tags which can be used to filter (similar to a database query). When filtering the Bundle, a ParameterSet is returned - this is essentially just a subset of the Parameters in the Bundle and can be further filtered until eventually accessing a single Parameter.
Step12: Here we filtered on the context tag for all Parameters with context='compute' (i.e. the options for computing a model). If we want to see all the available options for this tag in the Bundle, we can use the plural form of the tag as a property on the Bundle or any ParameterSet.
Step13: Although there is no strict hierarchy or order to the tags, it can be helpful to think of the context tag as the top-level tag and is often very helpful to filter by the appropriate context first.
Step14: This then tells us what can be used to filter further.
Step15: The qualifier tag is the shorthand name of the Parameter itself. If you don't know what you're looking for, it is often useful to list all the qualifiers of the Bundle or a given ParameterSet.
Step16: Now that we know the options for the qualifier within this filter, we can choose to filter on one of those. Let's look filter by the 'ntriangles' qualifier.
Step17: Once we filter far enough to get to a single Parameter, we can use get_parameter to return the Parameter object itself (instead of a ParameterSet).
Step18: As a shortcut, get_parameter also takes filtering keywords. So the above line is also equivalent to the following
Step19: Each Parameter object contains several keys that provide information about that Parameter. The keys "description" and "value" are always included, with additional keys available depending on the type of Parameter.
Step20: We can also see a top-level view of the filtered parameters and descriptions (note
Step21: Since the Parameter for ntriangles is a FloatParameter, it also includes a key for the allowable limits.
Step22: In this case, we're looking at the Parameter called ntriangles with the component tag set to 'primary'. This Parameter therefore defines how many triangles should be created when creating the mesh for the star named 'primary'. By default, this is set to 1500 triangles, with allowable values above 100.
Step23: If we choose the distortion_method qualifier from that same ParameterSet, we'll see that it has a few different keys in addition to description and value.
Step24: Since the distortion_method Parameter is a ChoiceParameter, it contains a key for the allowable choices.
Step25: We can only set a value if it is contained within this list - if you attempt to set a non-valid value, an error will be raised.
Step26: Parameter types include
Step27: However, this dictionary-style twig access will never return a ParameterSet with a single Parameter, instead it will return the Parameter itself. This can be seen in the different output between the following two lines
Step28: Because of this, this dictionary-style twig access can also set the value directly
Step29: And can even provide direct access to the keys/attributes of the Parameter (value, description, limits, etc)
Step30: As with the tags, you can call .twigs on any ParameterSet to see the "smallest unique twigs" of the contained Parameters
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
logger = phoebe.logger(clevel='WARNING')
b = phoebe.default_binary()
b.set_value(qualifier='teff', component='primary', value=6500)
b.add_dataset('lc', compute_times=phoebe.linspace(0,1,101))
b.run_compute()
print(b.get_value(qualifier='fluxes', context='model'))
afig, mplfig = b.plot(show=True)
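# Hedged sketch only (my addition; the exact call signature is an assumption, so it is left
# commented out): before running estimators, optimizers, or samplers we would typically
# attach distributions to parameters, along these lines.
# b.add_distribution({'teff@primary': phoebe.gaussian(6500, 100)}, distribution='mydist')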
b = phoebe.default_binary()
b
b.filter(context='compute')
b.contexts
b.filter(context='compute').components
b.filter(context='compute').filter(component='primary')
b.filter(context='compute', component='primary').qualifiers
b.filter(context='compute', component='primary', qualifier='ntriangles')
b.filter(context='compute', component='primary', qualifier='ntriangles').get_parameter()
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_value()
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_description()
print(b.filter(context='compute', component='primary').info)
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_limits()
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').set_value(2000)
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
b.get_parameter(context='compute', component='primary', qualifier='distortion_method')
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_description()
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_choices()
try:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('blah')
except Exception as e:
print(e)
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('rotstar')
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
b.filter(context='compute', component='primary')
b['primary@compute']
b['compute@primary']
b.filter(context='compute', component='primary', qualifier='distortion_method')
b['distortion_method@primary@compute']
b['distortion_method@primary@compute'] = 'roche'
print(b['distortion_method@primary@compute'])
print(b['value@distortion_method@primary@compute'])
print(b['description@distortion_method@primary@compute'])
b['compute'].twigs
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Root Search
Step2: This notebook demonstrates the use of low level Tensorflow Quant Finance tools for root finding using Brent's method with emphasis on the following aspects
Step3: Brent's Method
Step4: Speedup from XLA
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title Upgrade to TensorFlow 2.5+
!pip install --upgrade tensorflow
#@title Install TF Quant Finance
!pip install tf-quant-finance
#@title Imports { display-mode: "form" }
import tensorflow as tf
# tff for Tensorflow Finance
import tf_quant_finance as tff
root_search = tff.math.root_search
import warnings
warnings.filterwarnings("ignore",
category=FutureWarning) # suppress printing warnings
!nvidia-smi
#@title Search Range
number_of_tenors = 100 #@param
swap_value = 0.5 #@param
dtype = tf.float64
tenors = range(1, number_of_tenors + 1)
t = tf.constant(tenors, dtype=dtype)
v = tf.constant(swap_value, dtype=dtype)
def valuation_fn(x):
return tf.reduce_logsumexp(-x * t) - v
# Wrap with TF function for better performance
root_search_tf = tf.function(root_search.brentq)
def run_on_device(device):
with tf.device(device):
return root_search_tf(
valuation_fn, tf.constant(0, dtype=dtype), tf.constant(1, dtype=dtype))
## TFF on CPU
brent_result = run_on_device('/cpu:0')
estimated_root, objective_at_root, num_iterations, converged = brent_result
print("------------------------")
print("Tensorflow CPU (with auto-threading)")
print("Converged:", converged)
print("Estimated root:", estimated_root)
print("Objective at root:", objective_at_root)
print("Number of search steps:", num_iterations)
print("Timing:")
%timeit -n 100 run_on_device('/cpu:0')
print("------------------------")
## TFF on GPU
brent_result = run_on_device('/gpu:0')
estimated_root, objective_at_root, num_iterations, converged = brent_result
print("------------------------")
print("Tensorflow GPU")
print("Converged:", converged)
print("Estimated root:", estimated_root)
print("Objective at root:", objective_at_root)
print("Number of search steps:", num_iterations)
print("Timing:")
%timeit -n 100 run_on_device('/gpu:0')
print("------------------------")
#@title Search Range
number_of_tenors = 100 #@param
swap_value = 0.5 #@param
dtype = tf.float64
tenors = range(1, number_of_tenors + 1)
tf.compat.v1.reset_default_graph()
t = tf.constant(tenors, dtype=dtype)
v = tf.constant(swap_value, dtype=dtype)
def valuation_fn(x):
return tf.reduce_logsumexp(-x * t) - v
# Wrap with TF function for better performance
root_search_xla = tf.function(root_search.brentq, jit_compile=True)
def run_on_device(device):
with tf.device(device):
return root_search_xla(
valuation_fn, tf.constant(0, dtype=dtype), tf.constant(1, dtype=dtype))
## TFF on CPU compiled with XLA
brent_result = run_on_device('/cpu:0')
estimated_root, objective_at_root, num_iterations, converged = brent_result
print("------------------------")
print("Tensorflow CPU (compiled with XLA)")
print("Converged:", converged)
print("Estimated root:", estimated_root)
print("Objective at root:", objective_at_root)
print("Number of search steps:", num_iterations)
print("Timing:")
%timeit -n 100 brent_result = run_on_device('/cpu:0')
print("------------------------")
## TFF on GPU compiled with XLA
brent_result = run_on_device('/gpu:0')
estimated_root, objective_at_root, num_iterations, converged = brent_result
print("------------------------")
print("Tensorflow GPU (compiled with XLA)")
print("Converged:", converged)
print("Estimated root:", estimated_root)
print("Objective at root:", objective_at_root)
print("Number of search steps:", num_iterations)
print("Timing:")
%timeit -n 100 brent_result = run_on_device('/gpu:0')
print("------------------------")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now I am going to explode the model and see what it does...
Step2: The next operation explores the edges of the concept
Step3: Expansion
Step4: Let's start over with Python, shall we? Yes we shall...
|
<ASSISTANT_TASK:>
Python Code:
py = ConceptModel(['Python'])
py.concepts()
py.explode()
len(py.concepts())
py.edges()
database = ConceptModel(['Database'])
database.explode(limit=2000, level=1)
len(database.concepts())
database.edges()[:20]
py.edges()[:20]
py.augment('Standard Library')
len(py.concepts())
py.neighborhood('Standard Library')
py.abridge('Standard Library')
len(py.concepts())
py.abridge('Python')
len(py.concepts())
py.abridge('Database')
len(py.concepts())
py.remove()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our imports
Step2: The documentation for requests can be found here.
Step3: How many earthquakes were there last night, from 10 pm to 6 am? For that we have to look up Time in the documentation.
Step4: Adjust the time because of the time zone offset.
Step5: Large, combined query
Step6: All earthquakes within the approximate coordinates of Mexico. The approximate values could be looked up here.
Step7: This blob is rather hard to read, though. So we first have to reformat it and then understand how the whole thing is structured.
Step8: To be safe, we check the data type again.
Step9: Let's look at all keys and values
Step10: Features is what interests us.
Step11: Let's look at the first element
Step12: Let's extract the following
Step13: Dealing with Unix time
Step14: First experience with pandas
Step15: Let's save that.
|
<ASSISTANT_TASK:>
Python Code:
!pip install requests
!pip install pandas
import requests
import pandas as pd
import calendar, datetime, time
url = 'https://earthquake.usgs.gov/fdsnws/event/1/count?starttime=2017-09-20&endtime=2017-09-21'
response = requests.get(url)
response
response.text
url = 'https://earthquake.usgs.gov/fdsnws/event/1/count?starttime=2017-09-20T22:00:00&endtime=2017-09-21T06:00:00'  # function up to the ?, then parameters joined with &
url = 'https://earthquake.usgs.gov/fdsnws/event/1/count?starttime=2017-09-20T24:00:00&endtime=2017-09-21T08:00:00'
response = requests.get(url)
response.text
url1 = 'https://earthquake.usgs.gov/fdsnws/event/1/query?format=geojson'  # 'query' means we are searching for specific earthquakes
urlzeit = '&starttime=2017-09-17T24:00:00&endtime=2017-09-21T08:00:00'
response = requests.get(url1+urlzeit)  # concatenate the two variables
response.text  # all earthquakes worldwide in this time range
urlloc = '&minlatitude=13&maxlatitude=33&minlongitude=-120&maxlongitude=-85'
response = requests.get(url1+urlzeit+urlloc)
response.text
response.json()  # json() structures the data: a dictionary with just two keys, bbox and features, whose values in turn contain dictionaries with keys and values; features is the more interesting one
dct = response.json()
type(dct)
for key, value in dct.items():  # .items() on the dictionary yields the keys and values to print
print (key, value)
type(dct['features'][0])  # type shows the data type; [0] selects the first of the features
len(dct['features'])
for key in dct['features'][0]:  # this is how to list the keys inside one feature
print(key)
dct['features'][0]  # this is how to access the first element of features
earthquakes = []  # create an empty list
for elem in dct['features']:
    t = elem['properties']['type']  # pull the fields we want out of the properties found above
m = elem['properties']['mag']
p = elem['properties']['place']
tm = elem['properties']['time']
mini_dict = {'Type': t,
'Mag': m,
'Place': p,
    'Time': tm}  # here I build my own dictionary; the key names can be chosen freely
earthquakes.append(mini_dict)
earthquakes
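# Helper assumed by the cell below (my addition; it is never defined in this notebook):
# convert a Unix timestamp given in seconds into a readable datetime object.
def dt(timestamp):
    return datetime.datetime.utcfromtimestamp(timestamp)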
earthquakes = []
for elem in dct['features']:
t = elem['properties']['type']
m = elem['properties']['mag']
p = elem['properties']['place']
    # Trim the last three digits of the timestamp (it is in milliseconds, but the converter expects seconds):
    # turn the number into a string, slice off the last three characters, cast back to int, and pass it to dt
    tm = dt(int(str(elem['properties']['time'])[:-3]))
mini_dict = {'Type': t,
'Mag': m,
'Place': p,
'Time': tm}
earthquakes.append(mini_dict)
earthquakes
pd.DataFrame(earthquakes)  # it has to be a list of dictionaries; a plain list also works, but then there is only one column
df = pd.DataFrame(earthquakes)
df.sort_values(by='Mag', ascending=False)  # sort by 'Mag'; the default starts with the smallest value, so to begin with the strongest quakes we need ascending=False
df.to_csv('erdbeben.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from cs231n.features import color_histogram_hsv, hog_feature
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
from cs231n.features import *
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation. This ensures that each feature
# has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
X_train_feats = np.hstack([X_train_feats, np.ones((X_train_feats.shape[0], 1))])
X_val_feats = np.hstack([X_val_feats, np.ones((X_val_feats.shape[0], 1))])
X_test_feats = np.hstack([X_test_feats, np.ones((X_test_feats.shape[0], 1))])
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM
learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [1e5, 1e6, 1e7]
results = {}
best_val = -1
best_svm = None
pass
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained classifer in best_svm. You might also want to play #
# with different numbers of bins in the color histogram. If you are careful #
# you should be able to get accuracy of near 0.44 on the validation set. #
################################################################################
for lr in learning_rates:
for reg in regularization_strengths:
svm = LinearSVM()
loss_hist = svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg,
num_iters=1500, verbose=True)
training_accuracy = np.mean(svm.predict(X_train_feats) == y_train)
validation_accuracy = np.mean(svm.predict(X_val_feats) == y_val)
if best_val < validation_accuracy:
best_val = validation_accuracy
best_svm = svm
results[(lr, reg)] = (training_accuracy, validation_accuracy)
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Evaluate your trained SVM on the test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
print test_accuracy
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
idxs = np.random.choice(idxs, examples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
plt.imshow(X_test[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls_name)
plt.show()
print X_train_feats.shape
from cs231n.classifiers.neural_net import TwoLayerNet
input_dim = X_train_feats.shape[1]
hidden_dim = 50
num_classes = 10
net = TwoLayerNet(input_dim, hidden_dim, num_classes)
best_net = None
################################################################################
# TODO: Train a two-layer neural network on image features. You may want to #
# cross-validate various parameters as in previous sections. Store your best #
# model in the best_net variable. #
################################################################################
## Identical to visualization code above
def visualize(stats):
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Clasification accuracy')
plt.show()
## Train the network
stats = net.train(X_train_feats, y_train, X_val_feats, y_val,
num_iters=15000, batch_size=200,
learning_rate=1e-3, learning_rate_decay=0.91,
reg=0.028, verbose=True)
best_net = net
## Best accuracy on the validation set
stats['best_val_acc'] = max(stats['val_acc_history'])
visualize(stats)
test_acc = np.mean(net.predict(X_test_feats) == y_test)
print 'Validation accuracy: ', stats['best_val_acc'], 'Test accuracy: ', test_acc
################################################################################
# END OF YOUR CODE #
################################################################################
# Run your neural net classifier on the test set. You should be able to
# get more than 55% accuracy.
test_acc = (net.predict(X_test_feats) == y_test).mean()
print test_acc
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: OSM allows a very flexible tagging system, which gives users freedom but causes problems with consistency.
Step2: Below I list the top 20 tag keys sorted in descending order, as the whole list would be too exhaustive to read.
Step3: Looking through the data I see several things
Step4: We see that for addresses there are three related tags
Step5: Address spelling problem
Step6: We still need to manually check and replace the street names, but it is a much simpler task (a small mapping sketch follows the street/alley audits below).
Step7: There are 44 unique city names; after looking at the results, many of them turn out to be invalid.
|
<ASSISTANT_TASK:>
Python Code:
from osm_dataauditor import OSMDataAuditor
osm_data = OSMDataAuditor('jakarta_indonesia.osm')
# Basic element check
osm_data.count_element()
# Check the tag key and element
tag_keys = osm_data.get_tag_keys()
sorted(tag_keys, key=lambda x: x[1], reverse=True)[:20]
import re
# Name vs Nama (Nama is Indonesian for Name)
[item for item in tag_keys if re.match('(name|nama)$', item[0], re.I)]
# Province vs propinsi (Propinsi is Indonesian for Province)
[item for item in tag_keys if re.match('(province|propinsi)$', item[0], re.I)]
# Alamat vs address, (Alamat is Indonesian for address)
sorted([item for item in tag_keys if re.match('(addr|alamat)', item[0], re.I)], key=lambda x: x[1], reverse=True)
abbreviated_st = osm_data.audit_abbreviated_street_prefixes()
print "Total abbreviated street names:", len(abbreviated_st)
# Print the first 10 rows
abbreviated_st[:10]
abbreviated_alley = osm_data.audit_abbreviated_alley_prefixes()
print "Total abbreviated alley names:", len(abbreviated_alley)
# Print the first 10 rows
abbreviated_alley[:10]
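# A hedged illustration (my addition, not part of the audit module): a small manual mapping
# that could expand the abbreviated street/alley prefixes found above. The real cleaning map
# for Jakarta would contain more variants; 'Jalan' (street) and 'Gang' (alley) are the
# expected full forms.
prefix_mapping = {'Jl.': 'Jalan', 'Jl': 'Jalan', 'Gg.': 'Gang', 'Gg': 'Gang'}
def expand_prefix(name, mapping=prefix_mapping):
    first, _, rest = name.partition(' ')
    expanded = mapping.get(first, first)
    return expanded + ' ' + rest if rest else expanded
expand_prefix('Jl. Jend. Sudirman')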
closely_matched = osm_data.audit_address_similar_names()
closely_matched = sorted(list(closely_matched), key=lambda x: x[2], reverse=True)
print "Total similiar item found: ", len(closely_matched)
# Display top 10 with score
[(reference, found, score) for reference, found, score in closely_matched[:10]]
city_names = osm_data.audit_city()
print "Different city names: ", len(city_names)
from pymongo import MongoClient
client = MongoClient()
db = client['osm_data_import']
# Number of document
db.jakarta.find().count()
# Number of nodes
db.jakarta.find({'type': 'node'}).count()
# Number of way
db.jakarta.find({'type': 'way'}).count()
# Number of unique user
len(db.jakarta.distinct('created.user'))
# Top 10 contributing user
list(db.jakarta.aggregate([{'$group': {'_id': '$created.user', 'count': {'$sum': 1}}}, {'$sort':{'count':-1}}, {'$limit':10}]))
# Place of worship breakdown
list(db.jakarta.aggregate([
{"$match":{"amenity":{"$exists":1}, "amenity":"place_of_worship"}},
{"$group":{"_id":"$religion", "count":{"$sum":1}}},
{"$sort":{"count":-1}}
]))
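# One more illustrative query (my addition): the most common amenity values overall,
# using only fields already referenced above.
list(db.jakarta.aggregate([
    {"$match": {"amenity": {"$exists": 1}}},
    {"$group": {"_id": "$amenity", "count": {"$sum": 1}}},
    {"$sort": {"count": -1}},
    {"$limit": 10}
]))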
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature Evaluation Pipeline
Step2: The function above does a few things. First, it downsamples the negative (non-fraud) values. This was reported by a number of top-50 people on the forums, and gold medal winners in previous competitions have used this technique as well, the idea being that fraud is unique and it shouldn't matter much which non-frauds we train against, since the frauds are a distinct class and should have some level of 'difference' about them. Think outliers. This would also be a good time to mention that when we train for submission, we can add diversity to our models by ensembling batches trained with different samples of non-fraud values =).
Step3: Do It
Step4: If we were to look at regular gain or splits, the V columns would dominate the show. But here we accurately see they really aren't that important in the grand scheme of things. Notice how some of our variables have a very high CVS score, especially the high cardinality categorical variables, like card2 for example. While the unique values present for card2 in train and test are very similar, their distributions shift a bit
Step5: Not much we can do there. I'm not advocating removing card2 as a variable; but I am advocating we use these results to draw our attention to possible issues. So for example, the appropriate thing to do here would be to attempt the removal of card2 and observe how it affects the model's mean and std AUC. That is the ultimate 'measure'.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from time import time
import datetime
import lightgbm as lgb
import gc, warnings, json
import seaborn as sns
from sklearn.metrics import precision_score, recall_score, confusion_matrix, accuracy_score
from sklearn.metrics import roc_auc_score, f1_score, roc_curve, auc,precision_recall_curve
from scipy import interp
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
traintr = pd.read_csv('input/train_transaction.csv.zip')
trainid = pd.read_csv('input/train_identity.csv.zip')
testtr = pd.read_csv('input/test_transaction.csv.zip')
testid = pd.read_csv('input/test_identity.csv.zip')
# For each categorical variable, we'd like to experiment with
# the count of appearances within that day's hour
# This will only work if the distributions (counts) are similar in train + test
START_DATE = '2017-11-30'
startdate = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
traintr['tdt'] = traintr['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x)))
traintr['thour'] = traintr.tdt.dt.hour
traintr['tmonth'] = (traintr.tdt.dt.year-2017) * 12 + traintr.tdt.dt.month
traintr['tweek'] = 52 * (traintr.tdt.dt.year-2017) + traintr.tdt.dt.weekofyear
traintr['tdoy'] = 365 * (traintr.tdt.dt.year-2017) + traintr.tdt.dt.dayofyear
traintr.tdoy -= traintr.tdoy.min() # 0-offset
testtr['tdt'] = testtr['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x)))
testtr['thour'] = testtr.tdt.dt.hour
testtr['tmonth'] = (testtr.tdt.dt.year-2017) * 12 + testtr.tdt.dt.month
testtr['tweek'] = 52 * (testtr.tdt.dt.year-2017) + testtr.tdt.dt.weekofyear
testtr['tdoy'] = 365 * (testtr.tdt.dt.year-2017) + testtr.tdt.dt.dayofyear
traintr.tdt.min()
traintr.groupby('tmonth').size()
traintr.tdoy.min(),traintr.tdoy.max()
SEED = 2**8+1
# TODO: Optimize
lgb_params = {
'objective':'binary',
'boosting_type':'gbdt',
'metric':'auc',
'n_jobs':-1,
'learning_rate':0.01,
'num_leaves': 2**5, # 5-8
'max_depth':-1,
'tree_learner':'serial',
'colsample_bytree': 0.7,
'subsample_freq':1,
'subsample':0.7,
'max_bin':255,
'verbose':-1,
'seed': SEED,
'feature_fraction_seed': SEED + 2,
'bagging_seed': SEED + 3,
'drop_seed': SEED + 4,
'data_random_seed': SEED + 5,
}
def display_report(report):
print('{} Folds Used'.format(len(report['folds'])))
print('{} Neg DownSample Frac with {} Seed'.format(report['downsample_frac'], report['downsample_seed']))
print('{} AVG AUC, {} STD'.format(np.round(report['avg_auc'],3), np.round(report['std_auc'],3)))
print('{} AVG Rounds, {} Rounds'.format(report['avg_iterations'], report['std_iterations']), end='\n\n')
features = pd.DataFrame({
'feature': report['features'],
'adversarial': list(report['cvs'].values()),
'perm_import': list(report['avg_permutation_importance'].values()),
'perm_import_std': list(report['std_permutation_importance'].values()),
})
features.sort_values(['perm_import','adversarial'], ascending=False, inplace=True)
sns_df = pd.DataFrame({
'feature' : sum([list(fold['permutation_importance'].keys()) for fold in results['folds']], []),
'perm_import': sum([list(fold['permutation_importance'].values()) for fold in results['folds']], []),
})
sns_df.sort_values(['feature','perm_import'], ascending=False, inplace=True)
print(report['params'])
return features, sns_df
def compare_reports(report1, report2):
pass
def run_evaluation(data, features, params, downsample_seed=None, downsample_frac=0.2, save_file_path=None):
# NOTE: data should contain, at minimal, all train + test samples,
# along with the isFraud column, for separation and scoring purposes.
gc.collect()
# Run evaluation and store results in a report
# Steps:
# 1) [x] Negative Downsample non-frauds
# 2) [x] Run adversarial validation on features + record scores
# 3) [x] Train on 50% overlapping folds on the trainset
# 3b) [x] Perform permutation importance (soon to be drop importance) each fold
# 4) [x] Aggregate and save results
report = {
'features': features,
'params': params,
'downsample_seed': downsample_seed,
'downsample_frac': downsample_frac,
'cvs': {},
'folds': [],
'avg_permutation_importance': {},
'std_permutation_importance': {},
}
######################
print('\n# 1) [x] Negative Downsample (non-frauds)')
if downsample_seed is None:
selection = data.copy()
else:
np.random.seed(downsample_seed)
normies = data[data.isFraud==0].index.values
normies = np.random.choice(
normies,
int(data.shape[0]*downsample_frac),
replace=False
)
selection = data[data.index.isin(
# All fruds and a number of normies
np.concatenate([normies, data[data.isFraud==1].index.values])
)].copy()
print(selection.shape[0], 'total train samples!')
if selection.shape[0] > data.isFraud.isna().sum():
# If we have more train samples than test samples, use all test samples
selection_test = data[data.isFraud.isna()]
else:
# Use a balanced set of test samples
selection_test = np.random.choice(
data[data.isFraud.isna()].index.values,
selection.shape[0],
replace=False
)
selection_test = data[data.index.isin(selection_test)]
######################
print('\n# 2) [x] Run adversarial validation (CVS) on features + record scores')
# Build CVS dataset
cvsdata = selection.append(selection_test, sort=False)
cvsdata.reset_index(inplace=True)
cvsdata['which_set'] = (np.arange(cvsdata.shape[0]) >= selection.shape[0]).astype(np.uint8)
cvsdata = cvsdata.sample(frac=1).reset_index(drop=True) # Shuffle the thing
trn_cvs = cvsdata.index < (cvsdata.shape[0] // 2)
for col in features:
trn_lgb = lgb.Dataset(cvsdata[trn_cvs][[col]], label=cvsdata[trn_cvs].which_set)
val_lgb = lgb.Dataset(cvsdata[~trn_cvs][[col]], label=cvsdata[~trn_cvs].which_set)
clf = lgb.train(
params,
trn_lgb,
valid_sets = [trn_lgb, val_lgb],
verbose_eval = 200,
early_stopping_rounds = 25,
num_boost_round = 80000,
)
report['cvs'][col] = clf.best_score['valid_1']['auc'] - 0.5 # 0.5 = 0, best score
del cvsdata, trn_lgb, val_lgb, trn_cvs; gc.collect()
######################
print('\n#3) [x] Train on 50% overlapping folds on the trainset')
for fold_, i in enumerate(range(0,57,14)):
gc.collect()
fold = {
'fold_num': fold_,
'trn_range': [i,i+90],
'val_range': [i+90+15,i+90+15+20],
}
print('\nFold', fold_+1, 'โ Train', fold['trn_range'], 'โ Test', fold['val_range'])
trn = selection[selection.tdoy.between(i, 90+i)]
val = selection[selection.tdoy.between(90+i+15, 90+i+15+20)].copy()
trn_lgb = lgb.Dataset(trn[features], label=trn.isFraud)
val_lgb = lgb.Dataset(val[features], label=val.isFraud)
clf = lgb.train(
params,
trn_lgb,
valid_sets = [trn_lgb, val_lgb],
verbose_eval = 200,
early_stopping_rounds = 25,
num_boost_round = 80000,
#categorical_feature=[]
)
baseline = clf.best_score['valid_1']['auc']
fold['auc'] = baseline
fold['iterations'] = clf.best_iteration
print('baseline - ', baseline)
######################
# TODO: Repalce with Drop importance
print('\n#3b) [x] Perform permutation importance (soon to be drop importance) each fold')
perm = {}
for col in features:
backup = val[col].values.copy()
val[col] = np.random.permutation(val[col].values)
y_true = clf.predict(val[features])
perm[col] = baseline - roc_auc_score(val.isFraud, y_true)
val[col] = backup
fold['permutation_importance'] = perm
report['folds'].append(fold)
######################
print('\n# 4) [x] Aggregate and save results')
aucs = [fold['auc'] for fold in report['folds']]
report['avg_auc'] = np.mean(aucs)
report['std_auc'] = np.std(aucs)
iterations = [fold['iterations'] for fold in report['folds']]
report['avg_iterations'] = np.mean(iterations)
report['std_iterations'] = np.std(iterations)
for feature in features:
pi = [fold['permutation_importance'][feature] for fold in report['folds']]
report['avg_permutation_importance'][feature] = np.mean(pi)
report['std_permutation_importance'][feature] = np.std(pi)
if save_file_path is not None:
with open(save_file_path, 'w', encoding='utf-8') as f:
json.dump(report, f, ensure_ascii=False, indent=4)
gc.collect()
return report
data = traintr.append(testtr, sort=False)
data.reset_index(inplace=True)
features = [
'TransactionAmt',
'ProductCD',
'card1',
'card2',
'card3',
'card4',
'card5',
'card6',
'addr1',
'addr2',
'dist1',
'dist2',
'P_emaildomain',
'R_emaildomain',
'D3',
'D1',
'V286',
'V100',
'thour',
]
# LE:
for col in features:
if data[col].dtype!='O': continue
print('Found str', col, '... encoding!')
mapper = {key:val for val,key in enumerate(data[col].unique())}
data[col] = data[col].map(mapper)
results = run_evaluation(
data,
features,
lgb_params,
downsample_seed=1773,
downsample_frac=0.2,
save_file_path='./report_test.json' # persist the results to a file
)
results
report, sns_features = display_report(results)
plt.figure(figsize=(16, 16))
sns.barplot(x="perm_import", y="feature", data=sns_features, edgecolor=('white'), linewidth=2)#, palette="rocket")
plt.title('Permutation Importance (averaged/folds)', fontsize=18)
plt.tight_layout()
report
a = set(traintr.card2.unique())
b = set(testtr.card2.unique())
len(a-b), len(b-a)
traintr.card2.value_counts().head(15)
testtr.card2.value_counts().head(15)
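# A quick sketch (my addition): compare the normalized card2 frequencies in train and test
# to quantify the distribution shift that the high CVS score pointed at.
card2_freq = pd.concat(
    [traintr.card2.value_counts(normalize=True), testtr.card2.value_counts(normalize=True)],
    axis=1, keys=['train', 'test']
).fillna(0)
card2_freq['abs_gap'] = (card2_freq.train - card2_freq.test).abs()
card2_freq.sort_values('abs_gap', ascending=False).head(10)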
def submission(num_boost_rounds=0):
# We train using fixed num_boost_rounds
train_groups = []
for month_start in range(4):
# using 3x dif seeds each
months = [12 + month_start, 12 + month_start + 1, 12 + month_start + 2]
train_groups.append(months)
# Then using double num_boost_rounds
train_groups += [[12,13,14,15,16,17]] #dif seed 2x
train_groups += [[12,13,14,15,16,17]] #dif seed 2x
return train_groups
submission()
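# A hedged sketch (my addition, tying back to the ensembling idea above): repeating the
# evaluation with different negative-downsample seeds shows how much diversity the
# downsampling adds. The seed values here are arbitrary, and this is slow to run.
seed_aucs = {}
for seed in [1773, 42, 2019]:
    rep = run_evaluation(data, features, lgb_params, downsample_seed=seed, downsample_frac=0.2)
    seed_aucs[seed] = rep['avg_auc']
print(seed_aucs)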
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: That command downloads the file and, because of the redirection operator, >, the output gets written to tsla.pdf up in the /tmp directory.
Step2: (Don't worry about those warnings.)
Step3: Once you have text output, you can perform whatever analysis you'd like without having to worry about the data coming in PDF form. For example, you might want to run some analysis on financial documents but they are all in PDF. First, convert to text and then perform your analysis.
Step4: Text processing from the command line
Step5: Exercise
Step6: Exercise
Step7: Exercise
Step8: Exercise
|
<ASSISTANT_TASK:>
Python Code:
! curl https://www.tesla.com/sites/default/files/tesla-model-s.pdf > /tmp/tsla.pdf
! pdftotext /tmp/tsla.pdf # saves into /tmp/tsla.txt
! head -10 /tmp/tsla.txt
with open('/tmp/tsla.txt') as f:
print(f.read().split()[:100])
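# A small illustration (my addition): once the PDF is plain text, ordinary Python tooling
# works on it - for example, a quick word-frequency count.
from collections import Counter
with open('/tmp/tsla.txt') as f:
    words = f.read().lower().split()
Counter(words).most_common(10)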
! tr -s '\n' ' ' < /tmp/tsla.txt | head -c 200
! tr -s '\n' ' ' < /tmp/tsla.txt | fold -s | head -10
! tr -s '\n' ' ' < /tmp/tsla.txt | fold -s | nl | head -10
! tr 'A-Z' 'a-z' < /tmp/tsla.txt | head -c 150
! tr -s '\n' ' ' < /tmp/tsla.txt | tr 'A-Z' 'a-z' | head -c 150
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'miroc-es2h', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Your task starts here
Step4: The loss you should try to minimize is the Hinge Loss
Step5: Training
Step6: Implement gradient descent with momentum and test its performance for different learning rate and momentum values.
Step7: Same task but for Nesterov's accelerated gradient
Step8: Same task but for AdaGrad
Step9: Same task but for AdaDelta
Step10: Same task for Adam algorithm. You can start with beta = 0.9 and mu = 0.999
|
<ASSISTANT_TASK:>
Python Code:
def expand(X):
    # X has shape [n_samples, 2]; build the 6-column quadratic feature expansion
    # [x1, x2, x1^2, x2^2, x1*x2, 1].
    X0 = tf.transpose(tf.gather(tf.transpose(X), [0]))  # first input column, shape [n, 1]
    X1 = tf.transpose(tf.gather(tf.transpose(X), [1]))  # second input column, shape [n, 1]
    X_ = tf.concat([X, X ** 2, X0 * X1, tf.ones(shape=(X.shape[0], 1))], axis=1)
    return X_
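# Quick sanity check of expand() (illustrative only; assumes the TF1-style graph/session
# API used throughout this notebook). For x = (1, 2) the expanded feature vector should
# be [x1, x2, x1^2, x2^2, x1*x2, 1] = [1, 2, 1, 4, 2, 1].
with tf.Session() as check_sess:
    print(check_sess.run(expand(tf.constant([[1.0, 2.0]]))))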
def classify(X, w):
    """
    Given feature matrix X [n_samples,2] and weight vector w [6],
    return an array of +1 or -1 predictions.
    """
    pass
def compute_loss(X, y, w):
    """
    Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute the scalar loss function using the formula above.
    """
    pass
def compute_grad(X, y, w):
    """
    Given feature matrix X [n_samples,2], target vector [n_samples] of +1/-1,
    and weight vector w [6], compute the vector [6] of derivatives of L over each weight.
    """
    pass
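# Illustrative sketch only (not the official solution): one way the three stubs above
# could be realised for the 6-feature expansion and the hinge loss
#   L(w) = mean(max(0, 1 - y * <w, expand(x)>)).
# The `reference_*` names are hypothetical; X, y and w are assumed to be TF tensors
# (e.g. placeholders/variables), with y holding +1/-1 values as float32.
def reference_classify(X, w):
    scores = tf.squeeze(tf.matmul(expand(X), tf.reshape(w, (6, 1))))
    return tf.sign(scores)  # +1 / -1 predictions

def reference_compute_loss(X, y, w):
    scores = tf.squeeze(tf.matmul(expand(X), tf.reshape(w, (6, 1))))
    return tf.reduce_mean(tf.maximum(0.0, 1.0 - y * scores))  # hinge loss

def reference_compute_grad(X, y, w):
    return tf.gradients(reference_compute_loss(X, y, w), w)[0]  # dL/dw, shape [6]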
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the SGD update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
mu = 0 # momentum
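# Reminder of the classical momentum update (one common formulation; v starts at 0):
#   v_t = mu * v_{t-1} + alpha * g_t
#   w_t = w_{t-1} - v_t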
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the momentum update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
mu = 0 # momentum
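# Reminder of Nesterov's accelerated gradient (one common formulation; v starts at 0):
#   v_t = mu * v_{t-1} + alpha * grad(w_{t-1} - mu * v_{t-1})
#   w_t = w_{t-1} - v_t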
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the Nesterov update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
mu = 0 # momentum
eps = 1e-8 # A small constant for numerical stability
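# Reminder of the AdaGrad update (G accumulates squared gradients, starting at 0):
#   G_t = G_{t-1} + g_t ** 2
#   w_t = w_{t-1} - alpha * g_t / (sqrt(G_t) + eps)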
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the AdaGrad update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
beta = 0
mu = 0 # momentum
eps = 1e-8 # A small constant for numerical stability
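# Reminder of the AdaDelta update (one common formulation; here the provided `mu`
# hyperparameter can play the role of the decay rate rho):
#   E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t ** 2
#   dw_t      = -sqrt(E[dw^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
#   E[dw^2]_t = rho * E[dw^2]_{t-1} + (1 - rho) * dw_t ** 2
#   w_t       = w_{t-1} + dw_t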
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the AdaDelta update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
w = np.array([1, 0, 0, 0, 0, 0], dtype=np.float32)  # use a NumPy dtype for a NumPy array
alpha = 0 # learning rate
beta = 0 # (beta1 coefficient in original paper) exponential decay rate for the 1st moment estimates
mu = 0 # (beta2 coefficient in original paper) exponential decay rate for the 2nd moment estimates
eps = 1e-8 # A small constant for numerical stability
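# Reminder of the Adam update (Kingma & Ba), mapping beta -> beta1 and mu -> beta2
# as suggested in the task description:
#   m_t = beta * m_{t-1} + (1 - beta) * g_t
#   v_t = mu * v_{t-1} + (1 - mu) * g_t ** 2
#   m_hat = m_t / (1 - beta ** t),  v_hat = v_t / (1 - mu ** t)
#   w_t = w_{t-1} - alpha * m_hat / (sqrt(v_hat) + eps)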
n_iter = 50
batch_size = 4
loss = np.zeros(n_iter)
plt.figure(figsize=(12,5))
with tf.Session() as sess:
for i in range(n_iter):
ind = random.sample(range(X.shape[0]), batch_size)
loss[i] = sess.run(...)  # TODO: run the Adam update step and fetch the current loss value
visualize(X[ind,:], y[ind], w, loss, n_iter)
w = sess.run(...)  # TODO: fetch the trained weight vector after the loop
visualize(X, y, w, loss, n_iter)
plt.clf()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating the data
Step2: To combine the two lists and pair each name with its birth count, we use the zip function.
Step3: Converting the zip object to a list lets us reuse the pairs as a list, which is what we do here.
Step4: Note
Step5: The data frame stored in df looks as follows.
Step6: Let's save this data frame to a CSV file named births1880.csv.
Step7: Loading the data
Step8: Inspecting df, it looks similar to the original data.
Step9: There is one problem, however.
Step10: Because no names were specified for the column headers, the default indices 0, 1, 2, ... were used instead.
Step11: The row numbers 0, 1, 2, 3, 4 act as the index that is included in every data frame object by default.
Step12: Data cleaning
Step13: The result above contains the following information.
Step14: Data analysis
Step15: Now we only need to look at the first row.
Step16: Method 2
Step17: Data visualization
Step18: Therefore, to obtain the Births column, issue the command shown below.
Step19: Using df['Births'].max() from earlier, which finds the maximum number of births, we can look up the matching name.
Step20: According to the result above, only the row with index 4 (the fifth row) holds the maximum birth count of 973 identified earlier.
Step21: The visualization can also be annotated with more information.
|
<ASSISTANT_TASK:>
Python Code:
# Import the DataFrame and read_csv functions from the pandas module
from pandas import DataFrame, read_csv
# Import the matplotlib.pyplot and pandas modules under the aliases plt and pd, respectively
import matplotlib.pyplot as plt
import pandas as pd
# Magic command used to display plots directly inside a Jupyter notebook;
# it is not needed in a standalone Python editor
%matplotlib inline
# Lists of baby names and the number of birth registrations for each
names = ['Bob', 'Jessica', 'Mary', 'John', 'Mel']
births = [968, 155, 77, 578, 973]
for item in zip(names, births):
name, num = item
print(num, "babies were registered under the name", name)
BabyDataSet = list(zip(names, births))
print(BabyDataSet)
df = pd.DataFrame(data = BabyDataSet, columns = ['Names', 'Births'])
df
df.to_csv('births1880.csv', index = False, header = False)
Location = 'births1880.csv'
df = pd.read_csv(Location)
df
df = pd.read_csv(Location, header=None)
df
df = pd.read_csv(Location, names=['Names','Births'])
df
import os
os.remove('births1880.csv')
df.dtypes
df.Births.dtype
Sorted = df.sort_values(['Births'], ascending=False)
Sorted.head(1)
df['Births'].max()
df['Births'].plot()
plt.xlabel("Time")
plt.show()
df['Births']
df['Names'][df['Births'] == df['Births'].max()]
Sorted['Names'].head(1)
# Create the graph
df['Births'].plot()
# Maximum value in the data set
MaxValue = df['Births'].max()
# Name associated with the maximum value
MaxName = df['Names'][df['Births'] == df['Births'].max()].values
# Text to display on the graph
Text = str(MaxValue) + " - " + MaxName
# Add the text to the graph
plt.annotate(Text, xy=(1, MaxValue), xytext=(8, 0),
xycoords=('axes fraction', 'data'), textcoords='offset points')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recurrent Neural Networks (RNN) with Keras
Step2: Built-in RNN layers
Step3: Built-in RNNs support a number of useful features
Step4: In addition, a RNN layer can return its final internal state(s). The returned states
Step5: RNN layers and RNN cells
Step6: RNN State Reuse
Step7: Bidirectional RNNs
Step8: Under the hood, Bidirectional will copy the RNN layer passed in, and flip the
Step9: Let's load the MNIST dataset
Step10: Let's create a model instance and train it.
Step11: Now, let's compare to a model that does not use the CuDNN kernel
Step12: When running on a machine with a NVIDIA GPU and CuDNN installed,
Step13: RNNs with list/dict inputs, or nested inputs
Step14: Build a RNN model with nested input/output
Step15: Train the model with randomly generated data
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential()
# Add an Embedding layer expecting input vocab of size 1000, and
# output embedding dimension of size 64.
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# Add a LSTM layer with 128 internal units.
model.add(layers.LSTM(128))
# Add a Dense layer with 10 units.
model.add(layers.Dense(10))
model.summary()
model = keras.Sequential()
model.add(layers.Embedding(input_dim=1000, output_dim=64))
# The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256)
model.add(layers.GRU(256, return_sequences=True))
# The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128)
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(10))
model.summary()
encoder_vocab = 1000
decoder_vocab = 2000
encoder_input = layers.Input(shape=(None,))
encoder_embedded = layers.Embedding(input_dim=encoder_vocab, output_dim=64)(
encoder_input
)
# Return states in addition to output
output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")(
encoder_embedded
)
encoder_state = [state_h, state_c]
decoder_input = layers.Input(shape=(None,))
decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)(
decoder_input
)
# Pass the 2 states to a new LSTM layer, as initial state
decoder_output = layers.LSTM(64, name="decoder")(
decoder_embedded, initial_state=encoder_state
)
output = layers.Dense(10)(decoder_output)
model = keras.Model([encoder_input, decoder_input], output)
model.summary()
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
output = lstm_layer(paragraph3)
# reset_states() will reset the cached state to the original initial_state.
# If no initial_state was provided, zero-states will be used by default.
lstm_layer.reset_states()
paragraph1 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph2 = np.random.random((20, 10, 50)).astype(np.float32)
paragraph3 = np.random.random((20, 10, 50)).astype(np.float32)
lstm_layer = layers.LSTM(64, stateful=True)
output = lstm_layer(paragraph1)
output = lstm_layer(paragraph2)
existing_state = lstm_layer.states
new_lstm_layer = layers.LSTM(64)
new_output = new_lstm_layer(paragraph3, initial_state=existing_state)
model = keras.Sequential()
model.add(
layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10))
)
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(10))
model.summary()
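# The forward and backward outputs are concatenated by default; the `merge_mode`
# argument of Bidirectional offers other behaviours ("sum", "mul", "ave", None).
# For example, a summed merge keeps the per-timestep width at 32 instead of 64:
summed = layers.Bidirectional(layers.LSTM(32, return_sequences=True), merge_mode="sum")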
batch_size = 64
# Each MNIST image batch is a tensor of shape (batch_size, 28, 28).
# Each input sequence will be of size (28, 28) (height is treated like time).
input_dim = 28
units = 64
output_size = 10 # labels are from 0 to 9
# Build the RNN model
def build_model(allow_cudnn_kernel=True):
# CuDNN is only available at the layer level, and not at the cell level.
# This means `LSTM(units)` will use the CuDNN kernel,
# while RNN(LSTMCell(units)) will run on non-CuDNN kernel.
if allow_cudnn_kernel:
# The LSTM layer with default options uses CuDNN.
lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim))
else:
# Wrapping a LSTMCell in a RNN layer will not use CuDNN.
lstm_layer = keras.layers.RNN(
keras.layers.LSTMCell(units), input_shape=(None, input_dim)
)
model = keras.models.Sequential(
[
lstm_layer,
keras.layers.BatchNormalization(),
keras.layers.Dense(output_size),
]
)
return model
mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
sample, sample_label = x_train[0], y_train[0]
model = build_model(allow_cudnn_kernel=True)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
noncudnn_model = build_model(allow_cudnn_kernel=False)
noncudnn_model.set_weights(model.get_weights())
noncudnn_model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer="sgd",
metrics=["accuracy"],
)
noncudnn_model.fit(
x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1
)
import matplotlib.pyplot as plt
with tf.device("CPU:0"):
cpu_model = build_model(allow_cudnn_kernel=True)
cpu_model.set_weights(model.get_weights())
result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1)
print(
"Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label)
)
plt.imshow(sample, cmap=plt.get_cmap("gray"))
class NestedCell(keras.layers.Layer):
def __init__(self, unit_1, unit_2, unit_3, **kwargs):
self.unit_1 = unit_1
self.unit_2 = unit_2
self.unit_3 = unit_3
self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])]
super(NestedCell, self).__init__(**kwargs)
def build(self, input_shapes):
# expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
i1 = input_shapes[0][1]
i2 = input_shapes[1][1]
i3 = input_shapes[1][2]
self.kernel_1 = self.add_weight(
shape=(i1, self.unit_1), initializer="uniform", name="kernel_1"
)
self.kernel_2_3 = self.add_weight(
shape=(i2, i3, self.unit_2, self.unit_3),
initializer="uniform",
name="kernel_2_3",
)
def call(self, inputs, states):
# inputs should be in [(batch, input_1), (batch, input_2, input_3)]
# state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
input_1, input_2 = tf.nest.flatten(inputs)
s1, s2 = states
output_1 = tf.matmul(input_1, self.kernel_1)
output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3)
state_1 = s1 + output_1
state_2_3 = s2 + output_2_3
output = (output_1, output_2_3)
new_states = (state_1, state_2_3)
return output, new_states
def get_config(self):
return {"unit_1": self.unit_1, "unit_2": unit_2, "unit_3": self.unit_3}
unit_1 = 10
unit_2 = 20
unit_3 = 30
i1 = 32
i2 = 64
i3 = 32
batch_size = 64
num_batches = 10
timestep = 50
cell = NestedCell(unit_1, unit_2, unit_3)
rnn = keras.layers.RNN(cell)
input_1 = keras.Input((None, i1))
input_2 = keras.Input((None, i2, i3))
outputs = rnn((input_1, input_2))
model = keras.models.Model([input_1, input_2], outputs)
model.compile(optimizer="adam", loss="mse", metrics=["accuracy"])
input_1_data = np.random.random((batch_size * num_batches, timestep, i1))
input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3))
target_1_data = np.random.random((batch_size * num_batches, unit_1))
target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3))
input_data = [input_1_data, input_2_data]
target_data = [target_1_data, target_2_data]
model.fit(input_data, target_data, batch_size=batch_size)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: <h3> Create a simple machine learning model </h3>
Step4: <h3> What is wrong with calculating RMSE on the training and test data as follows? </h3>
Step6: Hint
Step8: <h2> Using HASH of date to split the data </h2>
Step10: We can now use the alpha to compute RMSE. Because the alpha value is repeatable, we don't need to worry that the alpha in the compute_rmse will be different from the alpha computed in the compute_alpha.
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import bigquery
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
(
SELECT
RAND() AS splitfield,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
)
WHERE
splitfield < 0.8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results["alpha"][0]
print(alpha)
compute_rmse = """
#standardSQL
SELECT
dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' )
GROUP BY
dataset
"""
bigquery.Client().query(
compute_rmse.replace("ALPHA", str(alpha))
).to_dataframe()
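# Note: RAND() is re-evaluated on every query, so the 80/20 split drawn here does not
# match the split used when alpha was estimated above; rows can switch between train
# and eval across the two queries, which makes this "eval" RMSE an unreliable held-out
# estimate. The FARM_FINGERPRINT(date)-based split further below is repeatable and
# avoids that problem.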
train_and_eval_rand = """
#standardSQL
WITH
alldata AS (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' ),
training AS (
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
alldata
WHERE
dataset = 'train' )
SELECT
MAX(alpha) AS alpha,
dataset,
SQRT(
AVG(
(arrival_delay - alpha * departure_delay) *
(arrival_delay - alpha * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
alldata,
training
GROUP BY
dataset
"""
bigquery.Client().query(train_and_eval_rand).to_dataframe()
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
AND ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results["alpha"][0]
print(alpha)
compute_rmse = """
#standardSQL
SELECT
IF(ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8, 'train', 'eval') AS dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
GROUP BY
dataset
"""
print(
bigquery.Client()
.query(compute_rmse.replace("ALPHA", str(alpha)))
.to_dataframe()
.head()
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Exercise 2
Step3: Exercise 3
Step4: Exercise 3.1
Step5: Exercise 3.2
|
<ASSISTANT_TASK:>
Python Code:
birds = pd.read_csv('../../data/bird_tracking.csv')
birds.head()
# TODO
# TODO
import cartopy.crs as ccrs
plt.figure(figsize=(10,10))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
# TODO 1: comment in/out and see what happens
# ax.set_extent((-25,20,52,10))
# TODO 2: draw the single tracks with title and legend as in Exercise 2
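# A possible reference sketch for TODO 2 (not the official solution; the column names
# bird_name, latitude and longitude are assumed from the birds.head() preview above):
for name in birds.bird_name.unique():
    sel = birds.bird_name == name
    ax.plot(birds.longitude[sel], birds.latitude[sel], ".", markersize=1,
            transform=ccrs.Geodetic(), label=name)
plt.title("Bird migration tracks")
plt.legend(loc="upper left")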
import cartopy.feature as cfeature
# TODO add your code here
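# One possible sketch (illustrative only; the exact requirement of Exercise 3.2 is not
# shown here) overlaying built-in Natural Earth features on a new map:
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.BORDERS, linestyle=":")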
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.
Step2: We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).
Step3: Polynomial_sframe function
Step4: To test your function consider the smaller tmp variable and what you would expect the outcome of the following call
Step5: Visualizing polynomial regression
Step6: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
Step7: Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.
Step8: NOTE
Step9: Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'.
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
tmp = graphlab.SArray([1., 2., 3.])
tmp_cubed = tmp.apply(lambda x: x**3)
print tmp
print tmp_cubed
ex_sframe = graphlab.SFrame()
ex_sframe['power_1'] = tmp
print ex_sframe
def polynomial_sframe(feature, degree):
    # assume that degree >= 1
    # initialize the SFrame:
    poly_sframe = graphlab.SFrame()
    # and set poly_sframe['power_1'] equal to the passed feature
    poly_sframe['power_1'] = feature
    # first check if degree > 1
    if degree > 1:
        # then loop over the remaining degrees:
        # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree
        for power in range(2, degree + 1):
            # first we'll give the column a name:
            name = 'power_' + str(power)
            # then assign poly_sframe[name] to the appropriate power of feature
            poly_sframe[name] = feature.apply(lambda x: x ** power)
    return poly_sframe
print polynomial_sframe(tmp, 3)
sales = graphlab.SFrame('kc_house_data.gl/')
sales = sales.sort(['sqft_living', 'price'])
poly1_data = polynomial_sframe(sales['sqft_living'], 1)
poly1_data['price'] = sales['price'] # add price to the data since it's the target
model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)
#let's take a look at the weights before we plot
model1.get("coefficients")
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(poly1_data['power_1'],poly1_data['price'],'.',
poly1_data['power_1'], model1.predict(poly1_data),'-')
poly2_data = polynomial_sframe(sales['sqft_living'], 2)
my_features = poly2_data.column_names() # get the name of the features
poly2_data['price'] = sales['price'] # add price to the data since it's the target
model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)
model2.get("coefficients")
plt.plot(poly2_data['power_1'],poly2_data['price'],'.',
poly2_data['power_1'], model2.predict(poly2_data),'-')
<END_TASK>
|