Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
14,000
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
IMPORT_FILE = "gs://cloud-samples-data/vision/salads.csv"
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
dataset = aip.ImageDataset.create(
display_name="Salads" + "_" + TIMESTAMP,
gcs_source=[IMPORT_FILE],
import_schema_uri=aip.schema.dataset.ioformat.image.bounding_box,
)
print(dataset.resource_name)
dag = aip.AutoMLImageTrainingJob(
display_name="salads_" + TIMESTAMP,
prediction_type="object_detection",
multi_label=False,
model_type="CLOUD",
base_model=None,
)
print(dag)
model = dag.run(
dataset=dataset,
model_display_name="salads_" + TIMESTAMP,
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
budget_milli_node_hours=20000,
disable_early_stopping=False,
)
# Get model resource ID
models = aip.Model.list(filter="display_name=salads_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
test_items = !gsutil cat $IMPORT_FILE | head -n2
cols_1 = str(test_items[0]).split(",")
cols_2 = str(test_items[1]).split(",")
if len(cols_1) == 11:
test_item_1 = str(cols_1[1])
test_label_1 = str(cols_1[2])
test_item_2 = str(cols_2[1])
test_label_2 = str(cols_2[2])
else:
test_item_1 = str(cols_1[0])
test_label_1 = str(cols_1[1])
test_item_2 = str(cols_2[0])
test_label_2 = str(cols_2[1])
print(test_item_1, test_label_1)
print(test_item_2, test_label_2)
file_1 = test_item_1.split("/")[-1]
file_2 = test_item_2.split("/")[-1]
! gsutil cp $test_item_1 $BUCKET_NAME/$file_1
! gsutil cp $test_item_2 $BUCKET_NAME/$file_2
test_item_1 = BUCKET_NAME + "/" + file_1
test_item_2 = BUCKET_NAME + "/" + file_2
import json
import tensorflow as tf
gcs_input_uri = BUCKET_NAME + "/test.jsonl"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
data = {"content": test_item_1, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
data = {"content": test_item_2, "mime_type": "image/jpeg"}
f.write(json.dumps(data) + "\n")
print(gcs_input_uri)
! gsutil cat $gcs_input_uri
batch_predict_job = model.batch_predict(
job_display_name="salads_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
sync=False,
)
print(batch_predict_job)
batch_predict_job.wait()
import json
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
prediction_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("prediction"):
prediction_results.append(blob.name)
tags = list()
for prediction_result in prediction_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
line = json.loads(line)
print(line)
break
endpoint = model.deploy()
test_items = !gsutil cat $IMPORT_FILE | head -n1
cols = str(test_items[0]).split(",")
if len(cols) == 11:
test_item = str(cols[1])
test_label = str(cols[2])
else:
test_item = str(cols[0])
test_label = str(cols[1])
print(test_item, test_label)
import base64
import tensorflow as tf
with tf.io.gfile.GFile(test_item, "rb") as f:
content = f.read()
# The format of each instance should conform to the deployed model's prediction input schema.
instances = [{"content": base64.b64encode(content).decode("utf-8")}]
prediction = endpoint.predict(instances=instances)
print(prediction)
endpoint.undeploy_all()
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Location of Cloud Storage training data.
Step13: Quick peek at your data
Step14: Create a dataset
Step15: Example Output
Step16: Example output
Step17: Example output
Step18: Example output
Step19: Copy test item(s)
Step20: Make the batch input file
Step21: Make the batch prediction request
Step22: Example output
Step23: Example Output
Step24: Example Output
Step25: Example output
Step26: Make the prediction
Step27: Example output
Step28: Cleaning up
|
14,001
|
<ASSISTANT_TASK:>
Python Code:
rows = [
{'address': '5412 N CLARK', 'date': '07/01/2012'},
{'address': '5148 N CLARK', 'date': '07/04/2012'},
{'address': '5800 E 58TH', 'date': '07/02/2012'},
{'address': '2122 N CLARK', 'date': '07/03/2012'},
{'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},
{'address': '1060 W ADDISON', 'date': '07/02/2012'},
{'address': '4801 N BROADWAY', 'date': '07/01/2012'},
{'address': '1039 W GRANVILLE', 'date': '07/04/2012'},
]
from operator import itemgetter
from itertools import groupby
# Sort by the desired field first
rows.sort(key = itemgetter('address'))
# Iterate in groups
for date, items in groupby(rows, key = itemgetter('date')):
print(date)
for i in items:
print(" ", i)
from collections import defaultdict
rows_by_date = defaultdict(list)
for row in rows:
rows_by_date[row["date"]].append(row)
for r in rows_by_date["07/01/2012"]:
print(r)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 现在假设你想在按 date 分组后的数据块上进行迭代。为了这样做,你首先需要按照指定的字段(这里就是 date )排序, 然后调用 itertools.groupby() 函数:
Step2: 讨论
Step3: 这样的话你可以很轻松的就能对每个指定日期访问对应的记录:
|
14,002
|
<ASSISTANT_TASK:>
Python Code:
import nltk
pos_tweets = [('I love this car', 'positive'),
('This view is amazing', 'positive'),
('I feel great this morning', 'positive'),
('I am so excited about the concert', 'positive'),
('He is my best friend', 'positive')]
neg_tweets = [('I do not like this car', 'negative'),
('This view is horrible', 'negative'),
('I feel tired this morning', 'negative'),
('I am not looking forward to the concert', 'negative'),
('He is my enemy', 'negative')]
tweets = []
for (words, sentiment) in pos_tweets + neg_tweets:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
tweets[:2]
test_tweets = [
(['feel', 'happy', 'this', 'morning'], 'positive'),
(['larry', 'friend'], 'positive'),
(['not', 'like', 'that', 'man'], 'negative'),
(['house', 'not', 'great'], 'negative'),
(['your', 'song', 'annoying'], 'negative')]
# get the word lists of tweets
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
# get the unique word from the word list
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
word_features = get_word_features(get_words_in_tweets(tweets))
' '.join(word_features)
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
help(nltk.classify.util.apply_features)
training_set[0]
training_set = nltk.classify.util.apply_features(extract_features,\
tweets)
classifier = nltk.NaiveBayesClassifier.train(training_set)
# You may want to know how to define the ‘train’ method in NLTK here:
def train(labeled_featuresets, estimator=nltk.probability.ELEProbDist):
# Create the P(label) distribution
label_probdist = estimator(label_freqdist)
# Create the P(fval|label, fname) distribution
feature_probdist = {}
model = NaiveBayesClassifier(label_probdist, feature_probdist)
return model
tweet_positive = 'Harry is my friend'
classifier.classify(extract_features(tweet_positive.split()))
tweet_negative = 'Larry is not my friend'
classifier.classify(extract_features(tweet_negative.split()))
# Don’t be too positive, let’s try another example:
tweet_negative2 = 'Your song is annoying'
classifier.classify(extract_features(tweet_negative2.split()))
def classify_tweet(tweet):
return classifier.classify(extract_features(tweet))
# nltk.word_tokenize(tweet)
total = accuracy = float(len(test_tweets))
for tweet in test_tweets:
if classify_tweet(tweet[0]) != tweet[1]:
accuracy -= 1
print('Total accuracy: %f%% (%d/20).' % (accuracy / total * 100, accuracy))
# nltk有哪些分类器呢?
nltk_classifiers = dir(nltk)
for i in nltk_classifiers:
if 'Classifier' in i:
print(i)
from sklearn.svm import LinearSVC
from nltk.classify.scikitlearn import SklearnClassifier
classif = SklearnClassifier(LinearSVC())
svm_classifier = classif.train(training_set)
# Don’t be too positive, let’s try another example:
tweet_negative2 = 'Your song is annoying'
svm_classifier.classify(extract_features(tweet_negative2.split()))
from textblob import TextBlob
text = '''
The titular threat of The Blob has always struck me as the ultimate movie
monster: an insatiably hungry, amoeba-like mass able to penetrate
virtually any safeguard, capable of--as a doomed doctor chillingly
describes it--"assimilating flesh on contact.
Snide comparisons to gelatin be damned, it's a concept with the most
devastating of potential consequences, not unlike the grey goo scenario
proposed by technological theorists fearful of
artificial intelligence run rampant.
'''
blob = TextBlob(text)
blob.tags # [('The', 'DT'), ('titular', 'JJ'),
# ('threat', 'NN'), ('of', 'IN'), ...]
blob.noun_phrases # WordList(['titular threat', 'blob',
# 'ultimate movie monster',
# 'amoeba-like mass', ...])
for sentence in blob.sentences:
print(sentence.sentiment.polarity)
# 0.060
# -0.341
blob.translate(to="es") # 'La amenaza titular de The Blob...'
import turicreate as tc
train_data = tc.SFrame.read_csv(traindata_path,header=True,
delimiter='\t',quote_char='"',
column_type_hints = {'id':str,
'sentiment' : int,
'review':str } )
train_data['1grams features'] = tc.text_analytics.count_ngrams(
train_data['review'],1)
train_data['2grams features'] = tc.text_analytics.count_ngrams(
train_data['review'],2)
cls = tc.classifier.create(train_data, target='sentiment',
features=['1grams features',
'2grams features'])
import turicreate as tc
from IPython.display import display
from IPython.display import Image
traindata_path = "/Users/datalab/bigdata/cjc/kaggle_popcorn_data/labeledTrainData.tsv"
testdata_path = "/Users/datalab/bigdata/cjc/kaggle_popcorn_data/testData.tsv"
movies_reviews_data = tc.SFrame.read_csv(traindata_path,header=True,
delimiter='\t',quote_char='"',
column_type_hints = {'id':str,
'sentiment' : str,
'review':str } )
movies_reviews_data
movies_reviews_data['1grams features'] = tc.text_analytics.count_ngrams(movies_reviews_data ['review'],1)
movies_reviews_data#[['review','1grams features']]
train_set, test_set = movies_reviews_data.random_split(0.8, seed=5)
model_1 = tc.classifier.create(train_set, target='sentiment', \
features=['1grams features'])
result1 = model_1.evaluate(test_set)
def print_statistics(result):
print( "*" * 30)
print( "Accuracy : ", result["accuracy"])
print( "Confusion Matrix: \n", result["confusion_matrix"])
print_statistics(result1)
movies_reviews_data['2grams features'] = tc.text_analytics.count_ngrams(movies_reviews_data['review'],2)
movies_reviews_data
train_set, test_set = movies_reviews_data.random_split(0.8, seed=5)
model_2 = tc.classifier.create(train_set, target='sentiment', features=['1grams features','2grams features'])
result2 = model_2.evaluate(test_set)
print_statistics(result2)
traindata_path = "/Users/datalab/bigdata/cjc/kaggle_popcorn_data/labeledTrainData.tsv"
testdata_path = "/Users/datalab/bigdata/cjc/kaggle_popcorn_data/testData.tsv"
#creating classifier using all 25,000 reviews
train_data = tc.SFrame.read_csv(traindata_path,header=True, delimiter='\t',quote_char='"',
column_type_hints = {'id':str, 'sentiment' : int, 'review':str } )
train_data['1grams features'] = tc.text_analytics.count_ngrams(train_data['review'],1)
train_data['2grams features'] = tc.text_analytics.count_ngrams(train_data['review'],2)
cls = tc.classifier.create(train_data, target='sentiment', features=['1grams features','2grams features'])
#creating the test dataset
test_data = tc.SFrame.read_csv(testdata_path,header=True, delimiter='\t',quote_char='"',
column_type_hints = {'id':str, 'review':str } )
test_data['1grams features'] = tc.text_analytics.count_ngrams(test_data['review'],1)
test_data['2grams features'] = tc.text_analytics.count_ngrams(test_data['review'],2)
#predicting the sentiment of each review in the test dataset
test_data['sentiment'] = cls.classify(test_data)['class'].astype(int)
#saving the prediction to a CSV for submission
test_data[['id','sentiment']].save("/Users/datalab/bigdata/cjc/kaggle_popcorn_data/predictions.csv", format="csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extracting Features
Step2: To create a classifier, we need to decide what features are relevant. To do that, we first need a feature extractor.
Step3: 使用sklearn的分类器
Step4: 作业1:
Step5: Sentiment Analysis Using Turicreate
Step6: In the rest of this notebook, we will explain this code recipe in details, by demonstrating how this recipe can used to create IMDB movie reviews sentiment classifier.
Step7: IMDB movies reviews Dataset
Step8: Loading Data
Step9: By using the SFrame show function, we can visualize the data and notice that the train dataset consists of 12,500 positive and 12,500 negative, and overall 24,932 unique reviews.
Step10: Constructing Bag-of-Words Classifier
Step11: By running the last command, we created a new column in movies_reviews_data SFrame object. In this column each value is a dictionary object, where each dictionary's keys are the different words which appear in the corresponding review, and the dictionary's values are the frequency of each word.
Step12: We are now ready to construct and evaluate the movie reviews sentiment classifier using the calculated above features. But first, to be able to perform a quick evaluation of the constructed classifier, we need to create labeled train and test datasets. We will create train and test datasets by randomly splitting the train dataset into two parts. The first part will contain 80% of the labeled train dataset and will be used as the training dataset, while the second part will contain 20% of the labeled train dataset and will be used as the testing dataset. We will create these two dataset by using the following command
Step13: We are now ready to create a classifier using the following command
Step14: We can evaluate the performence of the classifier by evaluating it on the test dataset
Step15: In order to get an easy view of the classifier's prediction result, we define and use the following function
Step16: As can be seen in the results above, in just a few relatively straight foward lines of code, we have developed a sentiment classifier that has accuracy of about ~0.88. Next, we demonstrate how we can improve the classifier accuracy even more.
Step17: As before, we will construct and evaluate a movie reviews sentiment classifier. However, this time we will use both the '1grams features' and the '2grams features' features
Step18: Indeed, the new constructed classifier seems to be more accurate with an accuracy of about ~0.9.
|
14,003
|
<ASSISTANT_TASK:>
Python Code:
# Load the needed packages
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import awot
from awot.graph.common import create_basemap
from awot.graph import RadarHorizontalPlot, RadarVerticalPlot, FlightLevel
%matplotlib inline
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Set the project name
Project="DYNAMO"
# Choose what file to process
yymmdd, modn = '111124', '0351'
# Set the data directory
fDir = "/Users/guy/data/dynamo/"+yymmdd+"I/"
# Construct the full path name for windsyn NetCDF file
P3Radf = str(glob(fDir+"/*"+modn+"*windsyn*.nc")).strip('[]')
# Construct the full path name for Flight level NetCDF file
FltLevf = str(glob(fDir+"20*"+yymmdd+"*_DJ*.nc")).strip('[]')
corners = [77.8, -2.0, 79.6, -0.2]
figtitle = '24 Nov RCE'
# Set up some characteristics for plotting
# Set map projection to use
proj = 'cea'
Wbarb_Spacing = 300 # Spacing of wind barbs along flight path (sec)
# Choose the X-axis time step (in seconds) where major labels will be
XlabStride = 60
# Optional settings
start_time = "2011-11-24 03:51:00"
end_time = "2011-11-24 04:57:00"
# Map spacing
dLon = 0.5
dLat = 0.5
# Should landmarks be plotted? [If yes, then modify the section below
Lmarks=True
if Lmarks:
# Create a list of Landmark data
LocMark = []
# Add locations as [ StringName, Longitude, Latitude ,XlabelOffset, YlabelOffset]
LocMark.append(['Diego Garcia', 72.4160, -7.3117, 0.1, -0.6])
LocMark.append(['R/V Revelle', 80.5010, 0.12167, -0.4, -0.6])
LocMark.append(['Gan', 73.1017, -0.6308, -0.9, 0.0])
LocMark.append(['R/V Marai', 80.50, -7.98, -0.1, -0.6])
# Build a few variables for plotting the labels
# Build arrays for plotting
Labels = []
LabLons = []
LabLats = []
XOffset = []
YOffset = []
for L1, L2, L3, L4, L5 in LocMark:
Labels.append(L1)
LabLons.append(L2)
LabLats.append(L3)
XOffset.append(L4)
YOffset.append(L5)
# Add PPI plot at 2 km level
cappi_ht = 2000.
fl1 = awot.io.read_netcdf(fname=FltLevf[1:-1], platform='p-3')
r1 = awot.io.read_windsyn_tdr_netcdf(fname=P3Radf[1:-1], field_mapping=None)
fig, (axPPI, axXS) = plt.subplots(2, 1, figsize=(8, 8))
# Set the map for plotting
bm1 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI)
# Create a Flightlevel instance for the track
flp1 = FlightLevel(fl1, basemap=bm1)
flp1.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI)
# Create a RadarGrid
rgp1 = RadarHorizontalPlot(r1, basemap=bm1)
rgp1.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
#rgp1.plot_cappi('Uwind', 2., vmin=-20., vmax=20., title=' ',
# cmap='RdBu_r',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI)
rgp1.overlay_wind_vector(height_level=cappi_ht, vscale=200, vtrim=6, qcolor='0.50',
refUposX=.75, refUposY=.97, plot_km=True)
flp1.plot_radar_cross_section(r1, 'Wwind', plot_km=True,
start_time=start_time, end_time=end_time,
vmin=-3., vmax=3., title=' ',
cmap='RdBu_r',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
x_axis_array='time',
ax=axXS)
fig, (axPPI2, axXS2) = plt.subplots(2, 1, figsize=(7, 7))
# Set the map for plotting
bm2 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI2)
# Create a Flightlevel instance for the track
flp2 = FlightLevel(fl1, basemap=bm2)
flp2.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI2)
# Create a RadarGrid
rgph = RadarHorizontalPlot(r1, basemap=bm2)
# Add PPI plot at 2 km
rgph.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI2)
rgph.overlay_wind_vector(height_level=2., vscale=200, vtrim=6, qcolor='0.50')
# Add Cross-sectional line to horizontal plot
rgph.plot_line_geo([78.3, 79.0], [-1.1, -1.5], lw=4, alpha=.8, line_style='w-',
label0=True, label_offset=(0.05,-0.05))
rgph.plot_cross_section('reflectivity', (78.3, -1.1), (79.0, -1.5),
vmin=15., vmax=60., title=' ',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
plot_km=True, ax=axXS2)
# Alternatively the commented out code below will also display the plot
#rgpv = RadarVerticalPlot(fl1, instrument='tdr_grid')
# Add the cross-section along those coordinates
#rgpv.plot_cross_section('dBZ', (78.3, -1.1), (79.0, -1.5),
# vmin=15., vmax=60., title=' ',
# color_bar=False, cb_orient='vertical', cb_tick_int=4,
# ax=axXS)
fig, (axPPI3, axXS3) = plt.subplots(2, 1, figsize=(7, 7))
# Set the map for plotting
bm3 = create_basemap(corners=corners, proj=proj, resolution='l', area_thresh=1.,
lat_spacing=dLat, lon_spacing=dLon, ax=axPPI3)
# Create a Flightlevel instance for the track
flp2 = FlightLevel(fl1, basemap=bm3)
flp2.plot_trackmap(start_time=start_time, end_time=end_time,
min_altitude=50., max_altitude= 8000.,
addlegend=False, addtitle=False, ax=axPPI3)
# Create a RadarGrid
rgph = RadarHorizontalPlot(r1, basemap=bm3)
# Add PPI plot at 2 km level
rgph.plot_cappi('reflectivity', cappi_ht, vmin=15., vmax=60., title=' ',
color_bar=True, cb_pad="10%", cb_loc='right', cb_tick_int=4,
ax=axPPI3)
rgpv = RadarVerticalPlot(r1, basemap=bm3)
# Add the cross-section along those coordinates
rgpv.plot_cross_section('reflectivity', (78.3, -1.1), (79.0, -1.5),
vmin=15., vmax=60., title=' ',
color_bar=True, cb_orient='vertical', cb_tick_int=4,
discrete_cmap_levels=[10., 15., 20., 25., 30., 35., 40., 45., 50., 55., 60.], ax=axXS3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <b>Supply input data and set some plotting parameters.</b>
Step2: <b>Set up some characteristics for plotting.</b>
Step3: <b>Read in the flight and radar data</b>
Step4: <b>Make a cross-section following the flight track displayed in the top panel and use the vertical wind velocity field.</b>
Step5: <b>Now let's make a vertical cross-section along lon/lat pairs of reflectivity</b>
Step6: <b>Here's an alternative method to produce the same plot above. And notice the second plot has discrete levels by setting the <i>discrete_cmap_levels</i> keyword.</b>
|
14,004
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
df = pd.read_csv('data/anuncios.csv')
print(df.shape)
df.head()
x, y = df.idade.values.reshape(-1,1), df.comprou.values.reshape(-1,1)
print(x.shape, y.shape)
plt.scatter(x, y, c=y, cmap='bwr')
plt.xlabel('idade')
plt.ylabel('comprou?')
minmax = MinMaxScaler(feature_range=(-1,1))
x = minmax.fit_transform(x.astype(np.float64))
print(x.min(), x.max())
clf_sk = LogisticRegression(C=1e15)
clf_sk.fit(x, y.ravel())
print(clf_sk.coef_, clf_sk.intercept_)
print(clf_sk.score(x, y))
x_test = np.linspace(x.min(), x.max(), 100).reshape(-1,1)
y_sk = clf_sk.predict_proba(x_test)
plt.scatter(x, y, c=y, cmap='bwr')
plt.plot(x_test, y_sk[:,1], color='black')
plt.xlabel('idade')
plt.ylabel('comprou?')
# implemente a função sigmoid aqui
# implemente o neurônio sigmoid aqui
x_test = np.linspace(x.min(), x.max(), 100).reshape(-1,1)
y_sk = clf_sk.predict_proba(x_test)
y_pred = sigmoid(np.dot(x_test, w.T) + b)
plt.scatter(x, y, c=y, cmap='bwr')
plt.plot(x_test, y_sk[:,1], color='black', linewidth=7.0)
plt.plot(x_test, y_pred, color='yellow')
plt.xlabel('idade')
plt.ylabel('comprou?')
print('Acurácia pelo Scikit-learn: {:.2f}%'.format(clf_sk.score(x, y)*100))
y_pred = np.round(sigmoid(np.dot(x, w.T) + b))
print('Acurária pela nossa implementação: {:.2f}%'.format(accuracy_score(y, y_pred)*100))
x, y = df[['idade', 'salario']].values, df.comprou.values.reshape(-1,1)
print(x.shape, y.shape)
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter3D(x[:,0], x[:,1], y, c=y.ravel())
minmax = MinMaxScaler(feature_range=(-1,1))
x = minmax.fit_transform(x.astype(np.float64))
print(x.min(), x.max())
clf_sk = LogisticRegression(C=1e15)
clf_sk.fit(x, y.ravel())
print(clf_sk.coef_, clf_sk.intercept_)
print(clf_sk.score(x, y))
D = x.shape[1]
w = 2*np.random.random((1, D))-1 # [1x2]
b = 2*np.random.random()-1 # [1x1]
learning_rate = 1.0 # <- tente estimar a learning rate
for step in range(1): # <- tente estimar a #epochs
# calcule a saida do neuronio sigmoid
z =
y_pred =
error = y - y_pred # [400x1]
w = w + learning_rate*np.dot(error.T, x)
b = b + learning_rate*error.sum()
if step%100 == 0:
# implemente a entropia cruzada (1 linhas)
cost =
print('step {0}: {1}'.format(step, cost))
print('w: ', w)
print('b: ', b)
x1 = np.linspace(x[:, 0].min(), x[:, 0].max())
x2 = np.linspace(x[:, 1].min(), x[:, 1].max())
x1_mesh, x2_mesh = np.meshgrid(x1, x2)
x1_mesh = x1_mesh.reshape(-1, 1)
x2_mesh = x2_mesh.reshape(-1, 1)
x_mesh = np.hstack((x1_mesh, x2_mesh))
y_pred = sigmoid(np.dot(x_mesh, w.T) + b)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
ax.scatter3D(x[:,0], x[:,1], y, c=y.ravel())
ax.plot_trisurf(x1_mesh.ravel(), x2_mesh.ravel(), y_pred.ravel(), alpha=0.3, shade=False)
print('Acurácia pelo Scikit-learn: {:.2f}%'.format(clf_sk.score(x, y)*100))
y_pred = np.round(sigmoid(np.dot(x, w.T) + b))
print('Acurária pela nossa implementação: {:.2f}%'.format(accuracy_score(y, y_pred)*100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introdução
Step2: Vamos utilizar o sklearn como gabarito para nossa implementação. Entretanto, como a Regressão Logística do sklearn faz uma regularização L2 automaticamente, temos de definir $C=10^{15}$ para "anular" a regularização. O parâmetro $C$ define a inversa da força da regularização (ver documentação). Logo, quanto menor for o $C$, maior será a regularização e menores serão os valores dos pesos e bias.
Step3: Numpy
Step4: Exercícios
Step5: Numpy
|
14,005
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import colab
!pip install --upgrade pip
except:
pass
!pip install -U tfx
import os
import pprint
import tempfile
import urllib
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
from tfx import v1 as tfx
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
%load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
# This is the root directory for your TFX pip package installation.
_tfx_root = tfx.__path__[0]
# This is the directory containing the TFX Chicago Taxi Pipeline example.
_taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline')
# This is the path where your model will be pushed for serving.
_serving_model_dir = os.path.join(
tempfile.mkdtemp(), 'serving_model/taxi_simple')
# Set up logging.
absl.logging.set_verbosity(absl.logging.INFO)
_data_root = tempfile.mkdtemp(prefix='tfx-data')
DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv'
_data_filepath = os.path.join(_data_root, "data.csv")
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
!head {_data_filepath}
# Here, we create an InteractiveContext using default parameters. This will
# use a temporary directory with an ephemeral ML Metadata database instance.
# To use your own pipeline root or database, the optional properties
# `pipeline_root` and `metadata_connection_config` may be passed to
# InteractiveContext. Calls to InteractiveContext are no-ops outside of the
# notebook.
context = InteractiveContext()
example_gen = tfx.components.CsvExampleGen(input_base=_data_root)
context.run(example_gen)
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
statistics_gen = tfx.components.StatisticsGen(examples=example_gen.outputs['examples'])
context.run(statistics_gen)
context.show(statistics_gen.outputs['statistics'])
schema_gen = tfx.components.SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
context.run(schema_gen)
context.show(schema_gen.outputs['schema'])
example_validator = tfx.components.ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
context.show(example_validator.outputs['anomalies'])
_taxi_constants_module_file = 'taxi_constants.py'
%%writefile {_taxi_constants_module_file}
# Categorical features are assumed to each have a maximum value in the dataset.
# NOTE(review): only three maximum values are listed for the seven
# CATEGORICAL_FEATURE_KEYS below; the zip() in the trainer pairs them up and
# silently drops the last four keys -- confirm this is intended.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
    'trip_start_hour', 'trip_start_day', 'trip_start_month',
    'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
    'dropoff_community_area'
]
# Dense float features (z-score normalized in preprocessing_fn).
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
    'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
    'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
    'payment_type',
    'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
_taxi_transform_module_file = 'taxi_transform.py'
%%writefile {_taxi_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_FARE_KEY = taxi_constants.FARE_KEY
_LABEL_KEY = taxi_constants.LABEL_KEY
def preprocessing_fn(inputs):
  """tf.transform's callback function for preprocessing inputs.

  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    inputs: map from feature keys to raw not-yet-transformed features.

  Returns:
    Map from string feature key to transformed feature operations.
  """
  outputs = {}
  for key in _DENSE_FLOAT_FEATURE_KEYS:
    # If sparse make it dense, setting nan's to 0 or '', and apply zscore.
    outputs[key] = tft.scale_to_z_score(
        _fill_in_missing(inputs[key]))

  for key in _VOCAB_FEATURE_KEYS:
    # Build a vocabulary for this feature.
    outputs[key] = tft.compute_and_apply_vocabulary(
        _fill_in_missing(inputs[key]),
        top_k=_VOCAB_SIZE,
        num_oov_buckets=_OOV_SIZE)

  for key in _BUCKET_FEATURE_KEYS:
    outputs[key] = tft.bucketize(
        _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)

  for key in _CATEGORICAL_FEATURE_KEYS:
    outputs[key] = _fill_in_missing(inputs[key])

  # Was this passenger a big tipper?  The label is derived, not read directly:
  # a NaN fare yields label 0, otherwise label 1 iff tip > 20% of the fare.
  taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
  tips = _fill_in_missing(inputs[_LABEL_KEY])
  outputs[_LABEL_KEY] = tf.where(
      tf.math.is_nan(taxi_fare),
      tf.cast(tf.zeros_like(taxi_fare), tf.int64),
      # Test if the tip was > 20% of the fare.
      tf.cast(
          tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))

  return outputs
def _fill_in_missing(x):
  """Replace missing values in a SparseTensor.

  Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
      in the second dimension.

  Returns:
    A rank 1 tensor where missing values of `x` have been filled in.
  """
  if not isinstance(x, tf.sparse.SparseTensor):
    # Dense inputs need no filling; pass them through untouched.
    return x
  default_value = '' if x.dtype == tf.string else 0
  return tf.squeeze(
      tf.sparse.to_dense(
          tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
          default_value),
      axis=1)
transform = tfx.components.Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_taxi_transform_module_file))
context.run(transform)
transform.outputs
train_uri = transform.outputs['transform_graph'].get()[0].uri
os.listdir(train_uri)
# Get the URI of the output artifact representing the transformed examples, which is a directory
train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Iterate over the first 3 records and decode them.
for tfrecord in dataset.take(3):
serialized_example = tfrecord.numpy()
example = tf.train.Example()
example.ParseFromString(serialized_example)
pp.pprint(example)
_taxi_trainer_module_file = 'taxi_trainer.py'
%%writefile {_taxi_trainer_module_file}
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx_bsl.tfxio import dataset_options
import taxi_constants
_DENSE_FLOAT_FEATURE_KEYS = taxi_constants.DENSE_FLOAT_FEATURE_KEYS
_VOCAB_FEATURE_KEYS = taxi_constants.VOCAB_FEATURE_KEYS
_VOCAB_SIZE = taxi_constants.VOCAB_SIZE
_OOV_SIZE = taxi_constants.OOV_SIZE
_FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT
_BUCKET_FEATURE_KEYS = taxi_constants.BUCKET_FEATURE_KEYS
_CATEGORICAL_FEATURE_KEYS = taxi_constants.CATEGORICAL_FEATURE_KEYS
_MAX_CATEGORICAL_FEATURE_VALUES = taxi_constants.MAX_CATEGORICAL_FEATURE_VALUES
_LABEL_KEY = taxi_constants.LABEL_KEY
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
  """Return the raw (pre-transform) feature spec derived from `schema`."""
  feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
  return feature_spec
def _build_estimator(config, hidden_units=None, warm_start_from=None):
  """Build an estimator for predicting the tipping behavior of taxi riders.

  (Restored the docstring delimiters that were stripped from this block, and
  corrected the Returns section: the old text described trainer_fn's dict.)

  Args:
    config: tf.estimator.RunConfig defining the runtime environment for the
      estimator (including model_dir).
    hidden_units: [int], the layer sizes of the DNN (input layer first).
    warm_start_from: Optional directory to warm start from.

  Returns:
    A `tf.estimator.DNNLinearCombinedClassifier`: the wide (linear) part is
    fed the categorical columns and the deep (DNN) part the real-valued
    columns.
  """
  real_valued_columns = [
      tf.feature_column.numeric_column(key, shape=())
      for key in _DENSE_FLOAT_FEATURE_KEYS
  ]

  categorical_columns = [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
      for key in _VOCAB_FEATURE_KEYS
  ]
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(
          key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
      for key in _BUCKET_FEATURE_KEYS
  ]
  # NOTE(review): zip() stops at the shorter sequence, so only the first
  # len(_MAX_CATEGORICAL_FEATURE_VALUES) categorical keys get a column here
  # -- confirm this is intended.
  categorical_columns += [
      tf.feature_column.categorical_column_with_identity(  # pylint: disable=g-complex-comprehension
          key,
          num_buckets=num_buckets,
          default_value=0) for key, num_buckets in zip(
              _CATEGORICAL_FEATURE_KEYS,
              _MAX_CATEGORICAL_FEATURE_VALUES)
  ]
  return tf.estimator.DNNLinearCombinedClassifier(
      config=config,
      linear_feature_columns=categorical_columns,
      dnn_feature_columns=real_valued_columns,
      dnn_hidden_units=hidden_units or [100, 70, 50, 25],
      warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_graph, schema):
  """Build the serving graph inputs.

  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    tf_transform_graph: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    Tensorflow graph which parses examples, applying tf-transform to them.
  """
  raw_feature_spec = _get_raw_feature_spec(schema)
  # The label is derived by the transform, so serving requests must not
  # (and cannot) provide it.
  raw_feature_spec.pop(_LABEL_KEY)

  raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
      raw_feature_spec, default_batch_size=None)
  serving_input_receiver = raw_input_fn()

  transformed_features = tf_transform_graph.transform_raw_features(
      serving_input_receiver.features)

  return tf.estimator.export.ServingInputReceiver(
      transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_graph, schema):
  """Build everything needed for the tf-model-analysis to run the model.

  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    tf_transform_graph: A TFTransformOutput.
    schema: the schema of the input data.

  Returns:
    EvalInputReceiver function, which contains:
      - Tensorflow graph which parses raw untransformed features, applies the
        tf-transform preprocessing operators.
      - Set of raw, untransformed features.
      - Label against which predictions will be compared.
  """
  # Notice that the inputs are raw features, not transformed features here.
  raw_feature_spec = _get_raw_feature_spec(schema)

  serialized_tf_example = tf.compat.v1.placeholder(
      dtype=tf.string, shape=[None], name='input_example_tensor')

  # Add a parse_example operator to the tensorflow graph, which will parse
  # raw, untransformed, tf examples.
  features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)

  # Now that we have our raw examples, process them through the tf-transform
  # function computed during the preprocessing step.
  transformed_features = tf_transform_graph.transform_raw_features(
      features)

  # The key name MUST be 'examples'.
  receiver_tensors = {'examples': serialized_tf_example}

  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
  features.update(transformed_features)

  return tfma.export.EvalInputReceiver(
      features=features,
      receiver_tensors=receiver_tensors,
      labels=transformed_features[_LABEL_KEY])
def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200):
  """Generates features and label for tuning/training.

  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    file_pattern: List of paths or patterns of input tfrecord files.
    data_accessor: DataAccessor for converting input to RecordBatch.
    tf_transform_output: A TFTransformOutput.
    batch_size: representing the number of consecutive elements of returned
      dataset to combine in a single batch.

  Returns:
    A dataset that contains (features, indices) tuple where features is a
    dictionary of Tensors, and indices is a single Tensor of label indices.
  """
  return data_accessor.tf_dataset_factory(
      file_pattern,
      dataset_options.TensorFlowDatasetOptions(
          batch_size=batch_size, label_key=_LABEL_KEY),
      tf_transform_output.transformed_metadata.schema)
# TFX will call this function
def trainer_fn(trainer_fn_args, schema):
  """Build the estimator using the high level API.

  (Restored the docstring delimiters that were stripped from this block; the
  bare prose was a syntax error.)

  Args:
    trainer_fn_args: Holds args used to train the model as name/value pairs.
    schema: Holds the schema of the training examples.

  Returns:
    A dict of the following:
      - estimator: The estimator that will be used for training and eval.
      - train_spec: Spec for training.
      - eval_spec: Spec for eval.
      - eval_input_receiver_fn: Input function for eval.
  """
  # Number of nodes in the first layer of the DNN
  first_dnn_layer_size = 100
  num_dnn_layers = 4
  dnn_decay_factor = 0.7

  train_batch_size = 40
  eval_batch_size = 40

  tf_transform_graph = tft.TFTransformOutput(trainer_fn_args.transform_output)

  train_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.train_files,
      trainer_fn_args.data_accessor,
      tf_transform_graph,
      batch_size=train_batch_size)

  eval_input_fn = lambda: _input_fn(  # pylint: disable=g-long-lambda
      trainer_fn_args.eval_files,
      trainer_fn_args.data_accessor,
      tf_transform_graph,
      batch_size=eval_batch_size)

  train_spec = tf.estimator.TrainSpec(
      train_input_fn,
      max_steps=trainer_fn_args.train_steps)

  serving_receiver_fn = lambda: _example_serving_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_graph, schema)

  exporter = tf.estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
  eval_spec = tf.estimator.EvalSpec(
      eval_input_fn,
      steps=trainer_fn_args.eval_steps,
      exporters=[exporter],
      name='chicago-taxi-eval')

  run_config = tf.estimator.RunConfig(
      save_checkpoints_steps=999, keep_checkpoint_max=1)
  run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)

  estimator = _build_estimator(
      # Construct layer sizes with exponential decay.
      hidden_units=[
          max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
          for i in range(num_dnn_layers)
      ],
      config=run_config,
      warm_start_from=trainer_fn_args.base_model)

  # Create an input receiver for TFMA processing
  receiver_fn = lambda: _eval_input_receiver_fn(  # pylint: disable=g-long-lambda
      tf_transform_graph, schema)

  return {
      'estimator': estimator,
      'train_spec': train_spec,
      'eval_spec': eval_spec,
      'eval_input_receiver_fn': receiver_fn
  }
from tfx.components.trainer.executor import Executor
from tfx.dsl.components.base import executor_spec
trainer = tfx.components.Trainer(
module_file=os.path.abspath(_taxi_trainer_module_file),
custom_executor_spec=executor_spec.ExecutorClassSpec(Executor),
examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=tfx.proto.TrainArgs(num_steps=10000),
eval_args=tfx.proto.EvalArgs(num_steps=5000))
context.run(trainer)
# Get the URI of the output artifact representing the training logs, which is a directory
model_run_dir = trainer.outputs['model_run'].get()[0].uri
%load_ext tensorboard
%tensorboard --logdir {model_run_dir}
eval_config = tfma.EvalConfig(
model_specs=[
# Using signature 'eval' implies the use of an EvalSavedModel. To use
# a serving model remove the signature to defaults to 'serving_default'
# and add a label_key.
tfma.ModelSpec(signature_name='eval')
],
metrics_specs=[
tfma.MetricsSpec(
# The metrics added here are in addition to those saved with the
# model (assuming either a keras model or EvalSavedModel is used).
# Any metrics added into the saved model (for example using
# model.compile(..., metrics=[...]), etc) will be computed
# automatically.
metrics=[
tfma.MetricConfig(class_name='ExampleCount')
],
# To add validation thresholds for metrics saved with the model,
# add them keyed by metric name to the thresholds map.
thresholds = {
'accuracy': tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
}
)
],
slicing_specs=[
# An empty slice spec means the overall slice, i.e. the whole dataset.
tfma.SlicingSpec(),
# Data can be sliced along a feature column. In this case, data is
# sliced along feature column trip_start_hour.
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
])
# Use TFMA to compute a evaluation statistics over features of a model and
# validate them against a baseline.
# The model resolver is only required if performing model validation in addition
# to evaluation. In this case we validate against the latest blessed model. If
# no model has been blessed before (as in this case) the evaluator will make our
# candidate the first blessed model.
model_resolver = tfx.dsl.Resolver(
strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy,
model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model),
model_blessing=tfx.dsl.Channel(
type=tfx.types.standard_artifacts.ModelBlessing)).with_id(
'latest_blessed_model_resolver')
context.run(model_resolver)
evaluator = tfx.components.Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
eval_config=eval_config)
context.run(evaluator)
evaluator.outputs
context.show(evaluator.outputs['evaluation'])
import tensorflow_model_analysis as tfma
# Get the TFMA output result path and load the result.
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
tfma_result = tfma.load_eval_result(PATH_TO_RESULT)
# Show data sliced along feature column trip_start_hour.
tfma.view.render_slicing_metrics(
tfma_result, slicing_column='trip_start_hour')
blessing_uri = evaluator.outputs['blessing'].get()[0].uri
!ls -l {blessing_uri}
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri
print(tfma.load_validation_result(PATH_TO_RESULT))
pusher = tfx.components.Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=tfx.proto.PushDestination(
filesystem=tfx.proto.PushDestination.Filesystem(
base_directory=_serving_model_dir)))
context.run(pusher)
pusher.outputs
push_uri = pusher.outputs['pushed_model'].get()[0].uri
model = tf.saved_model.load(push_uri)
for item in model.signatures.items():
pp.pprint(item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TFX Estimator コンポーネントのチュートリアル
Step2: TFX をインストールする
Step3: ランタイムを再起動しましたか?
Step4: ライブラリのバージョンを確認します。
Step5: パイプライン パスを設定
Step6: サンプルデータのダウンロード
Step7: CSV ファイルを見てみましょう。
Step8: 注:この Web サイトは、シカゴ市の公式 Web サイト www.cityofchicago.org で公開されたデータを変更して使用するアプリケーションを提供します。シカゴ市は、この Web サイトで提供されるデータの内容、正確性、適時性、または完全性について一切の表明を行いません。この Web サイトで提供されるデータは、随時変更される可能性があり、提供されるデータはユーザーの自己責任で利用されるものとします。
Step9: TFX コンポーネントをインタラクティブに実行する
Step10: ExampleGenの出力アーティファクトを調べてみましょう。このコンポーネントは、トレーニングサンプルと評価サンプルの 2 つのアーティファクトを生成します。
Step11: また、最初の 3 つのトレーニングサンプルも見てみます。
Step12: ExampleGenがデータの取り込みを完了したので、次のステップ、データ分析に進みます。
Step13: StatisticsGenの実行が完了すると、出力された統計を視覚化できます。色々なプロットを試してみてください!
Step14: SchemaGen
Step15: SchemaGenの実行が完了すると、生成されたスキーマを表として視覚化できます。
Step16: データセットのそれぞれの特徴量は、スキーマ表のプロパティの横に行として表示されます。スキーマは、ドメインとして示される、カテゴリカル特徴量が取るすべての値もキャプチャします。
Step17: ExampleValidatorの実行が完了すると、異常を表として視覚化できます。
Step18: 異常の表では、異常がないことがわかります。これは、分析した最初のデータセットで、スキーマはこれに合わせて調整されているため、異常がないことが予想されます。このスキーマは確認する必要があります。予期されないものがある場合は、データに異常があることを意味します。確認されたスキーマを使用することにより将来のデータを保護できます。ここで生成された異常は、モデルのパフォーマンスをデバッグし、データが時間の経過とともにどのように変化するかを理解し、データ エラーを特定するために使用できます。
Step21: 次に、生データを入力として受け取り、モデルのトレーニングに使用できる変換された特徴量を返すpreprocessing_fnを記述します。
Step22: 次に、この特徴量エンジニアリング コードを Transformコンポーネントに渡し、実行してデータを変換します。
Step23: Transformの出力アーティファクトを調べてみましょう。このコンポーネントは、2 種類の出力を生成します。
Step24: transform_graphアーティファクトを見てみましょう。これは、3 つのサブディレクトリを含むディレクトリを指しています。
Step25: transformed_metadataサブディレクトリには、前処理されたデータのスキーマが含まれています。transform_fnサブディレクトリには、実際の前処理グラフが含まれています。metadataサブディレクトリには、元のデータのスキーマが含まれています。
Step31: Transformコンポーネントがデータを特徴量に変換したら、次にモデルをトレーニングします。
Step32: 次に、このモデル コードをTrainerコンポーネントに渡し、それを実行してモデルをトレーニングします。
Step33: TensorBoard でトレーニングを分析する
Step34: Evaluator
Step35: 次に、この構成をEvaluatorに渡して実行します。
Step36: Evaluatorの出力アーティファクトを調べてみましょう。
Step37: evaluation出力を使用すると、評価セット全体のグローバル指標のデフォルトの視覚化を表示できます。
Step38: スライスされた評価メトリクスの視覚化を表示するには、TensorFlow Model Analysis ライブラリを直接呼び出します。
Step39: この視覚化は同じ指標を示していますが、評価セット全体ではなく、trip_start_hourのすべての特徴値で計算されています。
Step40: 検証結果レコードを読み込み、成功を確認することもできます。
Step41: Pusher
Step42: 次にPusherの出力アーティファクトを調べてみましょう。
Step43: 特に、Pusher はモデルを次のような SavedModel 形式でエクスポートします。
|
14,006
|
<ASSISTANT_TASK:>
Python Code:
import re
def tokenize(s):
    """Split the string `s`, an arithmetic expression, into a token list.

    Whitespace is discarded, every number is abstracted to the token
    'NUMBER', operators and parentheses are returned verbatim, and any
    unrecognized character c yields the token 'ERROR(c)'.
    """
    spec = r'''([ \t\n]+) | # blanks and tabs
                  ([1-9][0-9]*|0) | # number
                  ([-+*/()]) | # arithmetical operators
                  (.) # unrecognized character
               '''
    tokens = []
    for ws, number, operator, error in re.findall(spec, s, re.VERBOSE):
        if ws:            # whitespace carries no information
            continue
        if number:        # the number's value is irrelevant to the parser
            tokens.append('NUMBER')
        elif operator:
            tokens.append(operator)
        else:
            tokens.append(f'ERROR({error})')
    return tokens
tokenize('1 + 2 * (3 - 4)')
class ShiftReduceParser():
    """Table-driven shift-reduce (LR) parser that prints a full trace.

    The parser is configured with three tables (built elsewhere, e.g. in
    Parse-Table.ipynb):
      * an action table mapping (state, token) to 'accept', ('shift', s)
        or ('reduce', rule),
      * a goto table mapping (state, head) to the successor state,
      * a state table mapping state names to the item sets shown while
        tracing.
    """
    def __init__(self, actionTable, gotoTable, stateTable):
        self.mActionTable = actionTable
        self.mGotoTable = gotoTable
        self.mStateTable = stateTable
    def parse(self, TL):
        """Parse token list TL; return True on success, False on error.

        NOTE(review): TL is extended in place with the 'EOF' marker, so the
        caller's list is mutated -- confirm this is intended.
        """
        index = 0 # points to next token
        Symbols = [] # stack of symbols
        States = ['s0'] # stack of states, s0 is start state
        TL += ['EOF']
        while True:
            q = States[-1]
            t = TL[index]
            print(f'States: [ {", ".join(States)} ]')
            print('Symbols:', ' '.join(Symbols + ['|'] + TL[index:]).strip())
            print('State: {', ", ".join(self.mStateTable[q]), '}')
            # Look up the action for the current (state, token) pair; a
            # missing table entry is treated as a syntax error.
            match self.mActionTable.get((q, t), 'error'):
                case 'error':
                    print(f'Action({q}, {t}) undefined.')
                    print('Syntax error!\n')
                    return False
                case 'accept':
                    print('Accepting!\n')
                    return True
                case 'shift', s:
                    # Push the token and its successor state, advance input.
                    print(f'Shifting state {s}')
                    print('State: {', ', '.join(self.mStateTable[s]), '}\n')
                    Symbols += [t]
                    States += [s]
                    index += 1
                case 'reduce', rule:
                    # Pop |body| symbols/states, push the rule head, then
                    # follow the goto table from the uncovered state.
                    head, body = rule
                    print(f'Reducing with rule {head} → {" ".join(body)}')
                    n = len(body)
                    Symbols = Symbols[:-n]
                    States = States [:-n]
                    Symbols = Symbols + [head]
                    state = States[-1]
                    States += [ self.mGotoTable[state, head] ]
                    print('State: {', ', '.join(self.mStateTable[self.mGotoTable[state, head]]), '}\n')
# Notebook idiom: attach the function `parse` as a method of the class and
# drop the module-level name afterwards.
# NOTE(review): this requires `parse` to exist at module level; if `parse`
# was only defined inside the class above, this line raises NameError --
# confirm how the cells were originally split.
ShiftReduceParser.parse = parse
del parse
# IPython cell magic: runs the notebook that builds actionTable, gotoTable
# and stateTable (only works inside IPython/Jupyter, not plain Python).
%run Parse-Table.ipynb
def test(s):
    """Tokenize the expression `s`, run the shift-reduce parser on it and
    report whether parsing succeeded (the parser prints a full trace)."""
    TL = tokenize(s)
    print(f'tokenlist: {TL}\n')
    srp = ShiftReduceParser(actionTable, gotoTable, stateTable)
    print('Parse successful!' if srp.parse(TL) else 'Parse failed!')
# Drive the parser on two sample expressions (the second nests parentheses).
test('1 + 2 * 3')
test('1 + 2 * (3 - 4)')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function tokenize transforms the string s into a list of tokens. See below for an example.
Step2: Testing
|
14,007
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-3', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# ---------------------------------------------------------------------------
# CMIP6 atmosphere specialization: property declarations.
#
# The generated template declared each ES-DOC property with an individual
# ``DOC.set_id(...)`` call framed by guidance comments.  This restyled
# version drives the *identical* sequence of ``DOC.set_id`` calls from a
# single tuple, in the original order.  Consult the ES-DOC CMIP6 "atmos"
# specialization for each property's controlled vocabulary (valid choices).
#
# PROPERTY IDS - DO NOT EDIT !  Several identifiers carry spellings
# inherited from the upstream CMIP6 specialization (e.g.
# "other_flourinated_gases", "physical_reprenstation"); they must be kept
# verbatim or the documents will not validate against the spec.
_ATMOS_PROPERTY_IDS = (
    'cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition',
    'cmip6.atmos.dynamical_core.top_boundary.top_heat',
    'cmip6.atmos.dynamical_core.top_boundary.top_wind',
    'cmip6.atmos.dynamical_core.lateral_boundary.condition',
    'cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name',
    'cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method',
    'cmip6.atmos.dynamical_core.advection_tracers.scheme_name',
    'cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics',
    'cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities',
    'cmip6.atmos.dynamical_core.advection_tracers.conservation_method',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_name',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics',
    'cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type',
    'cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities',
    'cmip6.atmos.dynamical_core.advection_momentum.conservation_method',
    'cmip6.atmos.radiation.aerosols',
    'cmip6.atmos.radiation.shortwave_radiation.overview',
    'cmip6.atmos.radiation.shortwave_radiation.name',
    'cmip6.atmos.radiation.shortwave_radiation.spectral_integration',
    'cmip6.atmos.radiation.shortwave_radiation.transport_calculation',
    'cmip6.atmos.radiation.shortwave_radiation.spectral_intervals',
    'cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity',
    'cmip6.atmos.radiation.shortwave_GHG.ODS',
    'cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases',
    'cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions',
    'cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation',
    'cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation',
    'cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods',
    'cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity',
    'cmip6.atmos.radiation.shortwave_aerosols.general_interactions',
    'cmip6.atmos.radiation.shortwave_aerosols.physical_representation',
    'cmip6.atmos.radiation.shortwave_aerosols.optical_methods',
    'cmip6.atmos.radiation.shortwave_gases.general_interactions',
    'cmip6.atmos.radiation.longwave_radiation.overview',
    'cmip6.atmos.radiation.longwave_radiation.name',
    'cmip6.atmos.radiation.longwave_radiation.spectral_integration',
    'cmip6.atmos.radiation.longwave_radiation.transport_calculation',
    'cmip6.atmos.radiation.longwave_radiation.spectral_intervals',
    'cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity',
    'cmip6.atmos.radiation.longwave_GHG.ODS',
    'cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases',
    'cmip6.atmos.radiation.longwave_cloud_ice.general_interactions',
    'cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation',
    'cmip6.atmos.radiation.longwave_cloud_ice.optical_methods',
    'cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions',
    'cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation',
    'cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods',
    'cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity',
    'cmip6.atmos.radiation.longwave_aerosols.general_interactions',
    'cmip6.atmos.radiation.longwave_aerosols.physical_representation',
    'cmip6.atmos.radiation.longwave_aerosols.optical_methods',
    'cmip6.atmos.radiation.longwave_gases.general_interactions',
    'cmip6.atmos.turbulence_convection.overview',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order',
    'cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_name',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_type',
    'cmip6.atmos.turbulence_convection.deep_convection.scheme_method',
    'cmip6.atmos.turbulence_convection.deep_convection.processes',
    'cmip6.atmos.turbulence_convection.deep_convection.microphysics',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_name',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_type',
    'cmip6.atmos.turbulence_convection.shallow_convection.scheme_method',
    'cmip6.atmos.turbulence_convection.shallow_convection.processes',
    'cmip6.atmos.turbulence_convection.shallow_convection.microphysics',
    'cmip6.atmos.microphysics_precipitation.overview',
    'cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name',
    'cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors',
    'cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name',
    'cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes',
    'cmip6.atmos.cloud_scheme.overview',
    'cmip6.atmos.cloud_scheme.name',
    'cmip6.atmos.cloud_scheme.atmos_coupling',
    'cmip6.atmos.cloud_scheme.uses_separate_treatment',
    'cmip6.atmos.cloud_scheme.processes',
    'cmip6.atmos.cloud_scheme.prognostic_scheme',
    'cmip6.atmos.cloud_scheme.diagnostic_scheme',
    'cmip6.atmos.cloud_scheme.prognostic_variables',
    'cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method',
    'cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order',
    'cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling',
    'cmip6.atmos.observation_simulation.overview',
    'cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method',
    'cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction',
    'cmip6.atmos.observation_simulation.cosp_attributes.run_configuration',
    'cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points',
    'cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns',
    'cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels',
    'cmip6.atmos.observation_simulation.radar_inputs.frequency',
    'cmip6.atmos.observation_simulation.radar_inputs.type',
    'cmip6.atmos.observation_simulation.radar_inputs.gas_absorption',
    'cmip6.atmos.observation_simulation.radar_inputs.effective_radius',
    'cmip6.atmos.observation_simulation.lidar_inputs.ice_types',
    'cmip6.atmos.observation_simulation.lidar_inputs.overlap',
    'cmip6.atmos.gravity_waves.overview',
    'cmip6.atmos.gravity_waves.sponge_layer',
    'cmip6.atmos.gravity_waves.background',
    'cmip6.atmos.gravity_waves.subgrid_scale_orography',
    'cmip6.atmos.gravity_waves.orographic_gravity_waves.name',
    'cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms',
    'cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method',
    'cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme',
    'cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme',
    'cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name',
    'cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms',
    'cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method',
    'cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme',
    'cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme',
    'cmip6.atmos.solar.overview',
    'cmip6.atmos.solar.solar_pathways.pathways',
    'cmip6.atmos.solar.solar_constant.type',
    'cmip6.atmos.solar.solar_constant.fixed_value',
    'cmip6.atmos.solar.solar_constant.transient_characteristics',
)

# TODO - record your model's value(s) for each declared property here,
# keyed by property id, as a tuple of values, e.g.:
#     'cmip6.atmos.radiation.shortwave_radiation.name': ("RRTMG-SW",),
# An empty mapping (the generated default) declares every property with
# no values, exactly as the original template did.
_ATMOS_PROPERTY_VALUES = {}

for _property_id in _ATMOS_PROPERTY_IDS:
    # PROPERTY ID - DO NOT EDIT !
    DOC.set_id(_property_id)
    # PROPERTY VALUE(S): one DOC.set_value call per value recorded above.
    for _value in _ATMOS_PROPERTY_VALUES.get(_property_id, ()):
        DOC.set_value(_value)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Representation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
14,008
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'

# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
    tar_gz_path = floyd_cifar10_location
else:
    tar_gz_path = 'cifar-10-python.tar.gz'


class DLProgress(tqdm):
    """tqdm progress-bar adapter for urlretrieve's reporthook callback."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve reports a cumulative block count; convert it to a
        # delta so tqdm's update() advances by the bytes downloaded since
        # the previous call.
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num


if not isfile(tar_gz_path):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
        urlretrieve(
            'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
            tar_gz_path,
            pbar.hook)

if not isdir(cifar10_dataset_folder_path):
    # NOTE(review): tarfile.extractall trusts member paths; acceptable for
    # this well-known archive, but do not reuse this pattern on untrusted
    # tarballs.  The `with` block closes the archive, so the redundant
    # tar.close() that followed extractall() has been removed.
    with tarfile.open(tar_gz_path) as tar:
        tar.extractall()

tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
    """
    Normalize a list of sample image data to the range [0, 1].

    : x: List of image data.  The image shape is (32, 32, 3)
    : return: Numpy array of normalized data
    """
    # Pixel intensities are 8-bit values in [0, 255], so divide by 255.0
    # (not 256) so that the maximum intensity maps exactly to 1.0.
    # np.asarray stacks the list into one array, replacing the original
    # per-image Python loop.
    return np.asarray(x, dtype=np.float64) / 255.0
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
    """
    One hot encode a list of sample labels. Return a one-hot encoded vector for each label.

    : x: List of sample labels (integers in the range 0..9)
    : return: Numpy array of one-hot encoded labels, shape (len(x), 10)
    """
    # CIFAR-10 always has 10 classes.  Labels are 0-based, so label `c`
    # must set column `c`; the original `out[i, x[i]-1]` shifted every
    # class by one and wrapped label 0 around to the last column.
    class_num = 10
    labels = np.asarray(x, dtype=np.int64)
    out = np.zeros((labels.size, class_num))
    out[np.arange(labels.size), labels] = 1
    return out
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input.

    : image_shape: Shape of the images, e.g. (32, 32, 3)
    : return: Placeholder tensor for image input, named "x".
    """
    # Prepend None so the batch dimension stays variable; the dead debug
    # prints from the original have been removed.
    return tf.placeholder(tf.float32, shape=(None,) + tuple(image_shape), name='x')
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input.

    : n_classes: Number of classes
    : return: Placeholder tensor for (one-hot) label input, named "y".
    """
    # Variable batch dimension, n_classes one-hot columns.
    label_shape = (None, n_classes)
    return tf.placeholder(tf.float32, shape=label_shape, name='y')
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability.

    : return: Scalar placeholder tensor for the dropout keep probability,
      named "keep_prob".
    """
    keep_prob_tensor = tf.placeholder(tf.float32, name='keep_prob')
    return keep_prob_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides, maxpool=True):
    """
    Apply convolution (with ReLU) then optional max pooling to x_tensor.

    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernal size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernal size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    :param maxpool: When False, skip the max-pooling step.
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
    # Kernel shape is (height, width, in_channels, out_channels).
    in_channels = x_tensor.get_shape().as_list()[-1]
    kernel_shape = conv_ksize + (in_channels, conv_num_outputs)
    # conv2d / max_pool expect NHWC 4-tuples: batch and channel entries are 1.
    conv_strides_4d = (1,) + conv_strides + (1,)
    pool_ksize_4d = (1,) + pool_ksize + (1,)
    pool_strides_4d = (1,) + pool_strides + (1,)
    kernel = tf.Variable(tf.random_normal(kernel_shape, stddev=0.01))
    bias = tf.Variable(tf.zeros(conv_num_outputs))
    activation = tf.nn.relu(tf.nn.conv2d(x_tensor, kernel, conv_strides_4d, padding='SAME') + bias)
    if not maxpool:
        return activation
    return tf.nn.max_pool(activation, pool_ksize_4d, pool_strides_4d, padding='SAME')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
    """
    Flatten x_tensor to (Batch Size, Flattened Image Size).

    : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
    : return: A tensor of size (Batch Size, Flattened Image Size).
    """
    # Collapse every non-batch dimension into one; -1 keeps the batch
    # dimension variable at graph-construction time.
    _, height, width, channels = x_tensor.get_shape().as_list()
    flat_size = height * width * channels
    return tf.reshape(x_tensor, (-1, flat_size))
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
    """
    Apply a fully connected layer to x_tensor using weight and bias.

    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor where the second dimension is num_outputs.
    """
    _, num_inputs = x_tensor.get_shape().as_list()
    # He initialization scales by the fan-in (number of inputs), not the
    # fan-out: stddev = sqrt(2 / num_inputs) keeps ReLU activation variance
    # stable; the original used sqrt(2 / num_outputs).
    weights = tf.Variable(
        tf.random_normal((num_inputs, num_outputs), stddev=np.sqrt(2 / num_inputs)))
    biases = tf.Variable(tf.zeros(num_outputs))
    return tf.nn.relu(tf.matmul(x_tensor, weights) + biases)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
    """
    Apply a output layer to x_tensor using weight and bias.

    : x_tensor: A 2-D tensor where the first dimension is batch size.
    : num_outputs: The number of output that the new tensor should be.
    : return: A 2-D tensor (raw logits) where the second dimension is num_outputs.
    """
    _, num_inputs = x_tensor.get_shape().as_list()
    # BUG FIX: tf.random_normal's second positional argument is the *mean*,
    # so the original call created weights with mean sqrt(2/num_outputs)
    # and the default stddev of 1.0.  Pass stddev explicitly (scaled by
    # fan-in, matching fully_conn).
    weights = tf.Variable(
        tf.random_normal((num_inputs, num_outputs), stddev=np.sqrt(2 / num_inputs)))
    biases = tf.Variable(tf.zeros(num_outputs))
    # No activation: these logits feed softmax_cross_entropy_with_logits.
    return tf.matmul(x_tensor, weights) + biases
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
    """
    Create a convolutional neural network model.

    : x: Placeholder tensor that holds image data.
    : keep_prob: Placeholder tensor that hold dropout keep probability.
    : return: Tensor that represents logits
    """
    # Three conv/ReLU/max-pool stages with shrinking kernels and growing
    # channel counts (32 -> 128 -> 512); each 2x2 pool halves the spatial
    # size.  (Dead commented-out experiments from the original removed.)
    out = conv2d_maxpool(x, 32, (7, 7), (1, 1), (2, 2), (2, 2), maxpool=True)
    out = conv2d_maxpool(out, 128, (5, 5), (1, 1), (2, 2), (2, 2), maxpool=True)
    out = conv2d_maxpool(out, 512, (3, 3), (1, 1), (2, 2), (2, 2), maxpool=True)
    # Flatten to (batch, features) for the dense layers.
    out = flatten(out)
    # One hidden fully-connected layer, then dropout for regularisation
    # before the final classifier.
    out = fully_conn(out, 256)
    out = tf.nn.dropout(out, keep_prob)
    # Final layer: 10 logits, one per CIFAR-10 class.
    return output(out, 10)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
    """
    Optimize the session on a batch of images and labels.

    : session: Current TensorFlow session
    : optimizer: TensorFlow optimizer function
    : keep_probability: keep probability
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    """
    # x, y and keep_prob are the module-level placeholders built above;
    # they are intentionally not parameters of this function.
    session.run(optimizer, feed_dict={
        x: feature_batch,
        y: label_batch,
        keep_prob: keep_probability,
    })
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
    """
    Print information about loss and validation accuracy.

    : session: Current TensorFlow session
    : feature_batch: Batch of Numpy image data
    : label_batch: Batch of Numpy label data
    : cost: TensorFlow cost function
    : accuracy: TensorFlow accuracy function
    """
    # TODO: Implement Function
    # here will print loss, train_accuracy, and val_accuracy
    # I implemented the val_accuracy, please read them all, thanks
    # print train_accuracy to see overfit
    # keep_prob is forced to 1.0 for evaluation: dropout must be disabled
    # when measuring loss/accuracy.
    loss = session.run(cost, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
    train_accuracy = session.run(accuracy, feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.0})
    # Evaluate the validation set in batches (memory limits), accumulating
    # a sample-weighted accuracy so a short final batch is not over-weighted.
    # valid_features / valid_labels are the module-level validation split.
    batch = feature_batch.shape[0]
    num_valid = valid_features.shape[0]
    val_accuracy = 0
    for i in range(0, num_valid, batch):
        end_i = i + batch
        if end_i > num_valid:
            # Clamp the final (possibly short) batch to the dataset end.
            end_i = num_valid
        batch_accuracy = session.run(accuracy, feed_dict={
            x: valid_features[i:end_i], y: valid_labels[i:end_i], keep_prob: 1.0})
        # Weight by the number of samples actually in this batch.
        batch_accuracy *= (end_i - i)
        val_accuracy += batch_accuracy
    val_accuracy /= num_valid
    print ('loss is {}, train_accuracy is {}, val_accuracy is {}'.format(loss, train_accuracy, val_accuracy))
# TODO: Tune Parameters
epochs = 10
batch_size = 128
keep_probability = 0.8
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
    """
    Test the saved model against the test dataset.
    """
    # Load the preprocessed test split written by helper.preprocess_and_save_data.
    test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
    loaded_graph = tf.Graph()

    with tf.Session(graph=loaded_graph) as sess:
        # Load model
        loader = tf.train.import_meta_graph(save_model_path + '.meta')
        loader.restore(sess, save_model_path)

        # Get Tensors from loaded model
        # (names match the name= arguments / tf.identity assigned when the
        # graph was built; ':0' selects the op's first output tensor)
        loaded_x = loaded_graph.get_tensor_by_name('x:0')
        loaded_y = loaded_graph.get_tensor_by_name('y:0')
        loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
        loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
        loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')

        # Get accuracy in batches for memory limitations
        # NOTE(review): this averages per-batch accuracies; if the final
        # batch is short it is slightly over-weighted.
        test_batch_acc_total = 0
        test_batch_count = 0

        for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
            test_batch_acc_total += sess.run(
                loaded_acc,
                feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
            test_batch_count += 1

        print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))

        # Print Random Samples
        # (top_n_predictions highest-probability classes for n_samples
        # randomly drawn test images, rendered by the course helper)
        random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
        random_test_predictions = sess.run(
            tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
            feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
        helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
14,009
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
from IPython.display import Image
from IPython.display import HTML
assert True # leave this to grade the import statements
Image(url='https://english.tau.ac.il/sites/default/files/styles/reaserch_main_image_580_x_330/public/sackler%20physics%20cropped.jpg?itok=oanzfnK-')
assert True # leave this to grade the image display
# Reconstruct the HTML markup as a triple-quoted string literal -- the
# extraction stripped the quotes, leaving raw HTML that is not valid
# Python.  The markup itself is unchanged.
s = """<table>
<tr>
<th>Header 1</th>
<th>Header 2</th>
</tr>
<tr>
<td>row 1, cell 1</td>
<td>row 1, cell 2</td>
</tr>
<tr>
<td>row 2, cell 1</td>
<td>row 2, cell 2</td>
</tr>
</table>"""
h = HTML(s)
display(h)
assert True # leave this here to grade the quark table
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic rich display
Step3: Use the HTML object to display HTML in the notebook that reproduces the table of Quarks on this page. This will require you to learn about how to create HTML tables and then pass that to the HTML object for display. Don't worry about styling and formatting the table, but you should use LaTeX where appropriate.
|
14,010
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
#To import all Shogun classes
from shogun import *
import shogun as sg
#Load the file
data_file=LibSVMFile(os.path.join(SHOGUN_DATA_DIR, 'uci/diabetes/diabetes_scale.svm'))
f=SparseRealFeatures()
trainlab=f.load_with_labels(data_file)
mat=f.get_full_feature_matrix()
#exatract 2 attributes
glucose_conc=mat[1]
BMI=mat[5]
#generate a numpy array
feats=array(glucose_conc)
feats=vstack((feats, array(BMI)))
print(feats, feats.shape)
#convert to shogun format
feats_train = features(feats)
#Get number of features(attributes of data) and num of vectors(samples)
feat_matrix=feats_train.get_feature_matrix()
num_f=feats_train.get_num_features()
num_s=feats_train.get_num_vectors()
print('Number of attributes: %s and number of samples: %s' %(num_f, num_s))
print('Number of rows of feature matrix: %s and number of columns: %s' %(feat_matrix.shape[0], feat_matrix.shape[1]))
print('First column of feature matrix (Data for first individual):')
print(feats_train.get_feature_vector(0))
#convert to shogun format labels
labels=BinaryLabels(trainlab)
n=labels.get_num_labels()
print('Number of labels:', n)
preproc=PruneVarSubMean(True)
preproc.init(feats_train)
feats_train.add_preprocessor(preproc)
feats_train.apply_preprocessor()
# Store preprocessed feature matrix.
preproc_data=feats_train.get_feature_matrix()
# Plot the raw training data.
figure(figsize=(13,6))
pl1=subplot(121)
gray()
_=scatter(feats[0, :], feats[1,:], c=labels, s=50)
vlines(0, -1, 1, linestyle='solid', linewidths=2)
hlines(0, -1, 1, linestyle='solid', linewidths=2)
title("Raw Training Data")
_=xlabel('Plasma glucose concentration')
_=ylabel('Body mass index')
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
pl1.legend((p1, p2), ["Non-diabetic", "Diabetic"], loc=2)
#Plot preprocessed data.
pl2=subplot(122)
_=scatter(preproc_data[0, :], preproc_data[1,:], c=labels, s=50)
vlines(0, -5, 5, linestyle='solid', linewidths=2)
hlines(0, -5, 5, linestyle='solid', linewidths=2)
title("Training data after preprocessing")
_=xlabel('Plasma glucose concentration')
_=ylabel('Body mass index')
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
pl2.legend((p1, p2), ["Non-diabetic", "Diabetic"], loc=2)
gray()
#prameters to svm
C=0.9
svm=LibLinear(C, feats_train, labels)
svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
#train
svm.train()
size=100
x1=linspace(-5.0, 5.0, size)
x2=linspace(-5.0, 5.0, size)
x, y=meshgrid(x1, x2)
#Generate X-Y grid test data
grid=features(array((ravel(x), ravel(y))))
#apply on test grid
predictions = svm.apply(grid)
#get output labels
z=predictions.get_values().reshape((size, size))
#plot
jet()
figure(figsize=(9,6))
title("Classification")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
_=scatter(preproc_data[0, :], preproc_data[1,:], c=trainlab, cmap=gray(), s=50)
_=xlabel('Plasma glucose concentration')
_=ylabel('Body mass index')
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
legend((p1, p2), ["Non-diabetic", "Diabetic"], loc=2)
gray()
w=svm.get_w()
b=svm.get_bias()
x1=linspace(-2.0, 3.0, 100)
#solve for w.x+b=0
def solve(x1):
    """Return x2 on the decision boundary w.x + b = 0 for a given x1."""
    # Rearranged: w0*x1 + w1*x2 + b = 0  =>  x2 = -(w0*x1 + b) / w1
    # (w and b are the module-level SVM weight vector and bias)
    return -(w[0] * x1 + b) / w[1]
x2=list(map(solve, x1))
#plot
figure(figsize=(7,6))
plot(x1,x2, linewidth=2)
title("Decision boundary using w and bias")
_=scatter(preproc_data[0, :], preproc_data[1,:], c=trainlab, cmap=gray(), s=50)
_=xlabel('Plasma glucose concentration')
_=ylabel('Body mass index')
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
legend((p1, p2), ["Non-diabetic", "Diabetic"], loc=2)
print('w :', w)
print('b :', b)
#split features for training and evaluation
num_train=700
feats=array(glucose_conc)
feats_t=feats[:num_train]
feats_e=feats[num_train:]
feats=array(BMI)
feats_t1=feats[:num_train]
feats_e1=feats[num_train:]
feats_t=vstack((feats_t, feats_t1))
feats_e=vstack((feats_e, feats_e1))
feats_train = features(feats_t)
feats_evaluate = features(feats_e)
label_t=trainlab[:num_train]
labels=BinaryLabels(label_t)
label_e=trainlab[num_train:]
labels_true=BinaryLabels(label_e)
svm=LibLinear(C, feats_train, labels)
svm.set_liblinear_solver_type(L2R_L2LOSS_SVC)
#train and evaluate
svm.train()
output=svm.apply(feats_evaluate)
#use AccuracyMeasure to get accuracy
acc=AccuracyMeasure()
acc.evaluate(output,labels_true)
accuracy=acc.get_accuracy()*100
print('Accuracy(%):', accuracy)
temp_feats = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/fm_housing.dat')))
labels=RegressionLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/housing_label.dat')))
#rescale to 0...1
preproc=RescaleFeatures()
preproc.init(temp_feats)
temp_feats.add_preprocessor(preproc)
temp_feats.apply_preprocessor(True)
mat = temp_feats.get_feature_matrix()
dist_centres=mat[7]
lower_pop=mat[12]
feats=array(dist_centres)
feats=vstack((feats, array(lower_pop)))
print(feats, feats.shape)
#convert to shogun format features
feats_train = features(feats)
from mpl_toolkits.mplot3d import Axes3D
size=100
x1=linspace(0, 1.0, size)
x2=linspace(0, 1.0, size)
x, y=meshgrid(x1, x2)
#Generate X-Y grid test data
grid = features(array((ravel(x), ravel(y))))
#Train on data(both attributes) and predict
width=1.0
tau=0.5
kernel=sg.kernel("GaussianKernel", log_width=np.log(width))
krr=KernelRidgeRegression(tau, kernel, labels)
krr.train(feats_train)
kernel.init(feats_train, grid)
out = krr.apply().get_labels()
#create feature objects for individual attributes.
feats_test = features(x1.reshape(1,len(x1)))
feats_t0=array(dist_centres)
feats_train0 = features(feats_t0.reshape(1,len(feats_t0)))
feats_t1=array(lower_pop)
feats_train1 = features(feats_t1.reshape(1,len(feats_t1)))
#Regression with first attribute
kernel=sg.kernel("GaussianKernel", log_width=np.log(width))
krr=KernelRidgeRegression(tau, kernel, labels)
krr.train(feats_train0)
kernel.init(feats_train0, feats_test)
out0 = krr.apply().get_labels()
#Regression with second attribute
kernel=sg.kernel("GaussianKernel", log_width=np.log(width))
krr=KernelRidgeRegression(tau, kernel, labels)
krr.train(feats_train1)
kernel.init(feats_train1, feats_test)
out1 = krr.apply().get_labels()
#Visualization of regression
fig=figure(figsize(20,6))
#first plot with only one attribute
fig.add_subplot(131)
title("Regression with 1st attribute")
_=scatter(feats[0, :], labels.get_labels(), cmap=gray(), s=20)
_=xlabel('Weighted distances to employment centres ')
_=ylabel('Median value of homes')
_=plot(x1,out0, linewidth=3)
#second plot with only one attribute
fig.add_subplot(132)
title("Regression with 2nd attribute")
_=scatter(feats[1, :], labels.get_labels(), cmap=gray(), s=20)
_=xlabel('% lower status of the population')
_=ylabel('Median value of homes')
_=plot(x1,out1, linewidth=3)
#Both attributes and regression output
ax=fig.add_subplot(133, projection='3d')
z=out.reshape((size, size))
gray()
title("Regression")
ax.plot_wireframe(y, x, z, linewidths=2, alpha=0.4)
ax.set_xlabel('% lower status of the population')
ax.set_ylabel('Distances to employment centres ')
ax.set_zlabel('Median value of homes')
ax.view_init(25, 40)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In a general problem setting for the supervised learning approach, the goal is to learn a mapping from inputs $x_i \in \mathcal{X}$ to outputs $y_i \in \mathcal{Y}$, given a labeled set of input-output pairs $\mathcal{D} = \{(x_i, y_i)\}_{i=1}^{N} \subseteq \mathcal{X} \times \mathcal{Y}$. Here $\mathcal{D}$ is called the training set, and $N$ is the number of training examples. In the simplest setting, each training input $x_i$ is a $D$-dimensional vector of numbers, representing, say, the height and weight of a person. These are called $\textbf{features}$, attributes or covariates. In general, however, $x_i$ could be a complex structured object, such as an image.<ul><li>When the response variable $y_i$ is categorical and discrete, $y_i \in \{1, \dots, C\}$ (say male or female) it is a classification problem.</li><li>When it is continuous (say the prices of houses) it is a regression problem.</li></ul>
Step2: This results in a LibSVMFile object which we will later use to access the data.
Step3: In numpy, this is a matrix of 2 row-vectors of dimension 768. However, in Shogun, this will be a matrix of 768 column vectors of dimension 2. This is beacuse each data sample is stored in a column-major fashion, meaning each column here corresponds to an individual sample and each row in it to an atribute like BMI, Glucose concentration etc. To convert the extracted matrix into Shogun format, RealFeatures are used which are nothing but the above mentioned Dense features of 64bit Float type. To do this call the factory method, features with the matrix (this should be a 64bit 2D numpy array) as the argument.
Step4: Some of the general methods you might find useful are
Step5: Assigning labels
Step6: The labels can be accessed using get_labels and the confidence vector using get_values. The total number of labels is available using get_num_labels.
Step7: Preprocessing data
Step8: Horizontal and vertical lines passing through zero are included to make the processing of data clear. Note that the now processed data has zero mean.
Step9: We will now apply on test features to get predictions. For visualising the classification boundary, the whole XY is used as test data, i.e. we predict the class on every point in the grid.
Step10: Let us have a look at the weight vector of the separating hyperplane. It should tell us about the linear relationship between the features. The decision boundary is now plotted by solving for $\bf{w}\cdot\bf{x}$ + $\text{b}=0$. Here $\text b$ is a bias term which allows the linear function to be offset from the origin of the used coordinate system. Methods get_w() and get_bias() are used to get the necessary values.
Step11: For this problem, a linear classifier does a reasonable job in distinguishing labelled data. An interpretation could be that individuals below a certain level of BMI and glucose are likely to have no Diabetes.
Step12: Let's see the accuracy by applying on test features.
Step13: To evaluate more efficiently cross-validation is used. As you might have wondered how are the parameters of the classifier selected? Shogun has a model selection framework to select the best parameters. More description of these things in this notebook.
Step14: The tool we will use here to perform regression is Kernel ridge regression. Kernel Ridge Regression is a non-parametric version of ridge regression where the kernel trick is used to solve a related linear ridge regression problem in a higher-dimensional space, whose results correspond to non-linear regression in the data-space. Again we train on the data and apply on the XY grid to get predicitions.
Step15: The out variable now contains a relationship between the attributes. Below is an attempt to establish such relationship between the attributes individually. Separate feature instances are created for each attribute. You could skip the code and have a look at the plots directly if you just want the essence.
|
14,011
|
<ASSISTANT_TASK:>
Python Code:
import functools


@functools.lru_cache(maxsize=None)
def solve(i, tight, sum_so_far, Sum, number, length):
    """Digit-DP count of integers in [0, int(number)] whose digits sum to `Sum`.

    Parameters:
        i           -- current digit position in `number`
        tight       -- falsy while the prefix built so far equals `number`'s
                       prefix (later digits are still bounded); truthy once a
                       strictly smaller digit has been chosen (digits free)
        sum_so_far  -- digit sum accumulated over positions 0..i-1
        Sum         -- target digit sum
        number      -- the inclusive upper bound, as a decimal string
        length      -- len(number)

    Memoization fix: the original version read a global `dp` table but never
    stored results back into it, so the recursion was exponential.  The cache
    keys on all arguments (all hashable), which also makes the function
    self-contained instead of depending on a module-level table.
    """
    if i == length:
        # A complete number has been built: count it iff its digit sum matches.
        return 1 if sum_so_far == Sum else 0
    total = 0
    for digit in range(10):
        digit_chr = str(digit)
        # While still bounded, this position may not exceed the corresponding
        # digit of `number` (single-char string comparison == digit comparison).
        if not tight and digit_chr > number[i]:
            break
        # Choosing a strictly smaller digit frees every later position.
        next_tight = tight or digit_chr < number[i]
        total += solve(i + 1, next_tight, sum_so_far + digit, Sum, number, length)
    return total


if __name__ == "__main__":  # fixed: was "__main __", which never matches
    # Count the numbers in [0, 100] whose digits sum to 4.
    Sum = 4
    number = "100"
    print(solve(0, 0, 0, Sum, number, len(number)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,012
|
<ASSISTANT_TASK:>
Python Code:
## import Python libraries
import ipyrad as ip
%%bash
## this will take about XX minutes to run, sorry, the code is not parallelized
## we simulate 360 tips by using the default 12 taxon tree and requesting 40
## individuals per taxon. Default is theta=0.002. Crown age= 5*2Nu (check this)
simrrls -L 100000 -f pairddrad -l 150 -n 30 -o Big_i360_L100K
## because it takes a long time to simulate this data set, you can alternatively
## just download the data set that we already simulated using the code above.
## The data set is hosted on anaconda, just run the following to get it.
# conda download -c ipyrad bigData
data = ip.Assembly("bigHorsePaired")
data.set_params("project_dir", "bigdata")
data.set_params("raw_fastq_path", "bigHorsePaired_R*_.fastq.gz")
data.set_params("barcodes_path", "bigHorsePaired_barcodes.txt")
data.set_params("datatype", "pairddrad")
data.set_params("restriction_overhang", ("TGCAG", "CCGG"))
data.get_params()
data.run('1')
ls -l bigdata/bigHorsePaired_fastqs/
data.run("234567")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data set with ipyrad
Step2: Demultiplex the data files
Step3: The data files
Step4: Assemble the data set with ipyrad
|
14,013
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import sys
import numpy as np
import scipy as sp
import matplotlib as mpl
print('System: {}'.format(sys.version))
print('numpy version: {}'.format(np.__version__))
print('scipy version: {}'.format(sp.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
from numpy import linalg as LA
from scipy import signal
import matplotlib.pyplot as plt
%matplotlib inline
MM = np.asmatrix(np.diag([1., 2.]))
print(MM)
KK = np.asmatrix([[20., -10.],[-10., 10.]])
print(KK)
C1 = 0.1*MM+0.02*KK
print(C1)
A = np.bmat([[np.zeros_like(MM), np.identity(MM.shape[0])], [LA.solve(-MM,KK), LA.solve(-MM,C1)]])
print(A)
Bf = KK*np.asmatrix(np.ones((2, 1)))
B = np.bmat([[np.zeros_like(Bf)],[LA.solve(MM,Bf)]])
print(B)
Cd = np.matrix((1,0))
Cv = np.asmatrix(np.zeros((1,MM.shape[1])))
Ca = np.asmatrix(np.zeros((1,MM.shape[1])))
C = np.bmat([Cd-Ca*LA.solve(MM,KK),Cv-Ca*LA.solve(MM,C1)])
print(C)
D = Ca*LA.solve(MM,Bf)
print(D)
system = signal.lti(A, B, C, D)
# Eigen-decomposition of the state matrix A.  For an underdamped system the
# eigenvalues come in complex-conjugate pairs lambda = -zeta*wn +/- i*wD.
w1, v1 = LA.eig(A)
ix = np.argsort(np.absolute(w1)) # order of ascending eigenvalues (by magnitude)
w1 = w1[ix] # sorted eigenvalues
v1 = v1[:,ix] # sorted eigenvectors (columns follow the eigenvalue order)
zw = -w1.real # damping coefficient times angular frequency: zeta*wn = -Re(lambda)
wD = w1.imag # damped angular frequency: wD = Im(lambda)
# zeta = zw / sqrt(zw**2 + wD**2); the sign of -zw cancels under the square.
zn = 1./np.sqrt(1.+(wD/-zw)**2) # the minus sign is formally correct!
wn = zw/zn # undamped angular frequency
# Indices [0, 2] pick one eigenvalue from each conjugate pair.
print('Angular frequency: {}'.format(wn[[0,2]]))
print('Damping coefficient: {}'.format(zn[[0,2]]))
w, H = system.freqresp()
fig, ax = plt.subplots(2, 1)
fig.suptitle('Real and imaginary plots')
# Real part plot
ax[0].plot(w, H.real, label='FRF')
ax[0].axvline(wn[0], color='k', label='First mode', linestyle='--')
ax[0].axvline(wn[2], color='k', label='Second mode', linestyle='--')
ax[0].set_ylabel('Real [-]')
ax[0].grid(True)
ax[0].legend()
# Imaginary part plot
ax[1].plot(w, H.imag, label='FRF')
ax[1].axvline(wn[0], color='k', label='First mode', linestyle='--')
ax[1].axvline(wn[2], color='k', label='Second mode', linestyle='--')
ax[1].set_ylabel('Imaginary [-]')
ax[1].set_xlabel('Frequency [rad/s]')
ax[1].grid(True)
ax[1].legend()
plt.show()
plt.figure()
plt.title('Nyquist plot')
plt.plot(H.real, H.imag, 'b')
plt.plot(H.real, -H.imag, 'r')
plt.xlabel('Real [-]')
plt.ylabel('Imaginary[-]')
plt.grid(True)
plt.axis('equal')
plt.show()
w, mag, phase = system.bode()
fig, ax = plt.subplots(2, 1)
fig.suptitle('Bode plot')
# Magnitude plot
ax[0].plot(w, mag, label='FRF')
ax[0].axvline(wn[0], color='k', label='First mode', linestyle='--')
ax[0].axvline(wn[2], color='k', label='Second mode', linestyle='--')
ax[0].set_ylabel('Magnitude [dB]')
ax[0].grid(True)
ax[0].legend()
# Phase plot
ax[1].plot(w, phase*np.pi/180., label='FRF')
ax[1].axvline(wn[0], color='k', label='First mode', linestyle='--')
ax[1].axvline(wn[2], color='k', label='Second mode', linestyle='--')
ax[1].set_ylabel('Phase [rad]')
ax[1].set_xlabel('Frequency [rad/s]')
ax[1].grid(True)
ax[1].legend()
plt.show()
plt.figure()
plt.title('Nichols plot')
plt.plot(phase*np.pi/180., mag)
plt.xlabel('Phase [rad/s]')
plt.ylabel('Magnitude [dB]')
plt.grid(True)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will also need some specific modules and a litle "IPython magic" to show the plots
Step2: Back to top
Step3: For the LTI system we will use a state space formulation. For that we will need the four matrices describing the system (A), the input (B), the output (C) and the feedthrough (D)
Step4: The LTI system is simply defined as
Step5: To check the results presented ahead we will need the angular frequencies and damping coefficients of this system. The eigenanalysis of the system matrix yields them after some computations
Step6: Back to top
Step7: Back to top
Step8: Back to top
Step9: Back to top
|
14,014
|
<ASSISTANT_TASK:>
Python Code:
from ipyparallel import Client
cl = Client()
cl.ids
%%px --local
# run whole cell on all engines a well as in the local IPython session
import numpy as np
import sys
sys.path.insert(0, '/home/claudius/Downloads/dadi')
import dadi
%%px --local
# import 1D spectrum of ery on all engines:
fs_ery = dadi.Spectrum.from_file('ERY_modified.sfs')
# import 1D spectrum of par on all engines:
fs_par = dadi.Spectrum.from_file('PAR_modified.sfs')
%matplotlib inline
import pylab
pylab.rcParams['figure.figsize'] = [12, 10]
pylab.rcParams['font.size'] = 14
pylab.plot(fs_ery, 'ro--', label='ery', markersize=12)
pylab.plot(fs_par, 'g>--', label='par', markersize=12)
pylab.grid()
pylab.xlabel('minor allele count')
pylab.ylabel('')
pylab.legend()
pylab.title('1D spectra - Ludivics correction applied')
def run_dadi(p_init):  # single argument so the function can be called with `map`
    """Run one dadi parameter optimisation from `p_init` and pickle the result.

    p_init: initial parameter values to run optimisation from.

    Relies on module-level globals that must be set up on each engine
    beforehand: perturb, fold, upper_bound, lower_bound, dadi_opt_func, sfs,
    func_ex, pts_l, verbose, maxiter, full_output, fixed_params and outname.

    Returns (p_init, popt): the (possibly perturbed) starting values and the
    full optimiser output.
    """
    if perturb:
        # Randomly perturb the starting values up to `fold` times 2-fold.
        # note: upper_bound and lower_bound variables are expected to be in
        # the namespace of each engine
        p_init = dadi.Misc.perturb_params(p_init, fold=fold,
                                          upper_bound=upper_bound,
                                          lower_bound=lower_bound)
    # run optimisation of parameters
    popt = dadi_opt_func(p0=p_init, data=sfs, model_func=func_ex, pts=pts_l,
                         lower_bound=lower_bound, upper_bound=upper_bound,
                         verbose=verbose, maxiter=maxiter,
                         full_output=full_output,
                         fixed_params=fixed_params)
    # pickle (start, result) to a file whose name encodes the starting values
    import dill
    name = outname[:]  # make a copy of the file name stub!
    for p in p_init:
        name += "_%.4f" % (p)
    # NOTE(review): text mode "w" works under Python 2, which this notebook
    # targets (see the later `print "..."` statements); Python 3 would
    # require "wb" for dill.
    with open(name + ".dill", "w") as fh:
        dill.dump((p_init, popt), fh)
    return p_init, popt
from glob import glob
import dill
from utility_functions import *
import pandas as pd
lbview = cl.load_balanced_view()
from itertools import repeat
dadi.Demographics1D.growth?
%%px --local
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics1D.growth)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 4]
lower_bound = [1e-4, 0]
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_ery
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 300 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/expgrowth" # set file name stub for opt. result files
fixed_params = None
# set starting values for perturbation
p0 = [1, 1]
#ar_ery = lbview.map(run_dadi, repeat(p0, 10))
ar_ery.get()
# set starting values for perturbation
p0 = [0.1, 0.1]
#ar_ery = lbview.map(run_dadi, repeat(p0, 10))
# set starting values for perturbation
p0 = [10, 0.1]
#ar_ery1 = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/expgrowth*dill"):
ar_ery.append(dill.load(open(filename)))
get_flag_count(ar_ery, NM=True)
import pandas as pd
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 6] # increasing upper bound of time parameter
lower_bound = [1e-4, 0]
# set starting values for perturbation
p0 = [2, 1]
#ar_ery1 = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/expgrowth*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 8] # increasing upper bound of time parameter
lower_bound = [1e-4, 0]
# set starting values for perturbation
p0 = [2, 1]
ar_ery1 = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/expgrowth*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True).head(15)
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_par
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 300 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/PAR_expgrowth" # set file name stub for opt. result files
fixed_params = None
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 4]
lower_bound = [1e-4, 0]
# set starting values for perturbation
p0 = [1, 1]
ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_expgrowth*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True).head(15)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 8]
lower_bound = [1e-4, 0]
# set starting values for perturbation
p0 = [1, 1]
ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_expgrowth*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True).head(15)
dadi.Demographics1D.two_epoch?
%%px --local
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics1D.two_epoch)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 4]
lower_bound = [1e-4, 0]
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_ery
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/ERY_twoEpoch" # set file name stub for opt. result files
fixed_params = None
# set starting values for perturbation
p0 = [1, 1]
ar_ery = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/ERY_twoEpoch*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 8]
lower_bound = [1e-4, 0]
# set starting values for perturbation
p0 = [1, 1]
ar_ery = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/ERY_twoEpoch*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_par
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/PAR_twoEpoch" # set file name stub for opt. result files
fixed_params = None
p0 = [1, 1]
ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_twoEpoch*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nu1_0', 'T_0', 'nu1_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
dadi.Demographics1D.bottlegrowth?
%%px --local
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics1D.bottlegrowth)
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 1e4, 8]
lower_bound = [1e-4, 1e-4, 0]
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_ery
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/ERY_bottlegrowth" # set file name stub for opt. result files
fixed_params = None
# set starting values for perturbation
p0 = [1, 1, 1]
#ar_ery = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/ERY_bottlegrowth*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'T_0', 'nuB_opt', 'nuF_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
# set starting values for perturbation
p0 = [55, 1.3, 1.5]
#ar_ery = lbview.map(run_dadi, repeat(p0, 10))
ar_ery = []
for filename in glob("OUT_1D_models/ERY_bottlegrowth*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'T_0', 'nuB_opt', 'nuF_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
popt = np.array( df.sort_values(by='-logL', ascending=True).iloc[0, 3:6] )
popt
# calculate best-fit model spectrum
model_spectrum = func_ex(popt, ns, pts_l)
theta = dadi.Inference.optimal_sfs_scaling(model_spectrum, fs_ery)
mu = 3e-9
L = fs_ery.data.sum()
print "The optimal value of theta per site for the ancestral population is {0:.4f}.".format(theta/L)
Nref = theta/L/mu/4
Nref
print "At time {0:,} generations ago, the ERY population size instantaneously increased by almost 55-fold (to {1:,}).".format(int(popt[2]*2*Nref), int(popt[0]*Nref))
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_par
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/PAR_bottlegrowth" # set file name stub for opt. result files
fixed_params = None
%%px --local
# set lower and upper bounds to nu1 and T
upper_bound = [1e4, 1e4, 6]
lower_bound = [1e-4, 1e-4, 0]
# set starting values for perturbation
p0 = [1, 1, 1]
#ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_bottlegrowth*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'T_0', 'nuB_opt', 'nuF_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
cl[:]['maxiter'] = 100
# set starting values for perturbation
p0 = [100, 2, 1.2]
ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_bottlegrowth*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'T_0', 'nuB_opt', 'nuF_opt', 'T_opt', '-logL'])
df.sort_values(by='-logL', ascending=True).head(10)
popt = np.array( df.sort_values(by='-logL', ascending=True).iloc[0, 3:6] )
popt
# calculate best-fit model spectrum
model_spectrum = func_ex(popt, ns, pts_l)
theta = dadi.Inference.optimal_sfs_scaling(model_spectrum, fs_par)
mu = 3e-9
L = fs_par.data.sum()
print "The optimal value of theta per site for the ancestral population is {0:.4f}.".format(theta/L)
Nref = theta/L/mu/4
Nref
print "At time {0:,} generations ago, the PAR population size instantaneously increased by almost 124-fold (to {1:,}).".format(int(popt[2]*2*Nref), int(popt[0]*Nref))
dadi.Demographics1D.three_epoch?
%%px --local
func_ex = dadi.Numerics.make_extrap_log_func(dadi.Demographics1D.three_epoch)
%%px --local
# set lower and upper bounds to nuB, nuF, TB and TF
upper_bound = [1e4, 1e4, 6, 6]
lower_bound = [1e-4, 1e-4, 0, 0]
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_ery
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/ERY_threeEpoch" # set file name stub for opt. result files
fixed_params = None
# set starting values for perturbation
p0 = [10, 1, 1, 1]
#ar_ery = lbview.map(run_dadi, repeat(p0, 20))
ar_ery = []
for filename in glob("OUT_1D_models/ERY_threeEpoch*dill"):
ar_ery.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_ery]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'TB_0', 'TF_0', 'nuB_opt', 'nuF_opt', 'TB_opt', 'TF_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
popt = np.array( df.sort_values(by='-logL', ascending=True).iloc[0, 4:8] )
popt
# calculate best-fit model spectrum
model_spectrum = func_ex(popt, ns, pts_l)
theta = dadi.Inference.optimal_sfs_scaling(model_spectrum, fs_ery)
mu = 3e-9
L = fs_ery.data.sum()
print "The optimal value of theta per site for the ancestral population is {0:.4f}.".format(theta/L)
Nref = theta/L/mu/4
Nref
print "At time {0:,} generations ago, the ERY population size instantaneously increased by almost 10-fold (to {1:,}).".format(int((popt[2]+popt[3])*2*Nref), int(popt[0]*Nref)),
print "It then kept this population constant for {0:,} generations.".format(int(popt[2]*2*Nref)),
print "At time {0:,} generations in the past, the ERY population then decreased to 1.3 fold of the ancient population size or {1:,}.".format(int(popt[3]*2*Nref), int(popt[1]*Nref))
%%px --local
# set up global variables on engines required for run_dadi function call
ns = fs_ery.sample_sizes # both populations have the same sample size
# setting the smallest grid size slightly larger than the largest population sample size (36)
pts_l = [40, 50, 60]
dadi_opt_func = dadi.Inference.optimize_log_fmin # uses Nelder-Mead algorithm
sfs = fs_par
perturb = True
fold = 2 # perturb randomly up to `fold` times 2-fold
maxiter = 100 # run a maximum of 100 iterations
verbose = 0
full_output = True # need to have full output to get the warnflags (see below)
outname = "MODIFIED_SPECTRA/OUT_1D_models/PAR_threeEpoch" # set file name stub for opt. result files
fixed_params = None
# set starting values for perturbation
p0 = [100, 2, 1, 1]
#ar_par = lbview.map(run_dadi, repeat(p0, 20))
ar_par = []
for filename in glob("OUT_1D_models/PAR_threeEpoch*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'TB_0', 'TF_0', 'nuB_opt', 'nuF_opt', 'TB_opt', 'TF_opt', '-logL'])
df.sort_values(by='-logL', ascending=True)
%%px --local
fold = 1
maxiter = 300
# set starting values for perturbation
p0 = [20, 1e-2, 0.8, 1e-3]
ar_par = lbview.map(run_dadi, repeat(p0, 10))
ar_par = []
for filename in glob("OUT_1D_models/PAR_threeEpoch*dill"):
ar_par.append(dill.load(open(filename)))
l = 2*len(p0)+1
# show all parameter combinations
returned = [flatten(out)[:l] for out in ar_par]
df = pd.DataFrame(data=returned, \
columns=['nuB_0', 'nuF_0', 'TB_0', 'TF_0', 'nuB_opt', 'nuF_opt', 'TB_opt', 'TF_opt', '-logL'])
df.sort_values(by='-logL', ascending=True).head(20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table of Contents
Step2: Exponential growth
Step3: ERY
Step4: The time parameter is hitting the upper boundary that I set. The exponential growth model cannot be fit to the ery spectrum with Ludovics correction applied.
Step5: The time parameter is hitting the upper boundary that I set. The exponential growth model can therefore not be fit to the PAR spectrum with Ludovic's correction applied.
Step6: ERY
Step7: Still hitting upper boundary on time. The two epoch model cannot be fit to the ERY spectrum with Ludovic's correction applied.
Step8: All hitting the upper boundary on the time parameter. The two epoch model cannot be fit to the PAR spectrum with Ludovic's correction applied.
Step9: ERY
Step10: This looks like convergence.
Step11: I think such a high effective population size is not realistic.
Step12: This looks like convergence.
Step13: An effective population size of 36 million is obviously to high. I therefore cannot regard this model fitting as successful.
Step14: ERY
Step15: Reasonable convergence. Divergent parameter value combinations have the same likelihood. The optimal parameter values from the optimisation runs 16 and 10 (adjacent in the table) show that quite different demographic scenarios can have almost identical likelihood. This is not unusual.
Step16: PAR
Step17: There is no convergence. The three epoch model could not be fit to the PAR spectrum with Ludivic's correction.
|
14,015
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import tensorflow as tf
import numpy as np
from datetime import date
date.today()
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
tf.__version__
np.__version__
sess = tf.InteractiveSession()
x = tf.constant([[1, 0, 0, 0],
[0, 0, 2, 0],
[0, 0, 0, 0]], dtype=tf.int32)
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
print(sp.eval())
print("dtype:", sp.dtype)
print("indices:", sp.indices.eval())
print("dense_shape:", sp.dense_shape.eval())
print("values:", sp.values.eval())
def dense_to_sparse(tensor):
    """Convert a dense Tensor into a tf.SparseTensor.

    Only nonzero entries of `tensor` are kept: their coordinates become the
    sparse indices and their values (shifted down by one) the sparse values.

    NOTE(review): the `- 1` shift assumes the dense values are 1-based labels
    to be stored zero-based -- confirm against the caller.
    """
    # Coordinates of every nonzero element, shape (num_nonzero, rank).
    indices = tf.where(tf.not_equal(tensor, 0))
    return tf.SparseTensor(indices=indices,
                           values=tf.gather_nd(tensor, indices) - 1, # for zero-based index
                           dense_shape=tf.to_int64(tf.shape(tensor)))
# Test
print(dense_to_sparse(x).eval())
output = tf.sparse_to_dense(sparse_indices=[[0, 0], [1, 2]], sparse_values=[1, 2], output_shape=[3, 4])
print(output.eval())
print("Check if this is identical with x:\n", x.eval())
# Fixed NameError: the SparseTensor defined earlier is named `sp`, not `s`.
output = tf.sparse_tensor_to_dense(sp)
print(output.eval())
print("Check if this is identical with x:\n", x.eval())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sparse Tensor Representation & Conversion
Step2: Q2. Investigate the dtype, indices, dense_shape and values of the SparseTensor sp in Q1.
Step3: Q3. Let's write a custom function that converts a SparseTensor to Tensor. Complete it.
Step4: Q4. Convert the SparseTensor sp to a Tensor using tf.sparse_to_dense.
Step5: Q5. Convert the SparseTensor sp to a Tensor using tf.sparse_tensor_to_dense.
|
14,016
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interact
# Model constants: Cobb-Douglas land share, consumption share in utility,
# and the household's endowments of land and labor time.
ALPHA = 0.5
BETA = 0.7
TBAR = 100
LBAR = 100


def F(T, L, alpha=ALPHA):
    """Cobb-Douglas production from land T and labor L with land share alpha."""
    land_part = T ** alpha
    labor_part = L ** (1 - alpha)
    return land_part * labor_part
def FL(T, L, alpha=ALPHA):
    """Shadow price of labor: marginal product of L in F(T, L).

    Fixes: restored the docstring quotes (the bare text line was a
    SyntaxError) and pass the local `alpha` through to F instead of the
    global ALPHA, which silently ignored a caller-supplied alpha.
    """
    return (1 - alpha) * F(T, L, alpha=alpha) / L
def FT(T, L, alpha=ALPHA):
    """Shadow price of land: marginal product of T in F(T, L).

    Fixes: restored the docstring quotes (the bare text line was a
    SyntaxError), corrected the copy-pasted "labor" description, and pass
    the local `alpha` through to F instead of the global ALPHA.
    """
    return alpha * F(T, L, alpha=alpha) / T
def U(c, l, beta=BETA):
    """Cobb-Douglas utility over consumption c and leisure l."""
    consumption_part = c ** beta
    leisure_part = l ** (1 - beta)
    return consumption_part * leisure_part
def indif(l, ubar, beta=BETA):
    """Consumption on the indifference curve U(c, l) = ubar at leisure l."""
    scaled = ubar / (l ** (1 - beta))
    return scaled ** (1 / beta)
def leisure(Lbar, alpha=ALPHA, beta=BETA):
    """Optimal leisure for the Chayanovian household with time budget Lbar."""
    weight = (1 - alpha) * beta / (1 - beta)
    return Lbar / (1 + weight)
def HH(Tbar, Lbar, alpha=ALPHA, beta=BETA):
    """Household optimum leisure, consumption and utility.

    Fixed: the docstring had lost its quotes, which made the bare text a
    SyntaxError.  The local was also renamed from `leisure` to `leis` to
    stop shadowing the module-level leisure() helper.
    """
    a = (1 - alpha) * beta / (1 - beta)
    leis = Lbar / (1 + a)
    output = F(Tbar, Lbar - leis, alpha)
    utility = U(output, leis, beta)
    return leis, output, utility
def chayanov(Tbar,Lbar,alpha=ALPHA, beta=BETA):
    """Solve and plot the Chayanovian household optimum.

    Prints the optimal leisure/consumption/utility and the shadow price of
    labor, then draws the production curve, the indifference curve through
    the optimum, and the optimum point itself on a fresh figure.
    """
    leis = np.linspace(0.1,Lbar,num=100)
    q = F(Tbar,Lbar-leis,alpha)
    # NOTE: `U` here is the optimal utility VALUE, shadowing the module-level
    # utility function U() for the rest of this body.
    l_opt, Q, U = HH(Tbar, Lbar, alpha, beta)
    print("Leisure, Consumption, Utility =({:5.2f},{:5.2f},{:5.2f})"
          .format(l_opt, Q, U))
    # NOTE(review): FL's third positional parameter is `alpha`, but `beta`
    # is passed here -- confirm this is intentional.
    print("shadow price labor:{:5.2f}".format(FL(Tbar,Lbar-l_opt,beta)))
    c = indif(leis,U,beta)
    fig, ax = plt.subplots(figsize=(8,8))
    ax.plot(leis, q, lw=2.5)
    ax.plot(leis, c, lw=2.5)
    ax.plot(l_opt,Q,'ob')
    ax.vlines(l_opt,0,Q, linestyles="dashed")
    ax.hlines(Q,0,l_opt, linestyles="dashed")
    ax.set_xlim(0, 110)
    ax.set_ylim(0, 150)
    ax.set_xlabel(r'$l - leisure$', fontsize=16)
    ax.set_ylabel('$c - consumption$', fontsize=16)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.grid()
    ax.set_title("Chayanovian Household Optimum")
    plt.show()
chayanov(TBAR,LBAR,0.5,0.5)
Tb = np.linspace(1,LBAR)
le, q, _ = HH(Tb,LBAR)
fig, ax1 = plt.subplots(figsize=(6,6))
ax1.plot(q/Tb,label='output per unit land')
ax1.set_title("Chayanov -- Inverse Farm Size Productivity")
ax1.set_xlabel('Farm Size '+r'$\bar T$')
ax1.set_ylabel('Output per unit land')
ax1.grid()
ax2 = ax1.twinx()
ax2.plot(FL(Tb,LBAR-le),'k--',label='shadow price labor')
ax2.set_ylabel('Shadow Price of Labor')
legend = ax1.legend(loc='upper left', shadow=True)
legend = ax2.legend(loc='upper right', shadow=True)
plt.show()
interact(chayanov,Tbar=(50,200,1),Lbar=(24,100,1),alpha=(0.1,0.9,0.1),beta=(0.1,0.9,0.1))
def farm_optimum(Tbar, w, alpha=ALPHA, beta=BETA):
    """Return optimal labor demand and profits at real wage w.

    Fixes: restored the docstring quotes (the bare text line was a
    SyntaxError) and pass `alpha` through to F, which previously fell back
    to the global ALPHA even when a different alpha was requested.

    NOTE(review): `beta` is unused here -- kept only for signature
    symmetry with HH_optimum.
    """
    # Labor demand from the first-order condition (1-alpha)*F_L = w.
    LD = Tbar * ((1 - alpha) / w) ** (1 / alpha)
    profit = F(Tbar, LD, alpha) - w * LD
    return LD, profit
def HH_optimum(Tbar, Lbar, w, alpha=ALPHA, beta=BETA):
    """Return optimal consumption, leisure and utility.

    Simple Cobb-Douglas choices from full income (farm profits plus the
    value of the time endowment).  Fixed: the docstring had lost its
    quotes, which made the bare text lines a SyntaxError.
    """
    _, profits = farm_optimum(Tbar, w, alpha)
    income = profits + w * Lbar
    consumption = beta * income
    # Renamed from `leisure` to avoid shadowing the module-level helper.
    leis = (1 - beta) * income / w
    utility = U(consumption, leis, beta)
    return consumption, leis, utility
W = 1
def plot_production(Tbar,Lbar,w):
    """Draw the production curve F(Tbar, Lbar - l) over leisure l on the
    module-level axes `ax` (a figure must already exist).

    NOTE(review): the `w` parameter is unused here -- presumably kept for
    signature symmetry with plot_farmconsumption; confirm before removing.
    """
    lei = np.linspace(1, Lbar)
    q = F(Tbar, Lbar-lei)
    ax.plot(lei, q, lw=2.5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
def plot_farmconsumption(Tbar, Lbar, w, alpha=ALPHA, beta=BETA):
    """Plot the separable-household optimum on the module-level axes `ax`:
    budget line, indifference curve at the optimum, and annotated optimal
    production/consumption points.

    NOTE(review): `alpha` is accepted but not forwarded to farm_optimum,
    HH_optimum or F, which therefore use the global ALPHA -- confirm.
    """
    lei = np.linspace(1, Lbar)
    # Farm side: profit-maximizing labor demand and profits at wage w.
    LD, profits = farm_optimum(Tbar, w)
    q_opt = F(Tbar,LD)
    # Full-income budget line: profits + value of time endowment - w*leisure.
    yline = profits + w*Lbar - w*lei
    # Consumption side: optimal consumption/leisure given full income.
    c_opt, l_opt, u_opt = HH_optimum(Tbar, Lbar, w)
    ax.plot(Lbar-LD,q_opt,'ob')
    ax.plot(lei, yline)
    ax.plot(lei, indif(lei,u_opt, beta),'k')
    ax.plot(l_opt, c_opt,'ob')
    ax.vlines(l_opt,0,c_opt, linestyles="dashed")
    ax.hlines(c_opt,0,l_opt, linestyles="dashed")
    ax.vlines(Lbar - LD,0,q_opt, linestyles="dashed")
    ax.hlines(profits,0,Lbar, linestyles="dashed")
    ax.vlines(Lbar,0,F(Tbar,Lbar))
    ax.hlines(q_opt,0,Lbar, linestyles="dashed")
    ax.text(Lbar+1,profits,r'$\Pi ^*$',fontsize=16)
    ax.text(Lbar+1,q_opt,r'$F(\bar T, L^{*})$',fontsize=16)
    ax.text(-6,c_opt,r'$c^*$',fontsize=16)
    ax.annotate('',(Lbar-LD,2),(Lbar,2),arrowprops={'arrowstyle':'->'})
    ax.text((2*Lbar-LD)/2,3,r'$L^{*}$',fontsize=16)
    ax.text(l_opt/2,8,'$l^*$',fontsize=16)
    ax.annotate('',(0,7),(l_opt,7),arrowprops={'arrowstyle':'<-'})
fig, ax = plt.subplots(figsize=(10,8))
plot_production(TBAR,LBAR,W)
plot_farmconsumption(TBAR, LBAR, W)
ax.set_title("The Separable Household Model")
ax.set_xlim(0,LBAR+20)
ax.set_ylim(0,F(TBAR,LBAR)+20)
plt.show()
fig, ax = plt.subplots(figsize=(10,8))
plot_production(TBAR,LBAR, W*0.55)
plot_farmconsumption(TBAR, LBAR, W*0.55)
ax.set_title("The Separable Household Model")
ax.set_xlim(0,LBAR+20)
ax.set_ylim(0,F(TBAR,LBAR)+20)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Farm Household Models
Step4: Inverse farm size productivity relationship
Step5: If you are running this notebook in interactive mode you can play with the sliders
Step8: The Separable Farm Household
Step9: Let's assume the market real wage starts at unity
Step10: Here is an example of a household that works and their own farm and sells labor to the market
Step11: Here is the same household when facing a much lower wage (55% of the wage above). They will want to expand output on their own farm and cut back on labor sold to the market (expand leisure). This household will on net hire workers to operate their farm.
|
14,017
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame.from_dict(results)
# Transpose
df = df.T
# NaN -> 0
df = df.fillna(0)
df['pass_rate'] = df['success'] / (df['failed'] + df['success'])
# sort by failed
df = df.sort_values(by=["pass_rate"])
df
import requests
cache = {}
build_info_cache = {}
build_map = {}
def parse_build(build):
    """Parse one CircleCI build's test log into (passed, failed, hung) sets.

    Reads the log referenced by step 5 of the build, classifies every line
    by its RUN/PASS/FAIL/SKIP marker, and records the first (job, build
    link) that mentioned each test in the module-level `build_map`.  Logs
    are memoized in the module-level `cache`, keyed by output URL.

    NOTE(review): `build["steps"][5]` hard-codes the test step's position
    in the workflow -- confirm it is stable across job types.
    """
    tests = []
    failed = set()
    passed = set()
    skipped = set()
    try:
        output_url = build["steps"][5]["actions"][0]["output_url"]
    except Exception as e:
        # Builds without the expected step shape are skipped, not fatal.
        print(f"Got error: {e}, continuing...")
        return set(), set(), set()
    if output_url in cache:
        lines = cache[output_url]
    else:
        lines = requests.get(output_url).json()
        cache[output_url] = lines
    for line in lines[0]["message"].split("\n"):
        #print(line)
        if "RUN" in line:
            parts = line.split("RUN")
            if len(parts) != 2:
                continue
            preamble, test = parts
            test = test.strip()
            # ignore docker RUN lines which contain e.g. "#10"
            if "#" in preamble or "Step" in preamble:
                continue
            if len(test) > 100:
                # some base64 gunk
                continue
            if "AcceptEnv" in test or "cd pachyderm" in test \
                or "git clone" in test or "NING" in test or "_BAD_TESTS" in test:
                continue
            tests.append(test)
        if "FAIL" in line:
            # Strip the marker's colon and any trailing "(duration)" suffix.
            test = line.split("FAIL")[1].replace(":", "").strip().split("(")[0].strip()
            if "github.com" in test or test == "":
                # filter out some more noise
                continue
            if len(test) > 100:
                # some base64 gunk
                continue
            failed.add(test)
        if "PASS" in line:
            test = line.split("PASS")[1].replace(":", "").strip().split("(")[0].strip()
            if test == "":
                # This happens when all the tests pass, we get a "PASS" on its own.
                continue
            if len(test) > 100:
                # some base64 gunk
                continue
            if "\\n" in test:
                continue
            passed.add(test)
        if "SKIP" in line:
            test = line.split("SKIP")[1].replace(":", "").strip().split("(")[0].strip()
            if test == "":
                # This happens when all the tests pass, we get a "PASS" on its own.
                continue
            if len(test) > 100:
                # some base64 gunk
                continue
            if "\\n" in test:
                continue
            skipped.add(test)
    all_tests = set(tests)
    # Remember the first (job name, build link) that mentioned each test.
    for test in all_tests:
        if test not in build_map:
            build_map[test] = build["workflows"]["job_name"], \
                f"<a target='_blank' href='{build['build_url']}'>{build['build_num']}</a>"
    # A test that started (RUN) but never reported a verdict is "hung".
    hung = all_tests - failed - passed - skipped
    assert all_tests == (failed | passed | hung | skipped), \
        f"all={all_tests}, failed={failed}, passed={passed}, hung={hung}, skipped={skipped}"
    return passed, failed, hung
build_results = defaultdict(lambda: defaultdict(int))
for build in builds:
print(".", end="")
if build["build_num"] in build_info_cache:
build_info = build_info_cache[build["build_num"]]
else:
build_info = circleci.get_build_info("pachyderm", "pachyderm", build["build_num"])
build_info_cache[build["build_num"]] = build_info
passed, failed, hung = parse_build(build_info)
for b in passed:
build_results[b]["pass"] += 1
build_results[b]["bucket"] = build_map[b][0]
for b in failed:
build_results[b]["fail"] += 1
build_results[b]["bucket"] = build_map[b][0]
build_results[b]["example"] = build_map[b][1]
for b in hung:
build_results[b]["hung"] += 1
build_results[b]["bucket"] = build_map[b][0]
build_results[b]["example"] = build_map[b][1]
import pandas as pd
df = pd.DataFrame.from_dict(build_results)
# Transpose
df = df.T
# NaN -> 0
df = df.fillna(0)
df['hang_rate'] = df['hung'] / (df['pass'] + df['fail'] + df['hung'])
df['fail_rate'] = df['fail'] / (df['pass'] + df['fail'] + df['hung'])
df['hang_or_fail'] = df['hang_rate'] + df['fail_rate']
hangy = df[df["hang_rate"] > 0]
faily = df[df["fail_rate"] > 0]
# sort by failed
hangy = hangy.sort_values(by=["hang_rate"], ascending=False)
faily = hangy.sort_values(by=["fail_rate"], ascending=False)
bad = df[df["hang_or_fail"] > 0]
bad = bad.sort_values(by=["hang_or_fail"], ascending=False)
pandas.set_option('display.max_rows', None)
from IPython.display import HTML
HTML(bad.to_html(escape=False))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Finding flakiest individual tests
|
14,018
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np #importing numpy
import pandas as pd #importing pandas
from bs4 import BeautifulSoup #importing Beautiful Soup
import requests
import html5lib #importing html5lib, as per Pandas read_html request
import re
path_to_bday_frequencies = '/Users/alexfreedman/Desktop/Stern/Data_Bootcamp/bday_frequencies 2.csv'
bday_densities = pd.read_csv(path_to_bday_frequencies)
bday_densities.sort_values('month_num')
ceo = requests.get("http://en.wikipedia.org/wiki/List_of_chief_executive_officers") #use the
# requests package 'get' function to pull the html of the wikipedia page of CEO's
ceo_soup = BeautifulSoup(ceo.content)
ceo_table = ceo_soup.find('table', attrs={'class':'wikitable sortable'})
#wikitable sortable is the beginning of the table in the html (viewed with view source of chrome)
dfceo = pd.read_html(str(ceo_table),skiprows=0,encoding="utf-8")[0]
#the pandas read_html function reads the ceo_table html and turns it into a dataframe
dfceo.columns = dfceo.iloc[0] #make the first row the columns of the df
dfceo = dfceo.reindex(dfceo.index.drop(0)) #reindex the df to start at line 1
def find_link(s, prestr = 'https://wikipedia.org'): #grab each wiki link from wiki table rows
    """Return a full wiki URL for each name in `s`.

    Looks each name up as anchor text in the module-level `ceo_soup` page
    and prefixes the anchor's relative href with `prestr`.
    NOTE(review): raises TypeError if a name has no matching <a> tag
    (find() returns None) -- confirm all names resolve.
    """
    return [prestr + ceo_soup.find('a', text=elem)['href'] for elem in s]
def remove_footnote(s):
    """Strip wiki-style footnote markers such as "[3]" from every string in s.

    Used mainly on link text from the wiki table, but works on any iterable
    of strings; always returns a list.
    """
    footnote = re.compile(r'\[.*?\]')
    cleaned = []
    for item in s:
        cleaned.append(footnote.sub('', item))
    return cleaned
def find_birthday(wl): #span with class=bday
    """Fetch each wiki page in `wl` and return birthdays as pandas datetimes.

    Pages without a <span class="bday"> element yield NaT (via the empty
    string and errors='coerce').
    """
    #relevancy is determined from Chrome's View Source
    bdays = [BeautifulSoup(requests.get(elem).content)
             .find('span', attrs={'class':'bday'}) for elem in wl]
    bday_stage = ["" if elem is None else elem.text for elem in bdays]
    bday_format = pd.to_datetime(bday_stage, format='%Y-%m-%d', errors='coerce') #format
    #the string birthday's into pandas datetime objects. errors='coerce' is required
    #to not throw errors when trying to read empty ("") birthdays, and instead convert to NaN
    return bday_format
dfceo_final = (dfceo.assign(wiki_link = lambda x: find_link(s=remove_footnote(x.Executive)))
.assign(birth_date = lambda x: find_birthday(x.wiki_link))
.assign(birth_year = lambda x: x.birth_date.dt.year,
birth_quarter = lambda x: x.birth_date.dt.quarter,
birth_month = lambda x: x.birth_date.dt.month,
birth_day = lambda x: x.birth_date.dt.day))
dfceo_final
%matplotlib inline
from ggplot import *
import seaborn as sns
import matplotlib.pyplot as plt
dfceo_final.columns
plot1_cols = ['month_num', 'freq']
df_plot1 = (bday_densities[plot1_cols]
.dropna())
plot1_breaks = (df_plot1
.month_num
.unique()
.astype(int)
.tolist())
ggplot(aes(x='month_num', y='freq'),
data=df_plot1) +\
geom_histogram(stat='identity',
fill="blue",
alpha=.8) +\
xlab('Birthday') + ylab('Count') + ggtitle('Histogram of USA Birthdays By Month')
plot2_cols = ['birth_quarter']
df_plot2 = (dfceo_final[plot2_cols]
.dropna())
plot2_breaks = [1,2,3,4]
plot2_labels = ['Q1','Q2','Q3','Q4']
ggplot(aes(x='birth_quarter'),
data=df_plot2) +\
geom_density(colour="darkblue",
size=2,
fill="purple",
alpha=.2) +\
scale_x_continuous(breaks = plot2_breaks,
labels = plot2_labels) +\
xlab('Birthday') + ylab('Density') + ggtitle('Density Plot of CEO Birthday Quarters')
plot3_cols = ['birth_quarter', 'birth_year']
df_plot3 = (dfceo_final[plot3_cols]
.dropna())
ax = sns.violinplot(x="birth_quarter", y="birth_year", data=df_plot3)
ax.set(xlabel='Birth Quarter', ylabel='Birth Year')
ax.set(xticklabels=['Q1','Q2','Q3','Q4'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following data was gathered from http
Step2: Using the help of my data scientist friend I was able to scrape a wikipedia table of prominent CEO's who will serve as my dataset for this project. The process underwent is as follows
Step3: The next three functions will do the heavy lifting associated with
Step4: Using the functions defined above, create new (final) df called dfceo_final. find_birthday() converts the string birthday elements into pandas datetime objects in birth_date. Once they are formatted as such, creating birth_year, birth_quarter, birth_month, and birth_day variables are straightforward.
Step5: Per the advice of my data scientist friend, I used the ggplot package to plot the first two graphs instead of matplotlib. I reviewed the following websites to get to know ggplot
Step6: The below graph shows that there is a relatively uniform distribution of births throughout a given year. As shown in the graph, if anything, there would be a slight liklihood for births occuring in the third quarter of a given year. This data was included to show that there is not a natural propensity for first quarter births.
Step7: Below is a density plot for CEO birthdays by quarter using the scraped data from the wikipedia table. It is interesting to note that this dataset shows a disproportionate number of Q1 and Q2 births compared to the population plotted above. Further, Q3, which had the most births above, has the least births in the CEO dataset.
Step8: The violin plot below shows the distribution of CEO birth years across the quarters in which those CEO's were born. Q4 CEO's appear to be slightly younger, on average. Q2 has outliers, both young and old, because Q2 births make up the majority of the dataset.
|
14,019
|
<ASSISTANT_TASK:>
Python Code:
PATH_NEWS_ARTICLES="/home/phoenix/Documents/HandsOn/Final/news_articles.csv"
ARTICLES_READ=[4,5,7,8]
NUM_RECOMMENDED_ARTICLES=5
ALPHA = 0.5
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import re
from nltk.stem.snowball import SnowballStemmer
import nltk
import numpy
from nltk import word_tokenize, pos_tag, ne_chunk
from nltk.chunk import tree2conlltags
stemmer = SnowballStemmer("english")
news_articles = pd.read_csv(PATH_NEWS_ARTICLES)
news_articles.head()
#Select relevant columns and remove rows with missing values
news_articles = news_articles[['Article_Id','Title','Content']].dropna()
articles = news_articles['Content'].tolist()
articles[0] #an uncleaned article
def clean_tokenize(document):
    """Clean one article: strip punctuation, tokenize, and stem.

    Uses the module-level Snowball `stemmer`; returns the stemmed tokens
    rejoined into a single space-separated string.
    """
    document = re.sub('[^\w_\s-]', ' ',document) #remove punctuation marks and other symbols
    tokens = nltk.word_tokenize(document) #Tokenize sentences
    cleaned_article = ' '.join([stemmer.stem(item) for item in tokens]) #Stemming each token
    return cleaned_article
cleaned_articles = map(clean_tokenize, articles)
cleaned_articles[0] #a cleaned, tokenized and stemmed article
#Generate tfidf matrix model
tfidf_matrix = TfidfVectorizer(stop_words='english', min_df=2)
articles_tfidf_matrix = tfidf_matrix.fit_transform(cleaned_articles)
articles_tfidf_matrix #tfidf vector of an article
def get_ner(article):
    """Return the named-entity tokens of `article` as one space-joined string.

    Tokenizes, POS-tags and NE-chunks the text, then keeps only tokens
    whose IOB tag is not 'O' (i.e. tokens inside some named entity).
    """
    ne_tree = ne_chunk(pos_tag(word_tokenize(article)))
    iob_tagged = tree2conlltags(ne_tree)
    ner_token = ' '.join([token for token,pos,ner_tag in iob_tagged if not ner_tag==u'O']) #Discarding tokens with 'Other' tag
    return ner_token
#Represent user in terms of cleaned content of read articles
user_articles = ' '.join(cleaned_articles[i] for i in ARTICLES_READ)
print "User Article =>", user_articles
print '\n'
#Represent user in terms of NERs assciated with read articles
user_articles_ner = ' '.join([get_ner(articles[i]) for i in ARTICLES_READ])
print "NERs of Read Article =>", user_articles_ner
#Get vector representation for both of the user read article representation
user_articles_tfidf_vector = tfidf_matrix.transform([user_articles])
user_articles_ner_tfidf_vector = tfidf_matrix.transform([user_articles_ner])
user_articles_tfidf_vector
# User_Vector => (Alpha) [TF-IDF Vector] + (1-Alpha) [NER Vector]
alpha_tfidf_vector = ALPHA * user_articles_tfidf_vector
alpha_ner_vector = (1-ALPHA) * user_articles_ner_tfidf_vector
user_vector = np.sum(zip(alpha_tfidf_vector,alpha_ner_vector))
user_vector
user_vector.toarray()
def calculate_cosine_similarity(articles_tfidf_matrix, user_vector):
    """Rank articles by cosine similarity to the user vector and return the
    ids of the top NUM_RECOMMENDED_ARTICLES not-yet-read articles.

    Uses the module-level ARTICLES_READ and NUM_RECOMMENDED_ARTICLES.
    """
    articles_similarity_score=cosine_similarity(articles_tfidf_matrix, user_vector.toarray())
    # argsort is ascending; reverse for most-similar-first ordering.
    recommended_articles_id = articles_similarity_score.flatten().argsort()[::-1]
    #Remove read articles from recommendations
    final_recommended_articles_id = [article_id for article_id in recommended_articles_id
                                     if article_id not in ARTICLES_READ ][:NUM_RECOMMENDED_ARTICLES]
    return final_recommended_articles_id
recommended_articles_id = calculate_cosine_similarity(articles_tfidf_matrix, user_vector)
recommended_articles_id
#Recommended Articles and their title
#df_news = pd.read_csv(PATH_NEWS_ARTICLES)
print 'Articles Read'
print news_articles.loc[news_articles['Article_Id'].isin(ARTICLES_READ)]['Title']
print '\n'
print 'Recommender '
print news_articles.loc[news_articles['Article_Id'].isin(recommended_articles_id)]['Title']
ALPHA = 0
alpha_tfidf_vector = ALPHA *user_articles_tfidf_vector # ==> 0
alpha_ner_vector = (1-ALPHA) * user_articles_ner_tfidf_vector
user_vector = np.sum(zip(alpha_tfidf_vector,alpha_ner_vector))
user_vector
user_vector.toarray()
recommended_articles_id = calculate_cosine_similarity(articles_tfidf_matrix, user_vector)
recommended_articles_id
#Recommended Articles and their title
#df_news = pd.read_csv(PATH_NEWS_ARTICLES)
print 'Articles Read'
print news_articles.loc[news_articles['Article_Id'].isin(ARTICLES_READ)]['Title']
print '\n'
print 'Recommender '
print news_articles.loc[news_articles['Article_Id'].isin(recommended_articles_id)]['Title']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Represent articles in terms TF-IDF Matrix
Step2: 2. Represent user in terms of articles read
Step3: 3. Calculate cosine similarity between user vector and articles TF-IDF matrix
Step4: 4. Get the recommended articles
Step5: 5. Case when Alpha=0
|
14,020
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
corpus = [
'We are looking for Java developer',
'Frontend developer with knowledge in SQL and Jscript',
'And this is the third one.',
'Is this the first document?',
]
vectorizer = CountVectorizer(stop_words="english", binary=True, lowercase=False,
vocabulary=['Jscript', '.Net', 'TypeScript', 'SQL', 'NodeJS', 'Angular', 'Mongo',
'CSS',
'Python', 'PHP', 'Photoshop', 'Oracle', 'Linux', 'C++', "Java", 'TeamCity',
'Frontend', 'Backend', 'Full stack', 'UI Design', 'Web', 'Integration',
'Database design', 'UX'])
X = vectorizer.fit_transform(corpus).toarray()
feature_names = vectorizer.get_feature_names_out()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,021
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
% matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
from thinkbayes2 import Pmf, Cdf, Suite, Beta
import thinkplot
def Odds(p):
    """Convert probability p to odds in favor, p / (1-p).

    Fixed: p == 1 is now handled explicitly and returns infinity; it
    previously raised ZeroDivisionError.
    """
    if p == 1:
        return float('inf')
    return p / (1-p)
def Probability(o):
    """Convert odds in favor `o` to the corresponding probability."""
    favorable = o
    total = o + 1
    return favorable / total
p = 0.2
Odds(p)
o = 1/5
Probability(o)
prior_odds = 1
likelihood_ratio = 0.75 / 0.5
post_odds = prior_odds * likelihood_ratio
post_odds
post_prob = Probability(post_odds)
post_prob
likelihood_ratio = 0.25 / 0.5
post_odds *= likelihood_ratio
post_odds
post_prob = Probability(post_odds)
post_prob
like1 = 0.01
like2 = 2 * 0.6 * 0.01
likelihood_ratio = like1 / like2
likelihood_ratio
post_odds = 1 * like1 / like2
Probability(post_odds)
# Solution
post_odds = Odds(0.9) * like1 / like2
Probability(post_odds)
# Solution
post_odds = Odds(0.1) * like1 / like2
Probability(post_odds)
rhode = Beta(1, 1, label='Rhode')
rhode.Update((22, 11))
wei = Beta(1, 1, label='Wei')
wei.Update((21, 12))
thinkplot.Pdf(rhode.MakePmf())
thinkplot.Pdf(wei.MakePmf())
thinkplot.Config(xlabel='x', ylabel='Probability')
iters = 1000
count = 0
for _ in range(iters):
x1 = rhode.Random()
x2 = wei.Random()
if x1 > x2:
count += 1
count / iters
rhode_sample = rhode.Sample(iters)
wei_sample = wei.Sample(iters)
np.mean(rhode_sample > wei_sample)
def ProbGreater(pmf1, pmf2):
    """Probability that a draw from pmf1 exceeds a draw from pmf2.

    Enumerates every pair of outcomes and accumulates the joint probability
    of the pairs where the first value is strictly greater.
    """
    return sum(p1 * p2
               for v1, p1 in pmf1.Items()
               for v2, p2 in pmf2.Items()
               if v1 > v2)
pmf1 = rhode.MakePmf(1001)
pmf2 = wei.MakePmf(1001)
ProbGreater(pmf1, pmf2)
pmf1.ProbGreater(pmf2)
pmf1.ProbLess(pmf2)
import random
def flip(p):
    """Simulate one Bernoulli trial: True with probability p."""
    draw = random.random()
    return draw < p
iters = 1000
wins = 0
losses = 0
for _ in range(iters):
x1 = rhode.Random()
x2 = wei.Random()
count1 = count2 = 0
for _ in range(25):
if flip(x1):
count1 += 1
if flip(x2):
count2 += 1
if count1 > count2:
wins += 1
if count1 < count2:
losses += 1
wins/iters, losses/iters
rhode_rematch = np.random.binomial(25, rhode_sample)
thinkplot.Hist(Pmf(rhode_rematch))
wei_rematch = np.random.binomial(25, wei_sample)
np.mean(rhode_rematch > wei_rematch)
np.mean(rhode_rematch < wei_rematch)
from thinkbayes2 import MakeBinomialPmf
def MakeBinomialMix(pmf, label='', n=25):
    """Make a binomial mixture: distribution of hits out of n shots when the
    per-shot hit probability x is itself distributed according to `pmf`.

    pmf: Pmf over the per-shot probability x
    label: label for the resulting Pmf
    n: number of shots (generalized from the previously hard-coded 25)
    """
    mix = Pmf(label=label)
    for x, prob in pmf.Items():
        binom = MakeBinomialPmf(n=n, p=x)
        for k, p in binom.Items():
            mix[k] += prob * p
    return mix
rhode_rematch = MakeBinomialMix(rhode.MakePmf(), label='Rhode')
wei_rematch = MakeBinomialMix(wei.MakePmf(), label='Wei')
thinkplot.Pdf(rhode_rematch)
thinkplot.Pdf(wei_rematch)
thinkplot.Config(xlabel='hits')
rhode_rematch.ProbGreater(wei_rematch), rhode_rematch.ProbLess(wei_rematch)
from thinkbayes2 import MakeMixture
def MakeBinomialMix2(pmf, n=25):
    """Binomial mixture via MakeMixture: distribution of hits out of n shots
    when the per-shot probability x is distributed according to `pmf`.

    n: number of shots (generalized from the previously hard-coded 25).
    """
    binomials = Pmf()
    for x, prob in pmf.Items():
        binom = MakeBinomialPmf(n=n, p=x)
        binomials[binom] = prob
    return MakeMixture(binomials)
rhode_rematch = MakeBinomialMix2(rhode.MakePmf())
wei_rematch = MakeBinomialMix2(wei.MakePmf())
rhode_rematch.ProbGreater(wei_rematch), rhode_rematch.ProbLess(wei_rematch)
iters = 1000
pmf = Pmf()
for _ in range(iters):
k = rhode_rematch.Random() + wei_rematch.Random()
pmf[k] += 1
pmf.Normalize()
thinkplot.Hist(pmf)
ks = rhode_rematch.Sample(iters) + wei_rematch.Sample(iters)
pmf = Pmf(ks)
thinkplot.Hist(pmf)
def AddPmfs(pmf1, pmf2):
    """Distribution of the sum of one draw from pmf1 and one from pmf2.

    Enumerates every pair of outcomes and accumulates the joint probability
    under the summed value.
    """
    total = Pmf()
    for x, px in pmf1.Items():
        for y, py in pmf2.Items():
            total[x + y] += px * py
    return total
pmf = AddPmfs(rhode_rematch, wei_rematch)
thinkplot.Pdf(pmf)
pmf = rhode_rematch + wei_rematch
thinkplot.Pdf(pmf)
# Solution
pmf = rhode_rematch - wei_rematch
thinkplot.Pdf(pmf)
# Solution
# On average, we expect Rhode to win by about 1 clay.
pmf.Mean(), pmf.Median(), pmf.Mode()
# Solution
# But there is, according to this model, a 2% chance that she could win by 10.
sum([p for (x, p) in pmf.Items() if x >= 10])
iters = 1000
pmf = Pmf()
for _ in range(iters):
ks = rhode_rematch.Sample(6)
pmf[max(ks)] += 1
pmf.Normalize()
thinkplot.Hist(pmf)
iters = 1000
ks = rhode_rematch.Sample((6, iters))
ks
maxes = np.max(ks, axis=0)
maxes[:10]
pmf = Pmf(maxes)
thinkplot.Hist(pmf)
pmf = rhode_rematch.Max(6).MakePmf()
thinkplot.Hist(pmf)
def Min(pmf, k):
    """Distribution (as a Cdf) of the minimum of k independent draws from pmf.

    Applies P(min <= x) = 1 - (1 - F(x))**k directly to the CDF's
    probability array.
    NOTE(review): mutates the Cdf returned by MakeCdf -- assumes MakeCdf
    returns a fresh object rather than a shared one; confirm.
    """
    cdf = pmf.MakeCdf()
    cdf.ps = 1 - (1-cdf.ps)**k
    return cdf
pmf = Min(rhode_rematch, 6).MakePmf()
thinkplot.Hist(pmf)
# Solution
n_allergic = 4
n_non = 6
p_allergic = 0.5
p_non = 0.1
pmf = MakeBinomialPmf(n_allergic, p_allergic) + MakeBinomialPmf(n_non, p_non)
thinkplot.Hist(pmf)
# Solution
pmf.Mean()
# Solution
# Here's a class that models the study
class Gluten(Suite):
    """Bayesian suite over the number of truly gluten-sensitive subjects."""

    def Likelihood(self, data, hypo):
        """Computes the probability of the data under the hypothesis.

        data: tuple of (number who identified, number who did not)
        hypo: number of participants who are gluten sensitive

        Fixed: restored the docstring quotes -- the bare text lines were
        a SyntaxError.
        """
        # compute the number who are gluten sensitive, `gs`, and
        # the number who are not, `ngs`
        gs = hypo
        yes, no = data
        n = yes + no
        ngs = n - gs
        # Sensitive subjects identify correctly with prob 0.95; the others
        # guess right with prob 0.4; the total correct is the sum of the two.
        pmf1 = MakeBinomialPmf(gs, 0.95)
        pmf2 = MakeBinomialPmf(ngs, 0.4)
        pmf = pmf1 + pmf2
        return pmf[yes]
# Solution
prior = Gluten(range(0, 35+1))
thinkplot.Pdf(prior)
# Solution
posterior = prior.Copy()
data = 12, 23
posterior.Update(data)
# Solution
thinkplot.Pdf(posterior)
thinkplot.Config(xlabel='# who are gluten sensitive',
ylabel='PMF', legend=False)
# Solution
posterior.CredibleInterval(95)
# Solution
# Solution
# Solution
# Solution
# Solution
# Solution
# Solution
# Solution
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Odds
Step2: And this function converts from odds to probabilities.
Step3: If 20% of bettors think my horse will win, that corresponds to odds of 1
Step4: If the odds against my horse are 1
Step5: We can use the odds form of Bayes's theorem to solve the cookie problem
Step6: And then we can compute the posterior probability, if desired.
Step7: If we draw another cookie and it's chocolate, we can do another update
Step8: And convert back to probability.
Step9: Oliver's blood
Step10: Since the ratio is less than 1, it is evidence against the hypothesis that Oliver left blood at the scence.
Step11: So this evidence doesn't "move the needle" very much.
Step12: Comparing distributions
Step13: Based on the data, the distribution for Rhode is slightly farther right than the distribution for Wei, but there is a lot of overlap.
Step14: To compute the probability that Rhode actually has a higher value of p, there are two options
Step15: Beta also provides Sample, which returns a NumPy array, so we an perform the comparisons using array operations
Step16: The other option is to make Pmf objects that approximate the Beta distributions, and enumerate pairs of values
Step17: Exercise
Step18: flip returns True with probability p and False with probability 1-p
Step19: Or, realizing that the distribution of k is binomial, we can simplify the code using NumPy
Step20: Alternatively, we can make a mixture that represents the distribution of k, taking into account our uncertainty about x
Step21: Alternatively, we could use MakeMixture
Step22: Here's how we use it.
Step23: Exercise
Step24: Or we could use Sample and NumPy
Step25: Alternatively, we could compute the distribution of the sum by enumeration
Step26: Here's how it's used
Step27: The Pmf class provides a + operator that does the same thing.
Step28: Exercise
Step29: Distribution of maximum
Step30: And here's a version using NumPy. I'll generate an array with 6 rows and 10 columns
Step31: Compute the maximum in each column
Step32: And then plot the distribution of maximums
Step33: Or we can figure it out analytically. If the maximum is less-than-or-equal-to some value k, all 6 random selections must be less-than-or-equal-to k, so
Step34: Exercise
Step36: Exercises
Step38: Exercise This study from 2015 showed that many subjects diagnosed with non-celiac gluten sensitivity (NCGS) were not able to distinguish gluten flour from non-gluten flour in a blind challenge.
Step39: Exercise Coming soon
|
14,022
|
<ASSISTANT_TASK:>
Python Code:
# some code in python
def f(x):
    """Return the square of x."""
    squared = x * x
    return squared
import IPython
print("Hello world!")
2*2
def decorator(f):
    """Identity decorator: hand the callable back unchanged."""
    return f


@decorator
def f(x):
    """Placeholder that ignores x and returns None."""
    return None
3*3
print(4*4)
%%bash
echo 'hello'
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.imshow(np.random.rand(5,5,4), interpolation='none');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Random code
Step2: some text
Step3: An image
|
14,023
|
<ASSISTANT_TASK:>
Python Code:
# imports
import networkx as nx
%matplotlib inline
import matplotlib.pyplot as plt
params = {'legend.fontsize':'small',
'figure.figsize': (7,7),
'axes.labelsize': 'small',
'axes.titlesize': 'small',
'xtick.labelsize':'small',
'ytick.labelsize':'small'}
plt.rcParams.update(params)
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
def unique_node_aggregation_perts(gb, pddf):
    """Count how many previously-unseen vertices appear at each timestamp.

    gb: `pddf` grouped by timestamp; pddf: edge list with 'u'/'v' columns.
    Returns a DataFrame with column 0 = timestamp and column 1 = number of
    new vertices, sorted by timestamp.

    Fixes: groups are visited in timestamp order (dict order was
    arbitrary, making counts nondeterministic); each new vertex is counted
    individually (the old inner comprehension appended every missing
    vertex but bumped the counter only once); and the result is built with
    pd.DataFrame(list(...)), since from_dict(dict.items()) fails on
    Python 3.

    NOTE(review): only the first edge of each group is inspected
    (`.values[0]`), mirroring the original behavior -- confirm that is
    intended rather than scanning every edge in the group.
    """
    seen = []
    uniq_v = {}
    for ts, row_idx in sorted(gb.groups.items()):
        nodes = pddf[['u', 'v']].loc[row_idx].values[0]
        newvcnt = 0
        for node in nodes:
            if node not in seen:
                seen.append(node)
                newvcnt += 1
        uniq_v[ts] = newvcnt
    df = pd.DataFrame(list(uniq_v.items()))
    df.sort_values(by=[0], inplace=True)
    return df
G = nx.Graph()
G.add_edge(0, 1,attr_dict={'ts':1})
G.add_edge(0, 4,attr_dict={'ts':38})
G.add_edge(0, 6,attr_dict={'ts':32})
G.add_edge(1, 2,attr_dict={'ts':2})
G.add_edge(1, 3,attr_dict={'ts':12})
G.add_edge(2, 3,attr_dict={'ts':14})
G.add_edge(2, 5,attr_dict={'ts':27})
G.add_edge(3, 4,attr_dict={'ts':11})
G.add_edge(3, 5,attr_dict={'ts':24})
G.add_edge(4, 6,attr_dict={'ts':40})
t = [d.values()[0] for u,v,d in G.edges(data=True)]
g_edges = [[d.values()[0],u,v] for u,v,d in G.edges(data=True)]
df = pd.DataFrame(g_edges, columns=['ts','u','v']) # u= source, v= target
gb = df.groupby(['ts'])
print(r'* Unique edges added at each time stamp')
acnd= []
uniq_v= {}
for k,v in gb.groups.items():
nodes = df[['u','v']].loc[v].values[0]
newvcnt=0
for x in nodes:
if not(x in acnd):
[acnd.append(x) for x in nodes if not(x in acnd)]
newvcnt +=1
uniq_v[k] = newvcnt
df = pd.DataFrame.from_dict(uniq_v.items())
df.sort_values(by=[0],inplace=True)
f, axs = plt.subplots(1, 3, figsize=(15,5))
ax0=axs[0]
ax1=axs[1]
ax2=axs[2]
pstn = nx.spring_layout(G)
nx.draw_networkx(G,pos=pstn, alpha=0.5,ax=ax0)
nx.draw_networkx_edge_labels(G,pos=pstn,alpha=0.5,ax=ax0)
nf = gb['v'].count()
df['ecnt'] = gb['v'].count().values
df['cs']= df[1].cumsum()
df['ce']= df['ecnt'].cumsum()
ax1.plot(df[0].values,df['ecnt'].values,'ro',linestyle=':')
ax1.bar(df[0].values,df[1].values, width = 0.8, alpha=0.5)
ax1.set_ylim([0,1.5]);
ax1.set_xlabel('Time Stamps')
ax1.set_ylabel('Unique Vertices Joining the Graph')
ax1.set_yticks([1,1.5]);
# Cummulative nodes
ax2.plot(df[0].values,df['cs'].values, alpha=0.5, label='nodes')
ax2.plot(df[0].values,df['ce'].values,label='edges')
ax2.legend()
ax2.set_xlabel('Time Stamps');
ax2.set_ylabel('Cumulative V and E over time');
# nx.nx.write_edgelist(G, '/tmp/out.weighted_example_graph',data=True)
%run time/time1.py -d /tmp/toygraph -b 4 -i 0 -g ''Toygraph''
%run time/time2.py -din /tmp/toygraph -dout /tmp/toygraph_o -m 2 -gen 20
from glob import glob
dir_path = "/tmp/toygraph_o"
c_files = glob(dir_path+ "/*cliques.p")
e_files = glob(dir_path+ "/*edges.p")
if 0:
for j,f in enumerate(c_files):
clq_lst = pickle.load(open(f, "rb"))
print j, len(clq_lst), clq_lst
for c in clq_lst:
print c.history, c.nids
break
gdf = pd.DataFrame()
for f in e_files:
edg_lst = pickle.load(open(f, "rb"))
df = pd.DataFrame(edg_lst)
gdf = gdf.append(df)
# print gdf.shape
gb = gdf.groupby([2]).count()/20.0
# print gb.head()
f, axs = plt.subplots(1, 1, figsize=(1.6 * 6., 1 * 4.))
axs.scatter(x=gb.index.values,y=gb[1])
axs.set_ylabel('Avg # of Edges per Timestamp');
# plt.boxplot(gb[1].values, labels=)
# print gb.index.values
# print gb[1].values
# # Average Node Degree for the group of K=20 generated graphs
# gb = gdf.groupby([2]).groups
# avgk =[]
# for k,v in gb.items():
# # print gdf.loc[gb.groups[k]]
# df= gdf.loc[v]
# df.columns = ['s','t','ts']
# # print df.head()
# g = nx.from_pandas_dataframe(df, 's','t', ['ts'])
# # nodes.append(g.number_of_nodes())
# avgk.append(g.degree().values())
# # print k, np.mean(g.degree().keys()), g.degree().keys()
# f, axs = plt.subplots(1, 2, figsize=(1.6 * 6., 1 * 4.))
# axs[0].boxplot(avgk);
# axs[0].set_ylim([0,5])
# axs[0].set_ylabel('Degree per Timestamp');
# in blocks
gdf.columns = ['u','v','ts']
# print gdf.head()
span = gdf.ts.max() - gdf.ts.min()
slic = span/4.
for blk in range(int(gdf.index.min()),int(gdf.index.max()),int(slic)):
mask = (gdf['ts'] >= blk) & (gdf['ts'] <= blk+slic)
df = gdf.loc[mask]
g = nx.from_pandas_dataframe(df, 'u','v',['ts'])
print g.degree()
print nx.average_degree_connectivity()
break
# Average Degree At each time stamp
# in one generated graph determine the average degree
import pprint as pp
print ()
for f in e_files:
edg_lst = pickle.load(open(f, "rb"))
df = pd.DataFrame(edg_lst, columns=['u','v','ts'])
# Within this set of edges, gropu by time-stamp (lowest level)
gb = df.groupby(['ts']).groups
kd_lst = [nx.from_pandas_dataframe(df.loc[v],'u','v',['ts']).degree() for k,v in gb.items()]
ts_graphs = [nx.from_pandas_dataframe(df.loc[v],'u','v',['ts']) for k,v in gb.items()]
grps_k = [d.keys() for d in kd_lst]
# print [np.mean(kg) for kg in grps_k]
g = nx.from_pandas_dataframe(df.loc[v],'u','v',['ts'])
f, axmult = plt.subplots(1, len(ts_graphs), figsize=(1.6 * 6., 1 * 4.))
for j,axs in enumerate(axmult):
nx.draw_networkx(ts_graphs[j],pos=nx.spring_layout(ts_graphs[j]),ax=axs)
axs.set_xlabel('ts:'+str(j))
axs.spines['top'].set_visible(False)
axs.spines['right'].set_visible(False)
axs.spines['left'].set_visible(False)
# axs.axis('off')
axs.get_yaxis().set_visible(False)
axs.get_xaxis().set_ticks([])
if 0: print ts_graphs[j].degree().values()
break
plt.suptitle('Generated Graph Fragments Per Timestamp');
#
# From the group of generated graphs, these are some stats
#
mdf = pd.DataFrame()
for f in e_files:
edg_lst = pickle.load(open(f, "rb"))
df = pd.DataFrame(edg_lst)
df.columns = ['u','v','ts']
gb = df.groupby(['ts'])
# print gb.keys()
# nodes = []
# for k,v in gb.items():
# g = nx.from_pandas_dataframe(df.loc[v],'u','v',['ts'])
# nodes.append([k,g.number_of_nodes()])
# print g.number_of_nodes()
# print nodes
nf = unique_node_aggregation_perts(gb, df)
nf.columns = ['ts','v']
if f == e_files[0]:
mdf = nf
continue
# nf['cs'] = nf[1].cumsum()
mdf = pd.merge(left=nf,right=mdf,on='ts',how='outer')
# df = pd.DataFrame(nodes)
mdf['avgVcnt'] = mdf.mean(axis=1)
mdf['cs'] = mdf['avgVcnt'].cumsum()
# print mdf.head()
# df['cs'] = df[0].cumsum()
# nf[[1,'cs']].plot()
f, axs = plt.subplots(1, 1, figsize=(1.6 * 6., 1 * 4.))
# axs.plot(nf[0].values, nf[1].values)
mdf['cs'].plot(x='ts',ax=axs,marker='o',linestyle=":");
axs.set_ylabel('Average Unique Nodes Accumluating')
axs.set_ylim(0,13)
axs.set_xlim(-1,10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Network Dynamics
Step2: THRG
Step3: Average Node Degree for the group of K=20 generated graphs.
Step4: Looking at the Avg Nodes and Edges in a group of generated graphs
|
14,024
|
<ASSISTANT_TASK:>
Python Code:
import pymatgen as mg
#The constructor is simply the value + a string unit.
e = mg.Energy(1000, "Ha")
#Let's perform a conversion. Note that when printing, the units are printed as well.
print "{} = {}".format(e, e.to("eV"))
#To check what units are supported
print "Supported energy units are {}".format(e.supported_units)
dist = mg.Length(65, "mile")
time = mg.Time(30, "min")
speed = dist / time
print "The speed is {}".format(speed)
#Let's do a more sensible unit.
print "The speed is {}".format(speed.to("mile h^-1"))
g = mg.FloatWithUnit(9.81, "m s^-2") #Acceleration due to gravity
m = mg.Mass(2, "kg")
h = mg.Length(10, "m")
print "The force is {}".format(m * g)
print "The potential energy is force is {}".format((m * g * h).to("J"))
made_up = mg.FloatWithUnit(100, "Ha^3 bohr^-2")
print made_up.to("J^3 ang^-2")
try:
made_up.to("J^2")
except mg.UnitError as ex:
print ex
dists = mg.LengthArray([1, 2, 3], "mile")
times = mg.TimeArray([0.11, 0.12, 0.23], "h")
print "Speeds are {}".format(dists / times)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Units support all functionality that is supported by floats. Unit combinations are automatically taken care of.
Step2: Note that complex units are specified as space-separated powers of units. Powers are specified using "^". E.g., "kg m s^-1". Only integer powers are supported.
Step3: Some highly complex conversions are possible with this system. Let's do some made up units. We will also demonstrate pymatgen's internal unit consistency checks.
Step4: For arrays, we have the equivalent EnergyArray, ... and ArrayWithUnit classes. All other functionality remain the same.
|
14,025
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import warnings
with warnings.catch_warnings(): # suppress annoying TensorFlow FutureWarnings
warnings.filterwarnings("ignore",category=FutureWarning)
import wobble
data = wobble.Data('../data/51peg_e2ds.hdf5')
results = wobble.Results(data)
r = 67 # index into data.orders for the desired order
model = wobble.Model(data, results, r)
model.add_star('star')
model.add_telluric('tellurics')
history = wobble.optimize_order(model, niter=40, save_history=True, rv_uncertainties=False)
history.plot_nll();
template_ani_star = history.plot_template(0, nframes=50)
from IPython.display import HTML
HTML(template_ani_star.to_html5_video())
model = wobble.Model(data, results, r)
model.add_star('star')
model.add_telluric('tellurics')
history = wobble.optimize_order(model, niter=60, save_history=True, rv_uncertainties=False)
# print the current (default) settings:
for c in model.components:
if not c.template_fixed:
print('template learning rate for {0}: {1:.0e}'.format(c.name, c.learning_rate_template))
if not c.rvs_fixed:
print('RVs learning rate for {0}: {1:.0e}'.format(c.name, c.learning_rate_template))
model = wobble.Model(data, results, r)
model.add_star('star', learning_rate_template=0.001)
model.add_telluric('tellurics', learning_rate_template=0.001)
history = wobble.optimize_order(model, niter=60, save_history=True, rv_uncertainties=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: viewing more optimization info
Step2: toggle on the save_history keyword (which is False by default) to generate a wobble.History object when optimizing the order -- this object will keep track of the best-fit parameters and goodness-of-fit metric at each step of the optimization
Step3: The wobble.History class includes several convenience functions for plotting - here are a couple of examples
Step4: tuneable knobs in wobble
Step5: Do the optimizer learning rates work well?
|
14,026
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import socnet as sn
sn.node_size = 10
sn.node_color = (0, 0, 0)
sn.edge_width = 1
g = sn.generate_complete_graph(15)
sn.show_graph(g)
from random import shuffle
def randomize_types(g, num_openers, num_closers, num_chummies):
if num_openers + num_closers + num_chummies != g.number_of_nodes():
raise Exception('a soma dos tipos não é igual ao número de nós')
nodes = g.nodes()
shuffle(nodes)
for _ in range(num_openers):
g.node[nodes.pop()]['type'] = 'opener'
for _ in range(num_closers):
g.node[nodes.pop()]['type'] = 'closer'
for _ in range(num_chummies):
g.node[nodes.pop()]['type'] = 'chummy'
randomize_types(g, 15, 0, 0)
from random import random
def randomize_existences(g):
for n, m in g.edges():
if random() < 0.5:
g.edge[n][m]['exists'] = 0
else:
g.edge[n][m]['exists'] = 1
randomize_existences(g)
def convert_types_and_existences_to_colors(g):
for n in g.nodes():
if g.node[n]['type'] == 'opener':
g.node[n]['color'] = (255, 0, 0)
elif g.node[n]['type'] == 'closer':
g.node[n]['color'] = (0, 255, 0)
else:
g.node[n]['color'] = (0, 0, 255)
for n, m in g.edges():
if g.edge[n][m]['exists'] == 0:
g.edge[n][m]['color'] = (192, 192, 192)
else:
g.edge[n][m]['color'] = (0, 0, 0)
convert_types_and_existences_to_colors(g)
sn.show_graph(g)
def neighbors(g, n):
return [m for m in g.neighbors(n) if g.edge[n][m]['exists'] == 1]
# Independentemente do tipo, a restrição é sempre entre 0 e 2.
def calculate_constraint(g, n):
neighbors_n = neighbors(g, n)
degree_n = len(neighbors_n)
# Todos os tipos evitam isolamento. A restrição é máxima nesse caso.
if degree_n == 0:
return 2
# Para um chummy, a restrição é o inverso do grau. Uma pequena
# normalização é necessária para garantir que está entre 0 e 2.
if g.node[n]['type'] == 'chummy':
return 2 * (g.number_of_nodes() - degree_n - 1) / (g.number_of_nodes() - 1)
# Fórmula de Burt.
constraint = 0
for m in neighbors_n:
neighbors_m = neighbors(g, m)
degree_m = len(neighbors_m)
sub_constraint = 1 / degree_n
for l in neighbors_m:
if n != l and g.edge[n][l]['exists'] == 1:
sub_constraint += (1 / degree_n) * (1 / degree_m)
constraint += sub_constraint ** 2
# Para um closer, a restrição é o inverso da fórmula de Burt.
if g.node[n]['type'] == 'closer':
return 2 - constraint
# Para um opener, a restrição é a fórmula de Burt.
return constraint
from random import choice
def equals(a, b):
return abs(a - b) < 0.000000001
def invert_existence(g, n, m):
g.edge[n][m]['exists'] = 1 - g.edge[n][m]['exists']
def update_existences(g):
# Para cada nó n...
for n in g.nodes():
# Calcula a restrição de n.
cn = calculate_constraint(g, n)
# Inicializa o dicionário de ganhos.
g.node[n]['gains'] = {}
# Para cada nó m diferente de n...
for m in g.nodes():
if n != m:
# Calcula a restrição de m.
cm = calculate_constraint(g, m)
# Inverte temporariamente a existência de (n, m) para ver o que acontece.
invert_existence(g, n, m)
# Se a inversão representa uma adição e ela não faz a restrição
# de m diminuir, então o ganho é zero porque essa inversão não
# é possível: adicionar só é possível se ambos os nós querem.
if g.edge[n][m]['exists'] == 1 and calculate_constraint(g, m) >= cm:
g.node[n]['gains'][m] = 0
# Senão, o ganho é simplesmente a diferença das restrições.
else:
g.node[n]['gains'][m] = cn - calculate_constraint(g, n)
# Restaura a existência original de (n, m), pois a inversão era temporária.
invert_existence(g, n, m)
# Obtém o maior ganho de n.
g.node[n]['max_gain'] = max(g.node[n]['gains'].values())
# Obtém o maior ganho de todos os nós.
max_gain = max([g.node[n]['max_gain'] for n in g.nodes()])
# Se o maior ganho não for positivo, devolve False indicando que o grafo estabilizou.
if max_gain <= 0:
return False
# Senão, escolhe aleatoriamente uma aresta correspondente ao maior ganho e inverte sua existência.
n = choice([n for n in g.nodes() if equals(g.node[n]['max_gain'], max_gain)])
m = choice([m for m in g.node[n]['gains'] if equals(g.node[n]['gains'][m], max_gain)])
invert_existence(g, n, m)
# Devolve True indicando que o grafo ainda não estabilizou.
return True
from math import inf
from statistics import stdev
from queue import Queue
def calculate_variables(g, verbose=False):
# Cria uma cóṕia do grafo na qual as arestas realmente
# existem ou não existem. Isso facilita os cálculos.
gc = g.copy()
for n, m in g.edges():
if g.edge[n][m]['exists'] == 0:
gc.remove_edge(n, m)
# Cálculo do número de arestas. (densidade)
num_edges = gc.number_of_edges()
if verbose:
print('número de arestas:', num_edges)
# Cálculo do número de componentes. (fragmentação)
for n in gc.nodes():
gc.node[n]['label'] = 0
label = 0
q = Queue()
for s in gc.nodes():
if gc.node[s]['label'] == 0:
label += 1
gc.node[s]['label'] = label
q.put(s)
while not q.empty():
n = q.get()
for m in gc.neighbors(n):
if gc.node[m]['label'] == 0:
gc.node[m]['label'] = label
q.put(m)
num_components = label
if verbose:
print('número de componentes:', num_components)
# Cálculo do desvio do tamanho de componentes. (fragmentação)
sizes = {label: 0 for label in range(1, num_components + 1)}
for n in gc.nodes():
sizes[gc.node[n]['label']] += 1
if num_components == 1:
dev_components = 0
else:
dev_components = stdev(sizes.values())
if verbose:
print('desvio do tamanho de componentes: {:05.2f}\n'.format(dev_components))
# Cálculo do desvio do betweenness. (desigualdade)
# Cálculo do betweenness médio por tipo. (quais perfis ficaram centrais)
sn.build_betweenness(gc)
betweenness = []
mean_betweenness = {
'closer': 0,
'opener': 0,
'chummy': 0,
}
for n in gc.nodes():
betweenness.append(gc.node[n]['theoretical_betweenness'])
mean_betweenness[gc.node[n]['type']] += betweenness[-1]
if verbose:
print('betweenness do nó {:2} ({}): {:05.2f}'.format(n, gc.node[n]['type'], betweenness[-1]))
dev_betweenness = stdev(betweenness)
for key in mean_betweenness:
length = len([n for n in gc.nodes() if gc.node[n]['type'] == key])
if length == 0:
mean_betweenness[key] = 0
else:
mean_betweenness[key] /= length
if verbose:
print('\ndesvio do betweenness: {:05.2f}\n'.format(dev_betweenness))
for key, value in mean_betweenness.items():
print('betweenness médios de {}: {:05.2f}'.format(key, value))
return num_edges, num_components, dev_components, dev_betweenness, mean_betweenness
TIMES = 25
num_openers = 15
num_closers = 0
num_chummies = 0
mean_num_edges = 0
mean_num_components = 0
mean_dev_components = 0
mean_dev_betweenness = 0
mean_mean_betweenness = {
'opener': 0,
'closer': 0,
'chummy': 0,
}
for _ in range(TIMES):
randomize_types(g, num_openers, num_closers, num_chummies)
randomize_existences(g)
while update_existences(g):
pass
num_edges, num_components, dev_components, dev_betweenness, mean_betweenness = calculate_variables(g)
mean_num_edges += num_edges
mean_num_components += num_components
mean_dev_components += dev_components
mean_dev_betweenness += dev_betweenness
for key, value in mean_betweenness.items():
mean_mean_betweenness[key] += value
mean_num_edges /= TIMES
mean_num_components /= TIMES
mean_dev_components /= TIMES
mean_dev_betweenness /= TIMES
for key in mean_mean_betweenness:
mean_mean_betweenness[key] /= TIMES
print('média do número de arestas: {:05.2f}'.format(mean_num_edges))
print('média do número de componentes: {:05.2f}'.format(mean_num_components))
print('média do desvio do tamanho de componentes: {:05.2f}'.format(mean_dev_components))
print('média do desvio do betweenness: {:05.2f}'.format(mean_dev_betweenness))
for key, value in mean_mean_betweenness.items():
print('média do betweenness médio de {}: {:05.2f}'.format(key, value))
def update_positions(g, invert=False):
if invert:
for n, m in g.edges():
g.edge[n][m]['notexists'] = 1 - g.edge[n][m]['exists']
sn.update_positions(g, 'notexists')
else:
sn.update_positions(g, 'exists')
def snapshot(g, frames):
convert_types_and_existences_to_colors(g)
frame = sn.generate_frame(g)
frames.append(frame)
frames = []
randomize_types(g, num_openers, num_closers, num_chummies)
randomize_existences(g)
sn.randomize_positions(g)
snapshot(g, frames)
while update_existences(g):
update_positions(g, False)
snapshot(g, frames)
sn.show_animation(frames)
_, _, _, _, _ = calculate_variables(g, verbose=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configurando a biblioteca
Step2: Gerando um grafo completo
Step3: Esse será o grafo da comunidade.
Step4: Atribuindo aleatoriamente existências às arestas
Step5: Convertendo tipos e existências em cores para visualização
Step6: Definindo uma medida de restrição
Step7: Definindo uma atualização de existência
Step8: Definindo uma calculadora de variáveis.
Step9: Simulando várias vezes o processo no qual os nós invertem a existência de arestas até não poderem mais diminuir a restrição.
Step10: No final das simulações, a média das variáveis é impressa.
Step11: Para ter insights sobre o que está acontecendo, não esqueça de examinar a versão animada da simulação!
|
14,027
|
<ASSISTANT_TASK:>
Python Code:
# Environment at time of execution
%load_ext watermark
%pylab inline
%watermark -a "Anthony Abercrombie" -d -t -v -p numpy,pandas,matplotlib -g
from __future__ import print_function
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import dotenv
import os
import sys
import dotenv
import subprocess
import glob
from tqdm import tqdm
#File path to get to the project root
PROJ_ROOT = os.path.join(os.path.pardir, os.pardir)
# add local python functions
sys.path.append(os.path.join(PROJ_ROOT, "src"))
#Load AWS keys as environment variables
dotenv_path = os.path.join(PROJ_ROOT, '.env')
dotenv.load_dotenv(dotenv_path)
AWS_ACCESS_KEY = os.environ.get("AWS_ACCESS_KEY")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
SPARK_HOME = os.environ.get("SPARK_HOME")
ec2_keypair = os.environ.get("ec2_keypair")
ec2_keypair_pem = os.environ.get("ec2_keypair_pem")
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load the "autoreload" extension
%load_ext autoreload
# always reload modules marked with "%aimport"
%autoreload 1
SPARK_HOME
def spinup_spark_ec2(SPARK_HOME, keypair, keyfile, num_slaves, cluster_name):
bash_command = '{}/ec2/spark-ec2 -k {} -i {}> -s {} launch {}'.format(SPARK_HOME, keypair, keyfile, num_slaves, cluster_name)
return bash_command
args = (SPARK_HOME, ec2_keypair, ec2_keypair_pem, 1, 'spark_ec2_cluster')
x = spinup_spark_ec2(*args)
x
x
'{}/bin/spark-ec2 -k {}<keypair> -i {}<key-file> -s {}<num-slaves> launch {}<cluster-name>'
def connect_master_node(SPARK_HOME, keypair, keyfile, region,cluster_name):
bash_cmd = '{}/ec2/spark-ec2 -k {} -i {} --region={} login {}'.format(SPARK_HOME, keypair, keyfile, region,cluster_name)
return bash_cmd
args = (SPARK_HOME, ec2_keypair, ec2_keypair_pem, 'us-west-2b', 'spark_ec2_cluster')
y = connect_master_node(*args)
y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: where is spark-ec2?
Step2: Numerical DataFlow with Spark and Tensorflow
|
14,028
|
<ASSISTANT_TASK:>
Python Code:
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print("Downloaded " + local)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py")
import numpy as np
import thinkstats2
import thinkplot
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r"$\lambda=%g$" % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title="Exponential CDF", xlabel="x", ylabel="CDF", loc="lower right")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/analytic.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/babyboom.dat")
import analytic
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label="actual")
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel="Time between births (minutes)", ylabel="CDF")
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(
xlabel="Time between births (minutes)",
ylabel="CCDF",
yscale="log",
loc="upper right",
)
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma, low=-1.0, high=4.0)
label = r"$\mu=%g$, $\sigma=%g$" % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title="Normal CDF", xlabel="x", ylabel="CDF", loc="upper left")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/first.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dct")
download(
"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dat.gz"
)
import nsfg
import first
preg = nsfg.ReadFemPreg()
weights = preg.totalwgt_lb.dropna()
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print("Mean, Var", mu, var)
# plot the model
sigma = np.sqrt(var)
print("Sigma", sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label="model", color="0.6")
# plot the data
cdf = thinkstats2.Cdf(weights, label="data")
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Config(title="Birth weights", xlabel="Birth weight (pounds)", ylabel="CDF")
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = "$\mu=%d$, $\sigma=%d$" % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Config(
title="Normal probability plot",
xlabel="standard normal sample",
ylabel="sample values",
)
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color="0.8")
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label="all live")
thinkplot.Config(
title="Normal probability plot",
xlabel="Standard deviations from mean",
ylabel="Birth weight (lbs)",
)
full_term = preg[preg.prglngth >= 37]
term_weights = full_term.totalwgt_lb.dropna()
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color="0.8")
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label="all live")
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label="full term")
thinkplot.Config(
title="Normal probability plot",
xlabel="Standard deviations from mean",
ylabel="Birth weight (lbs)",
)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/brfss.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/CDBRFS08.ASC.gz")
import brfss
df = brfss.ReadBrfss()
weights = df.wtkg2.dropna()
def MakeNormalModel(weights):
Plots a CDF with a Normal model.
weights: sequence
cdf = thinkstats2.Cdf(weights, label="weights")
mean, var = thinkstats2.TrimmedMeanVar(weights)
std = np.sqrt(var)
print("n, mean, std", len(weights), mean, std)
xmin = mean - 4 * std
xmax = mean + 4 * std
xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
thinkplot.Plot(xs, ps, label="model", linewidth=4, color="0.8")
thinkplot.Cdf(cdf)
MakeNormalModel(weights)
thinkplot.Config(
title="Adult weight, linear scale",
xlabel="Weight (kg)",
ylabel="CDF",
loc="upper right",
)
log_weights = np.log10(weights)
MakeNormalModel(log_weights)
thinkplot.Config(
title="Adult weight, log scale",
xlabel="Weight (log10 kg)",
ylabel="CDF",
loc="upper right",
)
def MakeNormalPlot(weights):
Generates a normal probability plot of birth weights.
weights: sequence
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-5, 5]
xs, ys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(xs, ys, color="0.8", label="model")
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label="weights")
MakeNormalPlot(weights)
thinkplot.Config(
title="Adult weight, normal plot",
xlabel="Weight (kg)",
ylabel="CDF",
loc="upper left",
)
MakeNormalPlot(log_weights)
thinkplot.Config(
title="Adult weight, lognormal plot",
xlabel="Weight (log10 kg)",
ylabel="CDF",
loc="upper left",
)
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r"$\alpha=%g$" % alpha)
thinkplot.Config(title="Pareto CDF", xlabel="x", ylabel="CDF", loc="lower right")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/populations.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/PEP_2012_PEPANNRES_with_ann.csv")
import populations
pops = populations.ReadData()
print("Number of cities/towns", len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label="data")
cdf_log = thinkstats2.Cdf(log_pops, label="data")
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1 - ys, label="model", color="0.8")
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(
xlabel="log10 population", ylabel="CCDF", yscale="log", loc="lower left"
)
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label="model", color="0.8")
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel="log10 population", ylabel="CDF", loc="lower right")
thinkstats2.NormalProbabilityPlot(log_pops, label="data")
thinkplot.Config(xlabel="Random variate", ylabel="log10 population", xlim=[-5, 5])
import random
def expovariate(lam):
p = random.random()
x = -np.log(1 - p) / lam
return x
t = [expovariate(lam=2) for _ in range(1000)]
cdf = thinkstats2.Cdf(t)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel="Exponential variate", ylabel="CCDF", yscale="log")
import scipy.stats
mu = 178
sigma = 7.7
dist = scipy.stats.norm(loc=mu, scale=sigma)
type(dist)
dist.mean(), dist.std()
dist.cdf(mu - sigma)
alpha = 1.7
xmin = 1 # meter
dist = scipy.stats.pareto(b=alpha, scale=xmin)
dist.median()
sample = [random.weibullvariate(2, 1) for _ in range(1000)]
cdf = thinkstats2.Cdf(sample)
thinkplot.Cdf(cdf, transform="weibull")
thinkplot.Config(xlabel="Weibull variate", ylabel="CCDF")
import analytic
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label="actual")
n = len(diffs)
lam = 44.0 / 24 / 60
sample = [random.expovariate(lam) for _ in range(n)]
1 / lam, np.mean(sample)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/hinc.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/hinc06.csv")
import hinc
df = hinc.ReadData()
df
xs, ps = df.income.values, df.ps.values
cdf = thinkstats2.Cdf(xs, ps, label="data")
cdf_log = thinkstats2.Cdf(np.log10(xs), ps, label="data")
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel="household income", ylabel="CDF")
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5, low=0, high=250000)
thinkplot.Plot(xs, 1 - ys, label="model", color="0.8")
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(
xlabel="log10 household income",
ylabel="CCDF",
xscale="log",
yscale="log",
loc="lower left",
)
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label="model", color="0.8")
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel="log10 household income", ylabel="CDF")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exponential distribution
Step2: Here's the distribution of interarrival times from a dataset of birth times.
Step3: Here's what the CCDF looks like on a log-y scale. A straight line is consistent with an exponential distribution.
Step4: Normal distribution
Step5: I'll use a normal model to fit the distribution of birth weights from the NSFG.
Step6: Here's the observed CDF and the model. The model fits the data well except in the left tail.
Step7: A normal probability plot is a visual test for normality. The following example shows that if the data are actually from a normal distribution, the plot is approximately straight.
Step8: Here's the normal probability plot for birth weights, showing that the lightest babies are lighter than we expect from the normal mode, and the heaviest babies are heavier.
Step9: If we suspect that the deviation in the left tail is due to preterm babies, we can check by selecting only full term births.
Step10: Now the deviation in the left tail is almost gone, but the heaviest babies are still heavy.
Step11: Lognormal model
Step13: The following function estimates the parameters of a normal distribution and plots the data and a normal model.
Step14: Here's the distribution of adult weights and a normal model, which is not a very good fit.
Step15: Here's the distribution of adult weight and a lognormal model, plotted on a log-x scale. The model is a better fit for the data, although the heaviest people are heavier than the model expects.
Step17: The following function generates a normal probability plot.
Step18: When we generate a normal probability plot with adult weights, we can see clearly that the data deviate from the model systematically.
Step19: If we make a normal probability plot with log weights, the model fit the data well except in the tails, where the heaviest people exceed expectations.
Step20: Pareto distribution
Step21: The distribution of populations for cities and towns is sometimes said to be Pareto-like.
Step22: Here's the distribution of population for cities and towns in the U.S., along with a Pareto model. The model fits the data well in the tail.
Step23: The lognormal model might be a better fit for this data (as is often the case for things that are supposed to be Pareto).
Step24: Here's a normal probability plot for the log-populations. The model fits the data well except in the right tail, where the biggest cities are bigger than expected.
Step25: Random variates
Step26: We can test it by generating a sample.
Step27: And plotting the CCDF on a log-y scale.
Step28: A straight line is consistent with an exponential distribution.
Step29: For example <tt>scipy.stats.norm</tt> represents a normal distribution.
Step30: A "frozen random variable" can compute its mean and standard deviation.
Step31: It can also evaluate its CDF. How many people are below the mean by more than one standard deviation? About 16%
Step32: How many people are between 5'10" and 6'1"?
Step33: What is the mean height in Pareto world?
Step34: Exercise
Step35: Bonus Example
Step36: Here's what the CDF looks like on a linear scale.
Step37: To check whether a Pareto model describes the data well, I plot the CCDF on a log-log scale.
Step38: For the lognormal model I estimate mu and sigma using percentile-based statistics (median and IQR).
Step39: Here's what the distribution, and fitted model, look like on a log-x scale.
|
14,029
|
<ASSISTANT_TASK:>
Python Code:
#!wget http://www.remss.com/data/msu/data/netcdf/uat4_tb_v03r03_avrg_chTLT_197812_201308.nc3.nc
#!mv uat4_tb_v03r03_avrg_chTLT_197812_201308.nc3.nc data/
#!wget http://www.remss.com/data/msu/data/netcdf/uat4_tb_v03r03_anom_chTLT_197812_201308.nc3.nc
#!mv uat4_tb_v03r03_anom_chTLT_197812_201308.nc3.nc data/
%pylab inline
import urllib2
import os
from IPython.display import Image
def download(url, dir):
Saves file 'url' into 'dir', unless it already exists.
filename = os.path.basename(url)
fullpath = os.path.join(dir, filename)
if os.path.exists(fullpath):
print "Already downloaded:", filename
else:
print "Downloading:", filename
open(fullpath, "w").write(urllib2.urlopen(url).read())
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_TTS.txt", "data")
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_TLS.txt", "data")
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_tlt_land.txt", "data")
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_tlt_ocean.txt", "data")
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_tmt_land.txt", "data")
download("http://www.remss.com/data/msu/weighting_functions/std_atmosphere_wt_function_chan_tmt_ocean.txt", "data")
D = loadtxt("data/std_atmosphere_wt_function_chan_TTS.txt", skiprows=6)
h = D[:, 1]
wTTS = D[:, 5]
D = loadtxt("data/std_atmosphere_wt_function_chan_TLS.txt", skiprows=6)
assert max(abs(h-D[:, 1])) < 1e-12
wTLS = D[:, 5]
D = loadtxt("data/std_atmosphere_wt_function_chan_tlt_land.txt", skiprows=7)
assert max(abs(h-D[:, 1])) < 1e-12
wTLT_land = D[:, 5]
D = loadtxt("data/std_atmosphere_wt_function_chan_tlt_ocean.txt", skiprows=7)
assert max(abs(h-D[:, 1])) < 1e-12
wTLT_ocean = D[:, 5]
D = loadtxt("data/std_atmosphere_wt_function_chan_tmt_land.txt", skiprows=7)
assert max(abs(h-D[:, 1])) < 1e-12
wTMT_land = D[:, 5]
D = loadtxt("data/std_atmosphere_wt_function_chan_tmt_ocean.txt", skiprows=7)
assert max(abs(h-D[:, 1])) < 1e-12
wTMT_ocean = D[:, 5]
figure(figsize=(3, 8))
plot(wTLS, h/1000, label="TLS")
plot(wTTS, h/1000, label="TTS")
plot(wTMT_ocean, h/1000, label="TMT ocean")
plot(wTMT_land, h/1000, label="TMT land")
plot(wTLT_ocean, h/1000, label="TLT ocean")
plot(wTLT_land, h/1000, label="TLT land")
xlim([0, 0.2])
ylim([0, 50])
legend()
ylabel("Height [km]")
show()
Image(url="http://www.ssmi.com/msu/img/wt_func_plot_for_web_2012.all_channels2.png", embed=True)
from netCDF4 import Dataset
from numpy.ma import average
rootgrp = Dataset('data/uat4_tb_v03r03_avrg_chtlt_197812_201504.nc3.nc')
list(rootgrp.variables)
# 144 values, interval [-180, 180]
longitude = rootgrp.variables["longitude"][:]
# 72 values, interval [-90, 90]
latitude = rootgrp.variables["latitude"][:]
# 144 rows of [min, max]
longitude_bounds = rootgrp.variables["longitude_bounds"][:]
# 72 rows of [min, max]
latitude_bounds = rootgrp.variables["latitude_bounds"][:]
# time in days, 1978 - today
time = rootgrp.variables["time"][:]
# time in years
years = time / 365.242 + 1978
# 12 values: time in days for 12 months in a year
time_climatology = rootgrp.variables["climatology_time"][:]
# (time, latitude, longitude)
brightness_temperature = rootgrp.variables["brightness_temperature"][:]
# (time_climatology, latitude, longitude)
brightness_temperature_climatology = rootgrp.variables["brightness_temperature_climatology"][:]
S_theta = pi / 36 * sin(pi/144) * cos(latitude*pi/180)
sum(144 * S_theta)-4*pi
w_theta = sin(pi/144) * cos(latitude*pi/180)
sum(w_theta)
Tavg = average(brightness_temperature, axis=2)
Tavg = average(Tavg, axis=1, weights=w_theta)
plot(years, Tavg-273.15)
xlabel("Year")
ylabel("T [C]")
title("TLT (Temperature Lower Troposphere)")
show()
Tanom = empty(Tavg.shape)
for i in range(12):
Tanom[i::12] = Tavg[i::12] - average(Tavg[i::12])
from scipy.stats import linregress
# Skip the first year, start from 1979, that's why you see the "12" here and below:
n0 = 12 # use 276 for the year 2001
Y0 = years[n0]
a, b, _, _, adev = linregress(years[n0:]-Y0, Tanom[n0:])
print "par dev"
print a, adev
print b
from matplotlib.ticker import MultipleLocator
figure(figsize=(6.6, 3.5))
plot(years, Tanom, "b-", lw=0.7)
plot(years, a*(years-Y0)+b, "b-", lw=0.7, label="Trend = $%.3f \pm %.3f$ K/decade" % (a*10, adev*10))
xlim([1979, 2016])
ylim([-1.2, 1.2])
gca().xaxis.set_minor_locator(MultipleLocator(1))
legend()
xlabel("Year")
ylabel("Temperature Anomaly [K]")
title("TLT (Temperature Lower Troposphere)")
show()
Image(url="http://www.remss.com/data/msu/graphics/TLT/plots/RSS_TS_channel_TLT_Global_Land_And_Sea_v03_3.png", embed=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Weight functions
Step3: Netcdf data
Step4: We need to calculate the element area (on a unit sphere) as follows
Step5: Let's create averaging weights that are normalized to 1 as follows
Step6: The temperature oscillates each year. To calculate the "anomaly", we subtract from each month its average temperature
Step7: We calculate linear fit
Step8: And compare against official graph + trend. As can be seen, the agreement is perfect
|
14,030
|
<ASSISTANT_TASK:>
Python Code:
# built-in python modules
import os
import inspect
# scientific python add-ons
import numpy as np
import pandas as pd
# plotting stuff
# first line makes the plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# seaborn makes your plots look better
try:
import seaborn as sns
sns.set(rc={"figure.figsize": (12, 6)})
except ImportError:
print('We suggest you install seaborn using conda or pip and rerun this cell')
# finally, we import the pvlib library
import pvlib
# Find the absolute file path to your pvlib installation
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(pvlib)))
# absolute path to a data file
datapath = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
# read tmy data with year values coerced to a single year
tmy_data, meta = pvlib.tmy.readtmy3(datapath, coerce_year=2015)
tmy_data.index.name = 'Time'
# TMY data seems to be given as hourly data with time stamp at the end
# shift the index 30 Minutes back for calculation of sun positions
tmy_data = tmy_data.shift(freq='-30Min')
tmy_data.GHI.plot()
plt.ylabel('Irradiance (W/m**2)')
tmy_data.DHI.plot()
plt.ylabel('Irradiance (W/m**2)')
surface_tilt = 30
surface_azimuth = 180 # pvlib uses 0=North, 90=East, 180=South, 270=West convention
albedo = 0.2
# create pvlib Location object based on meta data
sand_point = pvlib.location.Location(meta['latitude'], meta['longitude'], tz='US/Alaska',
altitude=meta['altitude'], name=meta['Name'].replace('"',''))
print(sand_point)
solpos = pvlib.solarposition.get_solarposition(tmy_data.index, sand_point)
solpos.plot()
# the extraradiation function returns a simple numpy array
# instead of a nice pandas series. We will change this
# in a future version
dni_extra = pvlib.irradiance.extraradiation(tmy_data.index)
dni_extra = pd.Series(dni_extra, index=tmy_data.index)
dni_extra.plot()
plt.ylabel('Extra terrestrial radiation (W/m**2)')
airmass = pvlib.atmosphere.relativeairmass(solpos['apparent_zenith'])
airmass.plot()
plt.ylabel('Airmass')
diffuse_irrad = pd.DataFrame(index=tmy_data.index)
models = ['Perez', 'Hay-Davies', 'Isotropic', 'King', 'Klucher', 'Reindl']
diffuse_irrad['Perez'] = pvlib.irradiance.perez(surface_tilt,
surface_azimuth,
dhi=tmy_data.DHI,
dni=tmy_data.DNI,
dni_extra=dni_extra,
solar_zenith=solpos.apparent_zenith,
solar_azimuth=solpos.azimuth,
airmass=airmass)
diffuse_irrad['Hay-Davies'] = pvlib.irradiance.haydavies(surface_tilt,
surface_azimuth,
dhi=tmy_data.DHI,
dni=tmy_data.DNI,
dni_extra=dni_extra,
solar_zenith=solpos.apparent_zenith,
solar_azimuth=solpos.azimuth)
diffuse_irrad['Isotropic'] = pvlib.irradiance.isotropic(surface_tilt,
dhi=tmy_data.DHI)
diffuse_irrad['King'] = pvlib.irradiance.king(surface_tilt,
dhi=tmy_data.DHI,
ghi=tmy_data.GHI,
solar_zenith=solpos.apparent_zenith)
diffuse_irrad['Klucher'] = pvlib.irradiance.klucher(surface_tilt, surface_azimuth,
dhi=tmy_data.DHI,
ghi=tmy_data.GHI,
solar_zenith=solpos.apparent_zenith,
solar_azimuth=solpos.azimuth)
diffuse_irrad['Reindl'] = pvlib.irradiance.reindl(surface_tilt,
surface_azimuth,
dhi=tmy_data.DHI,
dni=tmy_data.DNI,
ghi=tmy_data.GHI,
dni_extra=dni_extra,
solar_zenith=solpos.apparent_zenith,
solar_azimuth=solpos.azimuth)
yearly = diffuse_irrad.resample('A', how='sum').dropna().squeeze() / 1000.0 # kWh
monthly = diffuse_irrad.resample('M', how='sum', kind='period') / 1000.0
daily = diffuse_irrad.resample('D', how='sum') / 1000.0
ax = diffuse_irrad.plot(title='In-plane diffuse irradiance', alpha=.75, lw=1)
ax.set_ylim(0, 800)
ylabel = ax.set_ylabel('Diffuse Irradiance [W]')
plt.legend()
diffuse_irrad.describe()
diffuse_irrad.dropna().plot(kind='density')
ax_daily = daily.tz_convert('UTC').plot(title='Daily diffuse irradiation')
ylabel = ax_daily.set_ylabel('Irradiation [kWh]')
ax_monthly = monthly.plot(title='Monthly average diffuse irradiation', kind='bar')
ylabel = ax_monthly.set_ylabel('Irradiation [kWh]')
yearly.plot(kind='barh')
mean_yearly = yearly.mean()
yearly_mean_deviation = (yearly - mean_yearly) / yearly * 100.0
yearly_mean_deviation.plot(kind='bar')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Diffuse irradiance models
Step2: Perez
Step3: HayDavies
Step4: Isotropic
Step5: King Diffuse model
Step6: Klucher Model
Step7: Reindl
Step8: Calculate yearly, monthly, daily sums.
Step9: Plot Results
Step10: Daily
Step11: Monthly
Step12: Yearly
Step13: Compute the mean deviation from measured for each model and display as a function of the model
|
14,031
|
<ASSISTANT_TASK:>
Python Code:
#%matplotlib inline
import numpy as num, astropy.io.fits as pyf,pylab as pyl
from trippy import psf, pill, psfStarChooser
from trippy import scamp,MCMCfit
import scipy as sci
from os import path
import os
from astropy.visualization import interval, ZScaleInterval
def trimCatalog(cat):
    """Return a trimmed copy of a sextractor catalog keeping only sources
    suitable for PSF generation.

    A source is kept when it is unflagged (FLAGS == 0), at least 30 pixels
    from its nearest neighbour, and its peak pixel is below 70000
    (i.e. unsaturated). Relies on the module-level image array `data`
    for the peak-pixel check.
    """
    good = []
    xs = cat['XWIN_IMAGE']
    ys = cat['YWIN_IMAGE']
    for i in range(len(xs)):
        try:
            a = int(xs[i])
            b = int(ys[i])
            m = num.max(data[b - 4:b + 5, a - 4:a + 5])
        except (IndexError, ValueError):
            # Source too close to the image edge (empty cutout) or with bad
            # coordinates: skip it instead of silently reusing the previous
            # source's peak value (the original bare `except: pass` did that,
            # and raised NameError if the very first source failed).
            continue
        dist = num.sort(((xs - xs[i])**2 + (ys - ys[i])**2)**0.5)
        d = dist[1]  # distance to the nearest *other* source (dist[0] is self)
        if cat['FLAGS'][i] == 0 and d > 30 and m < 70000:
            good.append(i)
    good = num.array(good)
    outcat = {}
    for key in cat:
        outcat[key] = cat[key][good]
    return outcat
inputFile='Polonskaya.fits'
if not path.isfile(inputFile):
os.system('wget -O Polonskaya.fits http://www.canfar.phys.uvic.ca/vospace/nodes/fraserw/Polonskaya.fits?view=data')
else:
print("We already have the file.")
with pyf.open(inputFile) as han:
data = han[0].data
header = han[0].header
EXPTIME = header['EXPTIME']
scamp.makeParFiles.writeSex('example.sex',
minArea=3.,
threshold=5.,
zpt=27.8,
aperture=20.,
min_radius=2.0,
catalogType='FITS_LDAC',
saturate=55000)
scamp.makeParFiles.writeConv()
scamp.makeParFiles.writeParam(numAps=1) #numAps is thenumber of apertures that you want to use. Here we use 1
scamp.runSex('example.sex', inputFile ,options={'CATALOG_NAME':'example.cat'},verbose=False)
catalog = trimCatalog(scamp.getCatalog('example.cat',paramFile='def.param'))
dist = ((catalog['XWIN_IMAGE']-811)**2+(catalog['YWIN_IMAGE']-4005)**2)**0.5
args = num.argsort(dist)
xt = catalog['XWIN_IMAGE'][args][0]
yt = catalog['YWIN_IMAGE'][args][0]
rate = 18.4588 # "/hr
angle = 31.11+1.1 # degrees counter clockwise from horizontal, right
starChooser=psfStarChooser.starChooser(data,
catalog['XWIN_IMAGE'],catalog['YWIN_IMAGE'],
catalog['FLUX_AUTO'],catalog['FLUXERR_AUTO'])
(goodFits,goodMeds,goodSTDs) = starChooser(30,200,noVisualSelection=False,autoTrim=True,
bgRadius=15, quickFit = False,
printStarInfo = True,
repFact = 5, ftol=1.49012e-08)
print(goodFits)
print(goodMeds)
goodPSF = psf.modelPSF(num.arange(61),num.arange(61), alpha=goodMeds[2],beta=goodMeds[3],repFact=10)
goodPSF.genLookupTable(data,goodFits[:,4],goodFits[:,5],verbose=False)
fwhm = goodPSF.FWHM() ###this is the FWHM with lookuptable included
fwhm = goodPSF.FWHM(fromMoffatProfile=True) ###this is the pure moffat FWHM.
print("Full width at half maximum {:5.3f} (in pix).".format(fwhm))
zscale = ZScaleInterval()
(z1, z2) = zscale.get_limits(goodPSF.lookupTable)
normer = interval.ManualInterval(z1,z2)
pyl.imshow(normer(goodPSF.lookupTable))
pyl.show()
goodPSF.line(rate,angle,EXPTIME/3600.,pixScale=0.185,useLookupTable=True)
goodPSF.computeRoundAperCorrFromPSF(psf.extent(0.8*fwhm,4*fwhm,10),display=False,
displayAperture=False,
useLookupTable=True)
roundAperCorr = goodPSF.roundAperCorr(1.4*fwhm)
goodPSF.computeLineAperCorrFromTSF(psf.extent(0.1*fwhm,4*fwhm,10),
l=(EXPTIME/3600.)*rate/0.185,a=angle,display=False,displayAperture=False)
lineAperCorr = goodPSF.lineAperCorr(1.4*fwhm)
print(lineAperCorr,roundAperCorr)
goodPSF.psfStore('psf.fits', psfV2=True)
#goodPSF = psf.modelPSF(restore='psf.fits')
#goodPSF.line(new_rate,new_angle,EXPTIME/3600.,pixScale=0.185,useLookupTable=True)
#initiate the pillPhot object
phot = pill.pillPhot(data,repFact=10)
#get photometry, assume ZPT=26.0
#enableBGselection=True allows you to zoom in on a good background region in the aperture display window
#trimBGhighPix is a sigma cut to get rid of the cosmic rays. They get marked as blue in the display window
#background is selected inside the box and outside the skyRadius value
#mode is th background mode selection. Options are median, mean, histMode (JJ's jjkmode technique), fraserMode (ask me about it), gaussFit, and "smart". Smart does a gaussian fit first, and if the gaussian fit value is discrepant compared to the expectation from the background std, it resorts to the fraserMode. "smart" seems quite robust to nearby bright sources
#examples of round sources
phot(goodFits[0][4], goodFits[0][5],radius=3.09*1.1,l=0.0,a=0.0,
skyRadius=4*3.09,width=6*3.09,
zpt=26.0,exptime=EXPTIME,enableBGSelection=True,display=True,
backupMode="fraserMode",trimBGHighPix=3.)
#example of a trailed source
phot(xt,yt,radius=fwhm*1.4,l=(EXPTIME/3600.)*rate/0.185,a=angle,
skyRadius=4*fwhm,width=6*fwhm,
zpt=26.0,exptime=EXPTIME,enableBGSelection=True,display=True,
backupMode="smart",trimBGHighPix=3.)
phot.SNR(verbose=True)
#get those values
print(phot.magnitude)
print(phot.dmagnitude)
print(phot.sourceFlux)
print(phot.snr)
print(phot.bg)
phot.computeRoundAperCorrFromSource(goodFits[0,4],goodFits[0,5],num.linspace(1*fwhm,4*fwhm,10),
skyRadius=5*fwhm, width=6*fwhm,displayAperture=False,display=True)
print('Round aperture correction for a 4xFWHM aperture is {:.3f}.'.format(phot.roundAperCorr(1.4*fwhm)))
Data = data[int(yt)-200:int(yt)+200,int(xt)-200:int(xt)+200]-phot.bg
zscale = ZScaleInterval()
(z1, z2) = zscale.get_limits(Data)
normer = interval.ManualInterval(z1,z2)
pyl.imshow(normer(Data))
pyl.show()
fitter = MCMCfit.MCMCfitter(goodPSF,Data)
fitter.fitWithModelPSF(200+xt-int(xt)-1,200+yt-int(yt)-1, m_in=1000.,
fitWidth=10,
nWalkers=20, nBurn=20, nStep=20,
bg=phot.bg, useLinePSF=True, verbose=False,useErrorMap=False)
(fitPars, fitRange) = fitter.fitResults(0.67)
print(fitPars)
print(fitRange)
modelImage = goodPSF.plant(fitPars[0],fitPars[1],fitPars[2],Data,addNoise=False,useLinePSF=True,returnModel=True)
pyl.imshow(normer(modelImage))
pyl.show()
removed = goodPSF.remove(fitPars[0],fitPars[1],fitPars[2],Data,useLinePSF=True)
pyl.imshow(normer(removed))
pyl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function trim catalog is a convenience function to simply return only those sources that are well enough isolated for PSF generation. It rejects any sources within 30 pixels of another source, any sources with peak pixel above 70,000, and any sources that sextractor has flagged for what ever reason. We may fold this into psfStarChooser in the future.
Step2: Get the image this tutorial assumes you have. If wget fails then you are likely on a mac, and should just download it manually
Step3: First load the fits image and get out the header, data, and exposure time.
Step4: Next run sextractor on the images, and use trimCatalog to create a trimmed down list of isolated sources.
Step5: Finally, find the source closest to 811, 4005 which is the bright asteroid, 2006 Polonskaya. Also, set the rate and angle of motion. These were found from JPL horizons. The 1 degree increase is to account for the slight rotation of the image.
Step6: Now use psfStarChooser to select the PSF stars. The first and second parameters to starChooser are the fitting box width in pixels, and the SNR minimum required for a star to be considered as a potential PSF star.
Step7: Generate the PSF. We want a 61 pixel wide PSF, adopt a repFactor of 10, and use the mean star fits chosen above.
Step8: Now generate the TSF, which we call the line/long PSF interchangeably through the code...
Step9: Now calculate aperture corrections for the PSF and TSF. Store for values of r=1.4*FWHM.
Step10: Store the PSF. In TRIPPy v1.0 we introduced a new psf save format which decreases the storage requirements by roughly half, at the cost of increase CPU time when restoring the stored PSF. The difference is that the moffat component of the PSF was originally saved in the fits file's first extension. This is no longer saved, as it's pretty quick to calculate.
Step11: If we've already done the above once, we could doing it again by restoring the previously constructed PSF by the following commented out code.
Step12: And we could generate a new line psf by recalling .line with a new rate and angle
Step13: Now let's do some pill aperture photometry. Instantiate the class, then call the object you created to get photometry of Polonskaya. Again assume repFact=10.
Step14: The SNR function calculates the SNR of the aperture,as well as provide an estiamte of the magnitude/flux uncertainties. Select useBGstd=True if you wish to use the background noise level instead of sqrt of the background level in your uncertainty estimate. Note
Step15: Let's get aperture corrections measured directly from a star.
Step16: Finally, let's do some PSF source subtraction. This is only possible with emcee and sextractor installed.
Step17: Now instantiate the MCMCfitter class, and then perform the fit. Verbose=False will not put anything to terminal. Setting to true will dump the result of each step. Only good idea if you insist on seeing what's happening. Do you trust black boxes?
Step18: Now get the fits results, including best fit and confidence region using the input value. 0.67 for 1-sigma is shown
Step19: Finally, lets produce the model best fit image, and perform a subtraction. Plant will plant a fake source with the given input x,y,amplitude into the input data. If returnModel=True, then no source is planted, but the model image that would have been planted is returned.
Step20: Now show the image and the image with model removed for comparison.
|
14,032
|
<ASSISTANT_TASK:>
Python Code:
!pip install -U optax distrax dm-haiku
from typing import Any, Iterator, Mapping, Optional, Sequence, Tuple
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
Array = jnp.ndarray
PRNGKey = Array
Batch = Mapping[str, np.ndarray]
OptState = Any
MNIST_IMAGE_SHAPE = (28, 28, 1)
batch_size = 128
def make_conditioner(
    event_shape: Sequence[int], hidden_sizes: Sequence[int], num_bijector_params: int
) -> hk.Sequential:
    """Creates an MLP conditioner for each layer of the flow.

    Maps the (masked) input event to `num_bijector_params` spline parameters
    per event dimension, with output shape `event_shape + (num_bijector_params,)`.
    """
    return hk.Sequential(
        [
            hk.Flatten(preserve_dims=-len(event_shape)),
            hk.nets.MLP(hidden_sizes, activate_final=True),
            # We initialize this linear layer to zero so that the flow is
            # initialized to the identity function.
            hk.Linear(np.prod(event_shape) * num_bijector_params, w_init=jnp.zeros, b_init=jnp.zeros),
            hk.Reshape(tuple(event_shape) + (num_bijector_params,), preserve_dims=-1),
        ]
    )
def make_flow_model(
    event_shape: Sequence[int], num_layers: int, hidden_sizes: Sequence[int], num_bins: int
) -> distrax.Transformed:
    """Creates the flow model.

    Builds a stack of `num_layers` masked-coupling layers with
    rational-quadratic-spline bijectors (mask alternating between layers)
    on top of a uniform base distribution over `event_shape`.
    """
    # Alternating binary mask over the flattened event dimensions.
    mask = jnp.arange(0, np.prod(event_shape)) % 2
    mask = jnp.reshape(mask, event_shape)
    mask = mask.astype(bool)

    def bijector_fn(params: Array):
        return distrax.RationalQuadraticSpline(params, range_min=0.0, range_max=1.0)

    # Number of parameters for the rational-quadratic spline:
    # - `num_bins` bin widths
    # - `num_bins` bin heights
    # - `num_bins + 1` knot slopes
    # for a total of `3 * num_bins + 1` parameters.
    num_bijector_params = 3 * num_bins + 1

    layers = []
    for _ in range(num_layers):
        layer = distrax.MaskedCoupling(
            mask=mask,
            bijector=bijector_fn,
            conditioner=make_conditioner(event_shape, hidden_sizes, num_bijector_params),
        )
        layers.append(layer)
        # Flip the mask after each layer so every dimension gets transformed.
        mask = jnp.logical_not(mask)

    # We invert the flow so that the `forward` method is called with `log_prob`.
    flow = distrax.Inverse(distrax.Chain(layers))
    base_distribution = distrax.Independent(
        distrax.Uniform(low=jnp.zeros(event_shape), high=jnp.ones(event_shape)),
        reinterpreted_batch_ndims=len(event_shape),
    )
    return distrax.Transformed(base_distribution, flow)
def load_dataset(split: tfds.Split, batch_size: int) -> Iterator[Batch]:
    """Load the requested MNIST split as an infinite iterator of numpy batches."""
    dataset = (
        tfds.load("mnist", split=split, shuffle_files=True)
        .shuffle(buffer_size=10 * batch_size)
        .batch(batch_size)
        .prefetch(buffer_size=5)
        .repeat()
    )
    return iter(tfds.as_numpy(dataset))
def prepare_data(batch: Batch, prng_key: Optional[PRNGKey] = None) -> Array:
    """Convert a raw image batch to float pixel values in [0, 1).

    When `prng_key` is given, pixel values {0, 1, ..., 255} are dequantized
    with uniform noise drawn from [0, 1) before normalization.
    """
    images = batch["image"].astype(np.float32)
    if prng_key is None:
        return images / 256.0
    noise = jax.random.uniform(prng_key, images.shape)
    return (images + noise) / 256.0  # Normalize from [0, 256) to [0, 1).
flow_num_layers = 8
mlp_num_layers = 2
hidden_size = 500
num_bins = 4
learning_rate = 1e-4
# using 100,000 steps could take long (about 2 hours) but will give better results.
# You can try with 10,000 steps to run it fast but result may not be very good
training_steps = 10000
eval_frequency = 1000
@hk.without_apply_rng
@hk.transform
def log_prob(data: Array) -> Array:
    """Log-density of `data` under the flow (Haiku-transformed)."""
    flow = make_flow_model(
        event_shape=MNIST_IMAGE_SHAPE,
        num_layers=flow_num_layers,
        hidden_sizes=[hidden_size] * mlp_num_layers,
        num_bins=num_bins,
    )
    return flow.log_prob(data)
@hk.without_apply_rng
@hk.transform
def model_sample(key: PRNGKey, num_samples: int) -> Array:
    """Draw `num_samples` images from the flow (Haiku-transformed)."""
    flow = make_flow_model(
        event_shape=MNIST_IMAGE_SHAPE,
        num_layers=flow_num_layers,
        hidden_sizes=[hidden_size] * mlp_num_layers,
        num_bins=num_bins,
    )
    return flow.sample(seed=key, sample_shape=[num_samples])
def loss_fn(params: hk.Params, prng_key: PRNGKey, batch: Batch) -> Array:
    """Training loss: average negative log-likelihood of a dequantized batch."""
    dequantized = prepare_data(batch, prng_key)
    return -jnp.mean(log_prob.apply(params, dequantized))
@jax.jit
def eval_fn(params: hk.Params, batch: Batch) -> Array:
    """Validation loss: average NLL on raw (non-dequantized) pixels."""
    return -jnp.mean(log_prob.apply(params, prepare_data(batch)))
optimizer = optax.adam(learning_rate)
@jax.jit
def update(params: hk.Params, prng_key: PRNGKey, opt_state: OptState, batch: Batch) -> Tuple[hk.Params, OptState]:
    """Single SGD update step.

    Computes gradients of `loss_fn` on `batch` and applies one optimizer
    update; returns the new parameters and optimizer state.
    """
    grads = jax.grad(loss_fn)(params, prng_key, batch)
    updates, new_opt_state = optimizer.update(grads, opt_state)
    new_params = optax.apply_updates(params, updates)
    return new_params, new_opt_state
prng_seq = hk.PRNGSequence(42)
params = log_prob.init(next(prng_seq), np.zeros((1, *MNIST_IMAGE_SHAPE)))
opt_state = optimizer.init(params)
train_ds = load_dataset(tfds.Split.TRAIN, batch_size)
valid_ds = load_dataset(tfds.Split.TEST, batch_size)
for step in range(training_steps):
params, opt_state = update(params, next(prng_seq), opt_state, next(train_ds))
if step % eval_frequency == 0:
val_loss = eval_fn(params, next(valid_ds))
print(f"STEP: {step:5d}; Validation loss: {val_loss:.3f}")
def plot_batch(batch: Batch) -> None:
    """Plots the first 10 MNIST digits of a batch in a 2x5 grid."""
    images = batch.reshape((-1,) + MNIST_IMAGE_SHAPE)
    plt.figure(figsize=(10, 4))
    for i in range(10):
        plt.subplot(2, 5, i + 1)
        plt.imshow(np.squeeze(images[i]), cmap="gray")
        plt.axis("off")
    plt.show()
sample = model_sample.apply(params, next(prng_seq), num_samples=10)
plot_batch(sample)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing all required libraries and packages
Step3: Conditioner
Step5: Flow Model
Step6: Data Loading and preparation
Step7: Log Probability, Sample and training loss Functions
Step9: Training
Step10: Now we carry out the training of the model.
Step12: Sampling from Trained Flow Model
|
14,033
|
<ASSISTANT_TASK:>
Python Code:
from tardis.io.util import HDFWriterMixin
class ExampleClass(HDFWriterMixin):
    """Minimal HDFWriterMixin demo: `hdf_properties` names the attributes
    to persist and `hdf_name` the group name used in the HDF file."""

    hdf_properties = ['property1', 'property2']
    hdf_name = 'mock_setup'

    def __init__(self, property1, property2):
        self.property1, self.property2 = property1, property2
import numpy as np
import pandas as pd
#Instantiating Object
property1 = np.array([4.0e14, 2, 2e14, 27.5])
property2 = pd.DataFrame({'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])})
obj = ExampleClass(property1, property2)
obj.to_hdf(file_path='test.hdf', path='test')
#obj.to_hdf(file_path='test.hdf', path='test', name='hdf')
#Read HDF file
with pd.HDFStore('test.hdf','r') as data:
print data
#print data['/test/mock_setup/property1']
class NestedExampleClass(HDFWriterMixin):
    """HDFWriterMixin demo with nesting: `nested_object` is itself an
    HDFWriterMixin instance, saved under this object's group."""

    hdf_properties = ['property1', 'nested_object']

    def __init__(self, property1, nested_obj):
        self.property1, self.nested_object = property1, nested_obj
obj2 = NestedExampleClass(property1, obj)
obj2.to_hdf(file_path='nested_test.hdf')
#Read HDF file
with pd.HDFStore('nested_test.hdf','r') as data:
print data
class ModifiedWriterMixin(HDFWriterMixin):
    """HDFWriterMixin variant that collects the attributes to persist from
    the class-level `outputs` list instead of `hdf_properties`."""

    def get_properties(self):
        # Override the property-collection hook: gather every attribute
        # named in `self.outputs`.
        return {name: getattr(self, name) for name in self.outputs}
class DemoClass(ModifiedWriterMixin):
    """Example consumer of ModifiedWriterMixin: persists the attributes
    listed in `outputs` under the group name `hdf_name`."""

    outputs = ['property1']
    hdf_name = 'demo'

    def __init__(self, property1):
        self.property1 = property1
obj3 = DemoClass('random_string')
obj3.to_hdf('demo_class.hdf')
with pd.HDFStore('demo_class.hdf','r') as data:
print data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can now save properties using to_hdf method.
Step2: You can now read hdf file using pd.HDFStore , or pd.read_hdf
Step3: Saving nested class objects.
Step4: Modifed Usage
Step5: A demo class , using this modified mixin.
|
14,034
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import unicode_literals, division, print_function, absolute_import
from builtins import range
import numpy as np
np.random.seed(28)
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import load_digits, fetch_mldata, fetch_20newsgroups
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
import tensorflow as tf
tf.set_random_seed(28)
import keras
# find nlputils at https://github.com/cod3licious/nlputils
from nlputils.features import FeatureTransform, features2mat
from simec import SimilarityEncoder
from utils import center_K, check_similarity_match
from utils_plotting import get_colors, plot_digits, plot_mnist, plot_20news
# --- Experiment: handwritten digits (8x8 px) with class-label similarities ---
# Notebook magics: inline plots and auto-reload of edited local modules.
%matplotlib inline
%load_ext autoreload
%autoreload 2
# set this to True if you want to save the figures from the paper
savefigs = False
# load digits dataset
digits = load_digits()
X = digits.data
X /= float(X.max())
ss = StandardScaler(with_std=False)
X = ss.fit_transform(X)
y = digits.target
n_samples, n_features = X.shape
# S is the centered binary "same class" similarity matrix (n_samples x n_samples).
Y = np.tile(y, (len(y), 1))
S = center_K(np.array(Y==Y.T, dtype=int))
# take only some of the samples as targets to speed it all up
n_targets = 1000
# knn accuracy using all original feature dimensions
clf = KNN(n_neighbors=10)
clf.fit(X[:n_targets], y[:n_targets])
print("knn accuracy: %f" % clf.score(X[n_targets:], y[n_targets:]))
# PCA
pca = PCA(n_components=2)
X_embedp = pca.fit_transform(X)
plot_digits(X_embedp, digits, title='Digits embedded with PCA')
clf = KNN(n_neighbors=10)
clf.fit(X_embedp[:n_targets], y[:n_targets])
print("knn accuracy: %f" % clf.score(X_embedp[n_targets:], y[n_targets:]))
# check how many relevant dimensions there are
eigenvals = np.linalg.eigvalsh(S)[::-1]
plt.figure();
plt.plot(list(range(1, S.shape[0]+1)), eigenvals, '-o', markersize=3);
plt.plot([1, S.shape[0]],[0,0], 'k--', linewidth=0.5);
plt.xlim(1, X.shape[1]+1);
plt.title('Eigenvalue spectrum of S (based on class labels)');
D, V = np.linalg.eig(S)
# regular kpca embedding: take largest EV
D1, V1 = D[np.argsort(D)[::-1]], V[:,np.argsort(D)[::-1]]
X_embed = np.dot(V1.real, np.diag(np.sqrt(np.abs(D1.real))))
plot_digits(X_embed[:,:2], digits, title='Digits embedded based on first 2 components', plot_box=False)
clf = KNN(n_neighbors=10)
clf.fit(X_embed[:n_targets,:2], y[:n_targets])
print("knn accuracy: %f" % clf.score(X_embed[n_targets:,:2], y[n_targets:]))
print("similarity approximation - mse: %f" % check_similarity_match(X_embed[:,:2], S)[0])
# similarity encoder with similarities relying on class information - linear
simec = SimilarityEncoder(X.shape[1], 2, n_targets, l2_reg_emb=0.00001, l2_reg_out=0.0000001,
                          s_ll_reg=0.5, S_ll=S[:n_targets,:n_targets], opt=keras.optimizers.Adamax(lr=0.005))
simec.fit(X, S[:,:n_targets])
X_embed = simec.transform(X)
plot_digits(X_embed, digits, title='Digits - SimEc (class sim, linear)')
# of course we're overfitting here quite a bit since we used all samples for training
# even if we didn't use the corresponding similarities...but this is only a toy example anyways
clf = KNN(n_neighbors=10)
clf.fit(X_embed[:n_targets], y[:n_targets])
print("knn accuracy: %f" % clf.score(X_embed[n_targets:], y[n_targets:]))
print("similarity approximation - mse: %f" % check_similarity_match(X_embed, S)[0])
# similarity encoder with similarities relying on class information - 1 hidden layer
n_targets = 1000
simec = SimilarityEncoder(X.shape[1], 2, n_targets, hidden_layers=[(100, 'tanh')],
                          l2_reg=0.00000001, l2_reg_emb=0.00001, l2_reg_out=0.0000001,
                          s_ll_reg=0.5, S_ll=S[:n_targets,:n_targets], opt=keras.optimizers.Adamax(lr=0.01))
# Train incrementally and visualize the embedding after each stage;
# e_total tracks the cumulative number of epochs trained so far.
e_total = 0
for e in [5, 10, 10, 10, 15, 25, 25]:
    e_total += e
    print(e_total)
    simec.fit(X, S[:,:n_targets], epochs=e)
    X_embed = simec.transform(X)
    clf = KNN(n_neighbors=10)
    clf.fit(X_embed[:1000], y[:1000])
    acc = clf.score(X_embed[1000:], y[1000:])
    print("knn accuracy: %f" % acc)
    print("similarity approximation - mse: %f" % check_similarity_match(X_embed, S)[0])
    plot_digits(X_embed, digits, title='SimEc after %i epochs; accuracy: %.1f' % (e_total, 100*acc) , plot_box=False)
# --- Experiment: MNIST subsample — eigendecomposition vs. regression vs. SimEc ---
# load digits
mnist = fetch_mldata('MNIST original', data_home='data')
X = mnist.data/255. # normalize to 0-1
y = np.array(mnist.target, dtype=int)
# subsample 10000 random data points
np.random.seed(42)
n_samples = 10000
n_test = 2000
n_targets = 1000
rnd_idx = np.random.permutation(X.shape[0])[:n_samples]
X_test, y_test = X[rnd_idx[:n_test],:], y[rnd_idx[:n_test]]
X, y = X[rnd_idx[n_test:],:], y[rnd_idx[n_test:]]
# scale
ss = StandardScaler(with_std=False)
X = ss.fit_transform(X)
X_test = ss.transform(X_test)
n_train, n_features = X.shape
# compute similarity matrix based on class labels
Y = np.tile(y, (len(y), 1))
S = center_K(np.array(Y==Y.T, dtype=int))
Y = np.tile(y_test, (len(y_test), 1))
S_test = center_K(np.array(Y==Y.T, dtype=int))
D, V = np.linalg.eig(S)
# as a comparison: regular kpca embedding: take largest EV
D1, V1 = D[np.argsort(D)[::-1]], V[:,np.argsort(D)[::-1]]
X_embed = np.dot(V1.real, np.diag(np.sqrt(np.abs(D1.real))))
plot_mnist(X_embed[:,:2], y, title='MNIST (train) - largest 2 EV')
print("similarity approximation 2D - mse: %f" % check_similarity_match(X_embed[:,:2], S)[0])
print("similarity approximation 5D - mse: %f" % check_similarity_match(X_embed[:,:5], S)[0])
print("similarity approximation 7D - mse: %f" % check_similarity_match(X_embed[:,:7], S)[0])
print("similarity approximation 10D - mse: %f" % check_similarity_match(X_embed[:,:10], S)[0])
print("similarity approximation 25D - mse: %f" % check_similarity_match(X_embed[:,:25], S)[0])
n_targets = 2000
# get good alpha for RR model
m = Ridge()
rrm = GridSearchCV(m, {'alpha': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 1., 2.5, 5., 7.5, 10., 25., 50., 75., 100., 250., 500., 750., 1000.]})
rrm.fit(X, X_embed[:,:8])
alpha = rrm.best_params_["alpha"]
print("Ridge Regression with alpha: %r" % alpha)
# For each embedding dimensionality, record the similarity-approximation MSE
# of: plain eigendecomposition, eigendecomposition + ridge regression,
# a linear SimEc, and a 2-hidden-layer SimEc (train and test variants).
mse_ev, mse_rr, mse_rr_test = [], [], []
mse_simec, mse_simec_test = [], []
mse_simec_hl, mse_simec_hl_test = [], []
e_dims = [2, 3, 4, 5, 6, 7, 8, 9, 10, 15]
for e_dim in e_dims:
    print(e_dim)
    # eigenvalue based embedding
    mse = check_similarity_match(X_embed[:,:e_dim], S)[0]
    mse_ev.append(mse)
    # train a linear ridge regression model to learn the mapping from X to Y
    model = Ridge(alpha=alpha)
    model.fit(X, X_embed[:,:e_dim])
    X_embed_r = model.predict(X)
    X_embed_test_r = model.predict(X_test)
    mse = check_similarity_match(X_embed_r, S)[0]
    mse_rr.append(mse)
    mse = check_similarity_match(X_embed_test_r, S_test)[0]
    mse_rr_test.append(mse)
    # simec - linear
    simec = SimilarityEncoder(X.shape[1], e_dim, n_targets, s_ll_reg=0.5, S_ll=S[:n_targets,:n_targets],
                              orth_reg=0.001 if e_dim > 8 else 0., l2_reg_emb=0.00001,
                              l2_reg_out=0.0000001, opt=keras.optimizers.Adamax(lr=0.001))
    simec.fit(X, S[:,:n_targets])
    X_embeds = simec.transform(X)
    X_embed_tests = simec.transform(X_test)
    mse = check_similarity_match(X_embeds, S)[0]
    mse_simec.append(mse)
    mse_t = check_similarity_match(X_embed_tests, S_test)[0]
    mse_simec_test.append(mse_t)
    # simec - 2hl
    simec = SimilarityEncoder(X.shape[1], e_dim, n_targets, hidden_layers=[(25, 'tanh'), (25, 'tanh')],
                              s_ll_reg=0.5, S_ll=S[:n_targets,:n_targets], orth_reg=0.001 if e_dim > 7 else 0.,
                              l2_reg=0., l2_reg_emb=0.00001, l2_reg_out=0.0000001, opt=keras.optimizers.Adamax(lr=0.001))
    simec.fit(X, S[:,:n_targets])
    X_embeds = simec.transform(X)
    X_embed_tests = simec.transform(X_test)
    mse = check_similarity_match(X_embeds, S)[0]
    mse_simec_hl.append(mse)
    mse_t = check_similarity_match(X_embed_tests, S_test)[0]
    mse_simec_hl_test.append(mse_t)
    print("mse ev: %f; mse rr: %f (%f); mse simec (0hl): %f (%f); mse simec (2hl): %f (%f)" % (mse_ev[-1], mse_rr[-1], mse_rr_test[-1], mse_simec[-1], mse_simec_test[-1], mse, mse_t))
    keras.backend.clear_session()
# Plot all four curves (solid = train, dashed = test) and dump the raw numbers.
colors = get_colors(15)
plt.figure();
plt.plot(e_dims, mse_ev, '-o', markersize=3, c=colors[14], label='Eigendecomposition');
plt.plot(e_dims, mse_rr, '-o', markersize=3, c=colors[12], label='ED + Regression');
plt.plot(e_dims, mse_rr_test, '--o', markersize=3, c=colors[12], label='ED + Regression (test)');
plt.plot(e_dims, mse_simec, '-o', markersize=3, c=colors[8], label='SimEc 0hl');
plt.plot(e_dims, mse_simec_test, '--o', markersize=3, c=colors[8], label='SimEc 0hl (test)');
plt.plot(e_dims, mse_simec_hl, '-o', markersize=3, c=colors[4], label='SimEc 2hl');
plt.plot(e_dims, mse_simec_hl_test, '--o', markersize=3, c=colors[4], label='SimEc 2hl (test)');
plt.legend(loc=0);
plt.title('MNIST (class based similarities)');
plt.plot([0, e_dims[-1]], [0,0], 'k--', linewidth=0.5);
plt.xticks(e_dims, e_dims);
plt.xlabel('Number of Embedding Dimensions ($d$)')
plt.ylabel('Mean Squared Error of $\hat{S}$')
print("e_dims=", e_dims)
print("mse_ev=", mse_ev)
print("mse_rr=", mse_rr)
print("mse_rr_test=", mse_rr_test)
print("mse_simec=", mse_simec)
print("mse_simec_test=", mse_simec_test)
print("mse_simec_hl=", mse_simec_hl)
print("mse_simec_hl_test=", mse_simec_hl_test)
if savefigs: plt.savefig('fig_class_mse_edim.pdf', dpi=300)
# --- Experiment: 20 newsgroups (7 categories) with tf-idf features ---
## load the data and transform it into a tf-idf representation
categories = [
    "comp.graphics",
    "rec.autos",
    "rec.sport.baseball",
    "sci.med",
    "sci.space",
    "soc.religion.christian",
    "talk.politics.guns"
]
newsgroups_train = fetch_20newsgroups(subset='train', remove=(
    'headers', 'footers', 'quotes'), data_home='data', categories=categories, random_state=42)
newsgroups_test = fetch_20newsgroups(subset='test', remove=(
    'headers', 'footers', 'quotes'), data_home='data', categories=categories, random_state=42)
# store in dicts (if the text contains more than 3 words)
textdict = {i: t for i, t in enumerate(newsgroups_train.data) if len(t.split()) > 3}
textdict.update({i: t for i, t in enumerate(newsgroups_test.data, len(newsgroups_train.data)) if len(t.split()) > 3})
train_ids = [i for i in range(len(newsgroups_train.data)) if i in textdict]
test_ids = [i for i in range(len(newsgroups_train.data), len(textdict)) if i in textdict]
print("%i training and %i test samples" % (len(train_ids), len(test_ids)))
# transform into tf-idf features
ft = FeatureTransform(norm='max', weight=True, renorm='max')
docfeats = ft.texts2features(textdict, fit_ids=train_ids)
# organize in feature matrix
X, featurenames = features2mat(docfeats, train_ids)
X_test, _ = features2mat(docfeats, test_ids, featurenames)
print("%i features" % len(featurenames))
targets = np.hstack([newsgroups_train.target,newsgroups_test.target])
y = targets[train_ids]
y_test = targets[test_ids]
n_targets = 1000
target_names = newsgroups_train.target_names
# compute label based simmat
Y = np.tile(y, (len(y), 1))
S = center_K(np.array(Y==Y.T, dtype=int))
Y = np.tile(y_test, (len(y_test), 1))
S_test = center_K(np.array(Y==Y.T, dtype=int))
D, V = np.linalg.eig(S)
# as a comparison: regular kpca embedding: take largest EV
D1, V1 = D[np.argsort(D)[::-1]], V[:,np.argsort(D)[::-1]]
X_embed = np.dot(V1.real, np.diag(np.sqrt(np.abs(D1.real))))
plot_20news(X_embed[:, :2], y, target_names, title='20 newsgroups - 2 largest EV', legend=True)
print("similarity approximation 2D - mse: %f" % check_similarity_match(X_embed[:,:2], S)[0])
print("similarity approximation 5D - mse: %f" % check_similarity_match(X_embed[:,:5], S)[0])
print("similarity approximation 7D - mse: %f" % check_similarity_match(X_embed[:,:7], S)[0])
print("similarity approximation 10D - mse: %f" % check_similarity_match(X_embed[:,:10], S)[0])
print("similarity approximation 25D - mse: %f" % check_similarity_match(X_embed[:,:25], S)[0])
n_targets = 2000
# get good alpha for RR model
m = Ridge()
rrm = GridSearchCV(m, {'alpha': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 1., 2.5, 5., 7.5, 10., 25., 50., 75., 100., 250., 500., 750., 1000.]})
rrm.fit(X, X_embed[:,:8])
alpha = rrm.best_params_["alpha"]
print("Ridge Regression with alpha: %r" % alpha)
# Same four-way comparison as in the MNIST experiment, but on sparse
# tf-idf inputs (note sparse_inputs=True for the SimEc models).
mse_ev, mse_rr, mse_rr_test = [], [], []
mse_simec, mse_simec_test = [], []
mse_simec_hl, mse_simec_hl_test = [], []
e_dims = [2, 3, 4, 5, 6, 7, 8, 9, 10]
for e_dim in e_dims:
    print(e_dim)
    # eigenvalue based embedding
    mse = check_similarity_match(X_embed[:,:e_dim], S)[0]
    mse_ev.append(mse)
    # train a linear ridge regression model to learn the mapping from X to Y
    model = Ridge(alpha=alpha)
    model.fit(X, X_embed[:,:e_dim])
    X_embed_r = model.predict(X)
    X_embed_test_r = model.predict(X_test)
    mse = check_similarity_match(X_embed_r, S)[0]
    mse_rr.append(mse)
    mse = check_similarity_match(X_embed_test_r, S_test)[0]
    mse_rr_test.append(mse)
    # simec - linear
    simec = SimilarityEncoder(X.shape[1], e_dim, n_targets, s_ll_reg=0.5, S_ll=S[:n_targets,:n_targets],
                              sparse_inputs=True, orth_reg=0.1 if e_dim > 6 else 0., l2_reg_emb=0.0001,
                              l2_reg_out=0.00001, opt=keras.optimizers.Adamax(lr=0.01))
    simec.fit(X, S[:,:n_targets])
    X_embeds = simec.transform(X)
    X_embed_tests = simec.transform(X_test)
    mse = check_similarity_match(X_embeds, S)[0]
    mse_simec.append(mse)
    mse_t = check_similarity_match(X_embed_tests, S_test)[0]
    mse_simec_test.append(mse_t)
    # simec - 2hl
    simec = SimilarityEncoder(X.shape[1], e_dim, n_targets, hidden_layers=[(25, 'tanh'), (25, 'tanh')], sparse_inputs=True,
                              s_ll_reg=1., S_ll=S[:n_targets,:n_targets], orth_reg=0.1 if e_dim > 7 else 0.,
                              l2_reg=0., l2_reg_emb=0.01, l2_reg_out=0.00001, opt=keras.optimizers.Adamax(lr=0.01))
    simec.fit(X, S[:,:n_targets])
    X_embeds = simec.transform(X)
    X_embed_tests = simec.transform(X_test)
    mse = check_similarity_match(X_embeds, S)[0]
    mse_simec_hl.append(mse)
    mse_t = check_similarity_match(X_embed_tests, S_test)[0]
    mse_simec_hl_test.append(mse_t)
    print("mse ev: %f; mse rr: %f (%f); mse simec (0hl): %f (%f); mse simec (2hl): %f (%f)" % (mse_ev[-1], mse_rr[-1], mse_rr_test[-1], mse_simec[-1], mse_simec_test[-1], mse, mse_t))
    keras.backend.clear_session()
# Plot all four curves (solid = train, dashed = test) and dump the raw numbers.
colors = get_colors(15)
plt.figure();
plt.plot(e_dims, mse_ev, '-o', markersize=3, c=colors[14], label='Eigendecomposition');
plt.plot(e_dims, mse_rr, '-o', markersize=3, c=colors[12], label='ED + Regression');
plt.plot(e_dims, mse_rr_test, '--o', markersize=3, c=colors[12], label='ED + Regression (test)');
plt.plot(e_dims, mse_simec, '-o', markersize=3, c=colors[8], label='SimEc 0hl');
plt.plot(e_dims, mse_simec_test, '--o', markersize=3, c=colors[8], label='SimEc 0hl (test)');
plt.plot(e_dims, mse_simec_hl, '-o', markersize=3, c=colors[4], label='SimEc 2hl');
plt.plot(e_dims, mse_simec_hl_test, '--o', markersize=3, c=colors[4], label='SimEc 2hl (test)');
plt.legend(bbox_to_anchor=(1.02, 1), loc=2, borderaxespad=0.);
plt.title('20 newsgroups (class based similarities)');
plt.plot([0, e_dims[-1]], [0,0], 'k--', linewidth=0.5);
plt.xticks(e_dims, e_dims);
plt.xlabel('Number of Embedding Dimensions ($d$)')
plt.ylabel('Mean Squared Error of $\hat{S}$')
print("e_dims=", e_dims)
print("mse_ev=", mse_ev)
print("mse_rr=", mse_rr)
print("mse_rr_test=", mse_rr_test)
print("mse_simec=", mse_simec)
print("mse_simec_test=", mse_simec_test)
print("mse_simec_hl=", mse_simec_hl)
print("mse_simec_hl_test=", mse_simec_hl_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Handwritten Digits (8x8 px)
Step2: SimEc based on class labels
Step3: Lets first try a simple linear SimEc.
Step4: Great, we already see some clusters separating from the rest! What if we add more layers?
Step5: MNIST Dataset
Step6: "Kernel PCA" and Ridge Regression vs. SimEc
Step7: 20 Newsgroups
|
14,035
|
<ASSISTANT_TASK:>
Python Code:
# Compute 3! by accumulating the product of 1..3, then compare with the
# standard-library factorial.
count = 1
for factor in range(1, 4):
    count = count * factor
print(count)

from math import factorial as f

f(3)  # stdlib check: also 6
.1*10**20  # floating-point demo: the result is not exactly 1e19
def n_max():
    """Prompt the user for a collection of values and print the largest.

    Delegates the actual search to ``max_val`` (defined later in this file).
    """
    # SECURITY NOTE(review): eval() executes arbitrary user input; acceptable
    # only in this classroom exercise — use ast.literal_eval for real code.
    inpt = eval(input("Please enter some values: "))
    maximum = max_val(inpt)
    print("The largest value is", maximum)
def max_val(ints):
    """Return the maximum value of a collection.

    Input: collection of ints (any non-empty sequence of comparables works).
    Returns: int - the max integer.
    Raises: IndexError if the collection is empty (``ints[0]``).
    """
    # Fix 1: the docstring above had lost its triple quotes in this copy,
    # leaving bare text inside the function body (a SyntaxError).
    # Fix 2: the running maximum was stored in a local named 'max', which
    # shadowed the builtin max(); renamed to 'largest'.
    largest = ints[0]
    for x in ints:
        if x > largest:
            largest = x
    return largest
# Quick sanity checks for max_val, then run the interactive prompt.
assert max_val([1, 2, 3]) == 3
assert max_val([1, 1, 1]) == 1
assert max_val([1, 2, 2]) == 2
n_max()
# Demo: presumably the user types comma-separated values, which eval()
# turns into a tuple that list() then converts — TODO confirm intent.
# SECURITY NOTE(review): eval() on user input is unsafe outside an exercise.
inpt = eval(input("Please enter three values: "))
list(inpt)
# Expected behaviour of the compress() exercise implemented below.
assert compress('AAAADDBBBBBCCEAA') == 'A4D2B5C2E1A2'
# %load ../scripts/compress/compressor.py
def groupby_char(lst):
    """Returns a list of strings containing identical characters.

    Takes a list of characters produced by splitting a string.
    Groups runs (in-order sequences) of identical characters into string
    elements in the list.

    Parameters
    ----------
    Input:
        lst: list
            A list of single character strings.
    Output:
        grouped: list
            A list of strings containing grouped characters.

    Raises IndexError on an empty list (``lst[-1]``); the caller
    ``compress`` relies on this for the empty-string case.
    """
    # Fix: the docstring above had lost its triple quotes in this copy,
    # leaving bare text inside the function body (a SyntaxError).
    new_lst = []
    count = 1
    for i in range(len(lst) - 1):  # range to the second-to-last index since we compare lst[i] with lst[i + 1].
        if lst[i] == lst[i + 1]:
            count += 1
        else:
            new_lst.append([lst[i], count])  # each entry holds a character and the count of adjacent identical characters.
            count = 1
    new_lst.append((lst[-1], count))  # the loop never reaches the last character, so append it here.
    grouped = [char*count for [char, count] in new_lst]
    return grouped
def compress_group(string):
    """Returns a compressed string containing a character and a number.

    Takes in a string of identical characters and returns the compressed
    string consisting of the character followed by the length of the
    original string.

    Example
    -------
    "AAA"-->"A3"

    Parameters
    ----------
    Input:
        string: str
            A string of identical characters (must be non-empty).
    Output:
        compressed_str: str
            The first character followed by the run length (e.g. "A3";
            note runs of 10+ produce more than two characters, "B12").
    """
    # Fix: the docstring above had lost its triple quotes in this copy,
    # leaving bare text inside the function body (a SyntaxError).
    return str(string[0]) + str(len(string))
def compress(string):
    """Returns a compressed representation of a string.

    Compresses the string by mapping each run of identical characters to a
    single character and a count.

    Ex.
    --
    compress('AAABBCDDD')--> 'A3B2C1D3'.

    Only compresses string if the compression is shorter than the original
    string.

    Ex.
    --
    compress('A')--> 'A' # not 'A1'.

    Parameters
    ----------
    Input:
        string: str
            The string to compress ('' maps to ''; non-iterable input,
            including None, maps to None).
    Output:
        compressed: str
            The compressed representation of the string.
    """
    # Fix: the docstring above had lost its triple quotes in this copy,
    # leaving bare text inside the function body (a SyntaxError).
    try:
        split_str = [char for char in string]  # Create list of single characters.
        grouped = groupby_char(split_str)  # Group runs of identical characters.
        compressed = ''.join(  # Compress each group and join to a string.
            [compress_group(elem) for elem in grouped])
        if len(compressed) < len(string):  # Only return compressed if it is actually shorter.
            return compressed
        else:
            return string
    except IndexError:  # If our input string is empty, return an empty string.
        return ""
    except TypeError:  # If we get something that's not compressible (including NoneType) return None.
        return None
# %load ../scripts/compress/compress_tests.py
# This will fail to run because in wrong directory
from compress.compressor import *
def compress_test():
    """Smoke tests for compress(), including empty and None edge cases."""
    assert compress('AAABBCDDD') == 'A3B2C1D3'
    assert compress('A') == 'A'
    assert compress('') == ''
    assert compress('AABBCC') == 'AABBCC' # compressing doesn't shorten string so just return string.
    assert compress(None) == None
def groupby_char_test():
    """groupby_char should merge runs of identical characters into strings."""
    assert groupby_char(["A", "A", "A", "B", "B"]) == ["AAA", "BB"]
def compress_group_test():
    """compress_group maps a run of identical characters to '<char><count>'."""
    assert compress_group("AAA") == "A3"
    assert compress_group("A") == "A1"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Extend your program to n objects. How many different combinations do I have for 5 objects? How about 15? What is the max number of objects I could calculate for if I was storing the result in a 32 bit integer? What happens if the combinations exceed 32 bits?
Step2: 4. What will the following code yield? Was it what you expected? What's going on here?
Step4: 5. Try typing in the command below and read this page
Step8: Strategy 1
|
14,036
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
14,037
|
<ASSISTANT_TASK:>
Python Code:
# %sh
# wget https://raw.githubusercontent.com/jgoodall/cinevis/master/data/csvs/moviedata.csv
# ls -l
# NOTE(review): this cell is Python 2 code ('print' statements) and uses
# long-removed pandas APIs (pandas.tools.plotting, DataFrame.sort); it will
# not run on Python 3 / modern pandas without porting.
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
# Load the 2007-2011 Hollywood movie data and drop the 'exclude' bookkeeping column.
hollywood_movies = pd.read_csv('moviedata.csv')
print hollywood_movies.head()
print hollywood_movies['exclude'].value_counts()
hollywood_movies = hollywood_movies.drop('exclude', axis=1)
# Scatter profitability vs audience rating, in both orientations.
fig = plt.figure(figsize=(6, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax1.scatter(hollywood_movies['Profitability'], hollywood_movies['Audience Rating'])
ax1.set(xlabel='Profitability', ylabel='Audience Rating', title='Hollywood Movies, 2007-2011')
ax2 = fig.add_subplot(2, 1, 2)
ax2.scatter(hollywood_movies['Audience Rating'], hollywood_movies['Profitability'])
ax2.set(xlabel='Audience Rating', ylabel='Profitability', title='Hollywood Movies, 2007-2011')
plt.show()
# Exclude 'Paranormal Activity' (presumably a profitability outlier - confirm)
# and look at the joint distribution of the remaining movies.
from pandas.tools.plotting import scatter_matrix
normal_movies = hollywood_movies[hollywood_movies['Film'] != 'Paranormal Activity']
scatter_matrix(normal_movies[['Profitability', 'Audience Rating']], figsize=(6,6))
plt.show()
# Compare critic vs audience rating distributions.
fig = plt.figure()
normal_movies.boxplot(['Critic Rating', 'Audience Rating'])
plt.show()
# Ratings by year (DataFrame.sort is the pre-0.17 pandas spelling of sort_values).
normal_movies = normal_movies.sort(columns='Year')
fig = plt.figure(figsize=(8,4))
ax1 = fig.add_subplot(1, 2, 1)
sns.boxplot(x=normal_movies['Year'], y=normal_movies['Critic Rating'], ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
sns.boxplot(x=normal_movies['Year'], y=normal_movies['Audience Rating'], ax=ax2)
plt.show()
def is_profitable(row):
    """Return True when the movie's profitability ratio exceeds 1.0."""
    return row["Profitability"] > 1.0
# Flag each movie as profitable via the is_profitable helper, then compare
# rating distributions for profitable vs unprofitable movies.
normal_movies["Profitable"] = normal_movies.apply(is_profitable, axis=1)
fig = plt.figure(figsize=(8,6))
ax1 = fig.add_subplot(1, 2, 1)
sns.boxplot(x=normal_movies['Profitable'], y=normal_movies['Audience Rating'], ax=ax1)
ax2 = fig.add_subplot(1, 2, 2)
sns.boxplot(x=normal_movies['Profitable'], y=normal_movies['Critic Rating'], ax=ax2)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2
Step2: 3
Step3: 4
Step4: 5
Step5: 6
|
14,038
|
<ASSISTANT_TASK:>
Python Code:
# Show where Python searches for importable modules.
import sys
print("Path (sys.path):")
for f in sys.path:
    print(f)
# Show the notebook's working directory.
import os
print("Current directory:")
print(os.getcwd())
# Project-specific import: requires the 'agreg' package to be on sys.path.
import agreg.memoisation
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing a file
|
14,039
|
<ASSISTANT_TASK:>
Python Code:
from salib import extend
import pandas as pd
import os, os.path
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import hashlib
from IPython.core.magic import register_cell_magic
import re
class Table(pd.DataFrame):
    """A Table is just like a pandas DataFrame except that it also carries
    a table name, a data set name, and a file name - the latter two
    describing the source of the data.

    Extra constructor keywords (all optional):
        dsname    -- name of the data set this table belongs to
        tablename -- name of the table within the data set
        filename  -- path of the file the data was read from
    """
    # (Fix: the docstring above lost its triple quotes in this copy of the
    # source, which made the class body a SyntaxError.)

    # 'filename' and 'tablename' are per-object scratch attributes that
    # pandas must not propagate through operations.
    _internal_names = pd.DataFrame._internal_names + ['filename','tablename']
    _internal_names_set = set(_internal_names)
    # 'dsname' is metadata that pandas propagates to derived objects.
    _metadata = ['dsname']

    def __init__(self, *args, **kwargs):
        # Pop our extra keywords before delegating to the DataFrame ctor.
        dsname = kwargs.pop('dsname', None)
        tablename = kwargs.pop('tablename', None)
        filename = kwargs.pop('filename', None)
        # Fix: use zero-argument super() rather than super(self.__class__, self),
        # which recurses infinitely if Table is ever subclassed.
        super().__init__(*args, **kwargs)
        if dsname is not None:
            self.dsname = dsname
        if tablename is not None:
            self.tablename = tablename
        if filename is not None:
            self.filename = filename

    @property
    def _constructor(self):
        # Ensure pandas operations return Table (or subclass) instances.
        return self.__class__
##test: construct a Table with the extra metadata keywords
t = Table(data=[(10,20.,'a'),(11,22.,'b'),(12,23.,'c')],
          columns=['I','F','S'],tablename='Test',dsname='Notebook')
t
##test: column dtypes come straight from pandas
t.dtypes
##test: the extra attributes are retained on the instance
t.tablename, t.dsname
##test: slicing returns a Table (via _constructor)
t2 = t[['S','I']]
t2
##test: 'tablename' is an _internal_names entry (not copied to derived objects);
# 'dsname' is in _metadata (propagated by pandas)
hasattr(t2,'tablename'), hasattr(t2,'dsname')
##test:
t2.dsname
##test: wrap an existing DataFrame without copying the data
t = pd.DataFrame(data=[(10,20.,'a'),(11,22.,'b'),(12,23.,'c')],columns=['I','F','S'])
u = Table(data=t,dsname='foo',copy=False)
u
##test: presumably demonstrates that with copy=False the data is shared -
# t is displayed below to check whether it changed too
u['F'] *= 3
u
##test:
t
##test:
u.dsname
class DataSource(object):
    """Singleton describing where table data comes from.

    Exactly one DataSource may exist; it is recorded in the class attribute
    DATASOURCE, and constructing a second one raises ValueError.
    """

    ROOT = 'data'      # default root directory for data sets
    DSNAME = None      # default data set name
    DSTYPE = 'dir'     # someday we will allow 'zip' for zip archives
    #DSTYPE = 'cell'   # for CSV data provided via %%Table cell magic
    #DSTYPE = 'data'   # for dataframe data provided directly
    CELLDATA = {}      # csv text from %%Table magic cells, indexed by table name
    TABLES = {}        # dataframes directly provided by client, indexed by table name
    DATASOURCE = None  # the one and only data source

    def __init__(self):
        klass = type(self)
        if klass.DATASOURCE is not None:
            raise ValueError("Can only create one instance of class '{}'".format(klass.__name__))
        # Seed instance state from the class-level defaults.
        self.root = klass.ROOT
        self.dsname = klass.DSNAME
        self.dstype = klass.DSTYPE
        self.prefix = None
        self.celldata = klass.CELLDATA
        self.tables = klass.TABLES
        # Register this instance as the one and only data source.
        klass.DATASOURCE = self
##test: first construction succeeds and registers itself
d = DataSource()
vars(d)
##test: a second construction must fail with ValueError
try:
    d2 = DataSource()
except Exception as e:
    print('*'*5,e)
    d2 = None
d2
@extend
class DataSource:
    # (salib.extend merges these methods into the existing DataSource class.)

    @classmethod
    def set_root(cls,newroot):
        """Change the root data directory; the directory must already exist."""
        self = cls.DATASOURCE
        if not os.path.exists(newroot):
            raise ValueError("Root '{}' does not exist.".format(newroot))
        self.root = newroot

    @classmethod
    def set_source(cls,dsname,dstype=None):
        """Select the current data set and reset any cached cell/table data.

        If dstype is not given it is inferred: 'dir' when <root>/<dsname>.d
        exists, otherwise 'unknown' (which then fails the validation below).
        """
        self = cls.DATASOURCE
        if dsname is not None:
            if dstype is None:
                dirname = self.root + '/' + dsname + '.d'
                if os.path.exists(dirname):
                    dstype = 'dir'
                else:
                    dstype = 'unknown'
            # NOTE(review): indentation reconstructed - validation is assumed to
            # apply only when a dsname is supplied; confirm against the original.
            if dstype not in ['dir','cell','data']:
                raise ValueError("dstype '{}' is invalid.".format(dstype))
        self.dsname = dsname
        self.dstype = dstype
        # Switching data sets invalidates all cached data.
        self.celldata = {}
        self.tables = {}

    @classmethod
    def set_table(cls,tablename,table):
        """Register an in-memory table, superseding any cell data of the same name."""
        self = cls.DATASOURCE
        self.tables[tablename] = table
        if tablename in self.celldata:
            del self.celldata[tablename]

    @classmethod
    def set_celldata(cls,tablename,celltext):
        """Register CSV text (from a %%Table cell), superseding any in-memory table."""
        self = cls.DATASOURCE
        self.celldata[tablename] = celltext
        if tablename in self.tables:
            del self.tables[tablename]

    def _file_name(self,tablename,prefix=None):
        """Build the CSV path <root>/<dsname>.d[/<prefix>]/<tablename>.csv."""
        n = tablename
        if prefix:
            n = prefix + '/' + tablename
        return self.root + '/' + self.dsname + '.d/' + n + '.csv'
##test: reset the singleton and create a fresh data source
DataSource.DATASOURCE = None
ds = DataSource()
vars(ds)
##test: a nonexistent root must be rejected
try:
    DataSource.set_root('foo')
except Exception as e:
    print('*'*5,e)
vars(ds)
##test: an existing directory is accepted
DataSource.set_root('img')
vars(ds)
##test: restore the default root
DataSource.set_root('data')
##test: select a data set; dstype should be inferred as 'dir'
DataSource.set_source('frame-1')
vars(ds)
##test: register an in-memory table
DataSource.set_table('joints',[dict(NODEID='A',X=10,Y=20),dict(NODEID='B',Y=20,X=30)])
vars(ds)
##test: cell data for the same name supersedes the in-memory table
DataSource.set_celldata('joints','NODEID,X,Y\nA,10,20\nB,30,20')
vars(ds)
##test: file path construction, without and with a prefix
ds._file_name('joints')
##test:
ds._file_name('joints',prefix='lcase1')
@extend
class DataSource:

    @classmethod
    def read_table(cls,tablename,optional=False,prefix=None,columns=None,extrasok=True):
        """Return the named table, searching in-memory tables first, then
        %%Table cell data, then <root>/<dsname>.d[/<prefix>]/<tablename>.csv.

        optional -- if True, a missing table yields an empty DataFrame instead of raising.
        columns  -- required column names; missing ones raise ValueError.
        extrasok -- if False, columns beyond *columns* also raise ValueError.
        """
        self = cls.DATASOURCE
        stream = None
        filename = None
        t = None
        def _chk(t,columns=columns):
            # Validate/select columns; 'columns' is captured via the default value.
            if columns is None:
                return t
            prov = set(t.columns)
            reqd = set(columns)
            if reqd-prov:
                raise ValueError("Columns missing for table '{}': {}. Required columns are: {}"
                    .format(tablename,list(reqd-prov),columns))
            if prov-reqd:
                if not extrasok:
                    raise ValueError("Extra columns for table '{}': {}. Required columns are: '{}'"
                        .format(tablename,list(prov-reqd),columns))
            # NOTE(review): nesting reconstructed - the column re-selection is
            # assumed to happen whenever 'columns' is given; confirm against
            # the original indentation.
            t = t[columns]
            return t
        if tablename in self.tables:
            # In-memory tables take precedence and are returned unwrapped.
            return _chk(self.tables[tablename])
        if tablename in self.celldata:
            stream = StringIO(self.celldata[tablename])
        else:
            if self.dsname is not None:
                filename = self._file_name(tablename,prefix=prefix)
                if os.path.exists(filename):
                    stream = open(filename,'r')
        if stream is None:
            if optional:
                d = pd.DataFrame(columns=columns)
            else:
                raise ValueError("Table '{}' does not exist.".format(tablename))
        else:
            d = pd.read_csv(stream,index_col=None,skipinitialspace=True)
        t = Table(d,dsname=self.dsname,tablename=tablename,filename=filename)
        return _chk(t)
##test: read a table from the 'frame-6' data set directory
DataSource.set_source('frame-6')
t = DataSource.read_table('nodes')
t
##test: result is a Table
type(t)
##test:
len(t)
##test: in-place arithmetic on a column slice
t[['X','Y']] /= 3.
t
##test:
vars(t)
##test: requesting specific columns reorders/selects them
DataSource.read_table('nodes',columns=['NODEID','Y','X'])
##test: with extrasok=False, extra columns in the file are an error
try:
    t = DataSource.read_table('nodes',columns=['NODEID','Y','X'],extrasok=False)
except Exception as e:
    print('***',e)
    t = None
t
##test: requesting columns the file does not have is an error
try:
    t = DataSource.read_table('nodes',columns=['NODEID','Y','X','C','D'])
except Exception as e:
    print('***',e)
    t = None
t
##test: a missing table raises by default
try:
    t = DataSource.read_table('nodesxxx',columns=['NODEID','Y','X'],extrasok=False)
except Exception as e:
    print('***',e)
    t = None
t
##test: ...but optional=True returns an empty table instead
try:
    t = DataSource.read_table('nodesxxx',columns=['NODEID','Y','X'],extrasok=False,optional=True)
except Exception as e:
    print('***',e)
    t = None
t
@register_cell_magic('Table')
def cell_table(line,celltext):
    """Cell magic: %%Table <tablename> registers the cell body as CSV cell data."""
    # The magic line must contain exactly one token: the table name.
    mo = re.match(r'\s*(\S+)\s*$',line)
    if not mo:
        raise ValueError('Usage: %%Table tablename')
    tablename = mo.group(1)
    # NOTE(review): 'global' is unnecessary here - DataSource is only read,
    # never rebound.
    global DataSource
    DataSource.set_celldata(tablename,celltext)
%%Table nodes
NODEID,X,Y,Z
A,0.,0.,50001
B,0,4000,50002
C,8000,4000,50003
D,8000,0,50004
##test: the cell magic above registered CSV cell data; read it back
t2 = DataSource.read_table('nodes')
t2
##test: a directly-set table supersedes the cell data of the same name
DataSource.set_table('nodes',t2+t2)
t3 = DataSource.read_table('nodes')
t3
##test:
vars(t2)
@extend
class DataSource:

    @classmethod
    def write_table(cls,table,root=None,dsname=None,tablename=None,prefix=None,precision=None,index=False,makedir=False):
        """Write *table* to <root>/<dsname>.d[/<prefix>]/<tablename>.csv and return the path.

        root/dsname/tablename default to the current data source / the table itself.
        precision -- if given, floats are written with the '%.<precision>g' format.
        index     -- whether to write the DataFrame index column.
        makedir   -- create missing target directories.

        Side effect: updates table.tablename, table.dsname and table.filename
        to reflect where the table was written.
        """
        self = cls.DATASOURCE
        if root is None:
            root = self.root
        if dsname is None:
            dsname = self.dsname
        if tablename is None:
            tablename = table.tablename
        dirname = root + '/' + dsname + '.d'
        if makedir and not os.path.exists(dirname):
            os.mkdir(dirname)
        if prefix is not None:
            dirname = dirname + '/' + prefix
            # NOTE(review): nesting reconstructed - the second mkdir is assumed
            # to create the prefix subdirectory; confirm against the original.
            if makedir and not os.path.exists(dirname):
                os.mkdir(dirname)
        table.tablename = tablename
        table.dsname = dsname
        table.filename = filename = dirname + '/' + tablename + '.csv'
        float_format = None
        if precision is not None:
            float_format = '%.{:d}g'.format(precision)
        table.to_csv(filename,index=index,float_format=float_format)
        return filename
@extend
class Table:

    def signature(self):
        """Return (tablename, filename, sha256-hex) identifying this table's source file.

        Raises ValueError if the backing file no longer exists.
        """
        filename = self.filename
        if os.path.exists(filename):
            # Delegate to the module-level signature() helper for the digest.
            return (self.tablename,self.filename,signature(filename))
        raise ValueError("Table {}: filename: {} - does not exist.".format(self.tablename,self.filename))
def signature(filename):
    """Return the hex SHA-256 digest of the file's contents.

    Parameters:
        filename -- path of the file to hash.
    Raises:
        OSError if the file cannot be opened or read.
    """
    h = hashlib.sha256()
    # 'with' guarantees the handle is closed even if a read raises, unlike
    # the previous open()/close() pair; chunked reads keep memory bounded
    # for arbitrarily large files.
    with open(filename, mode='rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()
# Reset the singleton so a fresh data source can be created.
DataSource.DATASOURCE = None
__ds__ = DataSource()
%%Table nodes
NODEID,X,Y,Z
A,0.,0.,6002.
B,0,4000,7003
C,8000,4000,8004
D,8000,0,9005
##test: read back the cell data registered above
t = DataSource.read_table('nodes')
t
##test:
t[['X','Z']] /= 3
t
##test:
vars(t)
##test: writing into a nonexistent data set dir fails without makedir=True
try:
    DataSource.write_table(t,dsname='test',prefix='pfx',tablename='nodes2')
except Exception as e:
    print('*'*5,e)
##test: with makedir=True and a float precision the write succeeds
%rm -rf data/test.d
try:
    r = DataSource.write_table(t,dsname='test',prefix='pfx',tablename='nodes2',makedir=True,precision=15)
except Exception as e:
    print('*'*5,e)
r
##test: inspect the written CSV
%cat data/test.d/pfx/nodes2.csv
##test: (tablename, filename, sha256) of the written file
t.signature()
##test: clean up
%rm -rf data/test.d
##test:
vars(t)
DataSource.DATASOURCE = None
__ds__ = DataSource()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: class Table
Step3: class DataSource
Step4: Reading Tables
Step5: Writing Tables
|
14,040
|
<ASSISTANT_TASK:>
Python Code:
# Raw input: the ten integers pushed through the generator pipeline below.
raw_data = [1,2,3,4,5,6,7,8,9,10]
# Define a generator that yields input+6
def add_6(numbers):
    """Lazily yield each element of *numbers* increased by 6."""
    for value in numbers:
        yield value + 6
# Define a generator that yields input-2
def subtract_2(numbers):
    """Lazily yield each element of *numbers* decreased by 2."""
    for value in numbers:
        yield value - 2
# Define a generator that yields input*100
def multiply_by_100(numbers):
    """Lazily yield each element of *numbers* multiplied by 100."""
    yield from (value * 100 for value in numbers)
# Step 1 of the pipeline: add 6 (lazy - nothing is computed yet)
step1 = add_6(raw_data)
# Step 2 of the pipeline: subtract 2
step2 = subtract_2(step1)
# Step 3 of the pipeline: multiply by 100
pipeline = multiply_by_100(step2)
# First processed element: (1+6-2)*100 = 500
next(pipeline)
# Second processed element: (2+6-2)*100 = 600
next(pipeline)
# Process all data
# NOTE(review): the loop variable reuses the name 'raw_data', shadowing the
# original input list - confirm this is intended.
for raw_data in pipeline:
    print(raw_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Data Processing Functions
Step2: Create Data Pipeline
Step3: Send First Two Pieces Of Raw Data Through Pipeline
Step4: Send All Raw Data Through Pipeline
|
14,041
|
<ASSISTANT_TASK:>
Python Code:
import requests
# NOTE(review): the 'json' module imported here is later shadowed by the
# variable assignment 'json = jobs_resp.json()' - consider renaming that variable.
import json
# requests_toolbelt module is used to handle the multipart responses.
# Need to `pip install requests-toolbelt` from a terminal to install. This might need doing each time the Notebook pod starts
from requests_toolbelt.multipart import decoder
# Define some URLs and params
base_url = 'https://jobexecutor.prod.openrisknet.org/jobexecutor/rest'
services_url = base_url + '/v1/services'
jobexecutor_url = base_url + '/v1/jobs'
keycloak_url = 'https://sso.prod.openrisknet.org/auth/realms/openrisknet/protocol/openid-connect/token'
# set to False if self signed certificates are being used
tls_verify=True
# Test the PING service. Should give a 200 response and return 'OK'.
# If not then nothing else is going to work.
url = base_url + '/ping'
print("Requesting GET " + url)
resp = requests.get(url, verify=tls_verify)
print('Response Code: ' + str(resp.status_code))
print(resp.text)
# Need to specify your Keycloak SSO username and password so that we can get a token
import getpass
username = input('Username')
password = getpass.getpass('Password')
# Get token from Keycloak. This will have a finite lifetime.
# If your requests are getting a 401 error your token has probably expired.
data = {'grant_type': 'password', 'client_id': 'squonk-jobexecutor', 'username': username, 'password': password}
kresp = requests.post(keycloak_url, data = data)
j = kresp.json()
token = j['access_token']
# NOTE(review): displaying the bearer token as cell output leaks a credential
# into the saved notebook - consider removing this line.
token
# Get a list of all the Squonk services that can be executed.
#
print("Requesting GET " + services_url)
jobs_resp = requests.get(services_url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
# NOTE(review): this assignment shadows the imported 'json' module (here and below).
json = jobs_resp.json()
print(str(len(json)) + " services found")
print(json)
# find the service ID from the list in the list services cell
#service_id = 'core.dataset.filter.slice.v1'
#service_id = 'pipelines.rdkit.conformer.basic'
service_id = 'pipelines.rdkit.o3da.basic'
url = services_url + '/' + service_id
print("Requesting GET " + url)
jobs_resp = requests.get(url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
json = jobs_resp.json()
print(json)
# Result of the request is an array of JobStatus objects.
# The job ID and status are listed
print("Requesting GET " + jobexecutor_url)
jobs_resp = requests.get(jobexecutor_url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
json = jobs_resp.json()
print(str(len(json)) + " jobs found")
for status in json:
    print(status['jobId'] + ' ' + status['status'])
# The 'Dataset slice' takes a slice through a dataset specified by the number of records to skip and then the number to include.
# This is one of Squonk's 'internal' services.
# The job ID is stored in the job_id variable.
url = jobexecutor_url + '/core.dataset.filter.slice.v1'
data = {
    'options': '{"skip":2,"count":3}',
    'input_data': ('input_data', open('nci10.data', 'rb'), 'application/x-squonk-molecule-object+json'),
    'input_metadata': ('input_metadata', open('nci10.metadata', 'rb'), 'application/x-squonk-dataset-metadata+json')
}
print("Requesting POST " + jobexecutor_url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# The job is defined by the job_id variable and is probably the last job executed
url = jobexecutor_url + '/' + job_id + '/status'
print("Requesting GET " + url )
jobs_resp = requests.get(url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
json = jobs_resp.json()
json
# The job is defined by the job_id variable and is probably the last job executed.
# The status of the job needs to be 'RESULTS_READY'
# The response is a multipart response, typically containing the job status, the results metadata and the results data.
# This method can be called for a job any number of times until the job is deleted.
url = jobexecutor_url + '/' + job_id + '/results'
print("Requesting GET " + url )
jobs_resp = requests.get(url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
multipart_data = decoder.MultipartDecoder.from_response(jobs_resp)
for part in multipart_data.parts:
    print(part.content)
    print(part.headers)
# Once you have fetched the results you MUST delete the job.
# The job is defined by the job_id variable and is probably the last job executed.
url = jobexecutor_url + '/' + job_id
print("Requesting DELETE " + url)
jobs_resp = requests.delete(url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
json = jobs_resp.json()
if 'status' in json and json['status'] == 'COMPLETED':
    print('Job deleted')
else:
    print('Problem deleting job')
# Delete all jobs
# First get the current jobs
jobs_resp = requests.get(jobexecutor_url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
json = jobs_resp.json()
print('Found ' + str(len(json)) + ' jobs')
# Now go through them and delete
# If successful the status of the job will then be COMPLETED.
for job in json:
    # NOTE(review): 'id' shadows the builtin of the same name.
    id = job['jobId']
    url = jobexecutor_url + '/' + id
    print("Deleting " + url)
    jobs_resp = requests.delete(url, headers={'Authorization': 'bearer ' + token}, verify=tls_verify)
    j = jobs_resp.json()
    print("Status: " + j['status'])
# The 'Lipinski filter' calculates the classical rule of five properties and allows to filter based on these.
# We have implementations for ChemAxon and RDKit. Here we use the RDKit one.
# The default filter is the classical drug-likeness one defined by Lipinski but you can specify your own criteria instead.
# This is one of Squonk's 'HTTP' services.
# The job ID is stored in the job_id variable.
url = jobexecutor_url + '/rdkit.calculators.lipinski'
data = {
    'options': '{"filterMode":"INCLUDE_PASS"}',
    'input_data': ('input_data', open('nci10.data', 'rb'), 'application/x-squonk-molecule-object+json'),
    'input_metadata': ('input_metadata', open('nci10.metadata', 'rb'), 'application/x-squonk-dataset-metadata+json')
}
print("Requesting POST " + url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# passing data as SDF
url = jobexecutor_url + '/rdkit.calculators.lipinski'
data = {
    'options': '{"filterMode":"INCLUDE_PASS"}',
    'input': ('input', open('Kinase_inhibs.sdf', 'rb'), 'chemical/x-mdl-sdfile')
}
print("Requesting POST " + url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# sucos scoring passing 2 inputs as SDF
url = jobexecutor_url + '/pipelines.rdkit.sucos.basic'
data = {
    'options': '{}',
    'input': ('input', open('mols.sdf', 'rb'), 'chemical/x-mdl-sdfile'),
    'target': ('target', open('benzene.sdf', 'rb'), 'chemical/x-mdl-sdfile')
}
print("Requesting POST " + url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# open3dAlign scoring passing 2 inputs as SDF
# passing the queryMol as pyrimethamine.mol does not work - it needs to be converted to SDF
url = jobexecutor_url + '/pipelines.rdkit.o3da.basic'
data = {
    'options': '{"arg.crippen":"false"}',
    'input': ('input', open('dhfr_3d.sdf', 'rb'), 'chemical/x-mdl-sdfile'),
    'queryMol': ('queryMol', open('pyrimethamine.sdf', 'rb'), 'chemical/x-mdl-sdfile')
}
print("Requesting POST " + url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# open3dAlign scoring passing inputs as dataset and query as SDF
url = jobexecutor_url + '/pipelines.rdkit.o3da.basic'
data = {
    'options': '{"arg.crippen":"false"}',
    'input_data': ('input_data', open('dhfr_3d.data.gz', 'rb'), 'application/x-squonk-molecule-object+json'),
    'input_metadata': ('input_metadata', open('dhfr_3d.metadata', 'rb'), 'application/x-squonk-dataset-metadata+json'),
    'queryMol': ('queryMol', open('pyrimethamine.sdf', 'rb'), 'chemical/x-mdl-sdfile')
}
print("Requesting POST " + url)
jobs_resp = requests.post(url, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# The 'Conformer generator' uses RDKit ETKDG conformer generation tool to generate a number of conformers for the input structures.
# This is one of Squonk's 'Docker' services.
# The job ID is stored in the job_id variable.
service_id = 'pipelines.rdkit.conformer.basic'
data = {
    'options': '{"arg.num":10,"arg.method":"RMSD"}',
    'input_data': ('input_data', open('nci10.data', 'rb'), 'application/x-squonk-molecule-object+json'),
    'input_metadata': ('input_metadata', open('nci10.metadata', 'rb'), 'application/x-squonk-dataset-metadata+json')
}
jobs_resp = requests.post(jobexecutor_url + '/' + service_id, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
# Similarity screening using RDKit.
# This is one of Squonk's 'Nextflow' services.
# The job ID is stored in the job_id variable.
# NOTE: THIS IS NOT WORKING AS THE QUERY STRUCTURE IS NOT BEING PASSED CORRECTLY
service_id = 'pipelines.rdkit.screen.basic'
data = {
    'options': '{"arg.query":{"source":"CC1=CC(=O)C=CC1=O","format":"smiles"},"arg.sim":{"minValue":0.5,"maxValue":1.0}}',
    'input_data': ('input_data', open('nci10_data.json', 'rb'), 'application/x-squonk-molecule-object+json'),
    'input_metadata': ('input_metadata', open('nci10_meta.json', 'rb'), 'application/x-squonk-dataset-metadata+json')
}
jobs_resp = requests.post(jobexecutor_url + '/' + service_id, files=data, headers = {'Authorization': 'bearer ' + token, 'Content-Type': 'multipart/form'}, verify=tls_verify)
print('Response Code: ' + str(jobs_resp.status_code))
job_status = jobs_resp.json()
job_id = job_status['jobId']
print(job_status)
print("\nJobID: " + job_id)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check basic operation
Step2: Authentication
Step3: List all services
Step4: Getting details of a particular service
Step5: List all jobs
Step6: Execute the 'Dataset Slice' service
Step7: Get the status of the current job
Step8: Get the results of a job.
Step9: Delete the job
Step10: Delete all jobs
Step11: Other services
|
14,042
|
<ASSISTANT_TASK:>
Python Code:
# Tensorflow
import tensorflow as tf
print('Tested with TensorFlow 1.2.0')
print('Your TensorFlow version:', tf.__version__)
# Feeding function for enqueue data
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
# Rnn common functions
from tensorflow.contrib.learn.python.learn.estimators import rnn_common
# Model builder
from tensorflow.python.estimator import model_fn as model_fn_lib
# Run an experiment
from tensorflow.contrib.learn.python.learn import learn_runner
# Helpers for data processing
import pandas as pd
import numpy as np
import argparse
import random
# data from: http://ai.stanford.edu/~amaas/data/sentiment/
TRAIN_INPUT = 'data/train.csv'
TEST_INPUT = 'data/test.csv'
# data manually generated
MY_TEST_INPUT = 'data/mytest.csv'
# wordtovec
# https://nlp.stanford.edu/projects/glove/
# the matrix will contain 400,000 word vectors, each with a dimensionality of 50.
word_list = np.load('word_list.npy')
word_list = word_list.tolist() # originally loaded as numpy array
word_list = [word.decode('UTF-8') for word in word_list] # encode words as UTF-8
print('Loaded the word list, length:', len(word_list))
word_vector = np.load('word_vector.npy')
print ('Loaded the word vector, shape:', word_vector.shape)
baseball_index = word_list.index('baseball')
print('Example: baseball')
print(word_vector[baseball_index])
max_seq_length = 10 # maximum length of sentence
num_dims = 50 # dimensions for each word vector
first_sentence = np.zeros((max_seq_length), dtype='int32')
first_sentence[0] = word_list.index("i")
first_sentence[1] = word_list.index("thought")
first_sentence[2] = word_list.index("the")
first_sentence[3] = word_list.index("movie")
first_sentence[4] = word_list.index("was")
first_sentence[5] = word_list.index("incredible")
first_sentence[6] = word_list.index("and")
first_sentence[7] = word_list.index("inspiring")
# first_sentence[8] = 0
# first_sentence[9] = 0
print(first_sentence.shape)
print(first_sentence) # shows the row index for each word
with tf.Session() as sess:
print(tf.nn.embedding_lookup(word_vector, first_sentence).eval().shape)
from os import listdir
from os.path import isfile, join
positiveFiles = ['positiveReviews/' + f for f in listdir('positiveReviews/') if isfile(join('positiveReviews/', f))]
negativeFiles = ['negativeReviews/' + f for f in listdir('negativeReviews/') if isfile(join('negativeReviews/', f))]
numWords = []
for pf in positiveFiles:
with open(pf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Positive files finished')
for nf in negativeFiles:
with open(nf, "r", encoding='utf-8') as f:
line=f.readline()
counter = len(line.split())
numWords.append(counter)
print('Negative files finished')
numFiles = len(numWords)
print('The total number of files is', numFiles)
print('The total number of words in the files is', sum(numWords))
print('The average number of words in the files is', sum(numWords)/len(numWords))
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(numWords, 50)
plt.xlabel('Sequence Length')
plt.ylabel('Frequency')
plt.axis([0, 1200, 0, 8000])
plt.show()
max_seq_len = 250
ids_matrix = np.load('ids_matrix.npy').tolist()
# Parameters for training
STEPS = 15000
BATCH_SIZE = 32
# Parameters for data processing
REVIEW_KEY = 'review'
SEQUENCE_LENGTH_KEY = 'sequence_length'
POSITIVE_REVIEWS = 12500
# copying sequences
data_sequences = [np.asarray(v, dtype=np.int32) for v in ids_matrix]
# generating labels
data_labels = [[1, 0] if i < POSITIVE_REVIEWS else [0, 1] for i in range(len(ids_matrix))]
# also creating a length column, this will be used by the Dynamic RNN
# see more about it here: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
data_length = [max_seq_len for i in range(len(ids_matrix))]
data = list(zip(data_sequences, data_labels, data_length))
random.shuffle(data) # shuffle
data = np.asarray(data)
# separating train and test data
limit = int(len(data) * 0.9)
train_data = data[:limit]
test_data = data[limit:]
LABEL_INDEX = 1
def _number_of_pos_labels(df):
pos_labels = 0
for value in df:
if value[LABEL_INDEX] == [1, 0]:
pos_labels += 1
return pos_labels
pos_labels_train = _number_of_pos_labels(train_data)
total_labels_train = len(train_data)
pos_labels_test = _number_of_pos_labels(test_data)
total_labels_test = len(test_data)
print('Total number of positive labels:', pos_labels_train + pos_labels_test)
print('Proportion of positive labels on the Train data:', pos_labels_train/total_labels_train)
print('Proportion of positive labels on the Test data:', pos_labels_test/total_labels_test)
def get_input_fn(df, batch_size, num_epochs=1, shuffle=True):
    """Build an Estimator input_fn over rows of (sequence, label, length).

    Args:
        df: array whose columns are [int32 sequence, one-hot label, length].
        batch_size: number of examples per batch.
        num_epochs: passes over the data; None repeats indefinitely.
        shuffle: whether to shuffle examples (disable for deterministic tests).

    Returns:
        A zero-argument input_fn yielding a (features, label) tensor pair,
        where features maps REVIEW_KEY and SEQUENCE_LENGTH_KEY to tensors.
    """
    def input_fn():
        sequences = np.asarray([v for v in df[:,0]], dtype=np.int32)
        labels = np.asarray([v for v in df[:,1]], dtype=np.int32)
        length = np.asarray(df[:,2], dtype=np.int32)
        # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/data
        dataset = tf.contrib.data.Dataset.from_tensor_slices(
            (sequences, labels, length))  # reading data from memory
        # BUG FIX: shuffle individual examples *before* batching.  The
        # original shuffled after .repeat().batch(), which only reorders
        # whole batches, so examples inside a batch never mix.
        # For our "manual" test we don't want to shuffle the data.
        if shuffle:
            dataset = dataset.shuffle(buffer_size=100000)
        dataset = (
            dataset
            .repeat(num_epochs)  # repeat dataset the number of epochs
            .batch(batch_size)
        )
        # create iterator
        review, label, length = dataset.make_one_shot_iterator().get_next()
        features = {
            REVIEW_KEY: review,
            SEQUENCE_LENGTH_KEY: length,
        }
        return features, label
    return input_fn
features, label = get_input_fn(test_data, 2, shuffle=False)()
with tf.Session() as sess:
items = sess.run(features)
print(items[REVIEW_KEY])
print(sess.run(label))
train_input_fn = get_input_fn(train_data, BATCH_SIZE, None)
test_input_fn = get_input_fn(test_data, BATCH_SIZE)
def get_model_fn(rnn_cell_sizes,
                 label_dimension,
                 dnn_layer_sizes=[],
                 optimizer='SGD',
                 learning_rate=0.01,
                 embed_dim=128):
    """Build a model_fn for an LSTM-based sentiment classifier.

    Args:
        rnn_cell_sizes: hidden-state size for each stacked LSTM layer.
        label_dimension: number of output classes.
        dnn_layer_sizes: sizes of dense ReLU layers stacked on the last
            RNN activation (read-only, so the mutable default is safe).
        optimizer: optimizer name understood by tf.contrib.layers.optimize_loss.
        learning_rate: learning rate for the optimizer.
        embed_dim: unused -- the embedding dimensionality is fixed by the
            pre-trained `word_vector` matrix.  Kept for backward
            compatibility with existing callers.

    Returns:
        A model_fn(features, labels, mode) for tf.estimator.Estimator.
    """
    def model_fn(features, labels, mode):
        review = features[REVIEW_KEY]
        sequence_length = tf.cast(features[SEQUENCE_LENGTH_KEY], tf.int32)
        # Creating embedding: look up the pre-trained GloVe vector for every
        # word id in the batch.  (Removed a dead tf.Variable of zeros that
        # was created here and immediately overwritten by this lookup.)
        data = tf.nn.embedding_lookup(word_vector, review)
        # Each RNN layer will consist of a LSTM cell
        rnn_layers = [tf.contrib.rnn.LSTMCell(size) for size in rnn_cell_sizes]
        # Construct the layers
        multi_rnn_cell = tf.contrib.rnn.MultiRNNCell(rnn_layers)
        # Runs the RNN model dynamically
        # more about it at:
        # https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
        outputs, final_state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
                                                 inputs=data,
                                                 dtype=tf.float32)
        # Slice to keep only the last cell of the RNN
        last_activations = rnn_common.select_last_activations(outputs, sequence_length)
        # Construct dense layers on top of the last cell of the RNN
        for units in dnn_layer_sizes:
            last_activations = tf.layers.dense(
                last_activations, units, activation=tf.nn.relu)
        # Final dense layer for prediction
        predictions = tf.layers.dense(last_activations, label_dimension)
        predictions_softmax = tf.nn.softmax(predictions)
        # loss/train_op/eval_op stay None unless the mode below requires them.
        # (Removed an unused `preds_op` dict that was built but never read.)
        loss = None
        train_op = None
        eval_op = None
        if mode == tf.contrib.learn.ModeKeys.EVAL:
            eval_op = {
                "accuracy": tf.metrics.accuracy(
                    tf.argmax(input=predictions_softmax, axis=1),
                    tf.argmax(input=labels, axis=1))
            }
        if mode != tf.contrib.learn.ModeKeys.INFER:
            loss = tf.losses.softmax_cross_entropy(labels, predictions)
        if mode == tf.contrib.learn.ModeKeys.TRAIN:
            train_op = tf.contrib.layers.optimize_loss(
                loss,
                tf.contrib.framework.get_global_step(),
                optimizer=optimizer,
                learning_rate=learning_rate)
        return model_fn_lib.EstimatorSpec(mode,
                                          predictions=predictions_softmax,
                                          loss=loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_op)
    return model_fn
model_fn = get_model_fn(rnn_cell_sizes=[64], # size of the hidden layers
label_dimension=2, # since are just 2 classes
dnn_layer_sizes=[128, 64], # size of units in the dense layers on top of the RNN
optimizer='Adam',
learning_rate=0.001,
embed_dim=512)
# create experiment
def generate_experiment_fn():
    """Create an experiment function given hyperparameters.

    Returns:
        A function (run_config, hparams) -> Experiment, used by
        learn_runner to create an Experiment which executes model code
        provided in the form of an Estimator and input functions.
    """
    # BUG FIX: the summary above was originally bare prose inside the
    # function body (stripped triple quotes), which is a syntax error;
    # it has been restored as a proper docstring.
    def _experiment_fn(run_config, hparams):
        estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
        return tf.contrib.learn.Experiment(
            estimator,
            train_input_fn=train_input_fn,
            eval_input_fn=test_input_fn,
            train_steps=STEPS
        )
    return _experiment_fn
# run experiment
learn_runner.run(generate_experiment_fn(), run_config=tf.contrib.learn.RunConfig(model_dir='testing2'))
def string_to_array(s, separator=' '):
    """Split *s* on *separator* and return the resulting list of tokens."""
    tokens = s.split(separator)
    return tokens
def generate_data_row(sentence, label, max_length):
    """Encode *sentence* as a zero-padded int32 vector of word-list indices.

    Returns the (sequence, label, max_length) triple expected by the
    input pipeline.  Assumes the sentence has at most *max_length* words,
    all present in the global `word_list`.
    """
    encoded = np.zeros((max_length), dtype='int32')
    for position, token in enumerate(string_to_array(sentence)):
        encoded[position] = word_list.index(token)
    return encoded, label, max_length
def generate_data(sentences, labels, max_length):
    """Vectorise paired *sentences*/*labels* into an array of encoded rows."""
    rows = [generate_data_row(sentence, label, max_length)
            for sentence, label in zip(sentences, labels)]
    return np.asarray(rows)
sentences = ['i thought the movie was incredible and inspiring',
'this is a great movie',
'this is a good movie but isnt the best',
'it was fine i guess',
'it was definitely bad',
'its not that bad',
'its not that bad i think its a good movie',
'its not bad i think its a good movie']
labels = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[1, 0],
[1, 0],
[1, 0]] # [1, 0]: positive, [0, 1]: negative
my_test_data = generate_data(sentences, labels, 10)
preds = estimator.predict(input_fn=get_input_fn(my_test_data, 1, 1, shuffle=False))
print()
for p, s in zip(preds, sentences):
print('sentence:', s)
print('good review:', p[0], 'bad review:', p[1])
print('-' * 10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Data
Step2: We can search our word list for a word like "baseball", and then access its corresponding vector through the embedding matrix.
Step3: Now that we have our vectors, our first step is taking an input sentence and then constructing its vector representation. Let's say that we have the input sentence "I thought the movie was incredible and inspiring". In order to get the word vectors, we can use TensorFlow's embedding lookup function. This function takes in two arguments, one for the embedding matrix (the wordVectors matrix in our case), and one for the ids of each of the words. The ids vector can be thought of as the integerized representation of the training set. This is basically just the row index of each of the words. Let's look at a quick example to make this concrete.
Step4: TODO### Insert image
Step5: Before creating the ids matrix for the whole training set, let’s first take some time to visualize the type of data that we have. This will help us determine the best value for setting our maximum sequence length. In the previous example, we used a max length of 10, but this value is largely dependent on the inputs you have.
Step6: We can also use the Matplot library to visualize this data in a histogram format.
Step7: From the histogram as well as the average number of words per file, we can safely say that most reviews will fall under 250 words, which is the max sequence length value we will set.
Step8: Data
Step9: Parameters
Step10: Separating train and test data
Step11: Then, let's shuffle the data and use 90% of the reviews for training and the other 10% for testing.
Step12: Verifying if the train and test data have enough positive and negative examples
Step13: Input functions
Step14: Creating the Estimator model
Step16: Create and Run Experiment
Step17: Making Predictions
Step18: Now, let's generate predictions for the sentences
|
14,043
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
14,044
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
from my_scopus import MY_API_KEY, PROXY_URL, MY_AUTHOR_ID
def print_json(resp_json):
    """Pretty-print a JSON-serialisable object with sorted keys."""
    rendered = json.dumps(resp_json, sort_keys=True, indent=4,
                          separators=(',', ': '))
    print(rendered)
def scopus_get_info_api(url, proxy=PROXY_URL, *, verbose=False, json=True):
    """Return the information obtained from the Elsevier (Scopus) API.

    Args:
        url: path appended to the API root, e.g. "author?author_id=...".
        proxy: HTTP proxy URL to route the request through.
        verbose: if True, pretty-print the decoded JSON response.
        json: if True return the decoded JSON body, otherwise the raw
            UTF-8 encoded response text.  (NOTE: this keyword shadows the
            `json` module inside the function; name kept for backward
            compatibility with existing callers.)
    """
    # BUG FIX: honour the `proxy` argument; the original ignored it and
    # always used the module-level PROXY_URL.  Also restored the docstring
    # above, which had lost its triple quotes (a syntax error).
    proxies = {
        "http": proxy
    }
    resp = requests.get("http://api.elsevier.com/content/" + url,
                        headers={'Accept': 'application/json',
                                 'X-ELS-APIKey': MY_API_KEY}, proxies=proxies)
    if verbose:
        print_json(resp.json())
    if json:
        return resp.json()
    return resp.text.encode('utf-8')
def scopus_get_author(author_id):
    """Fetch metrics for one author id and return its retrieval record."""
    endpoint = "author?author_id={}&view=metrics".format(author_id)
    payload = scopus_get_info_api(endpoint)
    return payload['author-retrieval-response'][0]
author_info = scopus_get_author(MY_AUTHOR_ID)
print_json(author_info)
h_index = author_info['h-index']
print("My automatic h_index is {}".format(h_index))
def scopus_search_list(query, field, max=100, *, debug=False):
    """Run a Scopus search and return the raw result entries.

    Args:
        query: Scopus query string, e.g. "AU-ID(123)".
        field: value for the API's "nofield" query parameter
            (presumably a field selector -- TODO confirm against the
            Scopus Search API reference).
        max: maximum number of results to request.
        debug: if True, pretty-print the full API response.

    Returns:
        The list of entry dicts, or [] when the search has no results.
    """
    msg = "search/scopus?query={}&nofield={}&count={}".format(query, field, max)
    # BUG FIX: fetch once and reuse.  The original issued a second,
    # redundant network request when debug was enabled.
    response = scopus_get_info_api(msg)
    if debug:
        print_json(response)
    results = response['search-results']
    # .get() avoids a KeyError when the response carries no 'entry' key,
    # and the local no longer shadows the `list` builtin.
    return results.get('entry') or []
def extract_info_papers(list):
    """Project raw Scopus search entries onto compact paper-info dicts.

    Each returned dict has the keys: id, title, url, citations (int),
    type ('article' / 'congress' / raw subtype), year (4-char string)
    and journal.
    """
    def get_type(code):
        # Conference papers get their own label; the article-like subtypes
        # collapse to 'article'; anything else passes through unchanged.
        if code == 'cp':
            return 'congress'
        if code in ['ar', 're', 'ed', 'ip']:
            return 'article'
        return code
    def as_paper(info):
        return {'id': info['dc:identifier'],
                'title': info['dc:title'],
                'url': info['prism:url'],
                'citations': int(info['citedby-count']),
                'type': get_type(info['subtype']),
                'year': info['prism:coverDate'][:4],
                'journal': info['prism:publicationName']}
    return [as_paper(info) for info in list]
def scopus_papers_from_author(author_id, *, max=100):
    """Return the list of papers (compact info dicts) for *author_id*.

    Args:
        author_id: Scopus author identifier.
        max: maximum number of papers to request.
    """
    # BUG FIX: the summary above was a bare, unquoted prose line inside the
    # function (a syntax error); restored as a proper docstring.  The local
    # was also renamed so it no longer shadows the `list` builtin.
    query = "AU-ID({})".format(author_id)
    field = "dc:identifier"
    entries = scopus_search_list(query, field, max)
    # print_json(entries)
    return extract_info_papers(entries)
papers = scopus_papers_from_author(MY_AUTHOR_ID)
print('{} papers'.format(len(papers)))
import pandas as pd
df = pd.DataFrame.from_dict(papers)
print(df.head())
papers_journal = df[df['type']=='article']
citations = papers_journal.groupby(['year']).sum()
%pylab inline
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
ax = citations.plot(kind='bar', legend=None)
ax.set_xlabel('Year')
ax.set_ylabel('Citations')
def get_scopus_info(SCOPUS_ID):
    """Return a one-line, human-readable citation string for a Scopus id."""
    fields = ("?field=authors,title,publicationName,volume,issueIdentifier,"
              "prism:pageRange,coverDate,article-number,doi,citedby-count,prism:aggregationType")
    url = "abstract/scopus_id/" + SCOPUS_ID + fields
    resp = scopus_get_info_api(url, json=True)
    results = resp['abstracts-retrieval-response']
    authors_info = results['authors']
    info = results['coredata']
    # Pre-compute the pieces that need fallbacks before formatting.
    author_names = ', '.join([au['ce:indexed-name'] for au in authors_info['author']])
    volume = info.get('prism:volume') or 1
    articlenum = info.get('prism:pageRange') or info.get('article-number')
    doi = 'doi:' + (info.get('prism:doi') or 'NA')
    fstring = '{authors}, {title}, {journal}, {volume}, {articlenum}, ({date}). {doi} (cited {cites} times).\n'
    return fstring.format(authors=author_names,
                          title=info['dc:title'],
                          journal=info['prism:publicationName'],
                          volume=volume,
                          articlenum=articlenum,
                          date=info['prism:coverDate'],
                          doi=doi,
                          cites=int(info['citedby-count']))
df_lasts = df[df['year']=='2015']
for id in df.sort(['citations'], ascending=[0])['id']:
#print("id: '{}'".format(id))
print(get_scopus_info(id))
def get_author_info(paper_id):
    """Return the indexed author names of the paper identified by *paper_id*."""
    url = ("abstract/scopus_id/"
           + paper_id
           + "?field=authors,title,publicationName,volume,issueIdentifier,"
           + "prism:pageRange,coverDate,article-number,doi,citedby-count,prism:aggregationType")
    resp = scopus_get_info_api(url, json=True)
    author_records = resp['abstracts-retrieval-response']['authors']['author']
    return [record['ce:indexed-name'] for record in author_records]
from collections import defaultdict
def get_authors_list(papers_id):
    """Count, per author name, in how many of *papers_id* each author appears.

    Args:
        papers_id: iterable of Scopus paper identifiers.

    Returns:
        defaultdict(int) mapping author name -> number of papers.
    """
    number = defaultdict(int)
    # Cleanup: the original enumerated the ids but never used the index.
    for paper_id in papers_id:
        for author in get_author_info(paper_id):
            number[author] += 1
    return number
authors = get_authors_list(df['id'])
print(authors)
def show_author_list(authors):
    """Print one "name: count" line per author, most frequent first."""
    ranked = sorted(authors.items(), key=lambda item: item[1], reverse=True)
    for name, count in ranked:
        print("{}: {}".format(name, count))
show_author_list(authors = get_authors_list(df.id))
revistas = sorted(set(papers_journal['journal']))
for revista in revistas:
print(revista)
def scopus_search_papers(words, type='ar'):
    """Search papers published after 2010 matching *words* in title/abstract/keywords.

    Args:
        words: search terms for the TITLE-ABS-KEY clause.
        type: Scopus document type ('ar' = article, 'cp' = conference paper).

    Returns:
        List of compact paper-info dicts (see extract_info_papers).
    """
    # BUG FIX: the original had a bare, unquoted prose line here (stripped
    # docstring quotes -- a syntax error); restored as a proper docstring.
    # The local was also renamed so it no longer shadows the `list` builtin.
    query = "TITLE-ABS-KEY({}) AND PUBYEAR > 2010 AND DOCTYPE({})".format(words, type)
    field = "dc:identifier"
    entries = scopus_search_list(query, field, 200)
    return extract_info_papers(entries)
results = scopus_search_papers("large scale optimization evolutionary")
papers_lsgo = pd.DataFrame.from_dict(results)
num_total = len(papers_lsgo)
print(sorted(set(papers_lsgo['year'])))
lsgo_journal = papers_lsgo.groupby(['journal']).sum()
lsgo_journal.columns = ['number']
lsgo_journal = lsgo_journal.sort('number', ascending=False)
lsgo_journal = lsgo_journal[lsgo_journal['number']>0]
print(lsgo_journal)
papers_lsgo = papers_lsgo.sort('citations', ascending=False)
papers_lsgo = papers_lsgo[papers_lsgo['citations']>0]
for id in papers_lsgo['id'][:10]:
print(get_scopus_info(id))
show_author_list(authors = get_authors_list(papers_lsgo.id))
results_cp = scopus_search_papers("large scale optimization evolutionary", type='cp')
results = pd.DataFrame.from_dict(results_cp)
show_author_list(authors = get_authors_list(results.id))
results_cp = scopus_search_papers("large scale optimization differential evolution", type='cp')
results = pd.DataFrame.from_dict(results_cp)
show_author_list(authors = get_authors_list(results.id))
results = results.sort(['citations'], ascending=False)
for id in results.id[:10]:
print(get_scopus_info(id))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: First, we define a function to access the information
Step3: Then, a util function to show the information
Step4: Example, to obtain my h-index
Step6: Obtaining list of references
Step7: Translate to pandas
Step8: Ploting results
Step9: Get complete reference of a paper
Step10: Get authors from a list
Step11: List of journals in which I have published
Step13: Searching by a criterion
Step14: Get the number of journals with the results
Step15: Count the references and number for each author
Step16: For Congress
|
14,045
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib,scipy,scikit-learn
# to install watermark just uncomment the following line:
#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=150,
n_features=2,
centers=3,
cluster_std=0.5,
shuffle=True,
random_state=0)
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(X[:,0], X[:,1], c='white', marker='o', s=50)
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/spheres.png', dpi=300)
plt.show()
from sklearn.cluster import KMeans
km = KMeans(n_clusters=3,
init='random',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
plt.scatter(X[y_km==0,0],
X[y_km==0,1],
s=50,
c='lightgreen',
marker='s',
label='cluster 1')
plt.scatter(X[y_km==1,0],
X[y_km==1,1],
s=50,
c='orange',
marker='o',
label='cluster 2')
plt.scatter(X[y_km==2,0],
X[y_km==2,1],
s=50,
c='lightblue',
marker='v',
label='cluster 3')
plt.scatter(km.cluster_centers_[:,0],
km.cluster_centers_[:,1],
s=250,
marker='*',
c='red',
label='centroids')
plt.legend()
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/centroids.png', dpi=300)
plt.show()
print('Distortion: %.2f' % km.inertia_)
distortions = []
for i in range(1, 11):
km = KMeans(n_clusters=i,
init='k-means++',
n_init=10,
max_iter=300,
random_state=0)
km.fit(X)
distortions .append(km.inertia_)
plt.plot(range(1,11), distortions , marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.tight_layout()
#plt.savefig('./figures/elbow.png', dpi=300)
plt.show()
import numpy as np
from matplotlib import cm
from sklearn.metrics import silhouette_samples
km = KMeans(n_clusters=3,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km == c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette.png', dpi=300)
plt.show()
km = KMeans(n_clusters=2,
init='k-means++',
n_init=10,
max_iter=300,
tol=1e-04,
random_state=0)
y_km = km.fit_predict(X)
plt.scatter(X[y_km==0,0],
X[y_km==0,1],
s=50,
c='lightgreen',
marker='s',
label='cluster 1')
plt.scatter(X[y_km==1,0],
X[y_km==1,1],
s=50,
c='orange',
marker='o',
label='cluster 2')
plt.scatter(km.cluster_centers_[:,0], km.cluster_centers_[:,1], s=250, marker='*', c='red', label='centroids')
plt.legend()
plt.grid()
plt.tight_layout()
#plt.savefig('./figures/centroids_bad.png', dpi=300)
plt.show()
cluster_labels = np.unique(y_km)
n_clusters = cluster_labels.shape[0]
silhouette_vals = silhouette_samples(X, y_km, metric='euclidean')
y_ax_lower, y_ax_upper = 0, 0
yticks = []
for i, c in enumerate(cluster_labels):
c_silhouette_vals = silhouette_vals[y_km == c]
c_silhouette_vals.sort()
y_ax_upper += len(c_silhouette_vals)
color = cm.jet(i / n_clusters)
plt.barh(range(y_ax_lower, y_ax_upper), c_silhouette_vals, height=1.0,
edgecolor='none', color=color)
yticks.append((y_ax_lower + y_ax_upper) / 2)
y_ax_lower += len(c_silhouette_vals)
silhouette_avg = np.mean(silhouette_vals)
plt.axvline(silhouette_avg, color="red", linestyle="--")
plt.yticks(yticks, cluster_labels + 1)
plt.ylabel('Cluster')
plt.xlabel('Silhouette coefficient')
plt.tight_layout()
# plt.savefig('./figures/silhouette_bad.png', dpi=300)
plt.show()
import pandas as pd
import numpy as np
np.random.seed(123)
variables = ['X', 'Y', 'Z']
labels = ['ID_0','ID_1','ID_2','ID_3','ID_4']
X = np.random.random_sample([5,3])*10
df = pd.DataFrame(X, columns=variables, index=labels)
df
from scipy.spatial.distance import pdist,squareform
row_dist = pd.DataFrame(squareform(pdist(df, metric='euclidean')), columns=labels, index=labels)
row_dist
# 1. incorrect approach: Squareform distance matrix
from scipy.cluster.hierarchy import linkage
row_clusters = linkage(row_dist, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
# 2. correct approach: Condensed distance matrix
row_clusters = linkage(pdist(df, metric='euclidean'), method='complete')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
# 3. correct approach: Input sample matrix
row_clusters = linkage(df.values, method='complete', metric='euclidean')
pd.DataFrame(row_clusters,
columns=['row label 1', 'row label 2', 'distance', 'no. of items in clust.'],
index=['cluster %d' %(i+1) for i in range(row_clusters.shape[0])])
from scipy.cluster.hierarchy import dendrogram
# make dendrogram black (part 1/2)
# from scipy.cluster.hierarchy import set_link_color_palette
# set_link_color_palette(['black'])
row_dendr = dendrogram(row_clusters,
labels=labels,
# make dendrogram black (part 2/2)
# color_threshold=np.inf
)
plt.tight_layout()
plt.ylabel('Euclidean distance')
#plt.savefig('./figures/dendrogram.png', dpi=300,
# bbox_inches='tight')
plt.show()
# plot row dendrogram
fig = plt.figure(figsize=(8,8))
axd = fig.add_axes([0.09,0.1,0.2,0.6])
row_dendr = dendrogram(row_clusters, orientation='right')
# reorder data with respect to clustering
df_rowclust = df.ix[row_dendr['leaves'][::-1]]
axd.set_xticks([])
axd.set_yticks([])
# remove axes spines from dendrogram
for i in axd.spines.values():
i.set_visible(False)
# plot heatmap
axm = fig.add_axes([0.23,0.1,0.6,0.6]) # x-pos, y-pos, width, height
cax = axm.matshow(df_rowclust, interpolation='nearest', cmap='hot_r')
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
# plt.savefig('./figures/heatmap.png', dpi=300)
plt.show()
from sklearn.cluster import AgglomerativeClustering
ac = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
labels = ac.fit_predict(X)
print('Cluster labels: %s' % labels)
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
plt.scatter(X[:,0], X[:,1])
plt.tight_layout()
#plt.savefig('./figures/moons.png', dpi=300)
plt.show()
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8,3))
km = KMeans(n_clusters=2, random_state=0)
y_km = km.fit_predict(X)
ax1.scatter(X[y_km==0,0], X[y_km==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
ax1.scatter(X[y_km==1,0], X[y_km==1,1], c='red', marker='s', s=40, label='cluster 2')
ax1.set_title('K-means clustering')
ac = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='complete')
y_ac = ac.fit_predict(X)
ax2.scatter(X[y_ac==0,0], X[y_ac==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
ax2.scatter(X[y_ac==1,0], X[y_ac==1,1], c='red', marker='s', s=40, label='cluster 2')
ax2.set_title('Agglomerative clustering')
plt.legend()
plt.tight_layout()
#plt.savefig('./figures/kmeans_and_ac.png', dpi=300)
plt.show()
from sklearn.cluster import DBSCAN
db = DBSCAN(eps=0.2, min_samples=5, metric='euclidean')
y_db = db.fit_predict(X)
plt.scatter(X[y_db==0,0], X[y_db==0,1], c='lightblue', marker='o', s=40, label='cluster 1')
plt.scatter(X[y_db==1,0], X[y_db==1,1], c='red', marker='s', s=40, label='cluster 2')
plt.legend()
plt.tight_layout()
#plt.savefig('./figures/moons_dbscan.png', dpi=300)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: <br>
Step3: <br>
Step4: Comparison to "bad" clustering
Step5: <br>
Step6: <br>
Step7: We can either pass a condensed distance matrix (upper triangular) from the pdist function, or we can pass the "original" data array and define the 'euclidean' metric as function argument n linkage. However, we should nott pass the squareform distance matrix, which would yield different distance values although the overall clustering could be the same.
Step8: <br>
Step9: <br>
Step10: <br>
Step11: K-means and hierarchical clustering
Step12: Density-based clustering
|
14,046
|
<ASSISTANT_TASK:>
Python Code:
from theano.sandbox import cuda
cuda.use('gpu1')
%matplotlib inline
from __future__ import print_function, division
#path = "data/state/"
path = "data/state/sample/"
import utils; reload(utils)
from utils import *
from IPython.display import FileLink
batch_size=64
%cd data/state
%cd train
%mkdir ../sample
%mkdir ../sample/train
%mkdir ../sample/valid
for d in glob('c?'):
os.mkdir('../sample/train/'+d)
os.mkdir('../sample/valid/'+d)
from shutil import copyfile
g = glob('c?/*.jpg')
shuf = np.random.permutation(g)
for i in range(1500): copyfile(shuf[i], '../sample/train/' + shuf[i])
%cd ../valid
g = glob('c?/*.jpg')
shuf = np.random.permutation(g)
for i in range(1000): copyfile(shuf[i], '../sample/valid/' + shuf[i])
%cd ../../..
%mkdir data/state/results
%mkdir data/state/sample/test
batches = get_batches(path+'train', batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
(val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames,
test_filename) = get_classes(path)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.summary()
10*3*224*224
np.round(model.predict_generator(batches, batches.N)[:10],2)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.optimizer.lr=0.001
model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
rnd_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=True)
val_res = [model.evaluate_generator(rnd_batches, rnd_batches.nb_sample) for i in range(10)]
np.round(val_res, 2)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax', W_regularizer=l2(0.01))
])
model.compile(Adam(lr=10e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.optimizer.lr=0.001
model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(100, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.optimizer.lr = 0.01
model.fit_generator(batches, batches.nb_sample, nb_epoch=5, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
def conv1(batches):
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Convolution2D(32,3,3, activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Convolution2D(64,3,3, activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Flatten(),
Dense(200, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, batches.nb_sample, nb_epoch=2, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.optimizer.lr = 0.001
model.fit_generator(batches, batches.nb_sample, nb_epoch=4, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
return model
conv1(batches)
gen_t = image.ImageDataGenerator(width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(height_shift_range=0.05)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(shear_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(rotation_range=15)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(channel_shift_range=20)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
model.optimizer.lr = 0.0001
model.fit_generator(batches, batches.nb_sample, nb_epoch=5, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
model.fit_generator(batches, batches.nb_sample, nb_epoch=25, validation_data=val_batches,
nb_val_samples=val_batches.nb_sample)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create sample
Step2: Create batches
Step3: Basic models
Step4: As you can see below, this training is going nowhere...
Step5: Let's first check the number of parameters to see that there's enough parameters to find some useful relationships
Step6: Over 1.5 million parameters - that should be enough. Incidentally, it's worth checking you understand why this is the number of parameters in this layer
Step7: Since we have a simple model with no regularization and plenty of parameters, it seems most likely that our learning rate is too high. Perhaps it is jumping to a solution where it predicts one or two classes with high confidence, so that it can give a zero prediction to as many classes as possible - that's the best approach for a model that is no better than random, and there is likely to be where we would end up with a high learning rate. So let's check
Step8: Our hypothesis was correct. It's nearly always predicting class 1 or 6, with very high confidence. So let's try a lower learning rate
Step9: Great - we found our way out of that hole... Now we can increase the learning rate and see where we can get to.
Step10: We're stabilizing at validation accuracy of 0.39. Not great, but a lot better than random. Before moving on, let's check that our validation set on the sample is large enough that it gives consistent results
Step11: Yup, pretty consistent - if we see improvements of 3% or more, it's probably not random, based on the above samples.
Step12: Looks like we can get a bit over 50% accuracy this way. This will be a good benchmark for our future models - if we can't beat 50%, then we're not even beating a linear model trained on a sample, so we'll know that's not a good approach.
Step13: Not looking very encouraging... which isn't surprising since we know that CNNs are a much better choice for computer vision problems. So we'll try one.
Step14: The training set here is very rapidly reaching a very high accuracy. So if we could regularize this, perhaps we could get a reasonable result.
Step15: Height shift
Step16: Random shear angles (max in radians) -
Step17: Rotation
Step18: Channel shift
Step19: And finally, putting it all together!
Step20: At first glance, this isn't looking encouraging, since the validation set is poor and getting worse. But the training set is getting better, and still has a long way to go in accuracy - so we should try annealing our learning rate and running more epochs, before we make a decisions.
Step21: Lucky we tried that - we starting to make progress! Let's keep going.
|
14,047
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Install imageio for creating animations.
!pip -q install imageio
!pip -q install scikit-image
!pip install git+https://github.com/tensorflow/docs
#@title Imports and function definitions
from absl import logging
import imageio
import PIL.Image
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
tf.random.set_seed(0)
import tensorflow_hub as hub
from tensorflow_docs.vis import embed
import time
try:
from google.colab import files
except ImportError:
pass
from IPython import display
from skimage import transform
# We could retrieve this value from module.get_input_shapes() if we didn't know
# beforehand which module we will be using.
latent_dim = 512
# Interpolates between two vectors that are non-zero and don't both lie on a
# line going through origin. First normalizes v2 to have the same norm as v1.
# Then interpolates between the two vectors on the hypersphere.
def interpolate_hypersphere(v1, v2, num_steps):
v1_norm = tf.norm(v1)
v2_norm = tf.norm(v2)
v2_normalized = v2 * (v1_norm / v2_norm)
vectors = []
for step in range(num_steps):
interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1)
interpolated_norm = tf.norm(interpolated)
interpolated_normalized = interpolated * (v1_norm / interpolated_norm)
vectors.append(interpolated_normalized)
return tf.stack(vectors)
# Simple way to display an image.
def display_image(image):
image = tf.constant(image)
image = tf.image.convert_image_dtype(image, tf.uint8)
return PIL.Image.fromarray(image.numpy())
# Given a set of images, show an animation.
def animate(images):
images = np.array(images)
converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
imageio.mimsave('./animation.gif', converted_images)
return embed.embed_file('./animation.gif')
logging.set_verbosity(logging.ERROR)
progan = hub.load("https://tfhub.dev/google/progan-128/1").signatures['default']
def interpolate_between_vectors():
v1 = tf.random.normal([latent_dim])
v2 = tf.random.normal([latent_dim])
# Creates a tensor with 25 steps of interpolation between v1 and v2.
vectors = interpolate_hypersphere(v1, v2, 50)
# Uses module to generate images from the latent space.
interpolated_images = progan(vectors)['default']
return interpolated_images
interpolated_images = interpolate_between_vectors()
animate(interpolated_images)
image_from_module_space = True # @param { isTemplate:true, type:"boolean" }
def get_module_space_image():
vector = tf.random.normal([1, latent_dim])
images = progan(vector)['default'][0]
return images
def upload_image():
uploaded = files.upload()
image = imageio.imread(uploaded[list(uploaded.keys())[0]])
return transform.resize(image, [128, 128])
if image_from_module_space:
target_image = get_module_space_image()
else:
target_image = upload_image()
display_image(target_image)
tf.random.set_seed(42)
initial_vector = tf.random.normal([1, latent_dim])
display_image(progan(initial_vector)['default'][0])
def find_closest_latent_vector(initial_vector, num_optimization_steps,
steps_per_image):
images = []
losses = []
vector = tf.Variable(initial_vector)
optimizer = tf.optimizers.Adam(learning_rate=0.01)
loss_fn = tf.losses.MeanAbsoluteError(reduction="sum")
for step in range(num_optimization_steps):
if (step % 100)==0:
print()
print('.', end='')
with tf.GradientTape() as tape:
image = progan(vector.read_value())['default'][0]
if (step % steps_per_image) == 0:
images.append(image.numpy())
target_image_difference = loss_fn(image, target_image[:,:,:3])
# The latent vectors were sampled from a normal distribution. We can get
# more realistic images if we regularize the length of the latent vector to
# the average length of vector from this distribution.
regularizer = tf.abs(tf.norm(vector) - np.sqrt(latent_dim))
loss = target_image_difference + regularizer
losses.append(loss.numpy())
grads = tape.gradient(loss, [vector])
optimizer.apply_gradients(zip(grads, [vector]))
return images, losses
num_optimization_steps=200
steps_per_image=5
images, loss = find_closest_latent_vector(initial_vector, num_optimization_steps, steps_per_image)
plt.plot(loss)
plt.ylim([0,max(plt.ylim())])
animate(np.stack(images))
display_image(np.concatenate([images[-1], target_image], axis=1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CelebA Progressive GAN 모델로 인공 얼굴 생성하기
Step2: 잠재 공간 보간
Step3: 잠재 공간에서 가장 가까운 벡터 찾기
Step4: 대상 이미지와 잠재 공간 변수에 의해 생성된 이미지 사이의 손실 함수를 정의한 후, 경사 하강을 사용하여 손실을 최소화하는 변수 값을 찾을 수 있습니다.
Step5: 결과를 대상과 비교합니다.
|
14,048
|
<ASSISTANT_TASK:>
Python Code:
theta0 = 0.6
a0, b0 = 1, 1
print("step 0: mode = unknown")
xx = np.linspace(0, 1, 1000)
plt.plot(xx, sp.stats.beta(a0, b0).pdf(xx), label="initial");
np.random.seed(0)
x = sp.stats.bernoulli(theta0).rvs(50)
N0, N1 = np.bincount(x, minlength=2)
a1, b1 = a0 + N1, b0 + N0
plt.plot(xx, sp.stats.beta(a1, b1).pdf(xx), label="1st");
print("step 1: mode =", (a1 - 1)/(a1 + b1 - 2))
x = sp.stats.bernoulli(theta0).rvs(50)
N0, N1 = np.bincount(x, minlength=2)
a2, b2 = a1 + N1, b1 + N0
plt.plot(xx, sp.stats.beta(a2, b2).pdf(xx), label="2nd");
print("step 2: mode =", (a2 - 1)/(a2 + b2 - 2))
x = sp.stats.bernoulli(theta0).rvs(50)
N0, N1 = np.bincount(x, minlength=2)
a3, b3 = a2 + N1, b2 + N0
plt.plot(xx, sp.stats.beta(a3, b3).pdf(xx), label="3rd");
print("step 3: mode =", (a3 - 1)/(a3 + b3 - 2))
x = sp.stats.bernoulli(theta0).rvs(50)
N0, N1 = np.bincount(x, minlength=2)
a4, b4 = a3 + N1, b3 + N0
plt.plot(xx, sp.stats.beta(a4, b4).pdf(xx), label="4th");
print("step 4: mode =", (a4 - 1)/(a4 + b4 - 2))
plt.legend()
plt.show()
def plot_dirichlet(alpha):
def project(x):
n1 = np.array([1, 0, 0])
n2 = np.array([0, 1, 0])
n3 = np.array([0, 0, 1])
n12 = (n1 + n2)/2
m1 = np.array([1, -1, 0])
m2 = n3 - n12
m1 = m1/np.linalg.norm(m1)
m2 = m2/np.linalg.norm(m2)
return np.dstack([(x-n12).dot(m1), (x-n12).dot(m2)])[0]
def project_reverse(x):
n1 = np.array([1, 0, 0])
n2 = np.array([0, 1, 0])
n3 = np.array([0, 0, 1])
n12 = (n1 + n2)/2
m1 = np.array([1, -1, 0])
m2 = n3 - n12
m1 = m1/np.linalg.norm(m1)
m2 = m2/np.linalg.norm(m2)
return x[:,0][:, np.newaxis] * m1 + x[:,1][:, np.newaxis] * m2 + n12
eps = np.finfo(float).eps * 10
X = project([[1-eps,0,0], [0,1-eps,0], [0,0,1-eps]])
import matplotlib.tri as mtri
triang = mtri.Triangulation(X[:,0], X[:,1], [[0, 1, 2]])
refiner = mtri.UniformTriRefiner(triang)
triang2 = refiner.refine_triangulation(subdiv=6)
XYZ = project_reverse(np.dstack([triang2.x, triang2.y, 1-triang2.x-triang2.y])[0])
pdf = sp.stats.dirichlet(alpha).pdf(XYZ.T)
plt.tricontourf(triang2, pdf)
plt.axis("equal")
plt.show()
theta0 = np.array([0.2, 0.6, 0.2])
np.random.seed(0)
x1 = np.random.choice(3, 20, p=theta0)
N1 = np.bincount(x1, minlength=3)
x2 = np.random.choice(3, 100, p=theta0)
N2 = np.bincount(x2, minlength=3)
x3 = np.random.choice(3, 1000, p=theta0)
N3 = np.bincount(x3, minlength=3)
a0 = np.ones(3) / 3
plot_dirichlet(a0)
a1 = a0 + N1
plot_dirichlet(a1)
print((a1 - 1)/(a1.sum() - 3))
a2 = a1 + N2
plot_dirichlet(a2)
print((a2 - 1)/(a2.sum() - 3))
a3 = a2 + N3
plot_dirichlet(a3)
print((a3 - 1)/(a3.sum() - 3))
mu, sigma2 = 2, 4
mu0, sigma20 = 0, 1
xx = np.linspace(1, 3, 1000)
np.random.seed(0)
N = 10
x = sp.stats.norm(mu).rvs(N)
mu0 = sigma2/(N*sigma20 + sigma2) * mu0 + (N*sigma20)/(N*sigma20 + sigma2)*x.mean()
sigma20 = 1/(1/sigma20 + N/sigma2)
plt.plot(xx, sp.stats.norm(mu0, sigma20).pdf(xx), label="1st");
print(mu0)
N = 20
x = sp.stats.norm(mu).rvs(N)
mu0 = sigma2/(N*sigma20 + sigma2) * mu0 + (N*sigma20)/(N*sigma20 + sigma2)*x.mean()
sigma20 = 1/(1/sigma20 + N/sigma2)
plt.plot(xx, sp.stats.norm(mu0, sigma20).pdf(xx), label="2nd");
print(mu0)
N = 50
x = sp.stats.norm(mu).rvs(N)
mu0 = sigma2/(N*sigma20 + sigma2) * mu0 + (N*sigma20)/(N*sigma20 + sigma2)*x.mean()
sigma20 = 1/(1/sigma20 + N/sigma2)
plt.plot(xx, sp.stats.norm(mu0, sigma20).pdf(xx), label="3rd");
print(mu0)
N = 100
x = sp.stats.norm(mu).rvs(N)
mu0 = sigma2/(N*sigma20 + sigma2) * mu0 + (N*sigma20)/(N*sigma20 + sigma2)*x.mean()
sigma20 = 1/(1/sigma20 + N/sigma2)
plt.plot(xx, sp.stats.norm(mu0, sigma20).pdf(xx), label="4th");
print(mu0)
plt.axis([1, 3, 0, 20])
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 카테고리 분포의 모수 추정
Step2: 정규 분포의 기댓값 모수 추정
|
14,049
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import polo
import demo
data = demo.generate_data()
print data.head()
facets = {'subplot' : 'nx',
'line' : 'solver_type',
'slider' : 'time'}
# Select only rows for WENO5 and TVD runs
tvd_weno5 = data[(data['weno_order']==5) | np.isnan(data['weno_order'])]
fig = plt.figure(figsize=(8,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : None,
'line' : 'solver_type',
'slider' : ['time','nx']}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets)
facets = {'subplot' : None,
'line' : None,
'slider' : ['time','nx','solver_type']}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : 'nx',
'line' : ('solver_type',('classic',)),
'slider' : 'time'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(data,fig,facets);
facets = {'subplot' : 'nx',
'line' : 'time',
'slider' : 'solver_type'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : 'solver_type',
'line' : 'time',
'slider' : 'nx'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : 'time',
'line' : 'solver_type',
'slider' : 'nx'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : 'time',
'line' : 'nx',
'slider' : 'solver_type'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
facets = {'subplot' : 'nx',
'line' : 'time',
'slider' : 'solver_type'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
t05 = tvd_weno5[tvd_weno5['time']==0.5]
t05.head()
facets = {'subplot' : 'nx',
'line' : 'solver_type'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(t05,fig,facets);
facets = {'line' : 'solver_type',
'slider' : 'nx'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(t05,fig,facets);
facets = {'subplot' : 'solver_type',
'slider' : 'nx'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(t05,fig,facets);
facets = {'subplot' : 'solver_type',
'line' : ('time',(0.5,)),
'slider' : 'nx'}
fig = plt.figure(figsize=(10,6))
polo.comparison_plot(tvd_weno5,fig,facets);
def q_minus_one(row):
rSubtract 1 from q.
sol = row.data[row.index[0]]
xc = sol.state.grid.p_centers[0] + 0
q = sol.state.q[0,:] - 1
return xc, q
facets = {'subplot' : 'nx',
'line' : 'solver_type',
'slider' : 'time'}
fig = plt.figure(figsize=(8,6))
polo.comparison_plot(tvd_weno5,fig,facets,data_fun = q_minus_one,ylim=(-1.1,0.1));
def total_mass(row):
rGet data for 1D PyClaw results.
sol = row.data[row.index[0]]
time = row['time']
xc = sol.state.grid.p_centers[0] + 0
q = sol.state.q[0,:]
dx = sol.state.grid.delta[0]
mass = np.sum(q)*dx
return time, mass
facets = {'subplot' : 'nx',
'line' : 'solver_type',
'slider' : 'time'}
fig = plt.figure(figsize=(8,6))
polo.comparison_plot(tvd_weno5,fig,facets,data_fun = total_mass);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's generate some demonstration data from a set of PyClaw runs
Step2: Here data is a Pandas DataFrame; each row corresponds to one output time from a PyClaw simulation. The simulations have been run with a variety of algorithms and grid sizes.
Step3: Play with the slider above to see the difference between the two method solutions at various times.
Step4: Restrict to a subset of the data
Step5: Mixing it up
Step6: Comparing over fewer dimensions
Step7: The same can be achieved via
Step9: Manipulating the data
Step11: Plotting scalar functionals
|
14,050
|
<ASSISTANT_TASK:>
Python Code:
# Here we make the list of the free models
# the fixed component is handled seperately
modelList = [ draco, galdif ]
par_index = 0 # This is the index of the source we care about (i.e., Draco)
help(lfu.NLL_func)
ftomin = lfu.NLL_func(n_obs,fixed,modelList)
init_pars = np.ones((2))
print "NLL = %.1f"%ftomin(init_pars)
help(lfu.Minimize)
# Ok, let's fit the function and parse out the results
result = lfu.Minimize(ftomin,init_pars)
mle_pars = result[0]
mle = mle_pars[par_index]
nll_min = result[1]
nll_null = ftomin([0.,mle_pars[1]])
TS = 2*(nll_null-nll_min)
pvalue = 0.5 # fixme
print "Maximum likelihood estimate = %.1e cm-2 s-1 MeV-1"%(mle*1e-12)
print "Minimum function value = %.2f"%nll_min
print "Test Statistic = %.2f"%TS
print "p-value = %.2f"%pvalue
# Set up a likelihood scan
par_bounds = (1e-2,2.0)
nsteps = 25
# Make a set of input parameter vectors that will serve to do the scan
par_sets = lfu.MakeParSets_1DScan(mle_pars,par_index,par_bounds[0],par_bounds[1],nsteps,log=True)
print par_sets
# This tells us to fix the Draco normalization during the Profile likelihood scan
fix_par_mask = np.zeros((2),'?')
fix_par_mask[par_index] = True
pf1 = lfu.ParameterScan(ftomin,par_sets)
print pf1
# Let's plot the likelihood, actually what we are going to plot is the
# delta log-likelihood (w.r.t. the maximum)
fig1,ax1 = lfu.PlotNLLScan(par_sets[0:,par_index],nll_min-pf1)
interp1 = lfu.BuildInterpolator(par_sets,par_index,pf1)
help(lfu.SolveForErrorLevel)
lim1 = lfu.SolveForErrorLevel(interp1,nll_min,1.35,mle,par_bounds)
print "Simple upper limit %.2e cm-2 s-1 MeV-1"%(lim1*1e-12)
fig2,ax2 = lfu.PlotNLLScan(par_sets[0:,par_index],nll_min-pf1)
ax2.hlines(-1.35,0,2.0,linestyles=u'dotted')
ax2.vlines(lim1,-10,1,linestyles=u'dashed')
leg = ax2.legend()
help(lfu.ProfileScan)
pf2 = lfu.ProfileScan(n_obs, par_sets, fix_par_mask, fixed, modelList)
interp2 = lfu.BuildInterpolator(par_sets,par_index,pf2)
lim2 = lfu.SolveForErrorLevel(interp2,nll_min,1.35,mle,par_bounds)
print "Delta Log-Likelihood at 1.0e-12 cm-2 s-1 MeV-1 is %.1f"%(interp2(1.0)-nll_min)
print "Profile upper limit %.2e cm-2 s-1 MeV-1"%(lim2*1e-12)
# Ok, let's plot both sets of limits
fig3,ax3 = lfu.PlotNLLScan(par_sets[0:,par_index],nll_min-pf1)
ax3.plot(par_sets[0:,par_index],nll_min-pf2,'b-',label="Profile")
ax3.hlines(-1.35,0,2.0,linestyles=u'dotted')
ax3.vlines(lim1,-10,1,linestyles=u'dashed')
leg = ax3.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ok, now we are going to construct a function that will return the negative log-likelihood for a particular set of normailzation of the draco and Galactic Diffuse flux.
Step2: Ok, now we are going to want to minize the function. I added a function to LikeFitUtils.py to do this using scipy.optimize.fmin, the function is called Minimize
Step3: Ok, so here we have seen that the best-fit value is $1.0^{-13}$cm$^{-2}$s$^{-1}$MeV$^{-1}$, but that the test statistic is only 0.47. This tells us that we have not significantly detected emission from Draco with a powerlaw index of -2.
Step4: Now, we are going to scan likelihood two different ways.
Step5: In our case it doesn't really matter because the fit is simple, but it can be useful to interpolate the likelihood between the scan points rather that recompute it.
Step6: Now we are going to solve for the 95% confidence level upper limits. In the likelihood ratio test where the test hypothesis has one extra degree of freedom, and we are at the boundary of that additional degree of freedom this occurs at the point that the log-likelihood is 1.35 less than maximum.
Step7: Ok, now let's make a plot of this.
Step8: The dashed vertical line indicates the upper limit we have computed
|
14,051
|
<ASSISTANT_TASK:>
Python Code:
print(__doc__)
import numpy as np
np.random.seed(237)
import matplotlib.pyplot as plt
noise_level = 0.1
def f(x, noise_level=noise_level):
return np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))\
+ np.random.randn() * noise_level
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
plt.legend()
plt.grid()
plt.show()
from skopt import gp_minimize
res = gp_minimize(f, # the function to minimize
[(-2.0, 2.0)], # the bounds on each dimension of x
acq_func="EI", # the acquisition function
n_calls=15, # the number of evaluations of f
n_random_starts=5, # the number of random initialization points
noise=0.1**2, # the noise level (optional)
random_state=1234) # the random seed
"x^*=%.4f, f(x^*)=%.4f" % (res.x[0], res.fun)
print(res)
from skopt.plots import plot_convergence
plot_convergence(res);
from skopt.acquisition import gaussian_ei
plt.rcParams["figure.figsize"] = (8, 14)
x = np.linspace(-2, 2, 400).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
fx = np.array([f(x_i, noise_level=0.0) for x_i in x])
for n_iter in range(5):
gp = res.models[n_iter]
curr_x_iters = res.x_iters[:5+n_iter]
curr_func_vals = res.func_vals[:5+n_iter]
# Plot true function.
plt.subplot(5, 2, 2*n_iter+1)
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([fx - 1.9600 * noise_level,
fx[::-1] + 1.9600 * noise_level]),
alpha=.2, fc="r", ec="None")
# Plot GP(x) + contours
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(curr_x_iters, curr_func_vals,
"r.", markersize=8, label="Observations")
# Adjust plot layout
plt.grid()
if n_iter == 0:
plt.legend(loc="best", prop={'size': 6}, numpoints=1)
if n_iter != 4:
plt.tick_params(axis='x', which='both', bottom='off',
top='off', labelbottom='off')
# Plot EI(x)
plt.subplot(5, 2, 2*n_iter+2)
acq = gaussian_ei(x_gp, gp, y_opt=np.min(curr_func_vals))
plt.plot(x, acq, "b", label="EI(x)")
plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')
next_x = res.x_iters[5+n_iter]
next_acq = gaussian_ei(res.space.transform([next_x]), gp,
y_opt=np.min(curr_func_vals))
plt.plot(next_x, next_acq, "bo", markersize=6, label="Next query point")
# Adjust plot layout
plt.ylim(0, 0.1)
plt.grid()
if n_iter == 0:
plt.legend(loc="best", prop={'size': 6}, numpoints=1)
if n_iter != 4:
plt.tick_params(axis='x', which='both', bottom='off',
top='off', labelbottom='off')
plt.show()
plt.rcParams["figure.figsize"] = (6, 4)
# Plot f(x) + contours
x = np.linspace(-2, 2, 400).reshape(-1, 1)
x_gp = res.space.transform(x.tolist())
fx = [f(x_i, noise_level=0.0) for x_i in x]
plt.plot(x, fx, "r--", label="True (unknown)")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate(([fx_i - 1.9600 * noise_level for fx_i in fx],
[fx_i + 1.9600 * noise_level for fx_i in fx[::-1]])),
alpha=.2, fc="r", ec="None")
# Plot GP(x) + contours
gp = res.models[-1]
y_pred, sigma = gp.predict(x_gp, return_std=True)
plt.plot(x, y_pred, "g--", label=r"$\mu_{GP}(x)$")
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.2, fc="g", ec="None")
# Plot sampled points
plt.plot(res.x_iters,
res.func_vals,
"r.", markersize=15, label="Observations")
plt.title(r"$x^* = %.4f, f(x^*) = %.4f$" % (res.x[0], res.fun))
plt.legend(loc="best", prop={'size': 8}, numpoints=1)
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Toy example
Step2: Note. In skopt, functions $f$ are assumed to take as input a 1D
Step3: Bayesian optimization based on gaussian process regression is implemented in
Step4: Accordingly, the approximated minimum is found to be
Step5: For further inspection of the results, attributes of the res named tuple
Step6: Together these attributes can be used to visually inspect the results of the
Step7: Let us now visually examine
Step8: Plot the 5 iterations following the 5 random points
Step9: The first column shows the following
|
14,052
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import fetch_olivetti_faces
dataset = fetch_olivetti_faces()
X = dataset.data
y = dataset.target
import numpy as np
np.random.seed(21)
idx_rand = np.random.randint(len(X), size=8)
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(figsize=(14, 8))
for p, i in enumerate(idx_rand):
plt.subplot(2, 4, p + 1)
plt.imshow(X[i, :].reshape((64, 64)), cmap='gray')
plt.axis('off')
n_samples, n_features = X.shape
X -= X.mean(axis=0)
X -= X.mean(axis=1).reshape(n_samples, -1)
plt.figure(figsize=(14, 8))
for p, i in enumerate(idx_rand):
plt.subplot(2, 4, p + 1)
plt.imshow(X[i, :].reshape((64, 64)), cmap='gray')
plt.axis('off')
plt.savefig('olivetti-pre.png')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=21
)
import cv2
rtree = cv2.ml.RTrees_create()
num_trees = 50
eps = 0.01
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
num_trees, eps)
rtree.setTermCriteria(criteria)
rtree.setMaxCategories(len(np.unique(y)))
rtree.setMinSampleCount(2)
rtree.setMaxDepth(1000)
rtree.train(X_train, cv2.ml.ROW_SAMPLE, y_train);
rtree.getMaxDepth()
_, y_hat = rtree.predict(X_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_hat)
from sklearn.tree import DecisionTreeClassifier
tree = DecisionTreeClassifier(random_state=21, max_depth=25)
tree.fit(X_train, y_train)
tree.score(X_test, y_test)
num_trees = 100
eps = 0.01
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS,
num_trees, eps)
rtree.setTermCriteria(criteria)
rtree.train(X_train, cv2.ml.ROW_SAMPLE, y_train);
_, y_hat = rtree.predict(X_test)
accuracy_score(y_test, y_hat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Although the original images consisted of 92 x 112 pixel images, the version available
Step2: We can plot these example images using Matplotlib, but we need to make sure we reshape
Step3: You can see how all the faces are taken against a dark background and are upright. The
Step4: We repeat this procedure for every image to make sure the feature values of every data
Step5: The preprocessed data can be visualized using the preceding code
Step6: Training and testing the random forest
Step7: Then we are ready to apply a random forest to the data
Step8: Here we want to create an ensemble with 50 decision trees
Step9: Because we have a large number of categories (that is, 40), we want to make sure the
Step10: We can play with other optional arguments, such as the number of data points required in a
Step11: However, we might not want to limit the depth of each tree. This is again, a parameter we
Step12: Then we can fit the classifier to the training data
Step13: We can check the resulting depth of the tree using the following function
Step14: This means that although we allowed the tree to go up to depth 1000, in the end only 25
Step15: We find 87% accuracy, which turns out to be much better than with a single decision tree
Step16: Not bad! We can play with the optional parameters to see if we get better. The most
|
14,053
|
<ASSISTANT_TASK:>
Python Code:
print("Happy Birthday, dear Carol!")
def happy_birthday_carol():
print("Happy Birthday, dear Carol!")
happy_birthday_carol()
happy_birthday_carol
def happy_birthday_rise():
print("Happy Birthday, dear Rise!")
happy_birthday_rise()
def happy_birthday(name):
print("Happy Birthday, dear " + name + "!")
happy_birthday("Trey")
names = ["Carol", "Rise", "Trey", "Alain"]
for name in names:
happy_birthday(name)
def happy_birthday(name):
print("Happy Birthday, dear {0}!".format(name))
happy_birthday(100)
happy_birthday(3.1415927)
happy_birthday("Ƭ̵̬̊")
def happy_birthday(name):
print("Happy Birthday, dear " + str(name) + "!")
str(u'\xff')
def print_factors(age):
for i in range(1, age + 1):
if age % i == 0:
print(i)
print_factors(32)
print_factors(33)
def cupcake_tally(guests):
Given number of party guests, returns how many cupcakes are needed.
return 2 * guests + 13
cupcake_tally(30)
cupcake_tally(guests=1)
cupcakes = cupcake_tally(10)
print("We need to make {0} cupcakes.".format(cupcakes))
def cupcake_tally(cupcakes, guests):
return cupcakes * guests + 13
cupcake_tally(4, 15)
print("On the day of Sept 20, 2014, were you at Intro to Python?")
answer = input("Answer truthfully, yes or no --> ")
if answer == "no":
answer = input("Are you sure that you weren't? Tell the truth, now --> ")
print("Were you thinking of skipping the workshop to go to Sea World?")
answer = input("Answer truthfully, yes or no --> ")
if answer == "no":
answer = input("Are you sure that you weren't? Tell the truth, now --> ")
import random
random.randint(1, 6)
vegas_die_1 = random.randint(1, 6)
vegas_die_2 = random.randint(1, 6)
print("First die: " + str(vegas_die_1))
print("Second die: " + str(vegas_die_2))
print("You rolled a " + str(vegas_die_1 + vegas_die_2))
random.choice('abcdefghijklmnopqrstuvwxyz')
def random_happy_birthday(names):
name = random.choice(names)
happy_birthday(name)
random_happy_birthday(["Alain", "Carol", "Rise", "Trey"])
from IPython.display import YouTubeVideo
# a tutorial about Python at PyCon 2014 in Montreal, Canada by Jessica McKellar
YouTubeVideo('MirG-vJOg04')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can make this code into a function like so. Let's take a quick look at what we have here.
Step2: Examine the function in detail
Step3: Don't forget those parentheses.
Step4: Here, the Python interpreter is basically saying, "The value of happy_birthday_carol is a function."
Step5: Now, let's call the function
Step6: Refactor time!
Step7: Here, we are concatenating 3 strings
Step8: Calling it over and over in a loop
Step9: Exercise
Step10: Now, the function can print happy birthday greetings for any value of name, even for people with bizarre names like
Step11: It's better to use format() just in case name isn't guaranteed to be a string.
Step12: Answer
Step13: Now you have some advanced knowledge about how Python 3 handles Unicode better than Python 2!
Step14: This year, I turned 32.
Step15: From the output of print_factors(), I now know that I'm
Step17: Example
Step18: How many cupcakes do we need for a party with 30 guests?
Step19: What about if I'm celebrating my birthday at home with just my husband, Daniel?
Step20: Ooh, 15 cupcakes sounds about right for our little party, yeah? ;)
Step21: We call our function, cupcake_tally(), and pass in 10 as the value of guests.
Step22: Note that we have 2 parameters now
Step23: Look up when you're done, so that we know to move on to the next step.
Step24: The random module has a randint function which takes two parameters. randint will return a random integer in between (or including) the two parameters. For example we can generate a random number between 1 and 6.
Step25: Let's use the random integer function to simulate rolling a die in a game
Step26: choice is a function in the random module that returns a random item from a given list or string
Step27: We could use random.choice to wish happy birthday to a random friend.
Step28: Now let's try it out with a list of names
Step29: Modules and functions can make things really simple to do. For example we could in a few lines of code get and display a YouTube video
|
14,054
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import seaborn as sns
from pylab import rcParams
from sklearn.model_selection import train_test_split
from keras.models import Model, load_model
from keras.layers import Input, Dense
from keras.callbacks import ModelCheckpoint, TensorBoard
from keras import regularizers
%matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
LABELS = ["Normal", "Fraud"]
df = pd.read_csv("creditcard.csv")
df.shape
df.isnull().values.any()
count_classes = pd.value_counts(df['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction class distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency");
frauds = df[df.Class == 1]
normal = df[df.Class == 0]
frauds.shape
plt.hist(normal.Amount, bins = 100)
plt.xlim([0,20000])
plt.ylim([0,10000])
plt.tight_layout()
f, axes = plt.subplots(nrows = 2, ncols = 1, sharex = True)
axes[0].hist(normal.Amount, bins = 100)
axes[0].set_xlim([0,20000])
axes[0].set_ylim([0,10000])
axes[0].set_title('Normal')
axes[1].hist(frauds.Amount, bins = 50)
axes[1].set_xlim([0,10000])
axes[1].set_ylim([0,200])
axes[1].set_title('Frauds')
from sklearn.preprocessing import StandardScaler
data = df.drop(['Time'], axis=1)
data['Amount'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
X_train, X_test = train_test_split(data, test_size=0.2, random_state=RANDOM_SEED)
X_train = X_train[X_train.Class == 0]
X_train = X_train.drop(['Class'], axis=1)
y_test = X_test['Class']
X_test = X_test.drop(['Class'], axis=1)
X_train = X_train.values
X_test = X_test.values
X_train.shape
input_dim = X_train.shape[1]
encoding_dim = 32
input_layer = Input(shape=(input_dim, ))
encoder = Dense(encoding_dim, activation="relu",
activity_regularizer=regularizers.l1(10e-5))(input_layer)
encoder = Dense(int(encoding_dim / 2), activation="sigmoid")(encoder)
decoder = Dense(int(encoding_dim / 2), activation='sigmoid')(encoder)
decoder = Dense(input_dim, activation='relu')(decoder)
autoencoder = Model(inputs=input_layer, outputs=decoder)
import h5py as h5py
nb_epoch = 100
batch_size = 32
autoencoder.compile(optimizer='adam',
loss='mean_squared_error',
metrics=['accuracy'])
checkpointer = ModelCheckpoint(filepath="model.h5",
verbose=0,
save_best_only=True)
tensorboard = TensorBoard(log_dir='./logs',
histogram_freq=0,
write_graph=True,
write_images=True)
history = autoencoder.fit(X_train, X_train,
epochs=nb_epoch,
batch_size=batch_size,
shuffle=True,
validation_data=(X_test, X_test),
verbose=1, callbacks=[checkpointer, tensorboard]).history
autoencoder = load_model('model.h5')
plt.plot(history['loss'])
plt.plot(history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right');
predictions = autoencoder.predict(X_test)
mse = np.mean(np.power(X_test - predictions, 2), axis=1)
error_df = pd.DataFrame({'reconstruction_error': mse,
'true_class': y_test})
error_df.describe()
fig = plt.figure()
ax = fig.add_subplot(111)
normal_error_df = error_df[(error_df['true_class']== 0) & (error_df['reconstruction_error'] < 10)]
_ = ax.hist(normal_error_df.reconstruction_error.values, bins=10)
fig = plt.figure()
ax = fig.add_subplot(111)
fraud_error_df = error_df[error_df['true_class'] == 1]
_ = ax.hist(fraud_error_df.reconstruction_error.values, bins=10)
threshold = 2.9
groups = error_df.groupby('true_class')
fig, ax = plt.subplots()
for name, group in groups:
ax.plot(group.index, group.reconstruction_error, marker='o', ms=3.5, linestyle='',
label= "Fraud" if name == 1 else "Normal")
ax.hlines(threshold, ax.get_xlim()[0], ax.get_xlim()[1], colors="r", zorder=100, label='Threshold')
ax.legend()
plt.title("Reconstruction error for different classes")
plt.ylabel("Reconstruction error")
plt.xlabel("Data point index")
plt.show();
from sklearn.metrics import (confusion_matrix)
y_pred = [1 if e > threshold else 0 for e in error_df.reconstruction_error.values]
conf_matrix = confusion_matrix(error_df.true_class, y_pred)
plt.figure(figsize=(12, 12))
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d");
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data
Step2: Exploration
Step3: 31 columns, 2 of which are Time and Amount. The rest are output from the PCA transformation. Let's check for missing values
Step4: We have a highly imbalanced dataset on our hands. Normal transactions overwhelm the fraudulent ones by a large margin. Let's look at the two types of transactions
Step5: Let's have a more graphical representation
Step6: Autoencoders
Step7: Training our Autoencoder is gonna be a bit different from what we are used to. Let's say you have a dataset containing a lot of non fraudulent transactions at hand. You want to detect any anomaly on new transactions. We will create this situation by training our model on the normal transactions, only. Reserving the correct class on the test set will give us a way to evaluate the performance of our model. We will reserve 20% of our data for testing
Step8: Building the model
Step9: Let's train our model for 200 epochs with a batch size of 32 samples and save the best performing model to a file. The ModelCheckpoint provided by Keras is really handy for such tasks. Additionally, the training progress will be exported in a format that TensorBoard understands.
Step10: Evaluation
Step11: The reconstruction error on our training and test data seems to converge nicely. Is it low enough? Let's have a closer look at the error distribution
Step12: Reconstruction error without fraud
Step13: Reconstruction error with fraud
Step14: Prediction
Step15: And see how well we're dividing the two types of transactions
Step16: That chart might be a bit deceiving. Let's have a look at the confusion matrix
|
14,055
|
<ASSISTANT_TASK:>
Python Code:
# import pandas, but call it pd. Why? Because that's What People Do.
import pandas as pd
# We're going to call this df, which means "data frame"
# It isn't in UTF-8 (I saved it from my mac!) so we need to set the encoding
df = pd.read_csv("NBA-Census-10.14.2013.csv", encoding='mac_roman')
# Let's look at all of it
df
# Look at the first few rows
df.head()
# Let's look at MORE of the first few rows
df.head(10)
# Let's look at the final few rows
df.tail(4)
# Show the 6th through the 8th rows
df[5:8]
# Get the names of the columns, just because
df.columns
# If we want to be "correct" we add .values on the end of it
df.columns.values
# Select only name and age
columns_to_show = ['Name', 'Age']
df[columns_to_show]
# Combing that with .head() to see not-so-many rows
columns_to_show = ['Name', 'Age']
df[columns_to_show].head()
# We can also do this all in one line, even though it starts looking ugly
# (unlike the cute bears pandas looks ugly pretty often)
df[['Name', 'Age']].head()
df.head()
# Grab the POS column, and count the different values in it.
df['POS'].value_counts()
# Summary statistics for Age
df['Age'].describe()
# That's pretty good. Does it work for everything? How about the money?
df['2013 $'].describe()
# Doing more describing
df['Ht (In.)'].describe()
# Take another look at our inches, but only the first few
df['Ht (In.)'].head()
# Divide those inches by 12
df['Ht (In.)'].head() / 12
# Let's divide ALL of them by 12
feet = df['Ht (In.)'] / 12
feet
# Can we get statistics on those?
feet.describe()
# Let's look at our original data again
df.head(2)
# Store a new column
df['feet'] = df['Ht (In.)'] / 12
df.head()
# Can't just use .replace
df['2013 $'].head().replace("$","")
# Need to use this weird .str thing
df['2013 $'].head().str.replace("$","")
# Can't just immediately replace the , either
df['2013 $'].head().str.replace("$","").replace(",","")
# Need to use the .str thing before EVERY string method
df['2013 $'].head().str.replace("$","").str.replace(",","")
# Describe still doesn't work.
df['2013 $'].head().str.replace("$","").str.replace(",","").describe()
# Let's convert it to an integer using .astype(int) before we describe it
df['2013 $'].head().str.replace("$","").str.replace(",","").astype(int).describe()
df['2013 $'].head().str.replace("$","").str.replace(",","").astype(int)
# Maybe we can just make them millions?
df['2013 $'].head().str.replace("$","").str.replace(",","").astype(int) / 1000000
# Unfortunately one is "n/a" which is going to break our code, so we can make n/a be 0
df['2013 $'].str.replace("$","").str.replace(",","").str.replace("n/a", "0").astype(int) / 1000000
# Remove the .head() piece and save it back into the dataframe
df['millions'] = df['2013 $'].str.replace("$","").str.replace(",","").str.replace("n/a","0").astype(int) / 1000000
df.head()
df.describe()
# This is just the first few guys in the dataset. Can we order it?
df.head(3)
# Let's try to sort them
df.sort_values(by='millions').head(3)
# It isn't descending = True, unfortunately
df.sort_values(by='millions', ascending=False).head(3)
# We can use this to find the oldest guys in the league
df.sort_values(by='Age', ascending=False).head(3)
# Or the youngest, by taking out 'ascending=False'
df.sort_values(by='Age').head(3)
# Get a big long list of True and False for every single row.
df['feet'] > 7
# We could use value counts if we wanted
above_seven_feet = df['feet'] > 7
above_seven_feet.value_counts()
# But we can also apply this to every single row to say whether YES we want it or NO we don't
df['feet'].head() > 7
# Instead of putting column names inside of the brackets, we instead
# put the True/False statements. It will only return the players above
# seven feet tall
df[df['feet'] > 7]
# Or only the guards
df[df['POS'] == 'G']
# Or only the guards who make more than 15 million
df[(df['POS'] == 'G') & (df['millions'] > 15)]
# It might be easier to break down the booleans into separate variables
is_guard = df['POS'] == 'G'
more_than_fifteen_million = df['millions'] > 15
df[is_guard & more_than_fifteen_million]
# We can save this stuff
short_players = df[df['feet'] < 6.5]
short_players
short_players.describe()
# Maybe we can compare them to taller players?
df[df['feet'] >= 6.5].describe()
df['Age'].head()
# This will scream we don't have matplotlib.
df['Age'].hist()
!pip install matplotlib
# this will open up a weird window that won't do anything
df['Age'].hist()
# So instead you run this code
%matplotlib inline
df['Age'].hist()
plt.savefig('networth.svg')
import matplotlib.pyplot as plt
plt.style.available
plt.style.use('ggplot')
df['Age'].hist()
plt.style.use('seaborn-deep')
df['Age'].hist()
plt.style.use('fivethirtyeight')
df['Age'].hist()
# Pass in all sorts of stuff!
# Most from http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html
# .range() is a matplotlib thing
df['Age'].hist(bins=20, xlabelsize=10, ylabelsize=10, range=(0,40))
df.plot(kind='scatter', x='feet', y='millions')
df.head()
# How does experience relate with the amount of money they're making?
df.plot(kind='scatter', x='EXP', y='millions')
# At least we can assume height and weight are related
df.plot(kind='scatter', x='WT', y='feet')
# At least we can assume height and weight are related
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
df.plot(kind='scatter', x='WT', y='feet', xlim=(100,300), ylim=(5.5, 8))
plt.style.use('ggplot')
df.plot(kind='scatter', x='WT', y='feet', xlim=(100,300), ylim=(5.5, 8))
# We can also use plt separately
# It's SIMILAR but TOTALLY DIFFERENT
centers = df[df['POS'] == 'C']
guards = df[df['POS'] == 'G']
forwards = df[df['POS'] == 'F']
plt.scatter(y=centers["feet"], x=centers["WT"], c='c', alpha=0.75, marker='x')
plt.scatter(y=guards["feet"], x=guards["WT"], c='y', alpha=0.75, marker='o')
plt.scatter(y=forwards["feet"], x=forwards["WT"], c='m', alpha=0.75, marker='v')
plt.xlim(100,300)
plt.ylim(5.5,8)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When you import pandas, you use import pandas as pd. That means instead of typing pandas in your code you'll type pd.
Step2: A dataframe is basically a spreadsheet, except it lives in the world of Python or the statistical programming language R. They can't call it a spreadsheet because then people would think those programmers used Excel, which would make them boring and normal and they'd have to wear a tie every day.
Step3: If we scroll we can see all of it. But maybe we don't want to see all of it. Maybe we hate scrolling?
Step4: ...but maybe we want to see more than a measly five results?
Step5: But maybe we want to make a basketball joke and see the final four?
Step6: So yes, head and tail work kind of like the terminal commands. That's nice, I guess.
Step7: It's kind of like an array, right? Except where in an array we'd say df[0] this time we need to give it two numbers, the start and the end.
Step8: NOTE
Step9: I want to know how many people are in each position. Luckily, pandas can tell me!
Step10: Now that was a little weird, yes - we used df['POS'] instead of df[['POS']] when viewing the data's details.
Step11: Unfortunately because that has dollar signs and commas it's thought of as a string. We'll fix it in a second, but let's try describing one more thing.
Step12: That's stupid, though, what's an inch even look like? What's 80 inches? I don't have a clue. If only there were some wa to manipulate our data.
Step13: Okay that was nice but unfortunately we can't do anything with it. It's just sitting there, separate from our data. If this were normal code we could do blahblah['feet'] = blahblah['Ht (In.)'] / 12, but since this is pandas, we can't. Right? Right?
Step14: That's cool, maybe we could do the same thing with their salary? Take out the $ and the , and convert it to an integer?
Step15: The average basketball player makes 3.8 million dollars and is a little over six and a half feet tall.
Step16: Those guys are making nothing! If only there were a way to sort from high to low, a.k.a. descending instead of ascending.
Step17: But sometimes instead of just looking at them, I want to do stuff with them. Play some games with them! Dunk on them~ describe them! And we don't want to dunk on everyone, only the players above 7 feet tall.
Step18: Drawing pictures
Step19: matplotlib is a graphing library. It's the Python way to make graphs!
Step20: But that's ugly. There's a thing called ggplot for R that looks nice. We want to look nice. We want to look like ggplot.
Step21: That might look better with a little more customization. So let's customize it.
Step22: I want more graphics! Do tall people make more money?!?!
|
14,056
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import patches, cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
from IPython.display import display, Latex, Markdown
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
q1_answer = r
Put your answer here, replacing this text.
$$\frac{\partial}{\partial \theta_j} Loss(\theta) = \frac{1}{n} \sum_{i=1}^n \dots$$
display(Markdown(q1_answer))
q1_answer = r
*Write your answer here, replacing this text.*
$$\frac{\partial}{\partial \theta_j} Loss(\theta) = \frac{2}{n} \sum_{i=1}^n -x_{i,j} \left(y_i - f_\theta(x_i)\right)$$
display(Markdown(q1_answer))
q2_answer = r
Put your answer here, replacing this text.
$$\frac{\partial}{\partial \theta} Loss(X) = \dots$$
display(Markdown(q2_answer))
q2_answer = r
*Write your answer here, replacing this text.*
$$\frac{\partial}{\partial \theta} Loss(X) = -\frac{2}{n} X^T (y - X^T \theta)$$
display(Markdown(q2_answer))
def linear_regression_grad(X, y, theta):
grad = ...
return grad
theta = [1, 4]
simple_X = np.vstack([np.ones(10), np.arange(10)]).T
simple_y = np.arange(10) * 3 + 2
linear_regression_grad(simple_X, simple_y, theta)
def plot_surface_3d(X, Y, Z, angle):
highest_Z = max(Z.reshape(-1,1))
lowest_Z = min(Z.reshape(-1,1))
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z,
cmap=cm.coolwarm,
linewidth=0,
antialiased=False,
rstride=5, cstride=5)
ax.zaxis.set_major_locator(LinearLocator(5))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.view_init(45, angle)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.title("Regression Loss Function")
plt.xlabel("Theta_0")
plt.ylabel("Theta_1")
plt.show()
np.random.seed(100)
X_1 = np.arange(50)/5 + 5
X = np.vstack([np.ones(50), X_1]).T
y = (X_1 * 2 + 3) + np.random.normal(0, 2.5, size=50)
plt.plot(X_1, y, ".")
angle_slider = widgets.FloatSlider(min=0, max=360, step=15, value=45)
def plot_regression_loss(angle):
t0_vals = np.linspace(-10,10,100)
t1_vals = np.linspace(-2,5,100)
theta_0,theta_1 = np.meshgrid(t0_vals, t1_vals)
thetas = np.vstack((theta_0.flatten(), theta_1.flatten()))
loss_vals = 2/X.shape[0] * sum(((y - (X @ thetas).T)**2).T)
loss_vals = loss_vals.reshape(100, -100)
plot_surface_3d(theta_0, theta_1, loss_vals, angle)
interact(plot_regression_loss, angle=angle_slider);
def gradient_descent(X, y, theta0, gradient_function, learning_rate = 0.001, max_iter=1000000, epsilon=0.001):
theta_hat = theta0 # Initial guess
for t in range(1, max_iter):
grad = gradient_function(X, y, theta_hat)
# Now for the update step
theta_hat = ...
# When our gradient is small enough, we have converged
if np.linalg.norm(grad) < epsilon:
print("converged after {} steps".format(t))
return theta_hat
# If we hit max_iter iterations
print("Warning - Failed to converge")
return theta_hat
theta_0 = [10, -1]
gradient_descent(X, y, theta_0, linear_regression_grad)
theta_0s = []
theta_1s = []
plot_idx = [1, 5, 20, 100, 500, 2000, 10000]
def plot_gradient_wrapper(X, y, theta):
grad = linear_regression_grad(X, y, theta)
theta_0s.append(theta[0])
theta_1s.append(theta[1])
t = len(theta_0s)
if t in plot_idx:
plt.subplot(121)
plt.xlim([4, 12])
plt.ylim([-2, 3])
plt.plot(theta_0s, theta_1s)
plt.plot(theta[0], theta[1], ".", color="b")
plt.title('theta(s) over time, t={}'.format(t))
plt.subplot(122)
plt.xlim([0, 20])
plt.ylim([-10, 40])
plt.plot(np.arange(50)/2.5, y, ".")
plt.plot(np.arange(50)/2.5, X @ theta)
plt.title('Regression line vs. data, t={}'.format(t))
plt.show()
return grad
gradient_descent(X, y, theta_0, plot_gradient_wrapper)
def sigmoid(t):
return 1/(1 + np.e**-t)
def logistic_regression_grad(X, y, theta):
grad = ...
return grad
theta = [0, 1]
simple_X_1 = np.hstack([np.arange(10)/10, np.arange(10)/10 + 0.75])
simple_X = np.vstack([np.ones(20), simple_X_1]).T
simple_y = np.hstack([np.zeros(10), np.ones(10)])
linear_regression_grad(simple_X, simple_y, theta)
import sklearn.datasets
data_dict = sklearn.datasets.load_breast_cancer()
data = pd.DataFrame(data_dict['data'], columns=data_dict['feature_names'])
data['malignant'] = (data_dict['target'] == 0)
data['malignant'] = data['malignant'] + 0.1*np.random.rand(len(data['malignant'])) - 0.05
X_log_1 = data['mean radius']
X_log = np.vstack([np.ones(len(X_log_1)), X_log_1.values]).T
y_log = data['malignant'].values
plt.plot(X_log_1, y_log, ".")
theta_log = ...
theta_log
y_lowX = X_log_1[sigmoid(X_log @ theta_log) < 0.5]
y_lowy = y_log[sigmoid(X_log @ theta_log) < 0.5]
y_highX = X_log_1[sigmoid(X_log @ theta_log) > 0.5]
y_highy = y_log[sigmoid(X_log @ theta_log) > 0.5]
sigrange = np.arange(5, 30, 0.05)
sigrange_X = np.vstack([np.ones(500), sigrange]).T
d_boundary = -theta_log[0]/theta_log[1]
plt.plot(sigrange, sigmoid(sigrange_X @ theta_log), ".", color="g")
plt.hlines(0.5, 5, 30, "g")
plt.vlines(d_boundary, -0.2, 1.2, "g")
plt.plot(y_lowX, y_lowy, ".", color="b")
plt.plot(y_highX, y_highy, ".", color="r")
plt.title("Classification (blue=benign, red=malignant), assuming a P=0.5 decision boundary")
n_errors = sum(y_lowy > 0.5) + sum(y_highy < 0.5)
accuracy = round((len(y_log)-n_errors)/len(y_log) * 1000)/10
print("Classification Accuracy - {}%".format(accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Understanding Gradient Descent
Step6: Question 2
Step7: Question 3
Step8: Question 4
Step9: We create some toy data in two dimensions to perform our regressions on
Step10: And plot our loss
Step11: Consider
Step12: Now let's visualize how our regression estimates change as we perform gradient descent
Step13: Question 6
Step14: And then complete the gradient function. You should get a gradient of about $[0.65, 0.61]$ for the given values $\theta$ on this example dataset.
Step15: Now let's see how we can use our gradient descent tools to fit a regression on some real data! First, let's load the breast cancer dataset from lecture, and plot breast mass radius versus category - malignant or benign. As in lecture, we jitter the response variable to avoid overplotting.
Step16: Question 8
Step17: With optimal $\theta$ chosen, we can now plot our logistic curve and our decision boundary, and look at how our model categorizes our data
Step18: And, we can calculate our classification accuracy.
|
14,057
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
!pip install -q papermill
!pip install -q matplotlib
!pip install -q networkx
import os
import tfx_utils
%matplotlib notebook
def _make_default_sqlite_uri(pipeline_name):
return os.path.join(os.environ['HOME'], 'airflow/tfx/metadata', pipeline_name, 'metadata.db')
def get_metadata_store(pipeline_name):
return tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(_make_default_sqlite_uri(pipeline_name))
pipeline_name = 'taxi'
pipeline_db_path = _make_default_sqlite_uri(pipeline_name)
print('Pipeline DB:\n{}'.format(pipeline_db_path))
store = get_metadata_store(pipeline_name)
# Visualize properties of example artifacts
store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.EXAMPLES)
# Visualize stats for data
store.display_stats_for_examples(<insert ID here>)
# Try different IDs here. Click stop in the plot when changing IDs.
%matplotlib notebook
store.plot_artifact_lineage(<insert ID here>)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now print out the data artifacts
Step2: Now visualize the dataset features.
Step3: Now plot the artifact lineage
|
14,058
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import zarr
zarr.__version__
import numcodecs
numcodecs.__version__
z = zarr.empty(10, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())
z
z.info
z[0] = 'foo'
z[1] = b'bar' # msgpack doesn't support bytes objects correctly
z[2] = 1
z[3] = [2, 4, 6, 'baz']
z[4] = {'a': 'b', 'c': 'd'}
a = z[:]
a
z = zarr.empty(10, chunks=5, dtype=object)
z = zarr.empty(10, chunks=5, dtype=object, filters=[numcodecs.MsgPack()])
z = zarr.empty(10, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())
z._filters = None # try to live dangerously, manually wipe filters
z[0] = 'foo'
from numcodecs.tests.common import greetings
z = zarr.array(greetings, chunks=5, dtype=object, object_codec=numcodecs.MsgPack())
z[:]
z._filters = [] # try to live dangerously, manually wipe filters
z[:]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: API changes in Zarr version 2.2
Step2: To maintain backwards compatibility with previously-created data, the object codec is treated as a filter and inserted as the first filter in the chain
Step3: If no object_codec is provided, a ValueError is raised
Step4: For API backward-compatibility, if object codec is provided via filters, issue a warning but don't raise an error.
Step5: If a user tries to subvert the system and create an object array with no object codec, a runtime check is added to ensure no object arrays are passed down to the compressor (which could lead to nasty errors and/or segfaults)
Step6: Here is another way to subvert the system, wiping filters after storing some data. To cover this case a runtime check is added to ensure no object arrays are handled inappropriately during decoding (which could lead to nasty errors and/or segfaults).
|
14,059
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
# A first attempt (we ignore the target for now)
image_size = (1280, 1024) # Size of background in pixels
nDistractors = 10 # Number of distractors
distractor_size = 500
# Generate positions where to put the distractors
xr = np.random.randint(0, image_size[0], nDistractors)
yr = np.random.randint(0, image_size[1], nDistractors)
plt.scatter(xr, yr, s=distractor_size ,c='b',marker='v')
plt.axis([0, image_size[0], 0, image_size[1]])
plt.show()
# Divide the plot into a 10 x 8 grid, and allow only one distractor in each grid
image_size = [1280, 1024]
grid_size = [10, 8]
grid_size_pixels_x = image_size[0] / grid_size[0]
grid_size_pixels_y = image_size[1] / grid_size[1]
x_c = np.arange(grid_size_pixels_x / 2.0, image_size[0], grid_size_pixels_x)
y_c = np.arange(grid_size_pixels_y / 2.0, image_size[1], grid_size_pixels_y)
# Plot the positions of the new grid
xx = np.ones(len(x_c))
yy = np.ones(len(y_c))
plt.plot(x_c, xx, 'ro')
plt.plot(yy, y_c, 'bo')
# plt.axis([0, image_size[0], 0, image_size[1]])
plt.show()
# Meshgrid creats the whole grid (you could also use a double for-)
x_all, y_all = np.meshgrid(x_c, y_c)
# Reshape the positions into a N x 2 array (N rows, 2 columns), to make it easier to work with later
xy_all = np.vstack((x_all.flatten(), y_all.flatten())).T
# Plot all grid elements
plt.figure()
plt.plot(xy_all[:, 0], xy_all[:, 1], 'g+')
plt.show()
import time # Used to animate below
nSelect = 10
# Randomly change the positions of the locations in the array
np.random.shuffle(xy_all)
# Plot the result (looks much better!)
plt.scatter(xy_all[:nSelect, 0], xy_all[:nSelect, 1], s=distractor_size ,c='b',marker='v')
plt.axis([0, image_size[0], 0, image_size[1]])
plt.show()
# Example of how dictionaries are defined...
d1 = {'key1': 4, 'key2': 'my_value2'}
#... and how the values are accessed from them
print(d1['key2'])
# Unlike lists and arrays, variables in dictionaries are not ordered, so you can't do, e.g.,
# print(d1[0])
# Specify the size and color of the background. Use a dictionary
background = {'size':np.array([1280, 1024]),'color':0.5} # zero - black, 1 - white
# Specify the target
target = {'shape':'^', 'size':10, 'color':'r', 'face_color':'r'}
# Specify the distractors
distractor = {'shape':'o', 'size':10, 'color':'b', 'number_of':10}
# Test prints
print(background['color'], distractor['size'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two problem are visible
Step2: New problem. Seems like only the x-, and y-, coordinates of the grid elements were defined, but not the locations for ALL grid elements. How can this be done?
Step3: Now we know where distractors can be placed. But we don't want to put a distractor at each grid position, but draw a number of them (say 10) at random. One way to do this is the 'shuffle' the array, and then select the 10 first elements.
Step4: Dictionaries
Step5: In this assignment, the dictionaries contain information about the visual search images
|
14,060
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=2)
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
digits = load_digits()
X, y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=1,
stratify=y,
test_size=0.25)
classifier = LinearSVC(random_state=1).fit(X_train, y_train)
y_test_pred = classifier.predict(X_test)
print("CCR: %f"%(classifier.score(X_test, y_test)))
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_test_pred)
plt.imshow(confusion_matrix(y_test, y_test_pred), cmap="Blues")
plt.colorbar(shrink=0.8)
plt.xticks(range(10))
plt.yticks(range(10))
plt.xlabel("Etiqueta predicha")
plt.ylabel("Etiqueta real");
from sklearn.metrics import classification_report
print(classification_report(y_test, y_test_pred))
np.bincount(y) / y.shape[0]
X, y = digits.data, digits.target == 3
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
cross_val_score(SVC(), X, y)
from sklearn.dummy import DummyClassifier
cross_val_score(DummyClassifier(strategy="most_frequent"), X, y)
np.bincount(y) / y.shape[0]
from sklearn.metrics import roc_curve, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
for gamma in [.01, .095, 1]:
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate (recall)")
svm = SVC(gamma=gamma).fit(X_train, y_train)
decision_function = svm.decision_function(X_test)
fpr, tpr, _ = roc_curve(y_test, decision_function)
acc = svm.score(X_test, y_test)
auc = roc_auc_score(y_test, svm.decision_function(X_test))
plt.plot(fpr, tpr, label="gamma: %.2f (acc:%.2f auc:%.2f)" % (gamma, acc, auc), linewidth=3)
plt.legend(loc="best");
from sklearn.model_selection import cross_val_score
cross_val_score(SVC(), X, y, scoring="roc_auc")
from sklearn.linear_model import LogisticRegression
cross_val_score(LogisticRegression(), X, y, scoring="roc_auc")
from sklearn.metrics import get_scorer
print(get_scorer('accuracy'))
def my_accuracy_scoring(est, X, y):
    """Custom scorer: fraction of samples where est.predict(X) equals y."""
    predictions = est.predict(X)
    return np.mean(predictions == y)
cross_val_score(SVC(), X, y, scoring=my_accuracy_scoring)
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2])
y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2])
confusion_matrix(y_true, y_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vemos que hemos predicho alrededor de un 95% de patrones de forma correcta. Para problemas multi-clase, a veces es muy útil saber qué clases son más difíciles de predecir y cuáles más fáciles o incluso qué tipo de errores son los más comunes. Una forma de tener más información en este sentido es la matriz de confusión, que muestra para cada clase (filas) cuántas veces se predicen qué clases (columnas).
Step2: A veces un gráfico es más fácil de leer
Step3: Podemos ver que la mayoría de valores están en la diagonal principal, lo que significa que predecimos casi todos los ejemplos correctamente. Las entradas que no están en la diagonal principal nos muestran que hay bastantes ochos clasificados como unos, y que los nueves son fácilmente confundibles con el resto de clases.
Step4: Estas métricas son especialmente útiles en dos casos particulares
Step5: Para probar este escenario, vamos a clasificar el dígito 3 contra el resto (el problema de clasificación es un problema binario, ¿es este dígito un 3?)
Step6: Ahora vamos a aplicar validación cruzada con un clasificador para ver que tal funciona
Step7: Nuestro clasificador tiene un 90% de acierto siempre. ¿Es bueno o malo? Ten en cuenta que el 90% de los dígitos no son un 3. Vamos a ver qué tal funciona un clasificador simple, que siempre predice la clase más frecuente (ZeroR)
Step8: También un 90%, como esperábamos. Por tanto, podemos pensar que el clasificador SVC no es demasiado bueno, ya que funciona igual que una estrategia que ni siquiera mira los datos de entrada. De todas formas, esto sería sacar conclusiones demasiado rápido ya que, en general, el accuracy no es una buena medida de rendimiento para bases de datos no balanceadas.
Step9: Curvas ROC
Step10: Si el valor de umbral es muy bajo, tendremos muchos falsos positivos y por tanto un TPR muy alto y un FPR muy alto (porque casi todo lo clasificamos como positivo). Si usamos un umbral muy alto, habrá muy pocos falsos positivos (casi todo se predice como negativo), y por tanto el TPR será bajo y el FPR también. Por lo que, en general, la curva va desde arriba a la derecha hasta abajo a la izquierda. Una línea diagonal indica que el rendimiento es aleatorio, mientras que el objetivo ideal sería que la curva se desplace arriba a la izquierda. Esto significa que el clasificador daría siempre valores más altos de la función de decisión a los ejemplos positivos que a los ejemplos negativos.
Step11: Compara el rendimiento con el DummyClassifier
Step12: Funciones de rendimiento por defecto y personalizadas
Step13: También es posible escribir tu propia medida de rendimiento. En lugar de una cadena, puedes pasar un nombre de función como argumento scoring, esto es, un objeto con un método __call__ (o lo que es lo mismo, una función). Esa función debe recibir un modelo, un conjunto de características X_test y un conjutno de etiquetas y_test, y devolver un valor real. Los valores más altos deberían indicar que el modelo es mejor.
Step14: <div class="alert alert-success">
|
14,061
|
<ASSISTANT_TASK:>
Python Code:
def get_glove(name):
    """Parse a GloVe text file and cache its vectors, words, and word index.

    Reads ``glove.<name>.txt`` from the global ``path`` directory, then writes
    three artifacts under the global ``res_path``: the vector matrix
    (``<name>.dat`` via ``save_array``), the word list (``<name>_words.pkl``)
    and the word -> row-index mapping (``<name>_idx.pkl``).
    """
    with open(path + 'glove.' + name + '.txt', 'r') as f:
        lines = [line.split() for line in f]
    words = [d[0] for d in lines]
    # np.stack requires a sequence of arrays, not a generator (generator
    # support was deprecated in NumPy 1.16 and later removed), so build a
    # list first.
    vecs = np.stack([np.array(d[1:], dtype=np.float32) for d in lines])
    wordidx = {o: i for i, o in enumerate(words)}
    save_array(res_path + name + '.dat', vecs)
    pickle.dump(words, open(res_path + name + '_words.pkl', 'wb'))
    pickle.dump(wordidx, open(res_path + name + '_idx.pkl', 'wb'))
# Before running the following make sure you put the files under the right path.
# see https://nlp.stanford.edu/projects/glove/
get_glove('6B.50d')
get_glove('6B.100d')
get_glove('6B.200d')
get_glove('6B.300d')
def load_glove(loc):
    """Load cached GloVe artifacts: (vector matrix, word list, word->index)."""
    vectors = load_array(loc + '.dat')
    with open(loc + '_words.pkl', 'rb') as f_words:
        words = pickle.load(f_words)
    with open(loc + '_idx.pkl', 'rb') as f_idx:
        wordidx = pickle.load(f_idx)
    return vectors, words, wordidx
vecs, words, wordidx = load_glove(res_path+'6B.50d')
vecs.shape
' '.join(words[:25])
def w2v(w): return vecs[wordidx[w]]
w2v('of')
# use only for Python2
# reload(sys)
# sys.setdefaultencoding('utf8')
tsne = TSNE(n_components=2, random_state=0)
Y = tsne.fit_transform(vecs[:500])
start=0; end=350
dat = Y[start:end]
plt.figure(figsize=(15,15))
plt.scatter(dat[:, 0], dat[:, 1])
for label, x, y in zip(words[start:end], dat[:, 0], dat[:, 1]):
plt.text(x,y,label, color=np.random.rand(3)*0.7,
fontsize=14)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Looking at the vectors
Step2: Here's the first 25 "words" in glove.
Step3: This is how you can look up a word vector.
Step4: Just for fun, let's take a look at a 2d projection of the first 350 words, using T-SNE.
|
14,062
|
<ASSISTANT_TASK:>
Python Code:
def caps(val):
    """Return double the provided value.

    Works for both numbers (arithmetic doubling) and sequences such as
    strings (repetition), since both support ``* 2``.
    """
    # The original docstring had its quotes stripped, leaving a bare
    # statement that is a SyntaxError; restore proper quoting.
    return val * 2
a = caps("TEST ")
print(a)
print(caps.__doc__)
a = caps(1234)
print(a)
def isValid(data):
    """Return True if the container holds the value 10, else False."""
    # `in` already yields a bool, so the explicit True/False branches
    # of the original are unnecessary.
    return 10 in data
a = isValid([10, 200, 33, "asf"])
print(a)
a = isValid((10,))
print(a)
isValid((10,))
a = isValid((110,))
print(a)
def isValid_new(data):
    """Return whether any element of the container equals 10."""
    return any(element == 10 for element in data)
print(isValid_new([10, 200, 33, "asf"]))
a = isValid_new((110,))
print(a)
def fatorial(n):
    """Return n! computed iteratively; inputs below 1 are treated as 1."""
    result = 1
    factor = 2
    while factor <= n:
        result *= factor
        factor += 1
    return result
# Testing...
for i in range(1, 6):
print (i, '->', fatorial(i))
def factorial(num):
    """Return num! implemented with recursion (num <= 1 yields 1)."""
    # The original docstring lost its quotes in the dump, producing a bare
    # statement; restore proper quoting.
    if num <= 1:
        return 1
    return num * factorial(num - 1)
# Testing factorial()
print (factorial(5))
# 5 * (4 * (3 * (2) * (1))
def fib(n):
    """Return the n-th Fibonacci number, recursively.

    Recurrence: fib(n) = fib(n - 1) + fib(n - 2) for n > 1, else 1
    (so fib(0) == fib(1) == 1). The original docstring had its quotes
    stripped; restored here.
    """
    if n > 1:
        return fib(n - 1) + fib(n - 2)
    return 1
# Show Fibonacci from 1 to 5
for i in [1, 2, 3, 4, 5]:
print (i, '=>', fib(i))
def fib(n):
    """Return the n-th Fibonacci number iteratively (fib(0) == fib(1) == 1)."""
    # Seed with the first two values, then extend until index n exists.
    seq = [1, 1]
    while len(seq) <= n:
        seq.append(seq[-1] + seq[-2])
    return seq[n]
# Show Fibonacci from 1 to 5
for i in [1, 2, 3, 4, 5]:
print (i, '=>', fib(i))
def test(a, b):
    """Print both arguments, then return their sum."""
    print(a, b)
    total = a + b
    return total
print(test(1, 2))
test(b=1, a=2)
def test_abc(a, b, c):
    """Print the three arguments and return their sum."""
    print(a, b, c)
    result = a + b + c
    return result
try:
test_abc(b=1, a=2, 3)
except SyntaxError as e:
print("error", e)
test_abc(2, c=3, b=2)
test_abc(2, b=2, c=3)
def test_new(a, b, c):
    """Placeholder stub: accepts three arguments, does nothing, returns None."""
    pass
def test(a, b):
    """Print the inputs and return their squares as a tuple (a*a, b*b)."""
    print(a, b)
    squares = (a * a, b * b)
    return squares
x, a = test(2, 5)
print(x)
print(type(x))
print(a)
print(type(a))
print(type(test(2, 5)))
def test(a, b):
    """Print the inputs and return the tuple (a*a, b*b, a*b)."""
    print(a, b)
    sq_a, sq_b = a * a, b * b
    return sq_a, sq_b, a * b
x = test(2 , 5)
print(x)
print(type(x))
def test(a, b):
    """Print the inputs; return a mixed-type tuple (a*a, b*b, "asdf")."""
    print(a, b)
    return (a * a, b * b, "asdf")
x = test(2 , 5)
print(x)
print(type(x))
def test(a=100, b=1000):
    """Print the (possibly defaulted) arguments and return them as a tuple."""
    print(a, b)
    pair = (a, b)
    return pair
x = test(2, 5)
print(x)
print(test(10))
def test(a=100, b=1000):
    """Echo the arguments to stdout, then hand them back unchanged."""
    values = (a, b)
    print(*values)
    return values
print(test(b=10))
print(test(101))
def test(d, c, a=100, b=1000):
    """Print and return the four arguments in the order d, c, a, b."""
    result = (d, c, a, b)
    print(*result)
    return result
x = test(c=2, d=10, b=5)
print(x)
x = test(1, 2, 3, 4)
print(x)
print(test(10, 2))
def rgb_html(r=0, g=0, b=0):
Converts R, G, B to #RRGGBB
return '#%02x%02x%02x' % (r, g, b)
def html_rgb(color='#000000'):
Converts #RRGGBB em R, G, B
if color.startswith('#'): color = color[1:]
r = int(color[:2], 16)
g = int(color[2:4], 16)
b = int(color[4:], 16)
return r, g, b # a sequence
print (rgb_html(200, 200, 255))
print (rgb_html(b=200, g=200, r=255)) # what's happened?
print (html_rgb('#c8c8ff'))
def test(d, a=100, c, b=1000):
    # NOTE: this definition is deliberately invalid teaching material --
    # a parameter without a default (c) may not follow one with a default (a),
    # so Python raises SyntaxError here; the corrected signature appears in
    # the next definition below.
    print(d, c, a, b)
    return d, c, a, b
x = test(c=2, d=10, b=5)
print(x)
x = test(1, 2, 3, 4)
print(x)
print(test(10, 2))
def test(c, d, a=100, b=1000):
    """Print and return (d, c, a, b) -- note the first two come back swapped."""
    swapped = (d, c, a, b)
    print(*swapped)
    return swapped
x = test(c=2, d=10, b=5)
print(x)
x = test(1, 2, 3, 4)
print(x)
print(test(10, 2))
# *args - arguments without name (list)
# **kargs - arguments with name (ditcionary)
def func(*args, **kargs):
    """Show that unnamed args arrive as a tuple and named args as a dict."""
    positional = args
    named = kargs
    print(positional)
    print(named)
func('weigh', 10, unit='k')
def func(*args, **kargs):
    """Print positional arguments (as a tuple) and keyword arguments (as a dict)."""
    print (args)
    print (kargs)
a = {
"name": "Mohan kumar Shah",
"age": 24 + 1
}
func('weigh', 10, unit='k', val=a)
def func(*args):
    """Print all positional arguments, collected into a single tuple."""
    print(args)
func('weigh', 10, "test")
data = [(4, 3), (5, 1), (7, 2), (9, 0)]
# Comparing by the last element
def _cmp(x, y):
return cmp(x[-1], y[-1])
print ('List:', data)
print (eval('12. / 2 + 3.3'))
def listing(lst):
    """Print each element of the iterable on its own line (keys, for a dict)."""
    for element in lst:
        print(element)
d = {"Mayank Johri":40, "Janki Mohan Johri":68}
listing(d)
d = {
"name": "Mohan",
"age": 24
}
a = {
"name": "Mohan kumar Shah",
"age": 24 + 1
}
def process_dict(d=None):
    """Print the given dict, falling back to the module-level dict ``a``.

    Uses ``None`` as a sentinel instead of ``d=a``: binding a mutable
    object as the default would share (and allow accidental mutation of)
    the single dict captured at definition time.
    """
    if d is None:
        d = a
    print(d)
process_dict(d)
process_dict()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions
Step3: In the above example, we have caps as function, which takes val as argument and returns val * 2.
Step4: Functions can return any data type, next example returns a boolean value.
Step5: Example (factorial without recursion)
Step7: Example (factorial with recursion)
Step9: Example (Fibonacci series with recursion)
Step10: Example (Fibonacci series without recursion)
Step11: NOTE
Step12: Functions can also not return anything like in the below example
Step13: Functions can also return multiple values, usually in form of tuple.
Step16: Example (RGB conversion)
Step17: Note
Step18: Observations
Step19: In the example, kargs will receive the named arguments and args will receive the others.
Step20: Python also has a builtin function eval(), which evaluates code (source or object) and returns the value.
|
14,063
|
<ASSISTANT_TASK:>
Python Code:
from functools import reduce
import os
import subprocess
import tempfile
import numpy as np
from planet import api
from planet.api import downloader, filters
import rasterio
from skimage import feature, filters
from sklearn.ensemble import RandomForestClassifier
# load local modules
from utils import Timer
import visual
#uncomment if visual is in development
# import importlib
# importlib.reload(visual)
# Import functionality from local notebooks
# Import functionality from the classification notebook. The original line
# continuation was missing a comma after `get_unmasked_count`, which is a
# SyntaxError; parenthesized imports avoid fragile backslash continuations.
from ipynb.fs.defs.drc_roads_classification import (
    get_label_mask, get_unmasked_count,
    load_4band, get_feature_bands, combine_masks, num_valid, perc_masked,
    bands_to_X, make_same_size_samples, classify_forest, y_to_band,
    classified_band_to_rgb)
# uncomment to see what mosaics are available and to make sure the PLMosaic driver is working
# !gdalinfo "PLMosaic:"
# get mosaic names for July 2017 to March 2018
mosaic_dates = [('2017', '{0:02d}'.format(m)) for m in range(7, 13)] + \
[('2018', '{0:02d}'.format(m)) for m in range(1, 4)]
mosaic_names = ['global_monthly_{}_{}_mosaic'.format(yr, mo)
for (yr, mo) in mosaic_dates]
def get_mosaic_filename(mosaic_name):
    """Return the local download path ('data/<mosaic_name>.tif') for a mosaic."""
    filename = '{}.tif'.format(mosaic_name)
    return os.path.join('data', filename)
for name in mosaic_names:
print('{} -> {}'.format(name, get_mosaic_filename(name)))
aoi_filename = 'pre-data/aoi.geojson'
def _gdalwarp(input_filename, output_filename, options):
    """Run gdalwarp with the given options, always overwriting the output."""
    cmd = ['gdalwarp', *options, '-overwrite', input_filename, output_filename]
    print(' '.join(cmd))
    subprocess.check_call(cmd)
# lossless compression of an image
def _compress(input_filename, output_filename):
commands = ['gdal_translate',
'-co', 'compress=LZW',
'-co', 'predictor=2',
input_filename,
output_filename]
print(' '.join(commands))
subprocess.check_call(commands)
def download_mosaic(mosaic_name,
                    output_filename,
                    crop_filename,
                    overwrite=False,
                    compress=True):
    """Download a Planet mosaic via GDAL's PLMosaic driver, cropped to an AOI.

    Skips the download when ``output_filename`` already exists (unless
    ``overwrite`` is True). With ``compress`` True, warps into a temporary
    VRT and then writes an LZW-compressed copy; otherwise warps directly
    to ``output_filename``.
    """
    # typically gdalwarp would require `-oo API_KEY={PL_API_KEY}`
    # but if the environmental variable PL_API_KEY is set, gdal will use that
    options = ['-cutline', crop_filename, '-crop_to_cutline',
               '-oo', 'use_tiles=YES']
    # use PLMosaic driver
    input_name = 'PLMosaic:mosaic={}'.format(mosaic_name)
    # check to see if output file exists, if it does, do not warp
    if os.path.isfile(output_filename) and not overwrite:
        print('{} already exists. Aborting download of {}.'.format(output_filename, mosaic_name))
    elif compress:
        # warp into a throwaway VRT first, then compress it into the final
        # file; the tempfile is removed automatically on context exit
        with tempfile.NamedTemporaryFile(suffix='.vrt') as vrt_file:
            options += ['-of', 'vrt']
            _gdalwarp(input_name, vrt_file.name, options)
            _compress(vrt_file.name, output_filename)
    else:
        _gdalwarp(input_name, output_filename, options)
for name in mosaic_names:
download_mosaic(name, get_mosaic_filename(name), aoi_filename)
forest_img = os.path.join('pre-data', 'forestroad_forest.tif')
road_img = os.path.join('pre-data', 'forestroad_road.tif')
forest_mask = get_label_mask(forest_img)
print(get_unmasked_count(forest_mask))
road_mask = get_label_mask(road_img)
print(get_unmasked_count(road_mask))
forest_mask.shape
# specify the training dataset mosaic image file
image_file = get_mosaic_filename(mosaic_names[0])
image_file
# this is the georeferenced image that was used to create the forest and non-forest label images
label_image = 'pre-data/roads.tif'
# get label image crs, bounds, and pixel dimensions
with rasterio.open(label_image, 'r') as ref:
dst_crs = ref.crs['init']
(xmin, ymin, xmax, ymax) = ref.bounds
width = ref.width
height = ref.height
print(dst_crs)
print((xmin, ymin, xmax, ymax))
print((width, height))
# this is the warped training mosaic image we will create with gdal
training_file = os.path.join('data', 'mosaic_training.tif')
# use gdalwarp to warp mosaic image to match label image
!gdalwarp -t_srs $dst_crs \
-te $xmin $ymin $xmax $ymax \
-ts $width $height \
-overwrite $image_file $training_file
feature_bands = get_feature_bands(training_file)
print(feature_bands[0].shape)
total_mask = combine_masks(feature_bands)
print(total_mask.shape)
# combine the label masks with the valid data mask and then create X dataset for each label
total_forest_mask = np.logical_or(total_mask, forest_mask)
print('{} valid pixels ({}% masked)'.format(num_valid(total_forest_mask),
round(perc_masked(total_forest_mask), 2)))
X_forest = bands_to_X(feature_bands, total_forest_mask)
total_road_mask = np.logical_or(total_mask, road_mask)
print('{} valid pixels ({}% masked)'.format(num_valid(total_road_mask),
round(perc_masked(total_road_mask), 2)))
X_road = bands_to_X(feature_bands, total_road_mask)
[X_forest_sample, X_road_sample] = \
make_same_size_samples([X_forest, X_road], size_percent=100)
print(X_forest_sample.shape)
print(X_road_sample.shape)
forest_label_value = 0
road_label_value = 1
X_training = np.concatenate((X_forest_sample, X_road_sample), axis=0)
y_training = np.array(X_forest_sample.shape[0] * [forest_label_value] + \
X_road_sample.shape[0] * [road_label_value])
print(X_training.shape)
print(y_training.shape)
with Timer():
y_band_rf = classify_forest(image_file, X_training, y_training)
visual.plot_image(classified_band_to_rgb(y_band_rf),
title='Classified Training Image (Random Forests)',
figsize=(15, 15))
classified_bands_file = os.path.join('data', 'classified_mosaic_bands.npz')
def save_to_cache(classified_bands, mosaic_names):
    """Save the classified masked bands to the compressed .npz cache file.

    ``np.savez`` stores masked arrays as plain arrays, so each band's mask is
    saved separately under the key '<name>_msk' for reassembly on load.
    Writes to the module-level path ``classified_bands_file``.
    """
    save_bands = dict((s, classified_bands[s])
                      for s in mosaic_names)
    # masked arrays are saved as just arrays, so save mask for later
    save_bands.update(dict((s+'_msk', classified_bands[s].mask)
                           for s in mosaic_names))
    np.savez_compressed(classified_bands_file, **save_bands)
def load_from_cache():
    """Load classified bands from the .npz cache, rebuilding masked arrays.

    Returns a dict mapping scene id -> np.ma masked array, pairing each saved
    data array with its companion '<id>_msk' mask array (see save_to_cache).
    Reads the module-level path ``classified_bands_file``.
    """
    classified_bands = np.load(classified_bands_file)
    # data keys are the scene ids; keys ending in '_msk' hold the masks
    scene_ids = [k for k in classified_bands.keys() if not k.endswith('_msk')]
    # reform masked array from saved array and saved mask
    classified_bands = dict((s, np.ma.array(classified_bands[s], mask=classified_bands[s+'_msk']))
                            for s in scene_ids)
    return classified_bands
use_cache = True
if use_cache and os.path.isfile(classified_bands_file):
print('using cached classified bands')
classified_bands = load_from_cache()
else:
with Timer():
def classify(mosaic_name):
img = get_mosaic_filename(mosaic_name)
# we only have two values, 0 and 1. Convert to uint8 for memory
band = (classify_forest(img, X_training, y_training)).astype(np.uint8)
return band
classified_bands = dict((s, classify(s)) for s in mosaic_names)
# save to cache
save_to_cache(classified_bands, mosaic_names)
# Decimate classified arrays for memory conservation
def decimate(arry, num=8):
    """Return a copy of the 2D array keeping every num-th row and column."""
    step = num
    thinned = arry[::step, ::step]
    return thinned.copy()
do_visualize = True # set to True to view images
if do_visualize:
for mosaic_name, classified_band in classified_bands.items():
visual.plot_image(classified_band_to_rgb(decimate(classified_band)),
title='Classified Image ({})'.format(mosaic_name),
figsize=(8, 8))
# labeled change images, not georeferenced
change_img_orig = os.path.join('pre-data', 'difference_change.tif')
nochange_img_orig = os.path.join('pre-data', 'difference_nochange.tif')
# georeferenced source image
src_img = os.path.join('pre-data', 'difference.tif')
# destination georeferened label images
change_img_geo = os.path.join('data', 'difference_change.tif')
nochange_img_geo = os.path.join('data', 'difference_nochange.tif')
# get crs and transform from the georeferenced source image
with rasterio.open(src_img, 'r') as src:
src_crs = src.crs
src_transform = src.transform
# create the georeferenced label images
for (label_img, geo_img) in ((change_img_orig, change_img_geo),
(nochange_img_orig, nochange_img_geo)):
with rasterio.open(label_img, 'r') as src:
profile = {
'width': src.width,
'height': src.height,
'driver': 'GTiff',
'count': src.count,
'compress': 'lzw',
'dtype': rasterio.uint8,
'crs': src_crs,
'transform': src_transform
}
with rasterio.open(geo_img, 'w', **profile) as dst:
dst.write(src.read())
# get dest crs, bounds, and shape from mosaic image
image_file = get_mosaic_filename(mosaic_names[0])
with rasterio.open(image_file, 'r') as ref:
dst_crs = ref.crs['init']
(xmin, ymin, xmax, ymax) = ref.bounds
width = ref.width
height = ref.height
print(dst_crs)
print((xmin, ymin, xmax, ymax))
print((width, height))
# destination matched images
change_img = os.path.join('data', 'mosaic_difference_change.tif')
nochange_img = os.path.join('data', 'mosaic_difference_nochange.tif')
# resample and resize to match mosaic
!gdalwarp -t_srs $dst_crs \
-te $xmin $ymin $xmax $ymax \
-ts $width $height \
-overwrite $change_img_geo $change_img
!gdalwarp -t_srs $dst_crs \
-te $xmin $ymin $xmax $ymax \
-ts $width $height \
-overwrite $nochange_img_geo $nochange_img
change_mask = get_label_mask(change_img)
print(get_unmasked_count(change_mask))
nochange_mask = get_label_mask(nochange_img)
print(get_unmasked_count(nochange_mask))
# combine the label masks with the valid data mask and then create X dataset for each label
classified_bands_arrays = classified_bands.values()
total_mask = combine_masks(classified_bands_arrays)
total_change_mask = np.logical_or(total_mask, change_mask)
print('Change: {} valid pixels ({}% masked)'.format(num_valid(total_change_mask),
round(perc_masked(total_change_mask), 2)))
X_change = bands_to_X(classified_bands_arrays, total_change_mask)
total_nochange_mask = np.logical_or(total_mask, nochange_mask)
print('No Change: {} valid pixels ({}% masked)'.format(num_valid(total_nochange_mask),
round(perc_masked(total_nochange_mask), 2)))
X_nochange = bands_to_X(classified_bands_arrays, total_nochange_mask)
# create a training sample set that is equal in size for all categories
# and uses 10% of the labeled change pixels
[X_change_sample, X_nochange_sample] = \
make_same_size_samples([X_change, X_nochange], size_percent=10)
print(X_change_sample.shape)
print(X_nochange_sample.shape)
change_label_value = 0
nochange_label_value = 1
X_rf = np.concatenate((X_change_sample, X_nochange_sample), axis=0)
y_rf = np.array(X_change_sample.shape[0] * [change_label_value] + \
X_nochange_sample.shape[0] * [nochange_label_value])
print(X_rf.shape)
print(y_rf.shape)
# NOTE: This relative import isn't working so the following code is directly
# copied from the temporal analysis notebook
# from ipynb.fs.defs.drc_roads_temporal_analysis import classify_change
def classify_change(classified_bands, mask, X_training, y_training):
    """Fit a random forest on (X_training, y_training), then classify every
    unmasked pixel of the stacked classified bands.

    Returns the predicted labels reassembled into a 2D band via y_to_band.
    """
    clf = RandomForestClassifier()
    with Timer():
        clf.fit(X_training, y_training)
    # Bug fix: use the ``mask`` parameter. The original body ignored it and
    # silently captured the module-level ``total_mask`` instead, so callers
    # passing a different mask got wrong results.
    X = bands_to_X(classified_bands, mask)
    with Timer():
        y_pred = clf.predict(X)
    y_band = y_to_band(y_pred, mask)
    return y_band
with Timer():
y_band_rf = classify_change(classified_bands_arrays, total_mask, X_rf, y_rf)
visual.plot_image(classified_band_to_rgb(y_band_rf), title='RF Classified Image', figsize=(25, 25))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download Mosaics
Step2: Classify Mosaics into Forest and Non-Forest
Step3: Warp Mosaic to Match Label Masks
Step4: Create Training Datasets
Step5: Classify Training Image
Step6: Classification on All Mosaic Images
Step7: These classified mosaics look a lot better than the classified PSOrthoTile strips. This bodes well for the quality of our change detection results!
Step8: Match Georeferenced Label Images to Mosaic Images
Step9: Load Label Masks
Step10: Get Features from Labels
Step11: Classify Change
|
14,064
|
<ASSISTANT_TASK:>
Python Code:
random_seed = 2000
data_in_shape = (3, 6)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Dense(4))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
np.random.seed(random_seed)
data_in = 2 * np.random.random(data_in_shape) - 1
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(random_seed + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
DATA['wrappers.TimeDistributed.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
random_seed = 2000
data_in_shape = (5, 4, 4, 2)
layer_0 = Input(shape=data_in_shape)
layer_1 = TimeDistributed(Conv2D(6, (3,3), data_format='channels_last'))(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
np.random.seed(random_seed)
data_in = 2 * np.random.random(data_in_shape) - 1
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
np.random.seed(random_seed + i)
weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
DATA['wrappers.TimeDistributed.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
import os
filename = '../../../test/data/layers/wrappers/TimeDistributed.json'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as f:
json.dump(DATA, f)
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [wrappers.TimeDistributed.1] wrap a Conv2D layer with 6 3x3 filters (input
Step2: export for Keras.js tests
|
14,065
|
<ASSISTANT_TASK:>
Python Code:
# ConWhAt stuff
from conwhat import VolConnAtlas,StreamConnAtlas,VolTractAtlas,StreamTractAtlas
from conwhat.viz.volume import plot_vol_scatter
# Neuroimaging stuff
import nibabel as nib
from nilearn.plotting import (plot_stat_map,plot_surf_roi,plot_roi,
plot_connectome,find_xyz_cut_coords)
from nilearn.image import resample_to_img
# Viz stuff
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
# Generic stuff
import glob, numpy as np, pandas as pd, networkx as nx
from datetime import datetime
lesion_file = 'synthetic_lesion_20mm_sphere_-46_-60_6.nii.gz' # we created this file from scratch in the previous example
lesion_img = nib.load(lesion_file)
plot_roi(lesion_file);
cw_atlases_dir = '/global/scratch/hpc3230/Data/conwhat_atlases' # change this accordingly
atlas_name = 'CWL2k8Sc33Vol3d100s_v01'
atlas_dir = '%s/%s' %(cw_atlases_dir, atlas_name)
cw_vca = VolConnAtlas(atlas_dir=atlas_dir)
idxs = 'all' # alternatively, something like: range(1,100), indicates the first 100 cnxns (rows in .vmfs)
jlc_dir = '/global/scratch/hpc3230/joblib_cache_dir' # this is the cache dir where joblib writes temporary files
lo_df,lo_nx = cw_vca.compute_hit_stats(lesion_file,idxs,n_jobs=4,joblib_cache_dir=jlc_dir)
lo_df.head()
lo_df[['TPR', 'corr_thrbin']].iloc[:10].T
tpr_adj = nx.to_pandas_adjacency(lo_nx,weight='TPR')
cpr_adj = nx.to_pandas_adjacency(lo_nx,weight='corr_thrbin')
np.corrcoef(tpr_adj.values.ravel(), cpr_adj.values.ravel())
fig, ax = plt.subplots(ncols=2, figsize=(12,4))
sns.heatmap(tpr_adj,xticklabels='',yticklabels='',vmin=0,vmax=0.5,ax=ax[0]);
sns.heatmap(cpr_adj,xticklabels='',yticklabels='',vmin=0,vmax=0.5,ax=ax[1]);
fig, ax = plt.subplots(ncols=2, figsize=(12,4))
sns.heatmap(tpr_adj, xticklabels='',yticklabels='',cmap='Reds',
mask=tpr_adj.values==0,vmin=0,vmax=0.5,ax=ax[0]);
sns.heatmap(cpr_adj,xticklabels='',yticklabels='',cmap='Reds',
mask=cpr_adj.values==0,vmin=0,vmax=0.5,ax=ax[1]);
cw_vca.vfms.loc[lo_df.index].head()
# Compute an approximate centroid (xyz cut coordinates) for every atlas region.
parc_img = cw_vca.region_nii
parc_dat = parc_img.get_data()
parc_vals = np.unique(parc_dat)[1:]  # drop the first value (background)
# Bug fix: the original referenced undefined names (roivals, dat, img);
# they should be the parc_* variables defined just above.
ccs = {roival: find_xyz_cut_coords(nib.Nifti1Image((parc_dat == roival).astype(int),
                                                   parc_img.affine),
                                   activation_threshold=0)
       for roival in parc_vals}
# dict views are not sequences in Python 3; materialize before np.array
ccs_arr = np.array(list(ccs.values()))
fig, ax = plt.subplots(figsize=(16,6))
plot_connectome(tpr_adj.values,ccs_arr,axes=ax,edge_threshold=0.2,colorbar=True,
edge_cmap='Reds',edge_vmin=0,edge_vmax=1.,
node_color='lightgrey',node_kwargs={'alpha': 0.4});
#edge_vmin=0,edge_vmax=1)
fig, ax = plt.subplots(figsize=(16,6))
plot_connectome(cpr_adj.values,ccs_arr,axes=ax)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We now use the synthetic lesion constructed in the previous example in a ConWhAt lesion analysis.
Step2: Take another quick look at this mask
Step3: Since our lesion mask does not (by construction) have a huge amount of spatial detail, it makes sense to use one of the lower-resolution atlas. As one might expect, computation time is considerably faster for lower-resolution atlases.
Step4: See the previous tutorial on 'exploring the conwhat atlases' for more info on how to examine the components of a given atlas in ConWhAt.
Step5: Choose which connections to evaluate.
Step6: Now, compute lesion overlap statistics.
Step7: This takes about 20 minutes to run.
Step8: Typically we will be mainly interested in two of these metric scores
Step9: We can obtain these numbers as a 'modification matrix' (connectivity matrix)
Step10: These two maps are, unsurprisingly, very similar
Step11: (...with an alternative color scheme...)
Step12: We can list directly the most affected (greatest % overlap) connections,
Step13: To plot the modification matrix information on a brain, we first need to some spatial locations to plot as nodes. For these, we calculate (an approprixation to) each atlas region's centriod location
Step14: Now plotting on a glass brain
|
14,066
|
<ASSISTANT_TASK:>
Python Code:
%%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade
import kfp.components as comp
dataproc_create_cluster_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/dataproc/create_cluster/component.yaml')
help(dataproc_create_cluster_op)
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
# Optional Parameters
EXPERIMENT_NAME = 'Dataproc - Create Cluster'
import kfp.dsl as dsl
import json
@dsl.pipeline(
name='Dataproc create cluster pipeline',
description='Dataproc create cluster pipeline'
)
def dataproc_create_cluster_pipeline(
project_id = PROJECT_ID,
region = 'us-central1',
name='',
name_prefix='',
initialization_actions='',
config_bucket='',
image_version='',
cluster='',
wait_interval='30'
):
dataproc_create_cluster_op(
project_id=project_id,
region=region,
name=name,
name_prefix=name_prefix,
initialization_actions=initialization_actions,
config_bucket=config_bucket,
image_version=image_version,
cluster=cluster,
wait_interval=wait_interval)
pipeline_func = dataproc_create_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the component using KFP SDK
Step2: Sample
Step3: Example pipeline that uses the component
Step4: Compile the pipeline
Step5: Submit the pipeline for execution
|
14,067
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
list = ['a', 'b', 'c', 'd']
list
np.array(list)
list_matrix = [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
list_matrix
np.array(list_matrix)
np.arange(1, 11)
np.arange(5, 60, 5)
np.zeros(10)
np.zeros((5, 5))
np.ones(10)
np.ones((5,5))
np.linspace(10, 20, 5)
np.linspace(100, 101, 10)
np.eye(4)
np.random.rand(5)
np.random.rand(3,3)
np.random.rand(2,3,4)
np.random.randn(3)
np.random.randn(4,4)
np.random.randint(5)
np.random.randint(1,11)
np.random.randint(1, 100, 10)
np.random.randint(1, 100, (4, 4))
list = np.arange(1,10)
list.max()
list.min()
list.argmax()
list.argmin()
reshape_list = np.arange(1,10)
reshape_list
reshape_list.reshape(3,3)
shape_list = np.arange(1,10)
shape_list
shape_list.shape
shape_list.reshape(3,3)
shape_list.reshape(3,3).shape
ravel_list = np.arange(1,26).reshape(5,5)
ravel_list
ravel_list = ravel_list.ravel()
ravel_list
flatten_list = np.arange(1,26).reshape(5,5)
flatten_list
flatten_list = flatten_list.flatten()
flatten_list
list = np.arange(1,11)
list.dtype
list = np.array(['a', 'b', 'c'])
list.dtype
selection_list = np.arange(1,26)
selection_list
selection_list[9]
selection_list[24]
selection_list = selection_list.reshape(5, 5)
selection_list
selection_list[2:,1:]
selection_list[1:4,2:4]
selection_list = selection_list.ravel()
selection_list
selection_list[selection_list>10]
uni_list = np.arange(1,11)
uni_list
np.square(uni_list)
np.sin(uni_list)
np.log(uni_list)
np.log10(uni_list)
np.isfinite(uni_list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NumPy Arrays
Step2: arange method
Step3: zeros method
Step4: ones method
Step5: linspace
Step6: eye
Step7: Random
Step8: randn
Step9: randint
Step10: Array Attributes
Step11: Reshape, Shape and Ravel
Step12: Shape
Step13: ravel
Step14: flatten
Step15: dtype
Step16: Selection
Step17: For multi dimensional lists
Step18: comparison selectors
Step19: Universal Functions
|
14,068
|
<ASSISTANT_TASK:>
Python Code:
def getNumber(n, k):
    """Return the k-th number when 1..n is rearranged with all odd
    numbers first (in ascending order), followed by all even numbers.

    n: size of the sequence 1..n
    k: 1-based position in the rearranged sequence
    """
    # Number of odd values in 1..n: they occupy the first `pos` slots.
    pos = n // 2 if n % 2 == 0 else n // 2 + 1
    if k <= pos:
        # k-th odd number
        return k * 2 - 1
    # (k - pos)-th even number
    return (k - pos) * 2
if __name__ == "__main__":  # fixed: was "__main __" (stray space), so the demo never ran
    n = 8
    k = 5
    print(getNumber(n, k))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,069
|
<ASSISTANT_TASK:>
Python Code:
import tushare as ts
import pandas as pd
from IPython.display import HTML
stock_selected='600487'
#历年前十大股东持股情况
#df1为季度统计摘要,data1为前十大持股明细统计
df1, data1 = ts.top10_holders(code=stock_selected, gdtype='0') #gdtype等于1时表示流通股,默认为0
#df1, data1 = ts.top10_holders(code='002281', year=2015, quarter=1, gdtype='1')
df1 = df1.sort_values('quarter', ascending=False)
df1.head(10)
#qts = list(df1['quarter'])
#data = list(df1['props'])
#name = ts.get_realtime_quotes(stock_selected)['name'][0]
import tushare as ts
import pandas as pd
from IPython.display import HTML
#浦发银行2016三季度前十大流通股东情况
df2, data2 = ts.top10_holders(code=stock_selected, year=2016, quarter=3, gdtype='1')
#取前十大流通股东名称
top10name = str(list(data2['name']))
print(top10name)
import tushare as ts
df=ts.get_stock_basics()
df.head(5)
att=df.columns.values.tolist()
print(att)
#df.ix['002281']
#df.ix['002281']
#df.info()
df[df.name == u'四维图新']['esp']
from xpinyin import Pinyin
pin=Pinyin()
pin.get_initials(u'四维图新', u'')
df.name
df['UP'] = None
for index, row in df.iterrows():
name_str = df.name[index]
#print(name_str)
up_letter = pin.get_initials(name_str,u'')
#print(up_letter)
df.at[index,['UP']]=up_letter
df[df['UP']=='HTGD']
df_out=df[(df.profit>20) &
(df.gpr > 25) &
(df.pe <120) &
(df.pe >0) &
(df.rev >0)][['name','industry','pe','profit','esp','rev','holders','gpr','npr']]
df_out.sort_values(by='npr',ascending=False, inplace = True)
df_out.rename(columns={'name':u'股票','industry':u'行业','pe':u'市盈率',
'profit':u'利润同比','esp':u'每股收益','rev':u'收入同比',
'holders':u'股东人数','gpr':u'毛利率','npr':u'净利率'})[:50]
import tushare as ts
df=ts.get_report_data(2016,4)
#df[df.code=='002405']
df
import tushare as ts
df_profit = ts.get_profit_data(2017,1)
#df_profit.info()
#df_profit[df_profit.code == '002405']
df_out=df_profit[(df_profit.roe>10) & (df_profit.gross_profit_rate > 25) & (df_profit.net_profits >0)]
df_out.sort_values(by='roe',ascending=False, inplace = True)
df_out[:50]
import tushare as ts
df_operation = ts.get_operation_data(2017,1)
df_out=df_operation[df_operation.currentasset_days<120]
df_out.sort_values(by='currentasset_days',ascending=False, inplace = True)
df_out[:50]
# -*- coding: UTF-8 -*-
import tushare as ts
df_growth = ts.get_growth_data(2017,1)
import numpy as np
df_out = df_growth[(df_growth.nprg >20) &
(df_growth.mbrg >20)]
df_out.sort_values(by= 'nprg', ascending = True, inplace=True)
#df_out.to_csv(".\growth.csv",encoding="utf_8_sig",dtype={'code':np.string})
df_out[:50]
import tushare as ts
df_cash = ts.get_cashflow_data(2016,4)
df_out = df_cash[(df_cash.cf_sales > 0)]
df_out.sort_values(by = 'cf_sales', ascending = True, inplace = True)
df_out[:50]
import tushare as ts
import pandas as pd
from IPython.display import HTML
#中国联通前复权数据
#df = ts.get_k_data(stock_selected, start='2016-01-01', end='2016-12-02')
df = ts.get_k_data(stock_selected, start='2016-01-01')
datastr = ''
for idx in df.index:
rowstr = '[\'%s\',%s,%s,%s,%s]' % (df.ix[idx]['date'], df.ix[idx]['open'],
df.ix[idx]['close'], df.ix[idx]['low'],
df.ix[idx]['high'])
datastr += rowstr + ','
datastr = datastr[:-1]
#取股票名称
name = ts.get_realtime_quotes(stock_selected)['name'][0]
datahead =
<div id="chart" style="width:800px; height:600px;"></div>
<script>
require.config({ paths:{ echarts: '//cdn.bootcss.com/echarts/3.2.3/echarts.min', } });
require(['echarts'],function(ec){
var myChart = ec.init(document.getElementById('chart'));
datavar = 'var data0 = splitData([%s]);' % datastr
funcstr =
function splitData(rawData) {
var categoryData = [];
var values = []
for (var i = 0; i < rawData.length; i++) {
categoryData.push(rawData[i].splice(0, 1)[0]);
values.push(rawData[i])
}
return {
categoryData: categoryData,
values: values
};
}
function calculateMA(dayCount) {
var result = [];
for (var i = 0, len = data0.values.length; i < len; i++) {
if (i < dayCount) {
result.push('-');
continue;
}
var sum = 0;
for (var j = 0; j < dayCount; j++) {
sum += data0.values[i - j][1];
}
result.push((sum / dayCount).toFixed(2));
}
return result;
}
option = {
title: {
namestr = 'text: \'%s\',' %name
functail =
left: 0
},
tooltip: {
trigger: 'axis',
axisPointer: {
type: 'line'
}
},
legend: {
data: ['日K', 'MA5', 'MA10', 'MA20', 'MA30']
},
grid: {
left: '10%',
right: '10%',
bottom: '15%'
},
xAxis: {
type: 'category',
data: data0.categoryData,
scale: true,
boundaryGap : false,
axisLine: {onZero: false},
splitLine: {show: false},
splitNumber: 20,
min: 'dataMin',
max: 'dataMax'
},
yAxis: {
scale: true,
splitArea: {
show: true
}
},
dataZoom: [
{
type: 'inside',
start: 50,
end: 100
},
{
show: true,
type: 'slider',
y: '90%',
start: 50,
end: 100
}
],
series: [
{
name: '日K',
type: 'candlestick',
data: data0.values,
markPoint: {
label: {
normal: {
formatter: function (param) {
return param != null ? Math.round(param.value) : '';
}
}
},
data: [
{
name: '标点',
coord: ['2013/5/31', 2300],
value: 2300,
itemStyle: {
normal: {color: 'rgb(41,60,85)'}
}
},
{
name: 'highest value',
type: 'max',
valueDim: 'highest'
},
{
name: 'lowest value',
type: 'min',
valueDim: 'lowest'
},
{
name: 'average value on close',
type: 'average',
valueDim: 'close'
}
],
tooltip: {
formatter: function (param) {
return param.name + '<br>' + (param.data.coord || '');
}
}
},
markLine: {
symbol: ['none', 'none'],
data: [
[
{
name: 'from lowest to highest',
type: 'min',
valueDim: 'lowest',
symbol: 'circle',
symbolSize: 10,
label: {
normal: {show: false},
emphasis: {show: false}
}
},
{
type: 'max',
valueDim: 'highest',
symbol: 'circle',
symbolSize: 10,
label: {
normal: {show: false},
emphasis: {show: false}
}
}
],
{
name: 'min line on close',
type: 'min',
valueDim: 'close'
},
{
name: 'max line on close',
type: 'max',
valueDim: 'close'
}
]
}
},
{
name: 'MA5',
type: 'line',
data: calculateMA(5),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA10',
type: 'line',
data: calculateMA(10),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA20',
type: 'line',
data: calculateMA(20),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
{
name: 'MA30',
type: 'line',
data: calculateMA(30),
smooth: true,
lineStyle: {
normal: {opacity: 0.5}
}
},
]
};
myChart.setOption(option);
});
</script>
HTML(datahead + datavar + funcstr + namestr + functail)
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
stock_selected='002281'
df = ts.get_k_data(stock_selected, start='2016-01-01')
df.info()
#df['close'].plot(grid=True)
#df['42d']= np.round(pd.rolling_mean(df['close'],window=42),2)
#df['252d']= np.round(pd.rolling_mean(df['close'],window=252),2)
df['42d']= np.round(pd.Series.rolling(df['close'],window=42).mean(),2)
df['252d']= np.round(pd.Series.rolling(df['close'],window=252).mean(),2)
#df[['close','42d','252d']].tail(10)
df[['close','42d','252d']].plot(grid=True)
df['42-252']=df['42d']-df['252d']
#df['42-252'].tail(10)
SD=1
df['regime'] = np.where(df['42-252']>SD,1,0)
df['regime'] = np.where(df['42-252'] < -SD,-1,df['regime'])
#df['regime'].head(10)
df['regime'].tail(10)
#df['regime'].plot(lw=1.5)
#plt.ylim(-1.1, 1.1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2、Top 10 share holder
Step2: 获取沪深上市公司基本情况。属性包括:
Step3: 业绩报告(主表)
Step4: 盈利能力
Step5: 营运能力
Step6: 成长能力
Step7: 偿债能力
Step11: 3、CandleStick
|
14,070
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
FIELDS = {
'auth_read': 'user', # Credentials used for reading data.
'dataset': '',
'query': 'SELECT * FROM `Dataset.Table`;',
'legacy': False,
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'lineitem': {
'auth': 'user',
'write': {
'dry_run': False,
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 1,'default': ''}},
'query': {'field': {'name': 'query','kind': 'string','order': 2,'default': 'SELECT * FROM `Dataset.Table`;'}},
'legacy': {'field': {'name': 'legacy','kind': 'boolean','order': 3,'default': False}}
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Get Cloud Project ID
Step2: 3. Get Client Credentials
Step3: 4. Enter Line Item From BigQuery Parameters
Step4: 5. Execute Line Item From BigQuery
|
14,071
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
radian = UNITS.radian
m = UNITS.meter
s = UNITS.second
kg = UNITS.kilogram
N = UNITS.newton
params = Params(Rmin = 0.02 * m,
Rmax = 0.055 * m,
Mcore = 15e-3 * kg,
Mroll = 215e-3 * kg,
L = 47 * m,
tension = 2e-4 * N,
t_end = 120 * s)
def make_system(params):
    """Make a system object.

    params: Params with Rmin, Rmax, Mcore, Mroll,
            L, tension, and t_end

    returns: System with init, area, rho_h, k and the Params fields
    """
    # NOTE(fix): the docstring above had lost its triple quotes, which made
    # this function a syntax error as originally written.
    L, Rmax, Rmin = params.L, params.Rmax, params.Rmin
    Mroll = params.Mroll

    # Initial state: roll has not turned, is at rest, full length remains.
    init = State(theta = 0 * radian,
                 omega = 0 * radian/s,
                 y = L)

    # Cross-sectional area of the paper annulus.
    area = pi * (Rmax**2 - Rmin**2)
    # Area density (mass per unit area) of the rolled paper.
    rho_h = Mroll / area
    # Constant relating radius to remaining length: r**2 = 2*k*y + Rmin**2.
    k = (Rmax**2 - Rmin**2) / 2 / L / radian

    return System(params, init=init, area=area, rho_h=rho_h, k=k)
system = make_system(params)
system.init
def moment_of_inertia(r, system):
    """Moment of inertia for a roll of toilet paper.

    r: current radius of roll in meters
    system: System object with Mcore, rho_h, Rmin

    returns: moment of inertia in kg m**2
    """
    # NOTE(fix): the docstring above had lost its triple quotes, which made
    # this function a syntax error as originally written.
    Mcore, Rmin, rho_h = system.Mcore, system.Rmin, system.rho_h

    # Core: mass Mcore concentrated at radius Rmin.
    Icore = Mcore * Rmin**2
    # Paper: uniform annulus of area density rho_h from Rmin out to r.
    Iroll = pi * rho_h / 2 * (r**4 - Rmin**4)

    return Icore + Iroll
moment_of_inertia(system.Rmin, system)
moment_of_inertia(system.Rmax, system)
# Solution
def slope_func(state, t, system):
    """Computes the derivatives of the state variables.

    state: State object with theta, omega, y
    t: time (unused; required by the ODE solver interface)
    system: System object with Rmin, k, Mcore, rho_h, tension

    returns: sequence of derivatives (dtheta/dt, domega/dt, dy/dt)
    """
    # NOTE(fix): the docstring above had lost its triple quotes, which made
    # this function a syntax error as originally written.
    theta, omega, y = state
    k, Rmin, tension = system.k, system.Rmin, system.tension

    # Current radius from remaining length: r**2 = 2*k*y + Rmin**2.
    r = sqrt(2*k*y + Rmin**2)
    I = moment_of_inertia(r, system)
    # Torque from tension applied at the outer radius.
    tau = r * tension
    alpha = tau / I
    # Paper pays out at the rim speed; y decreases as the roll turns.
    dydt = -r * omega

    return omega, alpha, dydt
# Solution
slope_func(system.init, 0*s, system)
# Solution
results, details = run_ode_solver(system, slope_func)
details
results.tail()
def plot_theta(results):
    """Plot the angle time series from a simulation results frame.

    results: object with a `theta` series indexed by time — presumably a
    modsim/pandas TimeFrame; TODO confirm against the solver output.
    """
    plot(results.theta, color='C0', label='theta')
    decorate(xlabel='Time (s)',
             ylabel='Angle (rad)')
plot_theta(results)
def plot_omega(results):
    """Plot the angular-velocity time series from a simulation results frame.

    results: object with an `omega` series indexed by time — presumably a
    modsim/pandas TimeFrame; TODO confirm against the solver output.
    """
    plot(results.omega, color='C2', label='omega')
    decorate(xlabel='Time (s)',
             ylabel='Angular velocity (rad/s)')
plot_omega(results)
def plot_y(results):
    """Plot the remaining-paper-length time series from a simulation results frame.

    results: object with a `y` series indexed by time — presumably a
    modsim/pandas TimeFrame; TODO confirm against the solver output.
    """
    plot(results.y, color='C1', label='y')
    decorate(xlabel='Time (s)',
             ylabel='Length (m)')
plot_y(results)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unrolling
Step2: And a few more parameters in the Params object.
Step4: make_system computes rho_h, which we'll need to compute moment of inertia, and k, which we'll use to compute r.
Step5: Testing make_system
Step7: Here's how we compute I as a function of r
Step8: When r is Rmin, I is small.
Step9: As r increases, so does I.
Step11: Exercises
Step12: Test slope_func with the initial conditions.
Step13: Run the simulation.
Step14: And look at the results.
Step15: Check the results to see if they seem plausible
Step16: Plot omega
Step17: Plot y
|
14,072
|
<ASSISTANT_TASK:>
Python Code:
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)#10
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)#得到所有标签为y的x对应的index
idxs = np.random.choice(idxs, samples_per_class, replace=False)#从这里面随机选取7个 samples
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)#7行 每一列表示一个class
plt.imshow(X_train[idx].astype('uint8'))#如果不使用uint8有时候会莫名其妙出问题
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print X_train.shape, X_test.shape
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop(无操作):
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print dists.shape
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Let's compare how fast the implementations are
def time_function(f, *args):
    """Call a function f with args and return the time (in seconds) that it took to execute."""
    # NOTE(fix): the docstring above had lost its triple quotes, which made
    # this function a syntax error as originally written.
    import time
    # perf_counter is monotonic and higher-resolution than time.time, so
    # short runs (like the distance computations timed here) measure reliably.
    tic = time.perf_counter()
    f(*args)
    toc = time.perf_counter()
    return toc - tic
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print 'Two loop version took %f seconds' % two_loop_time
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print 'One loop version took %f seconds' % one_loop_time
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print 'No loop version took %f seconds' % no_loop_time
# you should see significantly faster performance with the fully vectorized implementation
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
# X_train_folds.extend(np.array_split(X_train, num_folds))
# y_train_folds.extend(np.array_split(y_train, num_folds))
y_train_ = y_train.reshape(-1, 1)
X_train_folds , y_train_folds = np.array_split(X_train, 5), np.array_split(y_train_, 5)
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
# import copy
# for k in k_choices:
# accuracies = []
# for test_set_index in range(num_folds):
# X_test_tmp = np.array(X_train_folds[test_set_index])
# y_test_tmp = np.array(y_train_folds[test_set_index])
# X_train_tmp = copy.deepcopy(X_train_folds)
# y_train_tmp = copy.deepcopy(y_train_folds)
# X_train_tmp.pop(test_set_index)
# y_train_tmp.pop(test_set_index)
# X_train_tmp = np.array(X_train_tmp).reshape((-1,3072))
# y_train_tmp = np.array(y_train_tmp).reshape((X_train_tmp.shape[0],))
# classifier.train(X_train_tmp, y_train_tmp)
# y_pred_test_tmp = classifier.predict(X_test_tmp, k)
# accuracy = np.sum(y_test_tmp == y_pred_test_tmp) / float(X_test_tmp.shape[0])
# accuracies.append(accuracy)
# k_to_accuracies[k] = accuracies
for k_ in k_choices:
k_to_accuracies.setdefault(k_, [])
for i in range(num_folds):
classifier = KNearestNeighbor()
X_val_train = np.vstack(X_train_folds[0:i] + X_train_folds[i+1:])
y_val_train = np.vstack(y_train_folds[0:i] + y_train_folds[i+1:])
y_val_train = y_val_train[:,0]
classifier.train(X_val_train, y_val_train)
for k_ in k_choices:
y_val_pred = classifier.predict(X_train_folds[i], k=k_)
num_correct = np.sum(y_val_pred == y_train_folds[i][:,0])
accuracy = float(num_correct) / len(y_val_pred)
k_to_accuracies[k_] = k_to_accuracies[k_] + [accuracy]
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
print 'k = %d, accuracy = %f' % (k, accuracy)
print X_train.shape
print X_test.shape
# plot the raw observations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
#k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps
Step2: Inline Question #1
Step3: You should expect to see approximately 27% accuracy. Now lets try out a larger k, say k = 5
Step5: You should expect to see a slightly better performance than with k = 1.
Step6: Cross-validation
|
14,073
|
<ASSISTANT_TASK:>
Python Code:
st = 'Print only the words that start with s in this sentence'
#Code here
#Code Here
#Code in this cell
[]
st = 'Print every word in this sentence that has an even number of letters'
#Code in this cell
#Code in this cell
st = 'Create a list of the first letters of every word in this string'
#Code in this cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use range() to print all the even numbers from 0 to 10.
Step2: Use List comprehension to create a list of all numbers between 1 and 50 that are divisble by 3.
Step3: Go through the string below and if the length of a word is even print "even!"
Step4: Write a program that prints the integers from 1 to 100. But for multiples of three print "Fizz" instead of the number, and for the multiples of five print "Buzz". For numbers which are multiples of both three and five print "FizzBuzz".
Step5: Use List Comprehension to create a list of the first letters of every word in the string below
|
14,074
|
<ASSISTANT_TASK:>
Python Code:
# standard library
import numpy as np
# Parametrization
num_agents = 1000
num_covars = 3
betas_true = np.array([0.22, 0.30, -0.1]).T
sd_true = 0.01
# Sampling of observables
np.random.seed(123)
X = np.random.rand(num_agents, num_covars)
X[:,0] = 1
# Sampling disturbances
eps = np.random.normal(loc=0.0, scale=sd_true, size=num_agents)
# Create endogenous outcome
idx_true = np.dot(X, betas_true)
Y = idx_true + eps
# Checks
assert (X.dtype == 'float')
assert (Y.dtype == 'float')
assert (np.all(np.isfinite(X)))
assert (np.all(np.isfinite(Y)))
assert (X.shape == (num_agents, num_covars))
assert (Y.shape == (num_agents, ))
assert (np.all(X[:,0] == 1.0))
%pylab inline
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylabel(r'$Y$'), ax.set_xlabel(r'$ X\beta $')
ax.plot(idx_true, Y, 'o')
# Let us get the estimates.
beta_hat = np.dot(np.dot(np.linalg.inv(np.dot(X.T,X)), X.T), Y)
sd_hat = np.sqrt(np.var(Y - np.dot(X, beta_hat)))
# Let us have a look now.
print('Results for beta', beta_hat, ' Results for sd', sd_hat)
# standard library
from scipy.optimize import minimize
from scipy.stats import norm
# Auxiliary functions.
def sample_likelihood(paras, X, Y):
    ''' Construct the sample (negative log) likelihood.

    paras: np.ndarray of floats, (beta_1, ..., beta_K, sd)
    X: (num_agents, num_covars) matrix of covariates
    Y: (num_agents,) vector of outcomes
    '''
    # Antibugging.
    assert (isinstance(paras, np.ndarray))
    assert (paras.dtype == 'float')
    # NOTE(fix): the original `assert (X.ndim == 2), (Y.ndim == 2)` used the
    # second comparison as the assertion *message*, so Y was never checked.
    assert (X.ndim == 2)
    assert (Y.ndim == 1)

    # Auxiliary objects.
    num_agents = Y.shape[0]

    # Summing over the sample.
    contribs = 0.0
    for i in range(num_agents):
        contrib = individual_likelihood(paras, X[i, :], Y[i])
        contribs += contrib

    # The accumulated value is a scalar sum; np.mean on a scalar is the
    # identity, kept for parity with the original. TODO confirm whether the
    # author intended the *average* contribution (divide by num_agents).
    contribs = np.mean(contribs)

    # Finishing.
    return contribs
def individual_likelihood(paras, x, y):
    ''' This function determines an individual's contribution to the
    (negative log) sample likelihood.

    paras: np.ndarray of floats, (beta_1, ..., beta_K, sd)
    x: (num_covars,) covariate vector for the individual
    y: scalar outcome for the individual
    '''
    # Antibugging.
    assert (isinstance(paras, np.ndarray))
    assert (paras.dtype == 'float')
    # NOTE(fix): the original `assert (x.ndim == 1), (y.ndim == 1)` used the
    # second comparison only as the assertion *message*; x is the array here
    # and y is a scalar, so only x's dimensionality is checked.
    assert (x.ndim == 1)

    # Distribute parameters.
    betas, sd = paras[:-1], paras[-1]

    # Normal density of the standardized residual, scaled by 1/sd.
    resid = (y - np.dot(x, betas)) / sd
    contrib = (1.0 / sd) * norm.pdf(resid)

    # Guard against log(0), then take the negative log-likelihood.
    contrib = np.clip(contrib, 1e-20, np.inf)
    contrib = -np.log(contrib)

    # Finishing.
    return contrib
''' Main calculations.
'''
# Construct parameters.
paras = np.concatenate((betas_true, [sd_true]))
# Single individual.
individual_likelihood(paras, X[1,:], Y[1])
# Full sample.
sample_likelihood(paras, X, Y)
# Optimization.
x0 = paras
#x0 = [0.0, 0.0, 0.0, 1.0]
for optimizer in ['BFGS', 'Nelder-Mead']:
rslt = minimize(sample_likelihood, x0, args=(X, Y), method=optimizer)
import urllib; from IPython.core.display import HTML
HTML(urllib.urlopen('http://bit.ly/1Ki3iXw').read())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us have a look at the relationship.
Step2: Estimation using Linear Algebra Tools
Step3: Estimation using Optimization Tools
Step4: Formatting
|
14,075
|
<ASSISTANT_TASK:>
Python Code:
# to make sure things are working, run this
import pandas as pd
print('Pandas version: ', pd.__version__)
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
%matplotlib inline
url = 'http://pages.stern.nyu.edu/~dbackus/Data/beer_production_1947-2004.xlsx'
beer = pd.read_excel(url, skiprows=12, index_col=0)
print('Dimensions:', beer.shape)
beer[list(range(1,11))].head(3)
vars = list(range(1,101)) # extract top 100 firms
pdf = beer[vars].T # transpose (flip rows and columns)
pdf[[1947, 1967, 1987, 2004]].head()
# a basic plot
fig, ax = plt.subplots()
pdf[1947].plot(ax=ax, logy=True)
pdf[1967].plot(ax=ax, logy=True)
pdf[1987].plot(ax=ax, logy=True)
pdf[2004].plot(ax=ax, logy=True)
ax.legend()
# for help
ax.set_title?
# this is easier if we put the basic plot in a function
def make_plot():
fig, ax = plt.subplots()
pdf[1947].plot(ax=ax, logy=True)
pdf[1967].plot(ax=ax, logy=True)
pdf[1987].plot(ax=ax, logy=True)
pdf[2004].plot(ax=ax, logy=True)
ax.legend()
return ax
ax = make_plot()
ax.set_title('Beer sales by industry rank', fontsize=14)
# line width: put lw=2 in each of the plot statements
ax = make_plot()
ax.set_xlabel('Industry Rank')
ax.set_ylabel('Sales (log scale)')
# log scale: otherwise the differences are too large
# we can't show the alternative because some of the numbers are zero
# color: we add color='somecolor' in each of the plot statements
# data input (takes about 20 seconds on a wireless network)
url1 = 'http://esa.un.org/unpd/wpp/DVD/Files/'
url2 = '1_Indicators%20(Standard)/EXCEL_FILES/1_Population/'
url3 = 'WPP2017_POP_F07_1_POPULATION_BY_AGE_BOTH_SEXES.XLSX'
url = url1 + url2 + url3
cols = [2, 4, 5] + list(range(6,28))
prj = pd.read_excel(url, sheetname=1, skiprows=16, parse_cols=cols, na_values=['…'])
print('Dimensions: ', prj.shape)
print('Column labels: ', prj.columns)
# rename some variables
pop = prj
pop = pop.rename(columns={'Reference date (as of 1 July)': 'Year',
'Region, subregion, country or area *': 'Country',
'Country code': 'Code'})
# select countries and years
countries = ['Japan']
years = [2015, 2035, 2055, 2075, 2095]
pop = pop[pop['Country'].isin(countries) & pop['Year'].isin(years)]
pop = pop.drop(['Country', 'Code'], axis=1)
pop = pop.set_index('Year').T
pop = pop/1000 # convert population from thousands to millions
pop.head()
pop.tail()
pop[[2015]].plot()
pop[[2015]].plot(kind='bar')
# my fav
pop[[2015]].plot(kind='barh')
fig, ax = plt.subplots(figsize=(10,6))
pop.plot(ax=ax)
ax.set_title('Population by age')
ax.set_xlabel('Population (millions)')
ax.set_ylabel('Age Range')
pop.plot(kind='bar', subplots=True, figsize=(6,8), sharey=True)
# data input (takes about 20 seconds on a wireless network)
url = 'http://pages.stern.nyu.edu/~dbackus/Data/feds200628.csv'
gsw = pd.read_csv(url, skiprows=9, index_col=0, usecols=list(range(11)), parse_dates=True)
print('Dimensions: ', gsw.shape)
print('Column labels: ', gsw.columns)
print('Row labels: ', gsw.index)
# grab recent data
df = gsw[gsw.index >= dt.datetime(2010,1,1)]
# convert to annual, last day of year
df = df.resample('A', how='last').sort_index()
df.head()
df.columns = list(range(1,11))
ylds = df.T
ylds.head(3)
fig, ax = plt.subplots()
ylds.plot(ax=ax)
ax.set_title('US Treasury Yields')
ax.set_ylabel('Yield')
ax.set_xlabel('Maturity in Years')
ybar = ylds.mean(axis=1)
ybar.plot(ax=ax, color='black', linewidth=3, linestyle='dashed')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you get something like "Pandas version
Step2: Remind yourself
Step3: Question. Can you see consolidation here?
Step4: Answer these questions below. Code is sufficient, but it's often helpful to add comments to remind yourself what you did, and why.
Step5: Question 4. Japan's aging population
Step6: Comment. Now we have the number of people in any five-year age group running down columns. The column labels are the years.
Step7: Question 5. Dynamics of the yield curve
Step8: With the dataframe ylds
|
14,076
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df0 = pd.read_csv("../../data/interim/001_normalised_keyed_reviews.csv", sep="\t", low_memory=False)
df0.head()
# For monitoring duration of pandas processes
from tqdm import tqdm, tqdm_pandas
# To avoid RuntimeError: Set changed size during iteration
tqdm.monitor_interval = 0
# Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
# (can use `tqdm_gui`, `tqdm_notebook`, optional kwargs, etc.)
tqdm.pandas(desc="Progress:")
# Now you can use `progress_apply` instead of `apply`
# and `progress_map` instead of `map`
# can also groupby:
# df.groupby(0).progress_apply(lambda x: x**2)
def convert_text_to_list(review):
    """Parse a stringified Python list (e.g. "['a', 'b']") back into a list of tokens."""
    stripped = review
    # Strip the list-literal punctuation, then split on the separators.
    for ch in ("[", "]", "'"):
        stripped = stripped.replace(ch, "")
    return stripped.split(",")
# Convert "reviewText" field to back to list
df0['reviewText'] = df0['reviewText'].astype(str)
df0['reviewText'] = df0['reviewText'].progress_apply(lambda text: convert_text_to_list(text));
df0['reviewText'].head()
df0['reviewText'][12]
import nltk
nltk.__version__
# Split negs
def split_neg(review):
    """Split negation-joined tokens (e.g. 'not_good') back into separate words.

    review - list of string tokens

    Returns a new list in which every token containing '_' is replaced by
    all of its '_'-separated parts. (The original kept only the first two
    parts, silently dropping the rest of multi-underscore tokens.)
    """
    new_review = []
    for token in review:
        if '_' in token:
            # extend() keeps every part, not just parts [0] and [1].
            new_review.extend(token.split("_"))
        else:
            new_review.append(token)
    return new_review
df0["reviewText"] = df0["reviewText"].progress_apply(lambda review: split_neg(review))
df0["reviewText"].head()
### Remove Stop Words
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
def remove_stopwords(review):
    """Drop English stop words (module-level ``stop_words`` set) from a token list."""
    return list(filter(lambda tok: tok not in stop_words, review))
df0["reviewText"] = df0["reviewText"].progress_apply(lambda review: remove_stopwords(review))
df0["reviewText"].head()
from nltk.tag import StanfordPOSTagger
from nltk import word_tokenize
# import os
# os.getcwd()
# Add the jar and model via their path (instead of setting environment variables):
jar = '../../models/stanford-postagger-full-2017-06-09/stanford-postagger.jar'
model = '../../models/stanford-postagger-full-2017-06-09/models/english-left3words-distsim.tagger'
pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')
def pos_tag(review):
    # Tag a token list with the module-level Stanford POS tagger.
    # NOTE(review): implicitly returns None for an empty review -- callers
    # must cope with missing values; confirm this is intended.
    if(len(review)>0):
        return pos_tagger.tag(review)
# Example
text = pos_tagger.tag(word_tokenize("What's the airspeed of an unladen swallow ?"))
print(text)
tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: pos_tag(review)))
tagged_df.head()
# tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: nltk.pos_tag(review)))
# tagged_df.head()
tagged_df['reviewText'][8]
## Join with Original Key and Persist Locally to avoid RE-processing
uniqueKey_series_df = df0[['uniqueKey']]
uniqueKey_series_df.head()
pos_tagged_keyed_reviews = pd.concat([uniqueKey_series_df, tagged_df], axis=1);
pos_tagged_keyed_reviews.head()
pos_tagged_keyed_reviews.to_csv("../data/interim/002_pos_tagged_keyed_reviews.csv", sep='\t', header=True, index=False);
def noun_collector(word_tag_list):
    """Keep only the nouns from a list of (word, tag) pairs.

    Implicitly returns None when the input list is empty, matching the
    original contract (empty -> missing value downstream).
    """
    noun_tags = {'NN', 'NNS', 'NNP', 'NNPS'}
    if word_tag_list:
        nouns = []
        for word, tag in word_tag_list:
            if tag in noun_tags:
                nouns.append(word)
        return nouns
nouns_df = pd.DataFrame(tagged_df['reviewText'].progress_apply(lambda review: noun_collector(review)))
nouns_df.head()
keyed_nouns_df = pd.concat([uniqueKey_series_df, nouns_df], axis=1);
keyed_nouns_df.head()
keyed_nouns_df.to_csv("../../data/interim/002_keyed_nouns_stanford.csv", sep='\t', header=True, index=False);
## END_OF_FILE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <span style="color
Step2: Thankfully, nltk provides documentation for each tag, which can be queried using the tag, e.g., nltk.help.upenn_tagset(‘RB’), or a regular expression. nltk also provides batch pos-tagging method for document pos-tagging
Step3: The list of all possible tags appears below
Step4: Nouns
|
14,077
|
<ASSISTANT_TASK:>
Python Code::
import cv2
import numpy as np
dog_cascade = cv2.CascadeClassifier('dog_face_haar_cascade.xml')
dog_face = dog_cascade.detectMultiScale(image)
for (x, y, w, h) in dog_face:
start_point, end_point = (x, y), (x+ w, y+h)
cv2.rectangle(image, pt1= start_point, pt2 = end_point, color = (0, 255, 0), thickness = 2)
cv2.imshow('img', image)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
14,078
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image, ImageDraw
import math, colorsys, numpy
from matplotlib import colors
from IPython.display import Image as ipythonImage
ipythonImage(filename = "images/named_colors.png")
color_list=('black',
'darkslategray',
'darkgreen',
'green',
'forestgreen',
'darkseagreen',
'limegreen',
'lime',
'palegreen',
'white')
palette = []
palette.append( colors.hex2color(colors.cnames[color_list[0]]) )
palette.append( colors.hex2color(colors.cnames[color_list[1]]) )
palette.append( colors.hex2color(colors.cnames[color_list[2]]) )
palette.append( colors.hex2color(colors.cnames[color_list[3]]) )
palette.append( colors.hex2color(colors.cnames[color_list[4]]) )
palette.append( colors.hex2color(colors.cnames[color_list[5]]) )
palette.append( colors.hex2color(colors.cnames[color_list[6]]) )
palette.append( colors.hex2color(colors.cnames[color_list[7]]) )
palette.append( colors.hex2color(colors.cnames[color_list[8]]) )
palette.append( colors.hex2color(colors.cnames[color_list[9]]) )
cutoff = 2.0
def iterate_series(c, cutoff=2.0, max_iter=100):
    """Iterate the quadratic map z_{n+1} = z_n**2 + c starting from z_0 = 0.

    c        - complex (or real) candidate point
    cutoff   - divergence threshold on |z_n| (default 2.0, the classic
               Mandelbrot escape radius; previously read from a module
               global, now a backward-compatible parameter)
    max_iter - maximum number of iterations before declaring membership

    Returns the first iteration index at which |z_n| exceeds `cutoff`,
    or -1 if the series stays bounded (c is taken to be in the set).
    """
    z_n = complex(0, 0)
    for n in range(max_iter):
        z_n = z_n * z_n + c
        if abs(z_n) > cutoff:
            return n
    return -1
iterate_series(1)
iterate_series(0)
iterate_series(-1)
x_max = 800
y_max = 800
img = Image.new("RGB",(x_max,y_max))
d = ImageDraw.Draw(img)
for x in range(x_max):
for y in range(y_max):
#This determines the centering of our image
offset=(2.2,1.5)
#The value of c is determined by scaling the pixel location and offsetting it.
c = complex(x*3.0/x_max-offset[0], y*3.0/y_max-offset[1])
#Now we call our function from before
n = iterate_series(c)
#Checks if c is in the Mandelbrot Set
if n == -1:
v=1
#If not, it checks when it diverged
else:
v=n/100.0
#Determines the colors in our image based on our the previous check
color_index = int(v * (len(palette)-1))
rgb = palette[color_index]
red = int(rgb[0]*255)
green = int(rgb[1]*255)
blue = int(rgb[2]*255)
d.point((x,y),fill = (red,green,blue))
img.save("fractal.png")
ipythonImage(filename='fractal.png')
def func_z_n(c, z_n):
    """One step of the quadratic recurrence: z_{n+1} = z_n**2 + c."""
    squared = numpy.power(z_n, 2)
    return squared + c
cutoff = 2
def iterate_series2(c, z_n = -2.0**.5):
    # Julia-set variant of the escape-time iteration: c is held fixed and
    # the starting point z_0 varies. Uses the module-level `cutoff` as the
    # escape radius and `func_z_n` for the recurrence step.
    # NOTE(review): the default `-2.0**.5` parses as -(2.0**0.5) = -sqrt(2),
    # not the square root of -2 -- confirm that is the intended start value.
    for n in range(0,100):
        z_n = func_z_n(c, z_n)
        if abs(z_n) > cutoff:
            # Diverged: report the iteration count at escape.
            return n
    # Stayed bounded for all 100 iterations -> treated as inside the set.
    return -1
c_julia = complex(-0.4, 0.6)
for x in range(x_max):
for y in range(y_max):
offset=(1.5, 1.5)
z = complex(x*3.0/x_max-offset[0], y*3.0/y_max-offset[1])
n = iterate_series2(c_julia, z)
if n == -1:
v=1
else:
v=n/100.0
color_index = int(v * (len(palette)-1))
rgb = palette[color_index]
red = int(rgb[0]*255)
green = int(rgb[1]*255)
blue = int(rgb[2]*255)
d.point((x,y),fill = (red,green,blue))
#If you want to play with the colors another way, uncomment this and run the color pallet cell bellow.
#Don't forget to comment out the line above first
#d.point((x, y), fill = palette[int(v * (colors_max-1))])
name = "julia"
img.save(name+".png")
ipythonImage(filename = name+".png")
#The max number of colors it can handle
#colors_max = 50
colors_max = 500
#Coefficients for tweaking the percentage of each color we use
r1, g1, b1 = 0.66, 1.0, 0.0 # Between 0.0 and 1.0
r2, g2, b2 = 1.0, -2.0, 2.0 # Must be greater than 1.0 or less than -1.0
r3, g3, b3 = 0.6, 0.8, 0.1 # Between 0.0 and 1.0
# Calculate a tolerable palette
palette = [0] * colors_max
for i in range(colors_max):
f = 1-abs((float(i)/colors_max-1)**15)
#r, g, b = colorsys.hsv_to_rgb(.66+f/3, 1-f/2, f)
r, g, b = colorsys.hsv_to_rgb(r1+f/r2, g1+f/g2, b1+f/b2)
#palette[i] = (int(r*255), int(g*255), int(b*255))
palette[i] = (int((r-r3)*255), int((g-g3)*255), int((b-b3)*255))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This sets up the colors we want in our fractal image.
Step2: 2. Generating the Mandelbrot Set
Step3: Let's test our function
Step4: 3. Generate a Fractal Image
Step5: We are going to loop over all the pixels in our image and check if that pixel is in the Mandelbrot Set. We are using the $x$ and $y$ coordinates to represent the Real and Imaginary parts of the Complex number $z$.
Step6: Now we save our image and display it.
Step7: 4. The Julia Set
Step8: Now we open up the value of c to be defined by us and let the pixel location relate to the value of $z_{n}$
Step9: By changing the name here, you can save multiple files without having to modify too much code
Step10: Useful numpy Functions
|
14,079
|
<ASSISTANT_TASK:>
Python Code:
import os
def dir_structure (path=None, decorated=False):
read out the full recursive directory structure of `path` and return it as a tuple
path - the path where to start the walk (default: `.`)
decorated - if True, the actual directory is returned as well as the index
RETURNS
(dirs, files, tree)
dirs - tuple of dirs, in order traversed by walk
files - tuple of tuples (filename, dir_ix) where dir_ix is index in dirs
tree - tuple of tuples (dir_ix1, dir_ix2, ...) where dir_ix1... are subdirs
if path == None: path = "."
path = path.rstrip("/")
dirs_and_files = list(os.walk(path))
dirs = []
files = []
for df in dirs_and_files:
dir_ix = len(dirs)
dirs.append( df[0] )
for f in df[2]:
files.append( (f, dir_ix) )
tree = []
for df in dirs_and_files:
parent_dir = df[0]
parent_dir_ix = dirs.index(parent_dir)
tree.append(tuple((dirs.index(parent_dir+"/"+dir)) for dir in df[1]))
if decorated:
files = ( (x[0], x[1], dirs[x[1]]) for x in files)
return tuple((
tuple(dirs),
tuple(files),
tuple(tree)))
"123/".rstrip("/")
dd = dir_structure (decorated=True)
##dd
1
import os
import pandas as pd
import functools
import re
import types
class DirStructure ():
    """Read the full recursive directory structure of `path` into the object.

    path - the pathname given to os.walk(); it can also be a tuple
           `(dirs, files, tree)` in which case this tuple is used to
           initialise the object

    PROPERTIES
    dirs    - tuple of dirs, in order traversed by walk
    files   - tuple of tuples (filename, dir_ix) where dir_ix is index in dirs
    tree    - tuple of tuples (dir_ix1, dir_ix2, ...) where dir_ix1... are subdirs
    dirs_t, files_t - the corresponding pandas dataframe tables

    METHODS
    subdirs        - get all subdirs
    files_bydir    - get all files in set of dirs
    files_decorate - add the full directory names to a files table
    files_byre     - filter files table by regular expression
    add_col        - adds a column to the files_t table (values either explicit or as function)
    re_func        - factory function for use in relation to add_col

    DEPENDENCIES
    os, pandas, functools, re, types

    VERSION AND COPYRIGHT
    version 0.1a
    (c) 2014 Stefan LOESCH / oditorium
    """
    __version__ = "0.1a"

    def __init__(self, path=None):
        """Build the structure by walking `path`, or from a (dirs, files, tree) tuple."""
        if isinstance(path, tuple):
            # Re-initialise from a previously captured structure.
            # FIX: the original referenced undefined names `dirs`, `files`,
            # `tree` here instead of unpacking the tuple (NameError).
            dirs, files, tree = path
            self.dirs = tuple(dirs)
            self.files = tuple(files)
            self.tree = tuple(tree)
            self.dirs_t = pd.DataFrame(list(self.dirs), columns=['dir'])
            self.files_t = pd.DataFrame(list(self.files), columns=['file', 'dir'])
            return
        if path is None: path = "."
        path = path.rstrip("/")
        dirs_and_files = list(os.walk(path))
        # First pass: collect directories (in walk order) and (file, dir_ix) pairs.
        dirs = []
        files = []
        for df in dirs_and_files:
            dir_ix = len(dirs)
            dirs.append(df[0])
            for f in df[2]:
                files.append((f, dir_ix))
        # Second pass: for every directory record the indices of its direct subdirs.
        tree = []
        for df in dirs_and_files:
            parent_dir = df[0]
            tree.append(tuple(dirs.index(parent_dir + "/" + d) for d in df[1]))
        self.dirs = tuple(dirs)
        self.files = tuple(files)
        self.tree = tuple(tree)
        self.dirs_t = pd.DataFrame(list(self.dirs), columns=['dir'])
        self.files_t = pd.DataFrame(list(self.files), columns=['file', 'dir'])

    def files_bydir(self, dirs=None, files_t=None):
        """Filter `files_t` down to the rows whose `dir` index is in `dirs`.

        dirs    - iterable of directory indices, or a single int
                  (default: the root directory, index 0)
        files_t - a pandas table with column `dir` (default: self.files_t)
        """
        if files_t is None: files_t = self.files_t
        # FIX: the original default `tuple(0)` raised TypeError (an int is
        # not iterable); the intent was a one-element tuple for the root dir.
        if dirs is None: dirs = (0,)
        if isinstance(dirs, int): dirs = (dirs,)
        # Row-wise membership test replaces the original any/zip construction.
        return files_t[files_t['dir'].isin(dirs)]

    def _subdirs(self, dir_ix):
        """Return the indices of all (recursive) subdirectories of dir_ix (private)."""
        direct = list(self.tree[dir_ix])
        nested = [self._subdirs(ix) for ix in direct]
        nested = functools.reduce(lambda x, y: x + y, nested, [])
        # `nested` is always a flat list after the reduce, so it can simply be
        # appended (the original guarded against an impossible [[]] value).
        return direct + nested

    def subdirs(self, dir_ix, as_str=False, get_files=False):
        """Get all subdirs of the given directory (as index or name), or the files therein.

        dir_ix    - the directory index of the root
        as_str    - if True, directory indices are expanded into names
        get_files - if True, return the files table rather than subdirectories
        """
        sd = self._subdirs(dir_ix)
        if get_files:
            return self.files_bydir([dir_ix] + sd)
        if as_str:
            sd = [self.dirs[ix] for ix in sd]
        return sd

    def files_decorate(self, files_t=None):
        """Add a column `dirn` with full directory names (based on column `dir`)."""
        if files_t is None: files_t = self.files_t
        files_t['dirn'] = [self.dirs[ix] for ix in list(files_t['dir'])]
        return files_t

    def files_byre(self, regex, files_t=None):
        """Filter the files table by a regular expression on the `file` column."""
        if files_t is None: files_t = self.files_t
        matches = [re.match(regex, fn) is not None for fn in list(files_t['file'])]
        return files_t[matches]

    def add_col(self, heading, values):
        """Add a column to files_t (note: the `files` property is _not_ kept in sync).

        heading - the column heading
        values  - the column values (right number of entries), or a function
                  f(filename) applied to every entry of the `file` column
        """
        # NOTE: only plain functions/lambdas are detected as callables here
        # (types.FunctionType), matching the original behaviour.
        if type(values) != types.FunctionType:
            self.files_t[heading] = values
            return self.files_t
        self.files_t[heading] = [values(fn) for fn in self.files_t['file']]
        return self.files_t

    def re_func(self, regex, none_val=None):
        """Factory: return f(s) that yields the first regex group of s, or none_val.

        EXAMPLE
        f = re_func("(.*)\\.jpg$", "0")
        f("test.jpg") -> "test"
        f("test.JPG") -> "0"
        """
        def f(s):
            m = re.match(regex, s)
            if m is None: return none_val
            return m.groups()[0]
        return f
ds = DirStructure('./delme')
ds.dirs
ds.files
ds.tree
ds.dirs_t
ds.files_t
ds.add_col('f2', list(ds.files_t['file']))
ds.add_col('f3', lambda f: "prefix-"+f)
def ab(fn):
m = re.match("[^ab]*(a|b)[^ab]*", fn)
if m == None: return "0"
return m.groups()[0]
ds.add_col('ab', ab)
f = ds.re_func("[^ab]*(a|b)[^ab]*", "-")
f("xxxx")
f("xxaxx")
ds.add_col('ab2', ds.re_func("[^ab]*(a|b)[^ab]*", "-"))
ds.files_bydir([0,1,3])
ds.subdirs(1, as_str = False)
ds.subdirs(1, as_str = True)
ds.subdirs(1, get_files = True)
ds.files_decorate(ds.files_bydir([0,1,3]))
ds.files_byre("f2.*\.jpg$")
set("2012_02 Weekend With Parents in Paris".lower().split()[1:]) - out
out = {"is", "a", "in", "with", "for", "incl", "for", "and", "the"}
def dates(s):
m = re.match("^([0-9]{8,8})_([0-9]{4,6}).*",s)
if m==None: return []
return m.groups()
dates("20121224_120000")
dates("aa_20121224_120000")
dates("20121224_120000_aa")
dates("20121224_1200")
dates("20121224_120")
list("abcdef")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step9: Walking the directory tree
Step10: read the directory structure
Step11: the properties
Step12: directories and files as pandas dataframes
Step13: the methods
|
14,080
|
<ASSISTANT_TASK:>
Python Code:
import itertools
import numpy as np
import pandas as pd
import seaborn as sb
import holoviews as hv
np.random.seed(9221999)
%reload_ext holoviews.ipython
%output holomap='widgets' fig='svg'
%%opts Distribution (hist=False kde_kws=dict(shade=True))
d1 = 25 * np.random.randn(500) + 450
d2 = 45 * np.random.randn(500) + 540
d3 = 55 * np.random.randn(500) + 590
hv.Distribution(d1, label='Blue') *\
hv.Distribution(d2, label='Red') *\
hv.Distribution(d3, label='Yellow')
%%opts Distribution (rug=True kde_kws={'color':'indianred','linestyle':'--'})
hv.Distribution(np.random.randn(10), kdims=['Activity'])
%%opts Bivariate.A (shade=True cmap='Blues') Bivariate.B (shade=True cmap='Reds') Bivariate.C (shade=True cmap='Greens')
hv.Bivariate(np.array([d1, d2]).T, group='A') +\
hv.Bivariate(np.array([d1, d3]).T, group='B') +\
hv.Bivariate(np.array([d2, d3]).T, group='C')
%%opts Bivariate [joint=True] (kind='kde' cmap='Blues')
hv.Bivariate(np.array([d1, d2]).T, group='A')
def sine_wave(n_x, obs_err_sd=1.5, tp_err_sd=.3, phase=0):
x = np.linspace(0+phase, (n_x - 1) / 2+phase, n_x)
y = np.sin(x) + np.random.normal(0, obs_err_sd) + np.random.normal(0, tp_err_sd, n_x)
return y
sine_stack = hv.HoloMap(kdims=['Observation error','Random error'])
cos_stack = hv.HoloMap(kdims=['Observation error', 'Random error'])
for oe, te in itertools.product(np.linspace(0.5,2,4), np.linspace(0.5,2,4)):
sines = np.array([sine_wave(31, oe, te) for _ in range(20)])
sine_stack[(oe, te)] = hv.TimeSeries(sines, label='Sine', group='Activity',
kdims=['Time', 'Observation'])
cosines = np.array([sine_wave(31, oe, te, phase=np.pi) for _ in range(20)])
cos_stack[(oe, te)] = hv.TimeSeries(cosines, group='Activity',label='Cosine',
kdims=['Time', 'Observation'])
%%opts TimeSeries [apply_databounds=True] (ci=95 color='indianred')
sine_stack
%%opts TimeSeries (err_style='ci_bars')
cos_stack.last
cos_stack.last * sine_stack.last
%%opts TimeSeries (err_style='unit_points')
sine_stack * cos_stack
iris = hv.DFrame(sb.load_dataset("iris"))
tips = hv.DFrame(sb.load_dataset("tips"))
titanic = hv.DFrame(sb.load_dataset("titanic"))
flights_data = sb.load_dataset('flights')
dimensions = {'month': hv.Dimension('Month', values=list(flights_data.month[0:12])),
'passengers': hv.Dimension('Passengers', type=int),
'year': hv.Dimension('Year', type=int)}
flights = hv.DFrame(flights_data, dimensions=dimensions)
%output fig='png' dpi=100 size=150
%%opts TimeSeries (err_style='unit_traces' err_palette='husl') HeatMap [xrotation=30 aspect=2]
flights.timeseries(['Year', 'Month'], 'Passengers', label='Airline', group='Passengers') +\
flights.heatmap(['Year', 'Month'], 'Passengers', label='Airline', group='Passengers')
%%opts Regression [apply_databounds=True]
tips.regression('total_bill', 'tip', mdims=['smoker','sex'],
extents=(0, 0, 50, 10), reduce_fn=np.mean).overlay('smoker').layout('sex')
%%opts DFrame (diag_kind='kde' kind='reg' hue='species')
iris.clone(label="Iris Data", plot_type='pairplot')
%%opts DFrame [show_grid=False]
iris.clone(x='species', y='sepal_width', plot_type='boxplot') + iris.clone(x='species', y='sepal_length', plot_type='violinplot')
%%opts DFrame (map=('barplot', 'alive', 'age') col='class' row='sex' hue='pclass' aspect=1.0)
titanic.clone(plot_type='facetgrid')
%%opts DFrame (map=('regplot', 'age', 'fare') col='class' hue='class')
titanic.clone(plot_type='facetgrid')
%%output holomap='widgets' size=200
titanic.clone(titanic.data.dropna(), plot_type='corrplot').holomap(['survived'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now select static and animation backends
Step2: Visualizing Distributions of Data <a id='Histogram'></a>
Step3: Thanks to Seaborn you can choose to plot your distribution as histograms, kernel density estimates, or rug plots
Step4: We can also visualize the same data with Bivariate distributions
Step5: This plot type also has the option of enabling a joint plot with marginal distribution along each axis, and the kind option lets you control whether to visualize the distribution as a scatter, reg, resid, kde or hex plot
Step6: Bivariate plots also support overlaying and animations, so let's generate some two dimensional normally distributed data with varying mean and standard deviation.
Step7: Now we can create HoloMaps of sine and cosine curves with varying levels of observational and independent error.
Step8: First let's visualize the sine stack with a confidence interval
Step9: And the cosine stack with error bars
Step10: Since the %%opts cell magic has applied the style to each object individually, we can now overlay the two with different visualization styles in the same plot
Step11: Let's apply the databounds across the HoloMap again and visualize all the observations as unit points
Step12: Working with pandas DataFrames
Step13: By default the DFrame simply inherits the column names of the data frames and converts them into Dimensions. This works very well as a default, but if you wish to override it, you can either supply an explicit list of key dimensions to the DFrame object or a dimensions dictionary, which maps from the column name to the appropriate Dimension object. In this case, we define a Month Dimension, which defines the ordering of months
Step14: Flight passenger data
Step15: Tipping data <a id='Regression'/>
Step16: When you're dealing with higher dimensional data you can also work with pandas dataframes directly by displaying the DFrame Element directly. This allows you to perform all the standard HoloViews operations on more complex Seaborn and pandas plot types, as explained in the following sections.
Step17: When working with a DFrame object directly, you can select particular columns of your DFrame to visualize by supplying x and y parameters corresponding to the Dimensions or columns you want visualize. Here we'll visualize the sepal_width and sepal_length by species as a box plot and violin plot, respectively.
Step18: Titanic passenger data <a id='Correlation'></a>
Step19: FacetGrids support most Seaborn and matplotlib plot types
Step20: Finally, we can summarize our data using a correlation plot and split out Dimensions using the .holomap method, which groups by the specified dimension, giving you a frame for each value along that Dimension. Here we group by the survived Dimension (with 1 if the passenger survived and 0 otherwise), which thus provides a widget to allow us to compare those two values.
|
14,081
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
    # Progress bar for urlretrieve: tqdm subclass whose `hook` method matches
    # urlretrieve's reporthook(block_num, block_size, total_size) signature.
    last_block = 0  # block index reached by the previous callback
    def hook(self, block_num=1, block_size=1, total_size=None):
        # total_size is only known once the first callback fires.
        self.total = total_size
        # Advance the bar by the bytes downloaded since the last callback.
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 4
sample_id = 2
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
    """Normalize a batch of image data into the range [0, 1].

    x - numpy array of image data with shape (n_images, 32, 32, 3) and
        values in [0, 255]

    Returns a float numpy array of the same shape, scaled by 1/255.
    """
    # Vectorized division replaces the original per-image Python loop into a
    # pre-allocated buffer; dividing by a float yields a float array.
    return x / 255.0
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
    """One hot encode a list of sample labels.

    x - iterable of integer class labels in [0, 9]

    Returns a numpy integer array of shape (len(x), 10) with exactly one
    1 per row.
    """
    number_of_classes = 10
    # FIX: the original used dtype=np.int, which was removed in NumPy >= 1.24.
    labels = np.asarray(x, dtype=np.int64)
    encoded = np.zeros((labels.shape[0], number_of_classes), dtype=np.int64)
    # Fancy indexing sets every hot position in one shot instead of a loop.
    encoded[np.arange(labels.shape[0]), labels] = 1
    return encoded
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
    """
    Return a Tensor for a batch of image input
    : image_shape: Shape of the images (height, width, channels)
    : return: Tensor for image input.
    """
    # TODO: Implement Function
    # Leading None dimension leaves the batch size variable at feed time.
    x = tf.placeholder(tf.float32, shape=[None, image_shape[0], image_shape[1], image_shape[2]], name='x')
    return x
def neural_net_label_input(n_classes):
    """
    Return a Tensor for a batch of label input
    : n_classes: Number of classes
    : return: Tensor for label input.
    """
    # TODO: Implement Function
    # One-hot label placeholder; None leaves the batch size variable.
    y = tf.placeholder(tf.float32, shape = [None, n_classes], name='y')
    return y
def neural_net_keep_prob_input():
    """
    Return a Tensor for keep probability
    : return: Tensor for keep probability.
    """
    # TODO: Implement Function
    # Scalar placeholder for the dropout keep probability.
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    return keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
    """
    Apply convolution then max pooling to x_tensor
    :param x_tensor: TensorFlow Tensor
    :param conv_num_outputs: Number of outputs for the convolutional layer
    :param conv_ksize: kernal size 2-D Tuple for the convolutional layer
    :param conv_strides: Stride 2-D Tuple for convolution
    :param pool_ksize: kernal size 2-D Tuple for pool
    :param pool_strides: Stride 2-D Tuple for pool
    : return: A tensor that represents convolution and max pooling of x_tensor
    """
    # Var prep
    datasize = x_tensor.get_shape().as_list()[0] # if I use it, test fails (unused)
    # Number of input channels: last dimension of the tensor shape.
    channels = x_tensor.get_shape().as_list()[3]
    # Filter weights [height, width, in_channels, out_channels], small random
    # init to break symmetry.
    conv_weights = tf.Variable(tf.truncated_normal([conv_ksize[0],
                                                    conv_ksize[1],
                                                    channels,
                                                    conv_num_outputs],
                                                   mean=0.0,
                                                   stddev=0.05,
                                                   dtype=tf.float32))
    bias = tf.Variable(tf.zeros([conv_num_outputs]))
    padding = 'SAME'
    convu_strides2 = [1, conv_strides[0], conv_strides[1], 1] # batch, height, width, depth
    pooling_strides = [1, pool_strides[0], pool_strides[1], 1]
    # Layers: conv -> bias -> ReLU -> max-pool.
    conv = tf.nn.conv2d(x_tensor, conv_weights, convu_strides2, padding)
    conv = tf.nn.bias_add(conv, bias)
    relu = tf.nn.relu(conv)
    pooling = tf.nn.max_pool(relu, [1, pool_ksize[0], pool_ksize[1], 1], pooling_strides, padding)
    return pooling
    #DO NOT USE THIS IMPLEMENTATION, BUT FOR DEBUGGING OKAY ;)
    #conv2 = tf.layers.conv2d(x_tensor,
    #                         conv_num_outputs,
    #                         conv_ksize,
    #                         conv_strides,
    #                         padding='SAME',
    #                         data_format='channels_last',
    #                         dilation_rate=(1, 1),
    #                         activation=tf.nn.relu,
    #                         use_bias=True,
    #                         kernel_initializer=None,
    #                         bias_initializer=tf.zeros_initializer(),
    #                         kernel_regularizer=None,
    #                         bias_regularizer=None,
    #                         activity_regularizer=None,
    #                         trainable=True,
    #                         name=None,
    #                         reuse=None)
    #pooling2 = tf.layers.max_pooling2d(conv2,
    #                                   pool_ksize,
    #                                   pool_strides,
    #                                   padding='SAME',
    #                                   data_format='channels_last',
    #                                   name=None)
    #return pooling2
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
shape = x_tensor.get_shape().as_list()
dim = np.prod(shape[1:])
return tf.reshape(x_tensor, [-1, dim])
#return tf.contrib.layers.flatten(x_tensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# Variables
datasize = x_tensor.get_shape().as_list()[1]
weights = tf.Variable(tf.truncated_normal([datasize, num_outputs], mean=0.0, stddev=0.05))
bias = tf.Variable(tf.zeros(num_outputs))
#Calc
fc = tf.add(tf.matmul(x_tensor, weights), bias)
return tf.nn.relu(fc)
#return tf.contrib.layers.fully_connected(x_tensor, num_outputs, activation_fn=tf.nn.relu)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# Variables
datasize = x_tensor.get_shape().as_list()[1]
weights = tf.Variable(tf.truncated_normal([datasize, num_outputs], mean=0.0, stddev=0.05))
bias = tf.Variable(tf.zeros(num_outputs))
#Calc
fc = tf.add(tf.matmul(x_tensor, weights), bias)
return fc
#return tf.contrib.layers.fully_connected(x_tensor, num_outputs, activation_fn=None)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
#Variables
pool_ksize = (2, 2)
pool_strides = (2, 2)
conv1_num_outputs = 32
conv1_ksize = (3, 3)
conv1_strides = (1, 1)
conv2_num_outputs = 64
conv2_ksize = (3, 3)
conv2_strides = (1, 1)
conv3_num_outputs = 40
conv3_ksize = (5, 5)
conv3_strides = (1, 1)
num_classes = 10
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
conv1 = conv2d_maxpool(x, conv1_num_outputs, conv1_ksize, conv1_strides, pool_ksize, pool_strides)
conv2 = conv2d_maxpool(conv1, conv2_num_outputs, conv2_ksize, conv2_strides, pool_ksize, pool_strides)
#conv3 = conv2d_maxpool(conv2, conv3_num_outputs, conv3_ksize, conv3_strides, pool_ksize, pool_strides)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
flattened = flatten(conv2)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
fc1 = fully_conn(flattened, 64)
fc1 = tf.nn.dropout(fc1, keep_prob)
fc2 = fully_conn(flattened, 32)
fc2 = tf.nn.dropout(fc1, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
output = fully_conn(fc2, num_classes)
# TODO: return output
return output
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
session.run(optimizer, feed_dict={x: feature_batch,
y: label_batch,
keep_prob: keep_probability})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
loss = session.run(accuracy, feed_dict = {x: feature_batch,
y: label_batch,
keep_prob: 1.})
valid_acc = session.run(accuracy, feed_dict = {x: valid_features,
y: valid_labels,
keep_prob: 1.})
print('Loss: {}, Validation Accuracy: {}'.format(
loss,
valid_acc))
# TODO: Tune Parameters
epochs = 30
batch_size = 128
keep_probability = 0.75
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
14,082
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import scipy.linalg
class GaussianKernel():
Expect input as array of shape `(d,N)` of `N` samples in `d`-dimensional space.
def __init__(self, data, bandwidth=None):
self.data = np.asarray(data)
if len(self.data.shape) == 0:
raise ValueError("Cannot be a scalar")
if len(self.data.shape) == 1:
self.data = self.data[None,:]
self.space_dims, self.num_data_points = self.data.shape
if bandwidth is None:
bandwidth = self._scott()
self.covariance = np.atleast_2d(np.cov(data, rowvar=1, bias=False))
self.inv_cov = scipy.linalg.inv(self.covariance) / (bandwidth**2)
cdet = np.sqrt(scipy.linalg.det(self.covariance))
self.normalisation = cdet * (2 * np.pi * bandwidth * bandwidth) ** (self.space_dims/2)
self.normalisation *= self.num_data_points
def _scott(self):
return self.num_data_points ** (-1 / (4 + self.space_dims))
def __call__(self, t):
t = np.asarray(t)
if len(t.shape) == 0:
if self.space_dims != 1:
raise ValueError("Expect {} dimensional input".format(space_dims))
t = t[None]
if len(t.shape) == 1:
t = t[None,:]
x = self.data[:,:,None] - t[:,None,:]
x = np.sum(x * np.sum(self.inv_cov[:,:,None,None] * x[:,None,:,:], axis=0), axis=0)
return np.sum(np.exp(-x / 2), axis=0) / self.normalisation
data = np.random.random(size=(2,20))
k = GaussianKernel(data)
kernel = scipy.stats.kde.gaussian_kde(data, bw_method="scott")
x = np.random.random(size=(2,100))
np.testing.assert_allclose(kernel(x), k(x))
data = np.random.random(size=20)
k = GaussianKernel(data)
kernel = scipy.stats.kde.gaussian_kde(data, bw_method="scott")
x = np.random.random(size=100)
np.testing.assert_allclose(kernel(x), k(x))
data = np.random.random(size=(3,20))
k = GaussianKernel(data)
kernel = scipy.stats.kde.gaussian_kde(data, bw_method="scott")
x = np.random.random(size=(3,100))
np.testing.assert_allclose(kernel(x), k(x))
N = 20
mu0, mu1 = 1, 5
actual0 = [i for i in range(N) if np.random.random() < 0.5]
x = []
for i in range(N):
if i in actual0:
x.append(np.random.normal(loc=mu0))
else:
x.append(np.random.normal(loc=mu1))
x = np.asarray(x)
x.sort()
def make_p(x, mu0, mu1):
p0 = np.exp(-(x-mu0)**2/2)
p1 = np.exp(-(x-mu1)**2/2)
return np.vstack([p0 / (p0+p1), p1 / (p0+p1)])
def estimate_mu(x, p):
return np.sum(p[0] * x) / np.sum(p[0]), np.sum(p[1] * x) / np.sum(p[1])
hatmu0 = 0
hatmu1 = 10
for _ in range(20):
p = make_p(x, hatmu0, hatmu1)
hatmu0, hatmu1 = estimate_mu(x, p)
hatmu0, hatmu1
p = make_p(x, hatmu0, hatmu1)
class0 = p[0] < p[1]
fig, ax = plt.subplots(figsize=(10,5))
x0 = x[class0]
ax.scatter(x0, [0]*len(x0), color="red")
x1 = x[~class0]
ax.scatter(x1, [0]*len(x1), color="blue")
xx = np.linspace(-2, 7, 100)
ax.plot(xx, np.exp(-(xx-hatmu0)**2/2) / np.sqrt(2*np.pi))
ax.plot(xx, np.exp(-(xx-hatmu1)**2/2) / np.sqrt(2*np.pi))
None
x0 = [x < np.mean(x)]
x1 = [x >= np.mean(x)]
for _ in range(1000):
k0 = scipy.stats.kde.gaussian_kde(x0)
k1 = scipy.stats.kde.gaussian_kde(x1)
p0, p1 = k0(x), k1(x)
p = np.vstack([p0/(p0+p1), p1/(p0+p1)])
choice = np.random.random(size=len(x)) <= p[0]
x0 = x[choice]
x1 = x[~choice]
fig, ax = plt.subplots(figsize=(10,5))
ax.scatter(x0, [0]*len(x0), color="red")
ax.scatter(x1, [0]*len(x1), color="blue")
xx = np.linspace(-2, 7, 100)
ax.plot(xx, k0(xx))
ax.plot(xx, k1(xx))
None
def combine(kernels):
def kernel(t):
return np.mean(np.asarray([k(t) for k in kernels]), axis=0)
return kernel
def make_kernels(x, p, samples=100):
k0, k1 = [], []
for _ in range(samples):
choice = np.random.random(size=len(x)) <= p[0]
x0 = x[choice]
x1 = x[~choice]
k0.append(scipy.stats.kde.gaussian_kde(x0))
k1.append(scipy.stats.kde.gaussian_kde(x1))
return combine(k0), combine(k1)
p0 = np.array(x < np.mean(x), dtype=np.int)
p1 = np.array(x >= np.mean(x), dtype=np.int)
p = np.vstack([p0/(p0+p1), p1/(p0+p1)])
for _ in range(1000):
k0, k1 = make_kernels(x, p)
p0, p1 = k0(x), k1(x)
p = np.vstack([p0/(p0+p1), p1/(p0+p1)])
fig, ax = plt.subplots(figsize=(10,5))
xx = np.linspace(-2, 7, 100)
ax.plot(xx, k0(xx))
ax.plot(xx, k1(xx))
None
def kde(x, p, training):
# `training` used only for bandwidth selection.
s = np.var(training, ddof=1)
scott = len(training) ** (-1 / 5)
band = 1 / (s * scott * scott)
norm = np.sqrt(band / (2 * np.pi)) / np.sum(p)
def kernel(t):
t = np.asarray(t)
if len(t.shape) == 0:
t = t[None]
xx = (x[:,None]-t[None,:])**2 * band
return np.sum(p[:,None] * np.exp(-xx/2), axis=0) * norm
return kernel
p0 = np.array(x < np.mean(x), dtype=np.int)
p1 = np.array(x >= np.mean(x), dtype=np.int)
p = np.vstack([p0/(p0+p1), p1/(p0+p1)])
for _ in range(10000):
k0 = kde(x, p[0], x[p[0] > 0.1])
k1 = kde(x, p[1], x[p[1] > 0.1])
p0, p1 = k0(x), k1(x)
p = np.vstack([p0/(p0+p1), p1/(p0+p1)])
fig, ax = plt.subplots(figsize=(10,5))
class0 = p[0] < p[1]
x0 = x[class0]
ax.scatter(x0, [0]*len(x0), color="red")
x1 = x[~class0]
ax.scatter(x1, [0]*len(x1), color="blue")
xx = np.linspace(-1, 7, 200)
ax.plot(xx, k0(xx), color="blue")
ax.plot(xx, k1(xx), color="blue")
kk0 = scipy.stats.kde.gaussian_kde(x0)
ax.plot(xx, kk0(xx), linestyle="--", color="red")
kk1 = scipy.stats.kde.gaussian_kde(x1)
ax.plot(xx, kk1(xx), linestyle="--", color="red")
None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scipy vs our code
Step2: Stocastic declusting
Step3: Via parameter estimation
Step4: Stocastic declustering
Step5: Stocastic declustering with repeatition
Step6: Mixed KDE
|
14,083
|
<ASSISTANT_TASK:>
Python Code:
# https://esa.github.io/pykep/
# https://github.com/esa/pykep
# https://pypi.python.org/pypi/pykep/
import PyKEP as pk
import numpy as np
from tqdm import tqdm, trange
import matplotlib.pylab as plt
%matplotlib inline
import seaborn as sns
plt.rcParams['figure.figsize'] = 10, 8
from gtoc5 import *
from gtoc5.multiobjective import *
from gtoc5.phasing import *
from paco import *
from paco_traj import *
from experiments import *
from experiments__paco import *
%load_ext watermark
%watermark -v -m -p PyKEP,numpy,scipy,tqdm,pandas,matplotlib,seaborn
# https://github.com/rasbt/watermark
from urllib.request import urlretrieve
import gzip
urlretrieve('http://comopt.ifi.uni-heidelberg.de/software/TSPLIB95/tsp/eil101.tsp.gz', filename='eil101.tsp.gz');
with gzip.open('eil101.tsp.gz') as f:
xy_locs = np.loadtxt(f, skiprows=6, usecols=(1,2), comments='EOF', dtype=np.int)
nr_cities = len(xy_locs)
xy_locs[:5]
distances = np.zeros((nr_cities, nr_cities))
for a in range(nr_cities):
for b in range(a, nr_cities):
distances[a,b] = distances[b,a] = np.linalg.norm(xy_locs[a] - xy_locs[b])
distances[:4, :4]
rng, seed = initialize_rng(seed=None)
print('Seed:', seed)
path_handler = tsp_path(distances, random_state=rng)
aco = paco(path_handler.nr_nodes, path_handler, random_state=rng)
%time (quality, best) = aco.solve(nr_generations=100)
quality
%time (quality, best) = aco.solve(nr_generations=400, reinitialize=False)
quality
xy = np.vstack([xy_locs[best], xy_locs[best][0]]) # to connect back to the start
line, = plt.plot(xy[:,0], xy[:,1], 'go-')
t = mission_to_1st_asteroid(1712)
add_asteroid(t, 4893)
score(t), final_mass(t), tof(t) * DAY2YEAR
resource_rating(t)
score(t) + resource_rating(t)
print(seq(t))
print(seq(t, incl_flyby=False))
t[-1]
pk.epoch(t[-1][2], 'mjd')
import os
from copy import copy
def greedy_step(traj):
traj_asts = set(seq(traj, incl_flyby=False))
progress_bar_args = dict(leave=False, file=os.sys.stdout, desc='attempting score %d' % (score(traj)+1))
extended = []
for a in trange(len(asteroids), **progress_bar_args):
if a in traj_asts:
continue
tt = copy(traj)
if add_asteroid(tt, next_ast=a, use_cache=False):
extended.append(tt)
return max(extended, key=resource_rating, default=[])
# measure time taken at one level to attempt legs towards all asteroids (that aren't already in the traj.)
%time _ = greedy_step(mission_to_1st_asteroid(1712))
def greedy_search(first_ast):
t = mission_to_1st_asteroid(first_ast)
while True:
tt = greedy_step(t)
if tt == []:
# no more asteroids could be added
return t
t = tt
%time T = greedy_search(first_ast=1712)
score(T), resource_rating(T), final_mass(T), tof(T) * DAY2YEAR
print(seq(T, incl_flyby=False))
t = mission_to_1st_asteroid(1712)
r = rate__orbital_2(dep_ast=t[-1][0], dep_t=t[-1][2], leg_dT=125)
r[seq(t)] = np.inf # (exclude bodies already visited)
r.argsort()[:5]
[add_asteroid(copy(t), a) for a in r.argsort()[:5]]
def narrowed_greedy_step(traj, top=10):
traj_asts = set(seq(traj, incl_flyby=False))
extended = []
ratings = rate__orbital_2(dep_ast=traj[-1][0], dep_t=traj[-1][2], leg_dT=125)
for a in ratings.argsort()[:top]:
if a in traj_asts:
continue
tt = copy(traj)
if add_asteroid(tt, next_ast=a, use_cache=False):
extended.append(tt)
return max(extended, key=resource_rating, default=[])
def narrowed_greedy_search(first_ast, **kwargs):
t = mission_to_1st_asteroid(first_ast)
while True:
tt = narrowed_greedy_step(t, **kwargs)
if tt == []:
# no more asteroids could be added
return t
t = tt
# measure time taken at one level to attempt legs towards the best `top` asteroids
%time _ = narrowed_greedy_step(mission_to_1st_asteroid(1712), top=10)
%time T = narrowed_greedy_search(first_ast=1712, top=10)
score(T), resource_rating(T), final_mass(T), tof(T) * DAY2YEAR
print(seq(T, incl_flyby=False))
gtoc_ph = init__path_handler(multiobj_evals=True)
# configuring Beam P-ACO to behave as a deterministic multi-objective Beam Search
_args = {
'beam_width': 20,
'branch_factor': 250,
'alpha': 0.0, # 0.0: no pheromones used
'beta': 1.0,
'prob_greedy': 1.0, # 1.0: deterministic, greedy branching decisions
}
bpaco = beam_paco_pareto(nr_nodes=len(asteroids), path_handler=gtoc_ph, random_state=None, **_args)
# start the search
# given we're running the algoritm in deterministic mode, we execute it for a single generation
%time best_pf = bpaco.solve(nr_generations=1)
# being this a `_pareto` class, .best returns a Pareto front
# pick the first solution from the Pareto front
best_eval, best = best_pf[0]
# Evaluation of the best found solution
# (score, mass consumed, time of flight)
best_eval
# sequence of asteroids visited (0 is the Earth)
print(seq(best, incl_flyby=False))
# mission data structure, up to the full scoring of the first two asteroids
best[:5]
%%javascript
$.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js')
// https://github.com/kmahelona/ipython_notebook_goodies
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Taking a look at our Python environment.
Step2: Solving a TSPLIB problem with P-ACO
Step3: Load each city's (x, y) coordinates.
Step4: Calculate distances matrix.
Step5: Instantiate the TSP "path handler" with this distances matrix, and P-ACO, with its default parameters.
Step6: Solve it.
Step7: Continue refining the solution for a few more generations.
Step8: Let's see what we found.
Step9: Basic steps for assembling GTOC5 trajectories
Step10: We can evaluate this trajectory with respect to its score (number of asteroids fully explored), final mass (in kg), and time of flight (here converted from days to years).
Step11: An aggregation of the mission's mass and time costs can be obtained with resource_rating(). It measures the extent to which the mass and time budgets available for the mission have been depleted by the trajectory. It produces a value of 1.0 at the start of the mission, and a value of 0.0 when the mission has exhausted its 3500 kg of available mass, or its maximum duration of 15 years.
Step12: As the score increments discretely by 1.0 with each added asteroid, and the resource rating evaluates mass and time available in a range of [0, 1], both can be combined to give a single-objective evaluation of the trajectory, that should be maximized
Step13: Calling seq(), we can see either the full sequence of asteroids visited in each leg, or just the distinct asteroids visited in the mission. In this example, we see that the mission starts on Earth (id 0), performs a rendezvous with asteroid 1712, followed by a flyby of the same asteroid, and then repeats the pattern at asteroid 4893.
Step14: The trajectory data structure built by mission_to_1st_asteroid() and add_asteroid() is a list of tuples summarizing the evolution of the spacecraft's state. It provides the minimal sufficient information from which a more detailed view can be reproduced, if so desired. Each tuple contains
Step15: Epochs are given here as Modified Julian Dates (MJD), and can be converted as
Step16: Greedy search
Step17: Greedy search gave us a trajectory that is able to visit 14 distinct asteroids. However, by the 14th, the spacecraft finds itself unable to find a viable target to fly to next, even though it has 84.8 kg of mass still available (the spacecraft itself weighs 500 kg, so the mission cannot go below that value), and 2 years remain in its 15 year mission.
Step18: We use here the (improved) orbital phasing indicator to rate destinations with respect to the estimated ΔV of hypothetical legs that would depart from dep_ast, at epoch dep_t, towards each possible asteroid, arriving there within leg_dT days. We don't know exactly how long the transfer time chosen by add_asteroid() would be, but we take leg_dT=125 days as reference transfer time.
Step19: Below are the 5 asteroids the indicator estimates would be most easily reachable. As we've seen above in the results from the greedy search, asteroid 4893, here the 2nd best rated alternative, would indeed be the target reachable with lowest ΔV.
Step20: The indicator is however not infallible. If we attempt to go from asteroid 1712 towards each of these asteroids, we find that none of them are actually reachable, except for 4893! Still, the indicator allows us to narrow our focus considerably.
Step21: Armed with the indicator, we can reimplement the greedy search, so it will only optimize legs towards a number of top rated alternatives, and then proceed with the best out of those.
Step22: We were able to find another score 14 trajectory, but this time it took us ~1 second, whereas before it was taking us 2 and a half minutes.
Step23: Generate the Table of Contents
|
14,084
|
<ASSISTANT_TASK:>
Python Code:
# Import the toolkit and the full Porter Stemmer library
import nltk
from nltk.stem.porter import *
p_stemmer = PorterStemmer()
words = ['run','runner','running','ran','runs','easily','fairly']
for word in words:
print(word+' --> '+p_stemmer.stem(word))
from nltk.stem.snowball import SnowballStemmer
# The Snowball Stemmer requires that you pass a language parameter
s_stemmer = SnowballStemmer(language='english')
words = ['run','runner','running','ran','runs','easily','fairly']
# words = ['generous','generation','generously','generate']
for word in words:
print(word+' --> '+s_stemmer.stem(word))
words = ['consolingly']
print('Porter Stemmer:')
for word in words:
print(word+' --> '+p_stemmer.stem(word))
print('Porter2 Stemmer:')
for word in words:
print(word+' --> '+s_stemmer.stem(word))
phrase = 'I am meeting him tomorrow at the meeting'
for word in phrase.split():
print(word+' --> '+p_stemmer.stem(word))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color=green>Note how the stemmer recognizes "runner" as a noun, not a verb form or participle. Also, the adverbs "easily" and "fairly" are stemmed to the unusual root "easili" and "fairli"</font>
Step2: <font color=green>In this case the stemmer performed the same as the Porter Stemmer, with the exception that it handled the stem of "fairly" more appropriately with "fair"</font>
Step3: Stemming has its drawbacks. If given the token saw, stemming might always return saw, whereas lemmatization would likely return either see or saw depending on whether the use of the token was as a verb or a noun. As an example, consider the following
|
14,085
|
<ASSISTANT_TASK:>
Python Code:
import vcsn
language = '\e+a+b+abc+abcd+abdc'
b = vcsn.context('lal_char, b')
B = vcsn.context('law_char, b')
B.polynomial(language).trie()
b.expression(language).standard().determinize().strip()
series = '<2>\e + <3>a + <4>b + <5>abc + <6>abcd + <7>abdc'
q = vcsn.context('lal_char, z')
Q = vcsn.context('law_char, z')
Q.polynomial(series).trie()
q.expression(series).standard().determinize().strip()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Boolean weights (finite language)
Step2: Weighted polynomials of words (finite series)
|
14,086
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.io import vform
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.plotting import Figure, output_file, show
output_file("callback.html")
x = [x*0.005 for x in range(0, 200)]
y = x
source = ColumnDataSource(data=dict(x=x, y=y))
plot = Figure(plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code=
var data = source.get('data');
var f = cb_obj.get('value')
x = data['x']
y = data['y']
for (i = 0; i < x.length; i++) {
y[i] = Math.pow(x[i], f)
}
source.trigger('change');
)
slider = Slider(start=0.1, end=4, value=1, step=.1, title="power", callback=callback)
layout = vform(slider, plot)
show(layout)
from bokeh.models.widgets import Slider, RadioGroup, Button
from bokeh.io import output_file, show, vform
from bokeh.plotting import figure
output_file("queryWise.html")
band = RadioGroup(labels=["3.5 microns", "4.5 microns",
"12 microns", "22 microns"], active=0)
fov = Slider(start=5, end=15, value=5, step=.25, title="Field of View (arcmin)")
ra = Slider(start=0, end=359, value=120, step=1, title="Right Ascension (degrees)")
dec = Slider(start=-90, end=90, value=0, step=1, title="Declination (degrees)")
button = Button(label="Submit")
p = figure(plot_width=400, plot_height=400,
tools="tap", title="WISE sources")
p.circle(ra.value, dec.value)
show(vform(fov,band,ra,dec,button, p))
from ipywidgets import *
from IPython.display import display
fov = FloatSlider(value = 5.0,
min = 5.0,
max = 15.0,
step = 0.25)
display(fov)
%matplotlib notebook
import pandas as pd
import matplotlib.pyplot as plt
from ipywidgets import *
from IPython.display import display
##from jnotebook import display
from IPython.html import widgets
plt.style.use('ggplot')
NUMBER_OF_PINGS = 4
#displaying the text widget
text = widgets.Text(description="Domain to ping", width=200)
display(text)
#preparing the plot
data = pd.DataFrame()
x = range(1,NUMBER_OF_PINGS+1)
plots = dict()
fig, ax = plt.subplots()
plt.xlabel('iterations')
plt.ylabel('ms')
plt.xticks(x)
plt.show()
#preparing a container to put in created checkbox per domain
checkboxes = []
cb_container = widgets.HBox()
display(cb_container)
#add button that updates the graph based on the checkboxes
button = widgets.Button(description="Update the graph")
#function to deal with the added domain name
def handle_submit(sender):
#a part of the magic inside python : pinging
res = !ping -c {NUMBER_OF_PINGS} {text.value}
hits = res.grep('64 bytes').fields(-2).s.replace("time=","").split()
if len(hits) == 0:
print("Domain gave error on pinging")
else:
#rebuild plot based on ping result
data[text.value] = hits
data[text.value] = data[text.value].astype(float)
plots[text.value], = ax.plot(x, data[text.value], label=text.value)
plt.legend()
plt.draw()
#add a new checkbox for the new domain
checkboxes.append(widgets.Checkbox(description = text.value, value=True, width=90))
cb_container.children=[i for i in checkboxes]
if len(checkboxes) == 1:
display(button)
#function to deal with the checkbox update button
def on_button_clicked(b):
for c in cb_container.children:
if not c.value:
plots[c.description].set_visible(False)
else:
plots[c.description].set_visible(True)
plt.legend()
plt.draw()
button.on_click(on_button_clicked)
text.on_submit(handle_submit)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data apps
Step2: The next cell shows the start of how to set up something like the WISE app.
Step3: The rest of the notebook is not currently working
Step4: Example from the blog post
|
14,087
|
<ASSISTANT_TASK:>
Python Code:
# restart your notebook if prompted on Colab
try:
import verta
except ImportError:
!pip install verta
import os
# Ensure credentials are set up, if not, use below
# os.environ['VERTA_EMAIL'] =
# os.environ['VERTA_DEV_KEY'] =
# os.environ['VERTA_HOST'] =
from verta import Client
client = Client(os.environ['VERTA_HOST'])
# Suppose we have a cubic function, 3x^3 + 2x + 5 that we want to implement
def cubic_transform(x):
return 3*(x**3) + 2*x + 5
from verta.registry import VertaModelBase
class CubicFunction(VertaModelBase):
def __init__(self, artifacts=None):
pass
def predict(self, input_data):
output_data = []
for input_data_point in input_data:
output_data_point = cubic_transform(input_data_point)
output_data.append(output_data_point)
return output_data
registered_model = client.get_or_create_registered_model(
name="cubic-function", labels=["data-transform"])
from verta.environment import Python
model_version = registered_model.create_standard_model(
CubicFunction,
environment=Python(requirements=[]),
name="v1",
)
cubic_function_endpoint = client.get_or_create_endpoint("cubic")
cubic_function_endpoint.update(model_version, wait=True)
deployed_model = cubic_function_endpoint.get_deployed_model()
deployed_model.predict([3, 0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Define data transformation
Step2: 1. b Wrap data transform in a class deriving from VertaModelBase
Step3: 2. Define a registered model for deployment
Step4: 3. Deploy model to endpoint
|
14,088
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title Install { display-mode: "form" }
TF_Installation = 'System' #@param ['TF Nightly', 'TF Stable', 'System']
if TF_Installation == 'TF Nightly':
!pip install -q --upgrade tf-nightly
print('Installation of `tf-nightly` complete.')
elif TF_Installation == 'TF Stable':
!pip install -q --upgrade tensorflow
print('Installation of `tensorflow` complete.')
elif TF_Installation == 'System':
pass
else:
raise ValueError('Selection Error: Please select a valid '
'installation option.')
#@title Install { display-mode: "form" }
TFP_Installation = "System" #@param ["Nightly", "Stable", "System"]
if TFP_Installation == "Nightly":
!pip install -q tfp-nightly
print("Installation of `tfp-nightly` complete.")
elif TFP_Installation == "Stable":
!pip install -q --upgrade tensorflow-probability
print("Installation of `tensorflow-probability` complete.")
elif TFP_Installation == "System":
pass
else:
raise ValueError("Selection Error: Please select a valid "
"installation option.")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import os
from six.moves import urllib
import matplotlib.pyplot as plt; plt.style.use('ggplot')
import numpy as np
import pandas as pd
import seaborn as sns; sns.set_context('notebook')
import tensorflow_datasets as tfds
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
if tf.test.gpu_device_name() != '/device:GPU:0':
print("We'll just use the CPU for this run.")
else:
print('Huzzah! Found GPU: {}'.format(tf.test.gpu_device_name()))
def load_and_preprocess_radon_dataset(state='MN'):
    """Load the Radon dataset from TensorFlow Datasets and preprocess it.

    Following the examples in "Bayesian Data Analysis" (Gelman, 2007), we filter
    to Minnesota data and preprocess to obtain the following features:

    - `county`: Name of county in which the measurement was taken.
    - `floor`: Floor of house (0 for basement, 1 for first floor) on which the
      measurement was taken.

    The target variable is `log_radon`, the log of the Radon measurement in the
    house.
    """
    # NOTE: the docstring above had lost its triple quotes, which made the
    # function body a syntax error; the quotes are restored here.
    ds = tfds.load('radon', split='train')
    radon_data = tfds.as_dataframe(ds)
    # Strip the 'feat.' prefix that TFDS prepends to feature column names.
    radon_data.rename(lambda s: s[9:] if s.startswith('feat') else s, axis=1, inplace=True)
    df = radon_data[radon_data.state == state.encode()].copy()
    # Clamp non-positive activity readings so the log below is well-defined.
    df['radon'] = df.activity.apply(lambda x: x if x > 0. else 0.1)
    # Make county names look nice.
    df['county'] = df.county.apply(lambda s: s.decode()).str.strip().str.title()
    # Remap categories to start from 0 and end at max(category).
    df['county'] = df.county.astype(pd.api.types.CategoricalDtype())
    df['county_code'] = df.county.cat.codes
    # Radon levels are all positive, but log levels are unconstrained.
    df['log_radon'] = df['radon'].apply(np.log)
    # Drop columns we won't use and tidy the index.
    columns_to_keep = ['log_radon', 'floor', 'county', 'county_code']
    df = df[columns_to_keep].reset_index(drop=True)
    return df
df = load_and_preprocess_radon_dataset()
df.head()
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 4))
df.groupby('floor')['log_radon'].plot(kind='density', ax=ax1);
ax1.set_xlabel('Measured log(radon)')
ax1.legend(title='Floor')
df['floor'].value_counts().plot(kind='bar', ax=ax2)
ax2.set_xlabel('Floor where radon was measured')
ax2.set_ylabel('Count')
fig.suptitle("Distribution of log radon and floors in the dataset");
fig, ax = plt.subplots(figsize=(22, 5));
county_freq = df['county'].value_counts()
county_freq.plot(kind='bar', ax=ax)
ax.set_xlabel('County')
ax.set_ylabel('Number of readings');
features = df[['county_code', 'floor']].astype(int)
labels = df[['log_radon']].astype(np.float32).values.flatten()
def make_joint_distribution_coroutine(floor, county, n_counties, n_floors):
    """Build the hierarchical GLMM joint distribution for the radon data.

    Args:
        floor: array of floor indicators per observation (0 = basement,
            1 = first floor).
        county: integer array of county codes used to index the per-county
            random effects.
        n_counties: number of distinct counties (length of the
            random-effect vector).
        n_floors: number of distinct floor values; not referenced in the
            body, kept only for the caller's signature.

    Returns:
        A `tfd.JointDistributionCoroutineAutoBatched` over
        (scale_prior, intercept, floor_weight, county_prior, likelihood).
    """
    def model():
        # Hyperprior on the spread of the per-county random effects.
        county_scale = yield tfd.HalfNormal(scale=1., name='scale_prior')
        intercept = yield tfd.Normal(loc=0., scale=1., name='intercept')
        floor_weight = yield tfd.Normal(loc=0., scale=1., name='floor_weight')
        # One random intercept per county, all sharing the sampled scale.
        county_prior = yield tfd.Normal(loc=tf.zeros(n_counties),
                                        scale=county_scale,
                                        name='county_prior')
        # Look up each observation's county effect.
        random_effect = tf.gather(county_prior, county, axis=-1)
        fixed_effect = intercept + floor_weight * floor
        linear_response = fixed_effect + random_effect
        # Observation noise scale is fixed at 1.
        yield tfd.Normal(loc=linear_response, scale=1., name='likelihood')
    return tfd.JointDistributionCoroutineAutoBatched(model)
joint = make_joint_distribution_coroutine(
features.floor.values, features.county_code.values, df.county.nunique(),
df.floor.nunique())
# Define a closure over the joint distribution
# to condition on the observed labels.
def target_log_prob_fn(*args):
    """Joint log-prob of `args` with the likelihood pinned to the observed labels."""
    return joint.log_prob(*args, likelihood=labels)
# Initialize locations and scales randomly with `tf.Variable`s and
# `tfp.util.TransformedVariable`s.
_init_loc = lambda shape=(): tf.Variable(
tf.random.uniform(shape, minval=-2., maxval=2.))
_init_scale = lambda shape=(): tfp.util.TransformedVariable(
initial_value=tf.random.uniform(shape, minval=0.01, maxval=1.),
bijector=tfb.Softplus())
n_counties = df.county.nunique()
surrogate_posterior = tfd.JointDistributionSequentialAutoBatched([
tfb.Softplus()(tfd.Normal(_init_loc(), _init_scale())), # scale_prior
tfd.Normal(_init_loc(), _init_scale()), # intercept
tfd.Normal(_init_loc(), _init_scale()), # floor_weight
tfd.Normal(_init_loc([n_counties]), _init_scale([n_counties]))]) # county_prior
optimizer = tf.optimizers.Adam(learning_rate=1e-2)
losses = tfp.vi.fit_surrogate_posterior(
target_log_prob_fn,
surrogate_posterior,
optimizer=optimizer,
num_steps=3000,
seed=42,
sample_size=2)
(scale_prior_,
intercept_,
floor_weight_,
county_weights_), _ = surrogate_posterior.sample_distributions()
print(' intercept (mean): ', intercept_.mean())
print(' floor_weight (mean): ', floor_weight_.mean())
print(' scale_prior (approx. mean): ', tf.reduce_mean(scale_prior_.sample(10000)))
fig, ax = plt.subplots(figsize=(10, 3))
ax.plot(losses, 'k-')
ax.set(xlabel="Iteration",
ylabel="Loss (ELBO)",
title="Loss during training",
ylim=0);
county_counts = (df.groupby(by=['county', 'county_code'], observed=True)
.agg('size')
.sort_values(ascending=False)
.reset_index(name='count'))
means = county_weights_.mean()
stds = county_weights_.stddev()
fig, ax = plt.subplots(figsize=(20, 5))
for idx, row in county_counts.iterrows():
mid = means[row.county_code]
std = stds[row.county_code]
ax.vlines(idx, mid - std, mid + std, linewidth=3)
ax.plot(idx, means[row.county_code], 'ko', mfc='w', mew=2, ms=7)
ax.set(
xticks=np.arange(len(county_counts)),
xlim=(-1, len(county_counts)),
ylabel="County effect",
title=r"Estimates of county effects on log radon levels. (mean $\pm$ 1 std. dev.)",
)
ax.set_xticklabels(county_counts.county, rotation=90);
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(np.log1p(county_counts['count']), stds.numpy()[county_counts.county_code], 'o')
ax.set(
ylabel='Posterior std. deviation',
xlabel='County log-count',
title='Having more observations generally\nlowers estimation uncertainty'
);
%%shell
exit # Trick to make this block not execute.
radon = read.csv('srrs2.dat', header = TRUE)
radon = radon[radon$state=='MN',]
radon$radon = ifelse(radon$activity==0., 0.1, radon$activity)
radon$log_radon = log(radon$radon)
# install.packages('lme4')
library(lme4)
fit <- lmer(log_radon ~ 1 + floor + (1 | county), data=radon)
fit
# Linear mixed model fit by REML ['lmerMod']
# Formula: log_radon ~ 1 + floor + (1 | county)
# Data: radon
# REML criterion at convergence: 2171.305
# Random effects:
# Groups Name Std.Dev.
# county (Intercept) 0.3282
# Residual 0.7556
# Number of obs: 919, groups: county, 85
# Fixed Effects:
# (Intercept) floor
# 1.462 -0.693
print(pd.DataFrame(data=dict(intercept=[1.462, tf.reduce_mean(intercept_.mean()).numpy()],
floor=[-0.693, tf.reduce_mean(floor_weight_.mean()).numpy()],
scale=[0.3282, tf.reduce_mean(scale_prior_.sample(10000)).numpy()]),
index=['lme4', 'vi']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 변분 추론으로 일반화된 선형 혼합 효과 모델 맞춤 조정하기
Step2: 요약
Step3: 또한 GPU의 가용성을 빠르게 확인합니다.
Step5: 데이터세트 얻기
Step6: GLMM 패밀리 전문화하기
Step7: 지리에 관한 내용을 포함하여 모델을 좀 더 정교하게 만드는 것이 아마도 더 좋을 것입니다. 라돈은 땅에 존재할 수 있는 우라늄의 붕괴 사슬의 일부이므로 지리를 설명하는 것이 중요합니다.
Step8: 이 모델을 맞춤 조정하면 county_effect 벡터는 훈련 샘플이 거의 없는 카운티에 대한 결과를 기억하게 될 것입니다. 아마도 과대적합이 발생하여 일반화가 불량할 수 있습니다.
Step9: 모델을 지정합니다.
Step10: 사후 확률 대리를 지정합니다.
Step11: 이 셀은 다음과 같이 tfp.experimental.vi.build_factored_surrogate_posterior로 대체할 수 있습니다.
Step12: 추정된 평균 카운티(county) 효과와 해당 평균의 불확실성을 플롯할 수 있습니다. 이를 관찰 횟수로 정렬했으며 가장 큰 수는 왼쪽에 있습니다. 관측치가 많은 카운티에서는 불확실성이 작지만, 관측치가 한두 개만 있는 카운티에서는 불확실성이 더 큽니다.
Step13: 실제로 추정된 표준 편차에 대한 로그 수의 관측치를 플롯하여 이를 더 직접적으로 볼 수 있으며 관계가 거의 선형임을 알 수 있습니다.
Step14: R에서 lme4와 비교하기
Step15: 다음 표에 결과가 요약되어 있습니다.
|
14,089
|
<ASSISTANT_TASK:>
Python Code:
import pymongo
from pymongo import MongoClient
import time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import csv
Client = MongoClient("mongodb://bridges:readonly@nbi-mongo.admin/bridge")
db = Client.bridge
collection = db["bridges"]
def getData(state):
    """Fetch 1992-2016 NBI condition ratings for one state as a DataFrame.

    Queries the MongoDB `collection` for the given state code, keeps only
    the structure number, survey year, year built, and the deck /
    superstructure / substructure ratings, and derives an `Age` column
    (survey year minus year built).
    """
    year_filter = {"year": {"$gt": 1991, "$lt": 2017}}
    state_filter = {"stateCode": state}
    wanted_fields = {
        "_id": 0,
        "structureNumber": 1,
        "yearBuilt": 1,
        "deck": 1,            # rating of deck
        "year": 1,            # survey year
        "substructure": 1,    # rating of substructure
        "superstructure": 1,  # rating of superstructure
    }
    pipeline = [{"$match": {"$and": [year_filter, state_filter]}},
                {"$project": wanted_fields}]
    records = collection.aggregate(pipeline)
    ratings = pd.DataFrame(list(records))
    ratings['Age'] = ratings['year'] - ratings['yearBuilt']
    return ratings
def getMeanRatings(state, startAge, endAge, startYear, endYear):
    """Compute per-age mean and std of deck/substructure/superstructure ratings.

    Pulls condition ratings for `state` via `getData`, drops 'N'/'NA'
    ratings, restricts to survey years [startYear, endYear], and for every
    age in [startAge, endAge] aggregates each component's rating.

    Returns:
        [MeanDeck, StdDeck, MeanSubstructure, StdSubstructure,
         MeanSuperstructure, StdSuperstructure], each a list indexed by
        age - startAge.
    """
    def _mean_std(ratings):
        # Ratings arrive as strings; convert to int before aggregating.
        values = [int(r) for r in ratings]
        return np.mean(values), np.std(values)

    conditionRatings = getData(state)
    conditionRatings = conditionRatings[['structureNumber', 'Age', 'superstructure',
                                         'deck', 'substructure', 'year']]
    # 'N' (not applicable) and 'NA' entries cannot be averaged; drop them.
    for component in ('superstructure', 'substructure', 'deck'):
        conditionRatings = conditionRatings.loc[
            ~conditionRatings[component].isin(['N', 'NA'])]
    # Keep only the requested survey-year window.
    surveyed = conditionRatings.loc[
        conditionRatings['year'].isin(range(startYear, endYear + 1))]

    MeanDeck, StdDeck = [], []
    MeanSubstructure, StdSubstructure = [], []
    MeanSuperstructure, StdSuperstructure = [], []
    for age in range(startAge, endAge + 1):
        # All bridges surveyed at exactly this age.
        sameAge = surveyed.loc[surveyed['Age'] == age]
        meanDeck, stdDeck = _mean_std(sameAge['deck'])
        meanSub, stdSub = _mean_std(sameAge['substructure'])
        meanSup, stdSup = _mean_std(sameAge['superstructure'])
        MeanDeck.append(meanDeck)
        StdDeck.append(stdDeck)
        MeanSubstructure.append(meanSub)
        StdSubstructure.append(stdSub)
        MeanSuperstructure.append(meanSup)
        StdSuperstructure.append(stdSup)
    return [MeanDeck, StdDeck, MeanSubstructure, StdSubstructure,
            MeanSuperstructure, StdSuperstructure]
#Massachuesetts, Connecticut, Maine, New Hampshire, Rhode Island, Vermont, New Jersey, New York, Pennsylvania
states = ['25','09','23','33','44','50','34','36','42']
# state code to state abbreviation
stateNameDict = {'25':'MA',
'04':'AZ',
'08':'CO',
'38':'ND',
'09':'CT',
'19':'IA',
'26':'MI',
'48':'TX',
'35':'NM',
'17':'IL',
'51':'VA',
'23':'ME',
'16':'ID',
'36':'NY',
'56':'WY',
'29':'MO',
'39':'OH',
'28':'MS',
'11':'DC',
'21':'KY',
'18':'IN',
'06':'CA',
'47':'TN',
'12':'FL',
'24':'MD',
'34':'NJ',
'46':'SD',
'13':'GA',
'55':'WI',
'30':'MT',
'54':'WV',
'15':'HI',
'32':'NV',
'37':'NC',
'10':'DE',
'33':'NH',
'44':'RI',
'50':'VT',
'42':'PA',
'05':'AR',
'20':'KS',
'45':'SC',
'22':'LA',
'40':'OK',
'72':'PR',
'41':'OR',
'27':'MN',
'53':'WA',
'01':'AL',
'31':'NE',
'02':'AK',
'49':'UT'
}
def getBulkMeanRatings(states, stateNameDict):
    """Assemble per-state deterioration tables (ages 1-60) for each component.

    Returns six DataFrames in the order: mean deck, mean superstructure,
    mean substructure, std deck, std superstructure, std substructure.
    Each has an 'Age' column plus one column per state abbreviation.
    """
    df_mean_deck = pd.DataFrame({'Age': range(1, 61)})
    df_mean_sup = pd.DataFrame({'Age': range(1, 61)})
    df_mean_sub = pd.DataFrame({'Age': range(1, 61)})
    df_std_deck = pd.DataFrame({'Age': range(1, 61)})
    df_std_sup = pd.DataFrame({'Age': range(1, 61)})
    df_std_sub = pd.DataFrame({'Age': range(1, 61)})
    for state in states:
        stats = getMeanRatings(state, 1, 100, 1992, 2016)
        meanDeck, stdDeck, meanSub, stdSub, meanSup, stdSup = stats
        abbrev = stateNameDict[state]
        # Ratings were computed up to age 100; keep only the first 60 years.
        df_mean_deck[abbrev] = meanDeck[:60]
        df_mean_sup[abbrev] = meanSup[:60]
        df_mean_sub[abbrev] = meanSub[:60]
        df_std_deck[abbrev] = stdDeck[:60]
        df_std_sup[abbrev] = stdSup[:60]
        df_std_sub[abbrev] = stdSub[:60]
    return df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub
df_mean_deck, df_mean_sup, df_mean_sub, df_std_deck, df_std_sup, df_std_sub = getBulkMeanRatings(states, stateNameDict)
%matplotlib inline
#states = ['25','09','23','33','44','50','34','36','42']
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_deck['Age'],df_mean_deck[stateName], color = palette[index])
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Deck Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Deck Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
#palette = plt.get_cmap('gist_ncar')
palette = [
'blue', 'blue', 'green','magenta','cyan','brown','grey','red','silver','purple','gold','black','olive'
]
# multiple line plot
num=1
for column in df_mean_deck.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_deck['Age'], df_mean_deck[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(10) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7,10]:
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Deck Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Deck Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
#states = ['31','19','17','18','20','26','27','29','38','46','39','55']
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_sup['Age'],df_mean_sup[stateName], color = palette[index])
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Superstructure Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Superstructure Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
#palette = plt.get_cmap('gist_ncar')
palette = [
'blue',
'blue',
'green',
'magenta',
'cyan',
'brown',
'grey',
'red',
'silver',
'purple',
'gold',
'black',
'olive'
]
# multiple line plot
num=1
for column in df_mean_sup.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_sup['Age'], df_mean_sup[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(10) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7,10]:
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Superstructure Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Superstructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
states = ['25','09','23','33','44','50','34','36','42']
palette = [ 'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey',
'red','silver','purple', 'gold', 'black','olive' ]
plt.figure(figsize = (10,8))
index = 0
for state in states:
index = index + 1
stateName = stateNameDict[state]
plt.plot(df_mean_sub['Age'],df_mean_sub[stateName], color = palette[index], linewidth=4)
plt.legend([stateNameDict[state] for state in states],loc='upper right', ncol = 2)
plt.xlim(1,60)
plt.ylim(1,9)
plt.title('Mean Substructure Rating Vs Age')
plt.xlabel('Age')
plt.ylabel('Mean Substructure Rating')
plt.figure(figsize = (16,12))
plt.xlabel('Age')
plt.ylabel('Mean')
# Initialize the figure
plt.style.use('seaborn-darkgrid')
# create a color palette
palette = [
'blue', 'blue', 'green', 'magenta', 'cyan', 'brown', 'grey', 'red', 'silver', 'purple', 'gold', 'black','olive'
]
# multiple line plot
num=1
for column in df_mean_sub.drop('Age', axis=1):
# Find the right spot on the plot
plt.subplot(4,3, num)
# Plot the lineplot
plt.plot(df_mean_sub['Age'], df_mean_sub[column], marker='', color=palette[num], linewidth=4, alpha=0.9, label=column)
# Same limits for everybody!
plt.xlim(1,60)
plt.ylim(1,9)
# Not ticks everywhere
if num in range(7) :
plt.tick_params(labelbottom='off')
if num not in [1,4,7] :
plt.tick_params(labelleft='off')
# Add title
plt.title(column, loc='left', fontsize=12, fontweight=0, color=palette[num])
plt.text(30, -1, 'Age', ha='center', va='center')
plt.text(1, 4, 'Mean Substructure Rating', ha='center', va='center', rotation='vertical')
num = num + 1
# general title
plt.suptitle("Mean Substructure Rating vs Age \nIndividual State Deterioration Curves", fontsize=13, fontweight=0, color='black', style='italic', y=1.02)
def getDataOneYear(state):
    """Fetch 2016 NBI condition ratings for one state as a DataFrame.

    Projects the design-construction type and wearing-surface type under
    friendly column names alongside the deck/superstructure/substructure
    ratings, and derives an `Age` column (survey year minus year built).
    """
    pipeline = [{"$match": {"$and": [{"year": {"$gt": 2015, "$lt": 2017}},
                                     {"stateCode": state}]}},
                {"$project": {"_id": 0,
                              # BUGFIX: a duplicate 'Structure Type': 1 entry
                              # later in this dict literal silently shadowed
                              # the rename below; it has been removed so the
                              # rename actually reaches MongoDB.
                              "Structure Type": "$structureTypeMain.typeOfDesignConstruction",
                              "Type of Wearing Surface": "$wearingSurface/ProtectiveSystem.typeOfWearingSurface",
                              "structureNumber": 1,
                              "yearBuilt": 1,
                              "deck": 1,            # rating of deck
                              "year": 1,            # survey year
                              "substructure": 1,    # rating of substructure
                              "superstructure": 1,  # rating of superstructure
                              }}]
    records = collection.aggregate(pipeline)
    conditionRatings = pd.DataFrame(list(records))
    conditionRatings['Age'] = conditionRatings['year'] - conditionRatings['yearBuilt']
    return conditionRatings
## Condition ratings of all states concatenated into one single data frame ConditionRatings
frames = []
for state in states:
f = getDataOneYear(state)
frames.append(f)
df_nbi_ne = pd.concat(frames)
df_nbi_ne
df_nbi_ne = df_nbi_ne.loc[~df_nbi_ne['deck'].isin(['N','NA'])]
df_nbi_ne = df_nbi_ne.loc[~df_nbi_ne['substructure'].isin(['N','NA'])]
df_nbi_ne = df_nbi_ne.loc[~df_nbi_ne['superstructure'].isin(['N','NA'])]
df_nbi_ne = df_nbi_ne.loc[~df_nbi_ne['Type of Wearing Surface'].isin(['6'])]
# Classify every Northeast bridge against each state's deck deterioration
# curve and plot one bar chart per state.
stat = ['25', '09', '23', '33', '44', '50', '34', '36', '42']
AgeList = list(df_nbi_ne['Age'])
deckList = list(df_nbi_ne['deck'])
# NOTE: the original version re-fetched per-state data (`deckR`) on every
# iteration and computed a `detScore`, but used neither; both the wasted
# DB round trips and the dead locals have been removed.
for st in stat:
    stateName = stateNameDict[st]
    labels = []
    for deckRating, Age in zip(deckList, AgeList):
        if Age < 60:
            mean_age_conditionRating = df_mean_deck[stateName][Age]
            std_age_conditionRating = df_std_deck[stateName][Age]
            # Within one std dev of the state's mean curve => average pace.
            if (mean_age_conditionRating - std_age_conditionRating) < int(deckRating) <= (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Average Deterioration')
            # Above the band => deteriorating slower than the state mean.
            elif int(deckRating) > (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Slow Deterioration')
            # Below the band => deteriorating faster than the state mean.
            elif int(deckRating) < (mean_age_conditionRating - std_age_conditionRating):
                labels.append('Fast Deterioration')
            else:
                labels.append('Null Value')
    D = dict((x, labels.count(x)) for x in set(labels))
    plt.figure(figsize=(12, 6))
    plt.title(stateName)
    plt.bar(range(len(D)), list(D.values()), align='center')
    plt.xticks(range(len(D)), list(D.keys()))
    plt.xlabel('Categories')
    plt.ylabel('Number of Bridges')
    plt.show()
# Pooled classification of all Northeast bridges in a single bar chart.
stat = ['25', '09', '23', '33', '44', '50', '34', '36', '42']
AgeList = list(df_nbi_ne['Age'])
deckList = list(df_nbi_ne['deck'])
# BUGFIX: this list was previously initialised as `label` (singular), so the
# loop kept appending to the `labels` list left over from the per-state cell
# above, inflating every count. It is now reset here under the right name.
labels = []
# NOTE: the original also re-fetched per-state data (`deckR`) and computed a
# `detScore` on every iteration without using either; both removed.
for st in stat:
    stateName = stateNameDict[st]
    for deckRating, Age in zip(deckList, AgeList):
        if Age < 60:
            mean_age_conditionRating = df_mean_deck[stateName][Age]
            std_age_conditionRating = df_std_deck[stateName][Age]
            # Within one std dev of the state's mean curve => average pace.
            if (mean_age_conditionRating - std_age_conditionRating) < int(deckRating) <= (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Average Deterioration')
            elif int(deckRating) > (mean_age_conditionRating + std_age_conditionRating):
                labels.append('Slow Deterioration')
            elif int(deckRating) < (mean_age_conditionRating - std_age_conditionRating):
                labels.append('Fast Deterioration')
            else:
                labels.append('Null Value')
D = dict((x, labels.count(x)) for x in set(labels))
plt.figure(figsize=(12, 6))
plt.title('Classification of Bridges in Northeast United States')
plt.bar(range(len(D)), list(D.values()), align='center')
plt.xticks(range(len(D)), list(D.keys()))
plt.xlabel('Categories of Bridges')
plt.ylabel('Number of Bridges')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connecting to National Data Service
Step2: Deterioration curves of Northeast United states
Step3: Filtering Null Values, Converting JSON format to Dataframes, and Calculating Mean Condition Ratings of Deck, Superstructure, and Substucture
Step4: Creating DataFrames of the Mean condition ratings of the deck, superstructure and substructure
Step5: Deterioration Curves - Deck
Step6: Deterioration Curve - Superstructure
Step7: Deterioration Curves - Substructure
Step8: The mean deterioration curve can be a measure to evaluate the rate of deterioration. If the condition rating of a bridge lies above the deterioration curve then the bridge is deteriorating at a slower pace than mean deterioration of the bridges, and if the condition rating of the bridge lies below the deterioration curve of the bridges then it is deteriorating at a faster pace than the mean deterioration of the bridges.
Step9: Classification of all the bridges in the Northeast United States
|
14,090
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
litigation = pd.read_csv("Housing_Litigations.csv")
litigation.head()
litigation['Boro'].unique()
litigation.groupby(by = ['Boro','CaseJudgement']).count()
litigation['CaseType'].unique()
litigation.groupby(by = ['CaseType', 'CaseJudgement']).count()
hpdcomp = pd.read_csv('Housing_Maintenance_Code_Complaints.csv')
hpdcomp.head()
len(hpdcomp)
hpdviol = pd.read_csv('Housing_Maintenance_Code_Violations.csv')
hpdviol.head()
len(hpdviol)
hpdcompprob = pd.read_csv('Complaint_Problems.csv')
hpdcompprob.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's take a look at unique values for some of the columns
Step2: The above table tells us that Manhattan has the lowest proportion of cases that receive judgement (about 1 in 80), whereas Staten Island has the highest (about 1 in 12). It may be something worth looking into, but it's also important to note that many cases settle out of court, and landlords in Manhattan may be more willing (or able) to do so.
Step3: The table above shows the same case judgement proportions, but conditioned on what type of case it was. Unhelpfully, the documentation does not specify what the difference between Access Warrant - Lead and Non-Lead is. It could be one of two possibilities
Step4: This dataset is less useful on its own. It doesn't tell us what the type of complaint was, only the date it was received and whether or not the complaint is still open. However, it may be useful in conjunction with the earlier dataset. For example, we might be interested in how many of these complaints end up in court (or at least, have some sort of legal action taken).
Step5: These datasets all have different lengths, but that's not surprising, given they come from different years. One productive initial step would be to convert the date strings into something numerical.
|
14,091
|
<ASSISTANT_TASK:>
Python Code:
## conda install ipyrad -c ipyrad
## conda install structure -c ipyrad
## conda install clumpp -c ipyrad
## conda install toytree -c eaton-lab
import ipyrad.analysis as ipa ## ipyrad analysis toolkit
import ipyparallel as ipp ## parallel processing
import toyplot ## plotting library
##
## ipcluster start --n=4
##
## get parallel client
ipyclient = ipp.Client()
print "Connected to {} cores".format(len(ipyclient))
## set N values of K to test across
kvalues = [2, 3, 4, 5, 6]
## init an analysis object
s = ipa.structure(
name="quick",
workdir="./analysis-structure",
data="./analysis-ipyrad/pedic-full_outfiles/pedic-full.ustr",
)
## set main params (use much larger values in a real analysis)
s.mainparams.burnin = 1000
s.mainparams.numreps = 5000
## submit N replicates of each test to run on parallel client
for kpop in kvalues:
s.run(kpop=kpop, nreps=4, ipyclient=ipyclient)
## wait for parallel jobs to finish
ipyclient.wait()
## return the evanno table (deltaK) for best K
etable = s.get_evanno_table(kvalues)
etable
## get admixture proportion tables avg'd across reps
tables = s.get_clumpp_table(kvalues, quiet=True)
## plot bars for a k-test in tables w/ hover labels
table = tables[3].sort_values(by=[0, 1, 2])
toyplot.bars(
table,
width=500,
height=200,
title=[[i] for i in table.index.tolist()],
xshow=False,
);
## the structure formatted file
strfile = "./analysis-ipyrad/pedic-full_outfiles/pedic-full.str"
## an optional mapfile, to sample unlinked SNPs
mapfile = "./analysis-ipyrad/pedic-full_outfiles/pedic-full.snps.map"
## the directory where outfiles should be written
workdir = "./analysis-structure/"
## create a Structure object
struct = ipa.structure(name="structure-test",
data=strfile,
mapfile=mapfile,
workdir=workdir)
## set mainparams for object
struct.mainparams.burnin = 10000
struct.mainparams.numreps = 100000
## see all mainparams
print struct.mainparams
## see or set extraparams
print struct.extraparams
## a range of K-values to test
tests = [3, 4, 5, 6]
## submit batches of 20 replicate jobs for each value of K
for kpop in tests:
struct.run(
kpop=kpop,
nreps=20,
seed=12345,
ipyclient=ipyclient,
)
## see submitted jobs (we query first 10 here)
struct.asyncs[:10]
## query a specific job result by index
if struct.asyncs[0].ready():
print struct.asyncs[0].result()
## block/wait until all jobs finished
ipyclient.wait()
## set some clumpp params
struct.clumppparams.m = 3 ## use largegreedy algorithm
struct.clumppparams.greedy_option = 2 ## test nrepeat possible orders
struct.clumppparams.repeats = 10000 ## number of repeats
struct.clumppparams
## run clumpp for each value of K
tables = struct.get_clumpp_table(tests)
## return the evanno table w/ deltaK
struct.get_evanno_table(tests)
## custom sorting order
myorder = [
"32082_przewalskii",
"33588_przewalskii",
"41478_cyathophylloides",
"41954_cyathophylloides",
"29154_superba",
"30686_cyathophylla",
"33413_thamno",
"30556_thamno",
"35236_rex",
"40578_rex",
"35855_rex",
"39618_rex",
"38362_rex",
]
print "custom ordering"
print tables[4].ix[myorder]
def hover(table):
    """Build per-cell hover labels ("Name/Group/Prop") for a toyplot barplot.

    Args:
        table: DataFrame of admixture proportions; rows are samples,
            columns are ancestry groups.

    Returns:
        A list of lists of strings with the same shape as `table`.
    """
    labels = []
    for row in range(table.shape[0]):
        stack = []
        for col in range(table.shape[1]):
            # .iloc replaces the long-removed DataFrame.ix accessor; the
            # lookup is positional, matching the range() indices above.
            label = "Name: {}\nGroup: {}\nProp: {}"\
                .format(table.index[row],
                        table.columns[col],
                        table.iloc[row, col])
            stack.append(label)
        labels.append(stack)
    return labels
for kpop in tests:
## parse outfile to a table and re-order it
table = tables[kpop]
table = table.ix[myorder]
## plot barplot w/ hover
canvas, axes, mark = toyplot.bars(
table,
title=hover(table),
width=400,
height=200,
xshow=False,
style={"stroke": toyplot.color.near_black},
)
## save plots for your favorite value of K
table = struct.get_clumpp_table(kpop=3)
table = table.ix[myorder]
## further styling of plot with css
style = {"stroke":toyplot.color.near_black,
"stroke-width": 2}
## build barplot
canvas = toyplot.Canvas(width=600, height=250)
axes = canvas.cartesian(bounds=("5%", "95%", "5%", "45%"))
axes.bars(table, title=hover(table), style=style)
## add names to x-axis
ticklabels = [i for i in table.index.tolist()]
axes.x.ticks.locator = toyplot.locator.Explicit(labels=ticklabels)
axes.x.ticks.labels.angle = -60
axes.x.ticks.show = True
axes.x.ticks.labels.offset = 10
axes.x.ticks.labels.style = {"font-size": "12px"}
axes.x.spine.style = style
axes.y.show = False
## options: uncomment to save plots. Only html retains hover.
import toyplot.svg
import toyplot.pdf
import toyplot.html
toyplot.svg.render(canvas, "struct.svg")
toyplot.pdf.render(canvas, "struct.pdf")
toyplot.html.render(canvas, "struct.html")
## show in notebook
canvas
struct.get_evanno_table([3, 4, 5, 6])
struct.get_evanno_table([3, 4, 5, 6], max_var_multiple=50.)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import Python libraries
Step2: Parallel cluster setup
Step3: Quick guide (tldr;)
Step4: Full guide
Step5: Create a Structure Class object
Step6: Set parameter options for this object
Step7: Submit jobs to run on the cluster
Step8: Track progress until finished
Step9: Summarize replicates with CLUMPP
Step10: Sort the table order how you like it
Step11: A function for adding an interactive hover to our plots
Step12: Visualize population structure in barplots
Step13: Make a slightly fancier plot and save to file
Step14: Calculating the best K
Step15: Testing for convergence
|
14,092
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10, 6)
girls = pd.read_csv('../data/girls.csv')
girls.head(10)
girls.describe(include='all')
girls['Waist'].hist(bins=15);
sns.distplot(girls['Waist'], kde=True);
ax = sns.distplot(girls['Height'], kde=False)
ax.set(xlabel='Playboy girls height', ylabel='Frequency')
sns.set_style('darkgrid')
def weight_category(weight):
return 'heavier' if weight > 54\
else 'lighter' if weight < 49 else 'median'
girls['weight_cat'] = girls['Weight'].apply(weight_category)
sns.boxplot(x='weight_cat', y='Height', data=girls);
sns.set_palette(sns.color_palette("RdBu"))
sns.pairplot(girls[['Bust', 'Waist', 'Hips', 'Height', 'Weight']]);
girls.corr()
girls.head()
def height_category(height):
return 'high' if height > 175\
else 'small' if height < 160 else 'median'
girls['height_cat'] = girls['Height'].apply(height_category)
sns.countplot(x='height_cat', hue='weight_cat', data=girls);
pd.crosstab(girls['weight_cat'], girls['height_cat'])
sns.jointplot(x='Weight', y='Height',
data=girls, kind='reg');
data_types = {'Drugs': float,
'Score': float}
df = pd.read_csv('../data/drugs-and-math.csv',
index_col=0, sep=',', dtype=data_types)
print(df.shape)
print(df.columns)
print(df.index)
df
df.sort_values('Score',
ascending=False,
inplace=True)
df.describe().T # Иногда так лучше
df.plot(kind='box');
df.plot(x='Drugs', y='Score', kind='bar');
df.plot(x='Drugs', y='Score', kind='scatter');
df.corr(method='pearson')
sns.jointplot(x='Drugs', y='Score',
data=df, kind='reg');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Посмотрим на Seaborn сразу в действии на данных по моделям месяца по версии журнала Playboy.
Step2: Гистограммы. Метод <a href="https
Step3: Метод <a href="https
Step4: Метод <a href="http
Step5: Метод <a href="http
Step6: Метод jointplot
Step7: Пример визуального анализа данных с Pandas и Seaborn
Step8: Таблица уже отсортирована по колонке Drugs, сделаем сортировку по Score.
Step9: Рисунки
Step10: Видна тенденция...
Step11: Не советуем строить регрессию по 7 наблюдениям
|
14,093
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import pandas as pd
import datetime
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import YouTubeVideo
#!pip install xlrd
DATADIR = os.path.join(os.path.expanduser("~"), "DATA", "TimeSeries", "EPA")
os.path.exists(DATADIR)
files = os.listdir(DATADIR)
files
slc = pd.read_excel(os.path.join(DATADIR, 'Salt_Lake_2016_PM25.xlsx'))
print(slc.columns)
print(slc.shape)
slc.head(10)
YouTubeVideo("A4ZysWTWXEk")
slc = slc[["Date Local", "Time Local",
"Sample Measurement", "MDL",
"Latitude", "Longitude", "Site Num"]]
obs=20
print(slc.loc[obs]["Date Local"], slc.loc[obs]["Time Local"])
print(datetime.datetime.combine(slc.loc[obs]["Date Local"],
slc.loc[obs]["Time Local"]))
slc["Date/Time Local"] = \
slc.apply(lambda x: datetime.datetime.combine(x["Date Local"],
x["Time Local"]),
axis=1)
slc["Date/Time Local"].tail()
slc[slc["Site Num"]==3006].plot(x="Date Local",
y="Sample Measurement")
slc_weather = pd.read_excel(os.path.join(DATADIR, 'SLC_Weather_2016.xlsx'))
slc_weather.head()
slc_weather = pd.read_excel(os.path.join(DATADIR,
'SLC_Weather_2016.xlsx'),
skiprows=[1],
na_values='-')
slc_weather.head()
slc_weather['Day'][0]
slc_weather.plot(x="Day", y="High")
slc.groupby("Date Local", as_index=False).aggregate(np.mean).head()
slc.groupby("Date Local", as_index=False).aggregate(np.sum).head()
slc_day_all = slc_day.merge(slc_weather,
left_on="Date Local",
right_on="Day")
slc_day_all.head()
f, ax1 = plt.subplots(1)
slc_day_all[slc_day_all["Site Num"]==3006].plot(x="Date Local",
y="High", ax=ax1)
slc_day_all[slc_day_all["Site Num"]==3006].plot(secondary_y=True, x="Date Local",
y="Sample Measurement", ax=ax1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to create a variable that will tell our program where the data are located
Step2: What files are in the directory?
Step3: Read the air quality data
Step4: A dataframe is an object with attributes and methods.
Step5: In addition to looking at the column names, we can also look at the data
Step6: There is lots of stuff here, more than we're interested in. Thow it away
Step7: Comments
Step8: Applying datetime.combine to all the dates and times in our dataframe
Step9: Let's look at the data
Step10: Read in weather data
Step11: The data file uses the 2nd row to describe the units.
Step12: Our Weather Data Have Resolution of Days
Step13: Group and take sum?
Step14: Now we need to combine the pollution data with the weather data
Step15: Explore the Relationship between various weather variables and Sample Measurement
|
14,094
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v3 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
%matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
%load_ext autoreload
%autoreload 2
np.random.seed(1)
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
parameters -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
np.random.seed(1)
### START CODE HERE ### (≈ 4 lines of code)
W1 = None
b1 = None
W2 = None
b2 = None
### END CODE HERE ###
assert(W1.shape == (n_h, n_x))
assert(b1.shape == (n_h, 1))
assert(W2.shape == (n_y, n_h))
assert(b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
parameters = initialize_parameters(3,2,1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: initialize_parameters_deep
def initialize_parameters_deep(layer_dims):
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
bl -- bias vector of shape (layer_dims[l], 1)
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
### START CODE HERE ### (≈ 2 lines of code)
parameters['W' + str(l)] = None
parameters['b' + str(l)] = None
### END CODE HERE ###
assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
parameters = initialize_parameters_deep([5,4,3])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: linear_forward
def linear_forward(A, W, b):
Implement the linear part of a layer's forward propagation.
Arguments:
A -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
Returns:
Z -- the input of the activation function, also called pre-activation parameter
cache -- a python dictionary containing "A", "W" and "b" ; stored for computing the backward pass efficiently
### START CODE HERE ### (≈ 1 line of code)
Z = None
### END CODE HERE ###
assert(Z.shape == (W.shape[0], A.shape[1]))
cache = (A, W, b)
return Z, cache
A, W, b = linear_forward_test_case()
Z, linear_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
# GRADED FUNCTION: linear_activation_forward
def linear_activation_forward(A_prev, W, b, activation):
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = None
A, activation_cache = None
### END CODE HERE ###
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
### START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = None
A, activation_cache = None
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache)
return A, cache
A_prev, W, b = linear_activation_forward_test_case()
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "sigmoid")
print("With sigmoid: A = " + str(A))
A, linear_activation_cache = linear_activation_forward(A_prev, W, b, activation = "relu")
print("With ReLU: A = " + str(A))
# GRADED FUNCTION: L_model_forward
def L_model_forward(X, parameters):
Implement forward propagation for the [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID computation
Arguments:
X -- data, numpy array of shape (input size, number of examples)
parameters -- output of initialize_parameters_deep()
Returns:
AL -- last post-activation value
caches -- list of caches containing:
every cache of linear_relu_forward() (there are L-1 of them, indexed from 0 to L-2)
the cache of linear_sigmoid_forward() (there is one, indexed L-1)
caches = []
A = X
L = len(parameters) // 2 # number of layers in the neural network
# Implement [LINEAR -> RELU]*(L-1). Add "cache" to the "caches" list.
for l in range(1, L):
A_prev = A
### START CODE HERE ### (≈ 2 lines of code)
A, cache = None
### END CODE HERE ###
# Implement LINEAR -> SIGMOID. Add "cache" to the "caches" list.
### START CODE HERE ### (≈ 2 lines of code)
AL, cache = None
### END CODE HERE ###
assert(AL.shape == (1,X.shape[1]))
return AL, caches
X, parameters = L_model_forward_test_case_2hidden()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("Length of caches list = " + str(len(caches)))
# GRADED FUNCTION: compute_cost
def compute_cost(AL, Y):
Implement the cost function defined by equation (7).
Arguments:
AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)
Returns:
cost -- cross-entropy cost
m = Y.shape[1]
# Compute loss from aL and y.
### START CODE HERE ### (≈ 1 lines of code)
cost = None
### END CODE HERE ###
cost = np.squeeze(cost) # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
assert(cost.shape == ())
return cost
Y, AL = compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
# GRADED FUNCTION: linear_backward
def linear_backward(dZ, cache):
Implement the linear portion of backward propagation for a single layer (layer l)
Arguments:
dZ -- Gradient of the cost with respect to the linear output (of current layer l)
cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
A_prev, W, b = cache
m = A_prev.shape[1]
### START CODE HERE ### (≈ 3 lines of code)
dW = None
db = None
dA_prev = None
### END CODE HERE ###
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
# Set up some test inputs
dZ, linear_cache = linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, linear_cache)
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: linear_activation_backward
def linear_activation_backward(dA, cache, activation):
Implement the backward propagation for the LINEAR->ACTIVATION layer.
Arguments:
dA -- post-activation gradient for current layer l
cache -- tuple of values (linear_cache, activation_cache) we store for computing backward propagation efficiently
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
dW -- Gradient of the cost with respect to W (current layer l), same shape as W
db -- Gradient of the cost with respect to b (current layer l), same shape as b
linear_cache, activation_cache = cache
if activation == "relu":
### START CODE HERE ### (≈ 2 lines of code)
dZ = None
dA_prev, dW, db = None
### END CODE HERE ###
elif activation == "sigmoid":
### START CODE HERE ### (≈ 2 lines of code)
dZ = None
dA_prev, dW, db = None
### END CODE HERE ###
return dA_prev, dW, db
AL, linear_activation_cache = linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "sigmoid")
print ("sigmoid:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activation_cache, activation = "relu")
print ("relu:")
print ("dA_prev = "+ str(dA_prev))
print ("dW = " + str(dW))
print ("db = " + str(db))
# GRADED FUNCTION: L_model_backward
def L_model_backward(AL, Y, caches):
Implement the backward propagation for the [LINEAR->RELU] * (L-1) -> LINEAR -> SIGMOID group
Arguments:
AL -- probability vector, output of the forward propagation (L_model_forward())
Y -- true "label" vector (containing 0 if non-cat, 1 if cat)
caches -- list of caches containing:
every cache of linear_activation_forward() with "relu" (it's caches[l], for l in range(L-1) i.e l = 0...L-2)
the cache of linear_activation_forward() with "sigmoid" (it's caches[L-1])
Returns:
grads -- A dictionary with the gradients
grads["dA" + str(l)] = ...
grads["dW" + str(l)] = ...
grads["db" + str(l)] = ...
grads = {}
L = len(caches) # the number of layers
m = AL.shape[1]
Y = Y.reshape(AL.shape) # after this line, Y is the same shape as AL
# Initializing the backpropagation
### START CODE HERE ### (1 line of code)
dAL = None
### END CODE HERE ###
# Lth layer (SIGMOID -> LINEAR) gradients. Inputs: "AL, Y, caches". Outputs: "grads["dAL"], grads["dWL"], grads["dbL"]
### START CODE HERE ### (approx. 2 lines)
current_cache = None
grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = None
### END CODE HERE ###
for l in reversed(range(L-1)):
# lth layer: (RELU -> LINEAR) gradients.
# Inputs: "grads["dA" + str(l + 2)], caches". Outputs: "grads["dA" + str(l + 1)] , grads["dW" + str(l + 1)] , grads["db" + str(l + 1)]
### START CODE HERE ### (approx. 5 lines)
current_cache = None
dA_prev_temp, dW_temp, db_temp = None
grads["dA" + str(l + 1)] = None
grads["dW" + str(l + 1)] = None
grads["db" + str(l + 1)] = None
### END CODE HERE ###
return grads
AL, Y_assess, caches = L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print_grads(grads)
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate):
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of L_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters["W" + str(l)] = ...
parameters["b" + str(l)] = ...
L = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
### START CODE HERE ### (≈ 3 lines of code)
### END CODE HERE ###
return parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print ("W1 = "+ str(parameters["W1"]))
print ("b1 = "+ str(parameters["b1"]))
print ("W2 = "+ str(parameters["W2"]))
print ("b2 = "+ str(parameters["b2"]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 2 - Outline of the Assignment
Step4: Expected output
Step6: Expected output
Step8: Expected output
Step10: Expected output
Step12: <table style="width
Step14: Expected Output
Step16: Expected Output
Step18: Expected output with sigmoid
Step20: Expected Output
|
14,095
|
<ASSISTANT_TASK:>
Python Code:
import pyart
from matplotlib import pyplot as plt
import numpy as np
import os
import s3fs
from datetime import datetime as dt
%matplotlib inline
print(pyart.__version__)
import warnings
warnings.simplefilter("ignore", category=DeprecationWarning)
#warnings.simplefilter('ignore')
def get_latest_file(radar_id, bucket='noaa-nexrad-level2', engine='s3fs'):
Return latest NEXRAD data file name.
s3conn = s3fs.S3FileSystem(anon=True)
latest_year = os.path.join(
bucket, os.path.basename(s3conn.ls(bucket)[-1]))
latest_month = os.path.join(
latest_year, os.path.basename(s3conn.ls(latest_year)[-1]))
latest_day = os.path.join(
latest_month, os.path.basename(s3conn.ls(latest_month)[-1]))
return s3conn.ls(os.path.join(latest_day, radar_id))[-1]
cm_names = pyart.graph.cm._cmapnames
cms = pyart.graph.cm.cmap_d
nrows = len(cm_names)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
# Create a figure and axes instance
fig, axes = plt.subplots(nrows=nrows, figsize=(5,10))
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title('Py-Art Colormaps', fontsize=14)
# Loop through the possibilities
for nn, pymap in enumerate(cm_names):
axes[nn].imshow(gradient, aspect='auto', cmap=cms[pymap])
pos = list(axes[nn].get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, pymap, va='center', ha='right', fontsize=8)
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
### Plot a NEXRAD file
nexf = "data/KILN20140429_231254_V06"
nexr = pyart.io.read(nexf)
nexd = pyart.graph.RadarDisplay(nexr)
nexr.fields.keys()
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(16, 12))
nexd.plot('reflectivity', sweep=1, cmap='pyart_NWSRef', vmin=0., vmax=55., mask_outside=False, ax=ax[0, 0])
nexd.plot_range_rings([50, 100], ax=ax[0, 0])
nexd.set_limits((-150., 150.), (-150., 150.), ax=ax[0, 0])
nexd.plot('velocity', sweep=1, cmap='pyart_NWSVel', vmin=-30, vmax=30., mask_outside=False, ax=ax[0, 1])
nexd.plot_range_rings([50, 100], ax=ax[0, 1])
nexd.set_limits((-150., 150.), (-150., 150.), ax=ax[0, 1])
nexd.plot('cross_correlation_ratio', sweep=0, cmap='pyart_BrBu12', vmin=0.85, vmax=1., mask_outside=False, ax=ax[1, 0])
nexd.plot_range_rings([50, 100], ax=ax[0, 1])
nexd.set_limits((-150., 150.), (-150., 150.), ax=ax[0, 1])
nexd.plot('differential_reflectivity', sweep=0, cmap='pyart_BuDOr12', vmin=-2, vmax=2., mask_outside=False, ax=ax[1, 1])
nexd.plot_range_rings([50, 100], ax=ax[1, 1])
nexd.set_limits((-150., 150.), (-150., 150.), ax=ax[1, 1])
nexd.plot_azimuth_to_rhi('reflectivity', 305., cmap='pyart_NWSRef', vmin=0., vmax=55.)
nexd.set_limits((0., 150.), (0., 15.))
rhif = "data/noxp_rhi_140610232635.RAWHJFH"
rhir = pyart.io.read(rhif)
rhid = pyart.graph.RadarDisplay(rhir)
rhid.plot_rhi('reflectivity', 0, vmin=-5.0, vmax=70,)
nexmap = pyart.graph.RadarMapDisplayCartopy(nexr)
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
nexmap.plot_ppi_map('reflectivity', sweep=1, vmin=0., vmax=55., ax=ax)
nexmap.ax.set_extent([-87., -82., 37., 42.])
# %load solution.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizations with Py-ART
Step2: Py-ART Colormaps
Step3: The RadarDisplay
Step4: There are many keyword values we can employe to refine the plot
Step5: Py-ART RHI
Step6: Py-ART RadarMapDisplay or RadarMapDisplayCartopy
Step7: Use what you have learned!
|
14,096
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
matplotlib.use('nbagg')
%matplotlib inline
from glob import glob
from modulefinder import Module
from modulefinder import ModuleFinder
from os.path import dirname
from pprint import pprint
import sys
import trace
import urllib.request
import matplotlib.pyplot as plt
from IPython.core.display import Image
from pycallgraph import Config
from pycallgraph import GlobbingFilter
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
sys.path.append("../lib")
#from modarch import matplotlib_groupings
import modfind
import modgraph
from modutil import ls, rm
libdir = "../../.venv-mmpl/lib/python3.4/site-packages/matplotlib"
ls(libdir)
toplevel = glob(libdir + "/*.py")
modules = ["matplotlib" + x.split(libdir)[1] for x in toplevel]
len(modules)
pprint(modules)
modfile = "/__init__.py"
subs = [dirname(x) for x in glob(libdir + "/*" + modfile)]
pprint(["matplotlib" + x.split(libdir)[1] for x in subs])
ignore_output =[ls(libdir + x) for x in ["/backends", "/axes", "/projections"]]
pprint(matplotlib_groupings)
#! /usr/bin/env python3.4
import matplotlib.pyplot as plt
def main () -> None:
plt.plot([1,2,3,4])
plt.ylabel('some numbers')
plt.savefig('simple-line.png')
if __name__ == '__main__':
main()
finder = ModuleFinder()
finder.run_script('../scripts/simple-line.py')
len(finder.modules)
class CustomFinder(ModuleFinder):
def __init__(self, include:list, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cf_include = include
def matches(self, name:str) -> bool:
if True in [name.startswith(x) for x in self.cf_include]:
return True
return False
def import_hook(self, name:str, caller:Module=None, fromlist:list=None,
level:int=-1) -> Module:
if self.matches(name):
super().import_hook(name, caller, fromlist, level)
finder = CustomFinder(["matpl", "mpl"])
finder.run_script('../scripts/simple-line.py')
len(finder.modules)
finder.modules.keys()
finder = modfind.CustomFinder()
finder.run_script('../scripts/simple-line.py')
len(finder.modules)
grapher = modgraph.ModGrapher(
source='../scripts/simple-line.py',
layout='neato')
grapher.render()
grapher.render(layout="twopi")
grapher.render(layout="twopi", labels=False)
grapher.render(layout="twopi", labels=False, mode="simple")
grapher.render(layout="neato", labels=True, mode="reduced-structure")
grapher.render(layout="neato", labels=True, mode="simple-structure")
grapher.render(layout="neato", labels=True, mode="full-structure")
#! /usr/bin/env python3.4
import matplotlib.pyplot as plt
def main () -> None:
plt.plot([1,2,3,4])
plt.ylabel('some numbers')
plt.savefig('simple-line.png')
if __name__ == '__main__':
main()
plt.rcParams['backend']
plt._backend_mod.__name__
plt._show
plt.plot([1,2,3,4])
plt.get_current_fig_manager()
plt.get_current_fig_manager().canvas
plt.get_current_fig_manager().canvas.figure
plt.gcf()
plt.gca()
plt.gca().lines
plt.gca().get_ylabel()
plt.ylabel('some numbers')
print(plt.gca().get_ylabel())
image_file = "simple-line.png"
if os.path.exists(image_file):
rm(image_file)
if os.path.exists(image_file):
ls("*.png")
plt.savefig(image_file)
if os.path.exists(image_file):
ls("*.png")
def hello_world():
print("Hello, World!")
config = Config(groups=False, trace_filter=None)
output = output=GraphvizOutput(output_file='callgraph.png')
with PyCallGraph(config=config, output=output):
hello_world()
Image(filename='callgraph.png')
with PyCallGraph(config=config, output=output):
urllib.request.urlopen('http://matplotlib.org/')
Image(filename='callgraph.png')
with PyCallGraph(config=config, output=output):
import matplotlib.pyplot as plt
def plotit():
plt.plot([1,2,3,4])
plt.ylabel('some numbers')
plt.show()
plotit()
Image(filename='callgraph.png')
def plotit():
plt.plot([1,2,3,4])
plt.ylabel('some numbers')
plt.show()
tracer = trace.Trace(countfuncs=1, countcallers=1)
_ = tracer.runfunc(plotit)
results = tracer.results()
_ = results.write_results(show_missing=True, summary=True, coverdir=".")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll bring in the modules we'll be using in this notebook
Step2: Now let's get some code we created espeically for this notebook
Step3: Modules and matplotlib import graphs
Step4: Let's get a cleaner view, though. Here are all the top-level matplotlib modules
Step5: And these are matplotlib's subpackages
Step6: Let's take a peek into some of these
Step7: matplotlib has an architecture that conceptually groups areas of functionality into the following layers
Step8: As you may notice, not all of the strings in the key/list pairs match exactly to matplotlib's modules or subpackages. That's because these strings are used to match beginnings of strings. Their intended use is in a call such as x.startswith(mod_name_part).
Step9: Super easy. But the purpose of this isn't to demonstrate something impressive with matplotlib. Rather, as we dig into the matplotlib modules, and in this case explore the module dependencies for our script, we want the simplest case possible so that we can focus on out goal
Step10: Now, the problem with this, is that it's going to pull in every dependency for every library used. We're going to see matplotlib, numpy, the Python standard library, etc. Let's just take a look at the module count, and not display all of them
Step11: So we're going to need to do some customization...
Step12: Let's give this a try, passing a list of strings that module names can start with (just the ones we're interested in)
Step13: That's more like it! Since it's not 1000s of modules, let's take a peek
Step14: We've created a custom finder very similar to the one above in the modfind module for this notebook. Try it out
Step15: As you can see, the loaded finder is a little more strict, having 3 fewer modules.
Step16: Well, that's a bit of a mess. Not so "neato". Usig the dot layout is worse. Let's try twopi
Step17: That's a little bit better, but we're still not that much closer to seeing some structure. If we turn off the lables, we might get a better sense of things
Step18: The way things are colored right now is fairly highly tweaked in the custom class
Step19: That's a little better, but I don't think we've really seen any structure revealed over what was visible in the previous rendering.
Step20: This definitely simplifies things! But if we can combine this with the simple mode, it might help us better see how the individual components are related
Step21: Now we're getting somewhere. What about with the full set of modules?
Step22: And there you have it
Step23: Now we will step through this on bit at a time
Step24: Or we can just use the pyplot utility functions
Step25: Let's clean up from any previous runs
Step26: Finally, we can save our image
Step27: Callgraphs
Step28: Now, let's generate a call graph for this function, and then display it
Step29: Pretty simple, eh? Not too much information there to ponder. Let's try something a little more involved
Step30: That's something to stare at for a while! Ready for the big one now? Let's do it!
Step31: trace
|
14,097
|
<ASSISTANT_TASK:>
Python Code:
from a301utils.a301_readfile import download
import h5py
filename = 'MYD021KM.A2016136.2015.006.2016138123353.h5'
download(filename)
from IPython.display import Image
Image(url='http://clouds.eos.ubc.ca/~phil/courses/atsc301/downloads/aqua_136_2015.jpg',width=600)
h5_file=h5py.File(filename)
print(list(h5_file.attrs.keys()))
print(h5_file.attrs['Earth-Sun Distance_GLOSDS'])
print(h5_file.attrs['HDFEOSVersion_GLOSDS'])
Image('screenshots/HDF_file_structure.png')
print(list(h5_file.keys()))
print(list(h5_file['MODIS_SWATH_Type_L1B'].keys()))
print(list(h5_file['MODIS_SWATH_Type_L1B/Data Fields'].keys()))
print(h5_file['MODIS_SWATH_Type_L1B/Data Fields/Band_1KM_Emissive'][...])
index31=10
my_name = 'EV_1KM_Emissive'
chan31=h5_file['MODIS_SWATH_Type_L1B']['Data Fields'][my_name][index31,:,:]
print(chan31.shape,chan31.dtype)
chan31[:3,:3]
scale=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_scales'][...]
print(scale)
offset=h5_file['MODIS_SWATH_Type_L1B']['Data Fields']['EV_1KM_Emissive'].attrs['radiance_offsets'][...]
print(offset)
chan31_calibrated =(chan31 - offset[index31])*scale[index31]
%matplotlib inline
import matplotlib.pyplot as plt
out=plt.hist(chan31.flatten())
#
# get the current axis to add title with gca()
#
ax = plt.gca()
_=ax.set(title='Aqua Modis raw counts')
import matplotlib.pyplot as plt
fig,ax = plt.subplots(1,1)
ax.hist(chan31_calibrated.flatten())
_=ax.set(xlabel='radiance $(W\,m^{-2}\,\mu m^{-1}\,sr^{-1}$)',
title='channel 31 radiance for Aqua Modis')
lon_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Longitude'][...]
lat_data=h5_file['MODIS_SWATH_Type_L1B']['Geolocation Fields']['Latitude'][...]
_=plt.plot(lon_data[:10,:10],lat_data[:10,:10],'b+')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here is the corresponding red,green,blue color composite for the granule.
Step2: now use h5py to read some of the satellite channels
Step3: h5 files have attributes -- stored as a dictionary
Step4: print two of the attributes
Step5: h5 files have variables -- stored in a dictionary.
Step6: Read the radiance data from MODIS_SWATH_Type_L1B/Data Fields/EV_1KM_Emissive
Step7: and the 'MODIS_SWATH_Type_L1B' group contains 3 subgroups
Step8: and the 'Data Fields' subgroup contains 27 more groups
Step9: Print out the 16 channel numbers stored in Band_1KM_Emissive data array. The [...] means "read everything". The 16 thermal channels are channels 20-36. Their wavelength ranges and common uses are listed
Step10: note that channel 31, which covers the wavelength range 10.78-11.28 $\mu m$ occurs at index value 10 (remember python counts from 0)
Step11: the data are stored as unsigned (i.e. only positive values), 2 byte (16 bit) integers which can hold values from 0 to $2^{16}$ - 1 = 65,535.
Step12: Print the first 3 rows and columns
Step13: we need to apply a
Step14: and here is the offset for 16 channels
Step15: note that as the satellite ages and wears out, these calibration coefficients change
Step16: histogram the raw counts -- note that hist doesn't know how to handle 2-dim arrays, so flatten to 1-d
Step17: histogram the calibrated radiances and show that they lie between
Step18: Next Read MODIS_SWATH_Type_L1B/Geolocation Fields/Longitude
|
14,098
|
<ASSISTANT_TASK:>
Python Code:
!pip freeze
%%capture
%load_ext autoreload
%autoreload 2
# Make the local model_management package importable from this notebook.
import sys
sys.path.append('model_management')
from model_management.sklearn_model import SklearnModel
import numpy as np
import pandas as pd
import h2o
from h2o.automl import H2OAutoML
from __future__ import print_function
import pandas_profiling
# Suppress unwanted warnings
import warnings
warnings.filterwarnings('ignore')
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
# Load our favorite visualization library
import os
import plotly
import plotly.plotly as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
import cufflinks as cf
plotly.offline.init_notebook_mode(connected=True)
# Sign into Plotly with masked, encrypted API key
myPlotlyKey = os.environ['SECRET_ENV_BRETTS_PLOTLY_KEY']
py.sign_in(username='bretto777',api_key=myPlotlyKey)
# Load some data
churnDF = pd.read_csv('https://trifactapro.s3.amazonaws.com/churn.csv', delimiter=',')
churnDF.head(5)
#%%capture
#pandas_profiling.ProfileReport(churnDF)
# Encode boolean/yes-no columns as 0/1 integers so every model can consume them.
churnDF["Churn"] = churnDF["Churn"].replace([True, False],[1,0])
churnDF["Int'l Plan"] = churnDF["Int'l Plan"].replace(["no","yes"],[0,1])
churnDF["VMail Plan"] = churnDF["VMail Plan"].replace(["no","yes"],[0,1])
# Drop identifier-like columns that should not be used as predictors.
churnDF.drop(["State", "Area Code", "Phone"], axis=1, inplace=True)
%%capture
# Split data into training and testing frames
h2o.init(nthreads=1, max_mem_size="768m")
from sklearn import cross_validation
from sklearn.model_selection import train_test_split
training, testing = train_test_split(churnDF, train_size=0.8, stratify=churnDF["Churn"], random_state=9)
x_train = training.drop(["Churn"], axis = 1)
y_train = training["Churn"]
x_test = testing.drop(["Churn"], axis = 1)
y_test = testing["Churn"]
train = h2o.H2OFrame(python_obj=training)
test = h2o.H2OFrame(python_obj=testing)
train["Churn"] = train["Churn"].asfactor()
test["Churn"] = test["Churn"].asfactor()
# Set predictor and response variables
y = "Churn"
x = train.columns
x.remove(y)
x_train = x_train.values
y_train = y_train.values
x_test = x_test.values
y_test = y_test.values
from sklearn.ensemble import GradientBoostingClassifier
# Baseline gradient-boosted-trees classifier.
gbm = GradientBoostingClassifier(n_estimators=200,
                                 learning_rate=.07,
                                 max_depth=4,
                                 random_state=0).fit(x_train,y_train)
model_gbm = SklearnModel(model=gbm,
                         problem_class='binary_classification',
                         # Bug fix: description now matches the actual hyperparameters
                         # above (was "275 trees, learning rate .04, max depth 5").
                         description='GBM, 200 trees, learning rate .07, max depth 4',
                         name="GBM-4",
                         y_test = y_test,
                         x_test = x_test)
model_gbm.metrics()
model_gbm.save()
from sklearn.linear_model import LogisticRegression
# Simple linear baseline for comparison against the tree models.
lr = LogisticRegression().fit(x_train,y_train)
model_LR = SklearnModel(model=lr,
                        problem_class='binary_classification',
                        description='sklearn logistic regression, default settings',
                        name="LR-2",
                        y_test = y_test,
                        x_test = x_test)
model_LR.metrics()
model_LR.save()
%%capture
# Run AutoML, building up to 9 models (max_models=9) plus any stacked ensembles
autoModel = H2OAutoML(max_models = 9)
autoModel.train(x = x, y = y,
                training_frame = train,
                validation_frame = test,
                leaderboard_frame = test)
# Leaderboard of all AutoML models, ranked by the default metric.
leaders = autoModel.leaderboard
leaders
# Mean relative variable importance for the third-ranked model.
importances = h2o.get_model(leaders[2, 0]).varimp(use_pandas=True)
importances = importances.loc[:,['variable','relative_importance']].groupby('variable').mean()
importances.sort_values(by="relative_importance", ascending=False).iplot(kind='bar', colors='#5AC4F2', theme='white')
import matplotlib.pyplot as plt
plt.figure()
bestModel = h2o.get_model(leaders[2, 0])
# Bug fix: bind the partial-dependence result to its own name instead of
# rebinding `plt`, which shadowed the matplotlib module just imported above.
partial_plots = bestModel.partial_plot(data=test, cols=["Day Mins","CustServ Calls","Day Charge"])
# Cross-validated ROC curves (fpr, tpr) for the top ten leaderboard models.
roc_curves = [np.array(h2o.get_model(leaders[i, 0]).roc(xval=True)) for i in range(10)]
layout = go.Layout(autosize=False, width=725, height=575,
                   xaxis=dict(title='False Positive Rate', titlefont=dict(family='Arial, sans-serif', size=15, color='grey')),
                   yaxis=dict(title='True Positive Rate', titlefont=dict(family='Arial, sans-serif', size=15, color='grey')))
traceChanceLine = go.Scatter(x = [0,1], y = [0,1], mode = 'lines+markers', name = 'chance', line = dict(color = ('rgb(136, 140, 150)'), width = 4, dash = 'dash'))
# Leader (model 0) drawn bold and dark; the remaining models as thin light-blue lines.
# This loop fixes two bugs in the hand-unrolled original: a malformed color string
# ('rgb(156, 190, 241))') on Model 1, and Model 7 being left out of the figure.
model_traces = []
for i, curve in enumerate(roc_curves):
    if i == 0:
        style = dict(color = 'rgb(26, 58, 126)', width = 3)
    else:
        style = dict(color = 'rgb(156, 190, 241)', width = 1)
    model_traces.append(go.Scatter(x = curve[0], y = curve[1], mode = 'lines', name = 'Model %d' % i, line = style))
fig = go.Figure(data=model_traces + [traceChanceLine], layout=layout)
py.iplot(fig)
# Confusion matrix (cross-validated) for the runner-up model.
cm = h2o.get_model(leaders[1, 0]).confusion_matrix(xval=True)
cm = cm.table.as_data_frame()
cm
confusionMatrix = ff.create_table(cm)
confusionMatrix.layout.height=300
confusionMatrix.layout.width=800
confusionMatrix.layout.font.size=17
py.iplot(confusionMatrix)
# Dollar impact per confusion-matrix cell. Row index 1 = actual churn,
# row index 0 = actual retain; column '1'/'0' = predicted churn/retain.
CorrectPredictChurn = cm.loc[1,'1']
CorrectPredictChurnImpact = 75
cm1 = CorrectPredictChurn*CorrectPredictChurnImpact
IncorrectPredictChurn = cm.loc[1,'0']
IncorrectPredictChurnImpact = -5
cm2 = IncorrectPredictChurn*IncorrectPredictChurnImpact
IncorrectPredictRetain = cm.loc[0,'1']
IncorrectPredictRetainImpact = -150
cm3 = IncorrectPredictRetain*IncorrectPredictRetainImpact
CorrectPredictRetain = cm.loc[0,'0']
CorrectPredictRetainImpact = 5
# Bug fix: the original computed cm4 from IncorrectPredictRetain, so the
# correct-retain count never contributed to the business-impact matrix.
cm4 = CorrectPredictRetain*CorrectPredictRetainImpact
data_matrix = [['Business Impact', '($) Predicted Churn', '($) Predicted Retain', '($) Total'],
               ['($) Actual Churn', cm1, cm3, '' ],
               ['($) Actual Retain', cm2, cm4, ''],
               ['($) Total', cm1+cm2, cm3+cm4, cm1+cm2+cm3+cm4]]
impactMatrix = ff.create_table(data_matrix, height_constant=20, hoverinfo='weight')
impactMatrix.layout.height=300
impactMatrix.layout.width=800
impactMatrix.layout.font.size=17
py.iplot(impactMatrix)
print("Total customers evaluated: 2132")
print("Total value created by the model: $" + str(cm1+cm2+cm3+cm4))
print("Total value per customer: $" +str(round(((cm1+cm2+cm3+cm4)/2132),3)))
#%%capture
# Save the best model
#path = h2o.save_model(model=h2o.get_model(leaders[0, 0]), force=True)
#os.rename(h2o.get_model(leaders[0, 0]).model_id, "AutoML-leader")
#%%capture
#LoadedEnsemble = h2o.load_model(path="AutoML-leader")
#print(LoadedEnsemble)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load The Dataset
Step2: Train a Model
Step3: Automatic Machine Learning
Step4: Leaderboard
Step5: Variable Importances
Step6: Best Model vs the Base Learners
Step7: Confusion Matrix
Step8: Business Impact Matrix
|
14,099
|
<ASSISTANT_TASK:>
Python Code:
from math import sin, cos
import matplotlib.pyplot as plt
import numpy as np
#from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
@np.vectorize
def any_function(x):
    """Test objective f(x) = x^2 + 3*sin(x) - 4*cos(x^2), vectorized over x."""
    quadratic = x * x
    return quadratic + 3 * sin(x) - 4 * cos(quadratic)
# Sample the objective on [-5, 5) and plot the curve.
x = np.arange(-5, 5, 0.1)
y = any_function(x)
curveFigure = plt.figure()
plt.plot(x,y, figure = curveFigure)
plt.show()
@np.vectorize
def numerical_derivative(x, f, h = 0.001):
    """Approximate f'(x) with the symmetric (central) difference of step h."""
    forward = f(x + h)
    backward = f(x - h)
    return (forward - backward) / (2.0 * h)
# Plot the numerical derivative f'(x) over the same grid.
ytick = numerical_derivative(x, any_function)
plt.plot(x,ytick)
plt.show()
@np.vectorize
def tangent(x, x_p, any_function):
    """Evaluate at x the line tangent to any_function at the anchor point x_p."""
    slope = numerical_derivative(x_p, any_function)
    anchor_y = any_function(x_p)
    return slope * (x - x_p) + anchor_y
#tangent([min(x), max(x)], xn, any_function)
# One manual gradient-descent step, visualized: current point, tangent, update.
xn = 3
mu = 0.2
x_range = [min(x), max(x)]
y_range = tangent(x_range, xn, any_function)
plt.plot(x,y)
plt.plot(x_range, y_range, '-g')
plt.plot(xn, any_function(xn), '.r')
plt.ylim(min(y)-1, max(y)+1)
plt.show()
#mu = 0.9*mu
# Descent update: x_{n+1} = x_n - mu * f'(x_n)
xnew = xn - mu * numerical_derivative(xn, any_function)
xn = xnew
# Bug fix: Python 2 `print` statements converted to the print() function so the
# cell runs under Python 3, consistent with the rest of the notebook.
print('xn: %f, f(xn) = %f, f\'(xn) = %f' % (xn, any_function(xn), numerical_derivative(xn, any_function)))
print('mu = %f' % (mu))
plt.plot(x,y)
plt.plot(xn, any_function(xn), '.r')
plt.show()
# TODO: animate!
# TODO: animate!
def decay_exp(mu_0, t, k):
    """Exponentially decayed learning rate: mu_0 * exp(-k * t)."""
    attenuation = np.exp(-k * t)
    return mu_0 * attenuation
def optimize_simple(f, x, mu, mudecay = decay_exp, k = 1, maxiter = 1000, eps = 0.001):
    """Gradient descent on f starting at x with a decaying learning rate.

    f        -- scalar function to minimize
    x        -- starting point
    mu       -- initial learning rate mu_0
    mudecay  -- schedule mudecay(mu_0, t, k) giving the rate at step t
    k        -- decay constant forwarded to mudecay
    maxiter  -- hard cap on iterations
    eps      -- NOTE(review): currently unused; convergence is decided by
                np.isclose on successive function values instead -- confirm intent
    Returns (xhist, yhist, gradhist): visited points, their function values,
    and the gradients used (the first gradient entry is a placeholder np.inf).
    """
    y = f(x)  # value at the start point (overwritten on the first loop pass)
    i = 1
    yn = np.inf  # "new value" sentinel so the loop body runs at least once
    xhist = [x]
    yhist = [y]
    gradhist = [np.inf]  # no gradient has been computed for the start point
    mu_act = mudecay(mu, 0, k)  # learning rate for the first step (t = 0)
    # Stop when two successive function values agree (np.isclose) or at maxiter.
    while (not np.isclose(y, yn)) and (i < maxiter):
        y = yn
        ftick_x = numerical_derivative(x, f)
        x = x - mu_act * ftick_x  # descent step using the current decayed rate
        yn = f(x)
        xhist.append(x)
        yhist.append(yn)
        gradhist.append(ftick_x)
        mu_act = mudecay(mu, i, k)  # rate for the next step (t = i)
        i += 1
    return xhist, yhist, gradhist
# Run the optimizer from x0 = 2 with mu = 0.2 and overlay the visited points.
plt.plot(x,y)
xhist, yhist, gradhist = optimize_simple(any_function, 2, 0.2)
# Bug fix: Python 2 `print` statement converted to print() for Python 3.
print(len(xhist))
plt.plot(xhist, yhist, '.r')
plt.show()
@interact(x_in = (-4, 4, 0.1), mu = (0.01, 1, 0.01), k = (0.01, 5, 0.01))
def interactive_optim(x_in, mu, k):
    """Widget callback: run gradient descent from x_in with rate mu and decay
    constant k, then plot the trajectory over the objective curve."""
    xhist, yhist, gradhist = optimize_simple(any_function, x_in, mu, k=k)
    xx = np.arange(min(xhist), max(xhist), 0.01)
    yy = any_function(xx)
    # Bug fix: Python 2 `print` statement converted to print() for Python 3.
    print(len(xhist))
    plt.plot(xx,yy)
    plt.plot(xhist, yhist, '.r')
    plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we need to find the deviation. A very simple and popular method is using symmetric difference.
Step2: Now, we look for an $x$ for which $f'(x) = 0$. Seems like a difficult function to optimize since there are many values where this is the case.
Step3: This example shows very nicely that although we are close to finding the minimum, we still have trouble converging. This is due to the fixed learning rate. Hence, it is sensible to reduce the learning rate over time, e.g. by using exponential decay $\mu = \mu_0 e^{-kt}$ (among other possible schedules).
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.