<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Explore usage of %ferret_run line magic Step2: Another example
<ASSISTANT_TASK:> Python Code: %load_ext ferretmagic for i in [100,500,1000]: %ferret_run -s 300,300 'set text/font=arial ; plot/thick=2 sin(i[i=1:%(i)s]*0.1)' % locals() %%ferret use levitus_climatology for i in range(1,3): %ferret_run -q -s 400,300 'set text/font=arial ; fill salt[k=%(i)s] ; go land' % locals() <END_TASK>
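The substitution used above is plain Python %-formatting with a mapping; a minimal stand-alone sketch of just that step (no ferret installation needed; the printed string is what %ferret_run would receive):

for i in [100, 500, 1000]:
    # "%(i)s" is filled in from the mapping returned by locals(), which holds i
    cmd = 'set text/font=arial ; plot/thick=2 sin(i[i=1:%(i)s]*0.1)' % locals()
    print(cmd)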
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Install the latest GA version of google-cloud-storage library as well. Step2: Install the latest GA version of KFP SDK library as well. Step3: Restart the kernel Step4: Check the versions of the packages you installed. The KFP SDK version should be >=1.6. Step5: Before you begin Step6: Region Step7: Timestamp Step8: Authenticate your Google Cloud account Step9: Create a Cloud Storage bucket Step10: Only if your bucket doesn't already exist Step11: Finally, validate access to your Cloud Storage bucket by examining its contents Step12: Service Account Step13: Set service account access for Vertex AI Pipelines Step14: Set up variables Step15: Vertex AI Pipelines constants Step16: Additional imports. Step17: Initialize Vertex AI SDK for Python Step18: Define pipeline components using scikit-learn Step19: Define iris_sgdclassifier component Step20: Define iris_logregression component Step21: Define the pipeline Step22: Compile the pipeline Step23: Run the pipeline Step24: Click on the generated link to see your run in the Cloud Console. Step25: When both pipeline runs have finished, compare their results by navigating to the pipeline runs list in the Cloud Console, selecting both of them, and clicking COMPARE at the top of the Console panel. Step26: Plot parallel coordinates of parameters and metrics Step27: Plot ROC curve and calculate AUC number Step28: Cleaning up
<ASSISTANT_TASK:> Python Code: import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG ! pip3 install -U google-cloud-storage $USER_FLAG ! pip3 install $USER_FLAG kfp --upgrade if os.getenv("IS_TESTING"): ! pip3 install --upgrade matplotlib $USER_FLAG import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))" PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID REGION = "us-central1" # @param {type: "string"} from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP ! gsutil mb -l $REGION $BUCKET_NAME ! gsutil ls -al $BUCKET_NAME SERVICE_ACCOUNT = "[your-service-account]" # @param {type:"string"} if ( SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]" ): # Get your GCP project id from gcloud shell_output = !gcloud auth list 2>/dev/null SERVICE_ACCOUNT = shell_output[2].strip() print("Service Account:", SERVICE_ACCOUNT) ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME import google.cloud.aiplatform as aip PIPELINE_ROOT = "{}/pipeline_root/iris".format(BUCKET_NAME) from kfp.v2 import dsl from kfp.v2.dsl import ClassificationMetrics, Metrics, Output, component aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME) @component( packages_to_install=["sklearn"], base_image="python:3.9", output_component_file="wine_classification_component.yaml", ) def wine_classification(wmetrics: Output[ClassificationMetrics]): from sklearn.datasets import load_wine from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import roc_curve from sklearn.model_selection import cross_val_predict, train_test_split X, y = load_wine(return_X_y=True) # Binary classification problem for label 1. 
y = y == 1 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) rfc = RandomForestClassifier(n_estimators=10, random_state=42) rfc.fit(X_train, y_train) y_scores = cross_val_predict(rfc, X_train, y_train, cv=3, method="predict_proba") fpr, tpr, thresholds = roc_curve( y_true=y_train, y_score=y_scores[:, 1], pos_label=True ) wmetrics.log_roc_curve(fpr, tpr, thresholds) @component(packages_to_install=["sklearn"], base_image="python:3.9") def iris_sgdclassifier( test_samples_fraction: float, metricsc: Output[ClassificationMetrics], ): from sklearn import datasets, model_selection from sklearn.linear_model import SGDClassifier from sklearn.metrics import confusion_matrix iris_dataset = datasets.load_iris() train_x, test_x, train_y, test_y = model_selection.train_test_split( iris_dataset["data"], iris_dataset["target"], test_size=test_samples_fraction, ) classifier = SGDClassifier() classifier.fit(train_x, train_y) predictions = model_selection.cross_val_predict(classifier, train_x, train_y, cv=3) metricsc.log_confusion_matrix( ["Setosa", "Versicolour", "Virginica"], confusion_matrix( train_y, predictions ).tolist(), # .tolist() to convert np array to list. ) @component( packages_to_install=["sklearn"], base_image="python:3.9", ) def iris_logregression( input_seed: int, split_count: int, metrics: Output[Metrics], ): from sklearn import datasets, model_selection from sklearn.linear_model import LogisticRegression # Load digits dataset iris = datasets.load_iris() # # Create feature matrix X = iris.data # Create target vector y = iris.target # test size test_size = 0.20 # cross-validation settings kfold = model_selection.KFold( n_splits=split_count, random_state=input_seed, shuffle=True ) # Model instance model = LogisticRegression() scoring = "accuracy" results = model_selection.cross_val_score(model, X, y, cv=kfold, scoring=scoring) print(f"results: {results}") # split data X_train, X_test, y_train, y_test = model_selection.train_test_split( X, y, test_size=test_size, random_state=input_seed ) # fit model model.fit(X_train, y_train) # accuracy on test set result = model.score(X_test, y_test) print(f"result: {result}") metrics.log_metric("accuracy", (result * 100.0)) PIPELINE_NAME = "metrics-pipeline-v2" @dsl.pipeline( # Default pipeline root. You can override it when submitting the pipeline. pipeline_root=PIPELINE_ROOT, # A name for the pipeline. 
name="metrics-pipeline-v2", ) def pipeline(seed: int, splits: int): wine_classification_op = wine_classification() # noqa: F841 iris_logregression_op = iris_logregression( # noqa: F841 input_seed=seed, split_count=splits ) iris_sgdclassifier_op = iris_sgdclassifier(test_samples_fraction=0.3) # noqa: F841 from kfp.v2 import compiler # noqa: F811 compiler.Compiler().compile( pipeline_func=pipeline, package_path="tabular classification_pipeline.json".replace(" ", "_"), ) DISPLAY_NAME = "iris_" + TIMESTAMP job = aip.PipelineJob( display_name=DISPLAY_NAME, template_path="tabular classification_pipeline.json".replace(" ", "_"), job_id=f"tabular classification-v2{TIMESTAMP}-1".replace(" ", ""), pipeline_root=PIPELINE_ROOT, parameter_values={"seed": 7, "splits": 10}, ) job.run() job = aip.PipelineJob( display_name="iris_" + TIMESTAMP, template_path="tabular classification_pipeline.json".replace(" ", "_"), job_id=f"tabular classification-pipeline-v2{TIMESTAMP}-2".replace(" ", ""), pipeline_root=PIPELINE_ROOT, parameter_values={"seed": 5, "splits": 7}, ) job.run() pipeline_df = aip.get_pipeline_df(pipeline=PIPELINE_NAME) print(pipeline_df.head(2)) import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.rcParams["figure.figsize"] = [15, 5] pipeline_df["param.input:seed"] = pipeline_df["param.input:seed"].astype(np.float16) pipeline_df["param.input:splits"] = pipeline_df["param.input:splits"].astype(np.float16) ax = pd.plotting.parallel_coordinates( pipeline_df.reset_index(level=0), "run_name", cols=["param.input:seed", "param.input:splits", "metric.accuracy"], ) ax.set_yscale("symlog") ax.legend(bbox_to_anchor=(1.0, 0.5)) try: df = pd.DataFrame(pipeline_df["metric.confidenceMetrics"][0]) auc = np.trapz(df["recall"], df["falsePositiveRate"]) plt.plot(df["falsePositiveRate"], df["recall"], label="auc=" + str(auc)) plt.legend(loc=4) plt.show() except Exception as e: print(e) delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True try: if delete_model and "DISPLAY_NAME" in globals(): models = aip.Model.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) model = models[0] aip.Model.delete(model) print("Deleted model:", model) except Exception as e: print(e) try: if delete_endpoint and "DISPLAY_NAME" in globals(): endpoints = aip.Endpoint.list( filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time" ) endpoint = endpoints[0] endpoint.undeploy_all() aip.Endpoint.delete(endpoint.resource_name) print("Deleted endpoint:", endpoint) except Exception as e: print(e) if delete_dataset and "DISPLAY_NAME" in globals(): if "tabular" == "tabular": try: datasets = aip.TabularDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TabularDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "image": try: datasets = aip.ImageDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.ImageDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "text": try: datasets = aip.TextDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.TextDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) if "tabular" == "video": try: datasets = 
aip.VideoDataset.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) dataset = datasets[0] aip.VideoDataset.delete(dataset.resource_name) print("Deleted dataset:", dataset) except Exception as e: print(e) try: if delete_pipeline and "DISPLAY_NAME" in globals(): pipelines = aip.PipelineJob.list( filter=f"display_name={DISPLAY_NAME}", order_by="create_time" ) pipeline = pipelines[0] aip.PipelineJob.delete(pipeline.resource_name) print("Deleted pipeline:", pipeline) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME <END_TASK>
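A stripped-down sketch of the KFP v2 component pattern the notebook builds on, reusing only constructs that already appear above (@component with packages_to_install, Output[Metrics], log_metric); the component name and dataset choice here are illustrative assumptions, not part of the original pipeline:

from kfp.v2.dsl import Metrics, Output, component

@component(packages_to_install=["sklearn"], base_image="python:3.9")
def accuracy_logger(metrics: Output[Metrics]):
    # Everything inside runs in the component's container, so imports live here.
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split

    X, y = load_iris(return_X_y=True)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
    model = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    metrics.log_metric("accuracy", model.score(X_te, y_te) * 100.0)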
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Read the Gutenberg data provided by nltk Step2: Tokenize Step3: PoS tagging Step5: Alphabetical list of part-of-speech tags used in the Penn Treebank Project Step6: Exercise 1 Step7: Stemming (or lemmatizing) Step8: Collection library Step9: Looking at the results, non-word characters such as ',' and '.' have a high frequency Step10: Exercise 3 - extract only the nouns and build a list of the unique ones
<ASSISTANT_TASK:> Python Code: import nltk nltk.download('gutenberg') # nltk.download('maxent_treebank_pos_tagger') from nltk.corpus import gutenberg # Load the stored data and check the file titles gutenberg_files = gutenberg.fileids() gutenberg_files # Inspect a specific text gutenberg_doc = gutenberg.open('austen-emma.txt').read() print(gutenberg_doc[:200]) # tokenize - split words on whitespace and store them as a list sentence = At eight o'clock on Thursday morning ... Arthur didn't feel very good. tokens = nltk.word_tokenize(sentence) tokens # PoS tagging - attach a PoS to each token and store them as a list of tuples tagged = nltk.pos_tag(tokens) tagged for word in tagged: print(word) for word in tagged: if word[1][0] == 'N': print(word[0].lower()+'/'+word[1]) # the same expression = list comprehension tagged_word = [word[0].lower()+'/'+word[1] for word in tagged if word[1][0] == 'N'] # check the result tagged_word gutenberg_tokens = nltk.word_tokenize(gutenberg_doc) gutenberg_tagged = nltk.pos_tag(gutenberg_tokens) # if you want to time this, use the time library import time start_time = time.time() ## code to run gutenberg_tokens = nltk.word_tokenize(gutenberg_doc) gutenberg_tagged = nltk.pos_tag(gutenberg_tokens) print("Processed time = ",(time.time() - start_time)) gutenberg_tokens[:10] gutenberg_tagged[:10] gutenberg_doc = gutenberg.open('austen-sense.txt').read() gutenberg_tokens = nltk.word_tokenize(gutenberg_doc) gutenberg_tagged = nltk.pos_tag(gutenberg_tokens) gutenberg_tokens[:10] gutenberg_tagged[:10] # lemmatization lemma = nltk.wordnet.WordNetLemmatizer() gutenberg_lemma = [] # lemmatize each token with nltk and append the result to the lemma list for token in gutenberg_tokens: gutenberg_lemma.append(lemma.lemmatize(token)) gutenberg_lemma[:20] # tokenizing -> lemmatizing -> PoS tagging gutenberg_lemma_tagged = nltk.pos_tag(gutenberg_lemma) gutenberg_lemma_tagged[:20] # stemming from nltk.stem.porter import PorterStemmer porter_stemmer = PorterStemmer() gutenberg_stemmed = [] for token in gutenberg_tokens: gutenberg_stemmed.append(porter_stemmer.stem(token)) gutenberg_stemmed[:20] start_time = time.time() gutenberg_stemmed_tagged = nltk.pos_tag(gutenberg_stemmed) print(time.time() - start_time) #gutenberg_stemmed_tagged # compare Stemming & Lemmatization print(porter_stemmer.stem('running')) print(lemma.lemmatize('running')) import collections # a library that converts tokenized data into dictionary form and provides convenient helpers import numpy as np import pandas as pd import matplotlib.pyplot as plt # matplotlib for visualizing the results %matplotlib inline # convert to token - frequency form: the collections.Counter function! 
print(collections.Counter(gutenberg_stemmed_tagged).most_common(50)) # split the tuple-form data into token and frequency and store them token_list = [] freq_list = [] for token, freq in collections.Counter(gutenberg_stemmed_tagged).most_common(10): token_list.append(token) freq_list.append(freq) print(token_list[:4]) print(freq_list[:4]) # store the split lists as a pandas DataFrame data = pd.concat([pd.DataFrame(token_list),pd.DataFrame(freq_list)], axis=1) data.columns = ['word','tag','freq'] data.head() # a plot that shows the word instead of the word index freqdist = nltk.FreqDist(gutenberg_lemma_tagged) freqdist.plot(50) freqdist.plot(50,cumulative=True) # use the stop words provided by nltk from nltk.corpus import stopwords stop_words = set(stopwords.words('english')) for index, ele in enumerate(stop_words): if index<20: print(index,ele) # you can also update the list with domain-specific stop words stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}']) # upper/lower case are treated as different, so convert to lowercase first filtered_words = [word[0].lower() for word in gutenberg_lemma_tagged if word[0].lower() not in stop_words] filtered_tag = [word[1].lower() for word in gutenberg_lemma_tagged if word[0].lower() not in stop_words] filtered_words[:10] # adding 'mr.' to the stop word list removes it freqdist = nltk.FreqDist(filtered_words) freqdist.plot(50) freqdist.plot(50,cumulative=True) # set up the DataFrame result = pd.DataFrame() result['filtered_word'] = filtered_words result['filtered_freq'] = filtered_tag # save as csv result.to_csv("filtered_word.csv") <END_TASK>
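The whole pipeline above condenses to a few lines; a minimal sketch, assuming the required nltk corpora (punkt, wordnet, stopwords) are already downloaded, and using a short illustrative sentence in place of the Gutenberg text:

import collections
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

text = "Emma Woodhouse, handsome, clever, and rich, had lived nearly twenty-one years."
tokens = nltk.word_tokenize(text)                     # tokenize
lemma = WordNetLemmatizer()
stop_words = set(stopwords.words('english')) | set(".,;:!?()[]{}\"'")
words = [lemma.lemmatize(t.lower()) for t in tokens   # lemmatize + lowercase
         if t.lower() not in stop_words]              # drop stop words
print(collections.Counter(words).most_common(5))      # token -> frequency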
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Language Translation Step3: Explore the Data Step6: Implement Preprocessing Function Step8: Preprocess all the data and save it Step10: Check Point Step12: Check the Version of TensorFlow and Access to GPU Step15: Build the Neural Network Step18: Process Decoding Input Step21: Encoding Step24: Decoding - Training Step27: Decoding - Inference Step30: Build the Decoding Layer Step33: Build the Neural Network Step34: Neural Network Training Step36: Build the Graph Step39: Train Step41: Save Parameters Step43: Checkpoint Step46: Sentence to Sequence Step48: Translate
<ASSISTANT_TASK:> Python Code: DON'T MODIFY ANYTHING IN THIS CELL import helper import problem_unittests as tests source_path = 'data/small_vocab_en' target_path = 'data/small_vocab_fr' source_text = helper.load_data(source_path) target_text = helper.load_data(target_path) view_sentence_range = (0, 10) DON'T MODIFY ANYTHING IN THIS CELL import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()}))) sentences = source_text.split('\n') word_counts = [len(sentence.split()) for sentence in sentences] print('Number of sentences: {}'.format(len(sentences))) print('Average number of words in a sentence: {}'.format(np.average(word_counts))) print() print('English sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) print() print('French sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int): Convert source and target text to proper word ids :param source_text: String that contains all the source text. :param target_text: String that contains all the target text. :param source_vocab_to_int: Dictionary to go from the source words to an id :param target_vocab_to_int: Dictionary to go from the target words to an id :return: A tuple of lists (source_id_text, target_id_text) # TODO: Implement Function source_text_int = [[source_vocab_to_int.get(word) for word in sentence.split()] for sentence in source_text.split('\n')] target_text_int = [[target_vocab_to_int.get(word) for word in sentence.split()]+[target_vocab_to_int.get('<EOS>')] for sentence in target_text.split('\n')] return source_text_int, target_text_int DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_text_to_ids(text_to_ids) DON'T MODIFY ANYTHING IN THIS CELL helper.preprocess_and_save_data(source_path, target_path, text_to_ids) DON'T MODIFY ANYTHING IN THIS CELL import numpy as np import helper (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() DON'T MODIFY ANYTHING IN THIS CELL from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__) print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) def model_inputs(): Create TF Placeholders for input, targets, and learning rate. 
:return: Tuple (input, targets, learning rate, keep probability) # TODO: Implement Function input_placeholder = tf.placeholder(tf.int32, [None, None], name='input') targets_placeholder = tf.placeholder(tf.int32, [None, None]) learning_rate = tf.placeholder(tf.float32, name='learning_rate') probability = tf.placeholder(tf.float32, name='keep_prob') return input_placeholder, targets_placeholder, learning_rate, probability DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_model_inputs(model_inputs) def process_decoding_input(target_data, target_vocab_to_int, batch_size): Preprocess target data for decoding :param target_data: Target Placeholder :param target_vocab_to_int: Dictionary to go from the target words to an id :param batch_size: Batch Size :return: Preprocessed target data # TODO: Implement Function sliced_data = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1]) concat_data = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), sliced_data], 1) return concat_data DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_process_decoding_input(process_decoding_input) def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob): Create encoding layer :param rnn_inputs: Inputs for the RNN :param rnn_size: RNN Size :param num_layers: Number of layers :param keep_prob: Dropout keep probability :return: RNN state # TODO: Implement Function LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size) rnn_cell = tf.contrib.rnn.MultiRNNCell([LSTM]*num_layers) dropout_cell = tf.contrib.rnn.DropoutWrapper(rnn_cell, output_keep_prob=keep_prob) # def make_cell(rnn_size): # cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) # cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob) # return cell # rnn_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)]) enc_output, enc_state = tf.nn.dynamic_rnn(dropout_cell, rnn_inputs, dtype=tf.float32) return enc_state DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_encoding_layer(encoding_layer) def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob): Create a decoding layer for training :param encoder_state: Encoder State :param dec_cell: Decoder RNN Cell :param dec_embed_input: Decoder embedded input :param sequence_length: Sequence Length :param decoding_scope: TenorFlow Variable Scope for decoding :param output_fn: Function to apply the output layer :param keep_prob: Dropout keep probability :return: Train Logits dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state) outputs_train, state_train, _ = tf.contrib.seq2seq.dynamic_rnn_decoder( dec_cell, decoder_fn=dynamic_fn_train, inputs=dec_embed_input, sequence_length=sequence_length, scope=decoding_scope) output_train_dropout = tf.nn.dropout(outputs_train, keep_prob=keep_prob) logits = output_fn(output_train_dropout) # TODO: Implement Function return logits DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_decoding_layer_train(decoding_layer_train) def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob): Create a decoding layer for inference :param encoder_state: Encoder state :param dec_cell: Decoder RNN Cell :param dec_embeddings: Decoder embeddings :param start_of_sequence_id: GO ID :param end_of_sequence_id: EOS Id :param maximum_length: The maximum allowed time steps to 
decode :param vocab_size: Size of vocabulary :param decoding_scope: TensorFlow Variable Scope for decoding :param output_fn: Function to apply the output layer :param keep_prob: Dropout keep probability :return: Inference Logits # TODO: Implement Function dynamic_fn_inference = tf.contrib.seq2seq.simple_decoder_fn_inference( output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, dtype=tf.int32) logits, state, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, dynamic_fn_inference, scope=decoding_scope) return logits DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_decoding_layer_infer(decoding_layer_infer) def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob): Create decoding layer :param dec_embed_input: Decoder embedded input :param dec_embeddings: Decoder embeddings :param encoder_state: The encoded state :param vocab_size: Size of vocabulary :param sequence_length: Sequence Length :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target words to an id :param keep_prob: Dropout keep probability :return: Tuple of (Training Logits, Inference Logits) # TODO: Implement Function start_of_sequence_id = target_vocab_to_int['<GO>'] end_of_sequence_id = target_vocab_to_int['<EOS>'] LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size) rnn_cell = tf.contrib.rnn.MultiRNNCell([LSTM]*num_layers) dropout_wrapper_cell = tf.contrib.rnn.DropoutWrapper(rnn_cell, output_keep_prob=keep_prob) # def make_cell(rnn_size): # cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_normal_initializer(-0.1, 0.1, seed=2)) # return cell # multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)]) # dropout_wrapper_cell = tf.contrib.rnn.DropoutWrapper(multi_rnn_cell, output_keep_prob=keep_prob) with tf.variable_scope('decoding') as decoding_scope: output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope) train_logits = decoding_layer_train(encoder_state, dropout_wrapper_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) # train_logits = output_fn(train_output) with tf.variable_scope('decoding', reuse=True) as decoding_scope: inference_logits = decoding_layer_infer(encoder_state, dropout_wrapper_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, sequence_length, vocab_size, decoding_scope, output_fn, keep_prob) # inference_logits = output_fn(inference_output) return train_logits, inference_logits DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_decoding_layer(decoding_layer) def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size, enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int): Build the Sequence-to-Sequence part of the neural network :param input_data: Input placeholder :param target_data: Target placeholder :param keep_prob: Dropout keep probability placeholder :param batch_size: Batch Size :param sequence_length: Sequence Length :param source_vocab_size: Source vocabulary size :param target_vocab_size: Target vocabulary size :param enc_embedding_size: Decoder embedding size :param dec_embedding_size: Encoder embedding size :param rnn_size: RNN Size :param num_layers: Number of layers :param target_vocab_to_int: Dictionary to go from the target 
words to an id :return: Tuple of (Training Logits, Inference Logits) # TODO: Implement Function enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size) enc_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob) dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size) dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size])) dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) dec_train_logits, dec_inference_logits = decoding_layer( dec_embed_input, dec_embeddings, enc_state, target_vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob) return dec_train_logits, dec_inference_logits DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_seq2seq_model(seq2seq_model) # Number of Epochs epochs = 15 # Batch Size batch_size = 256 # RNN Size rnn_size = 128 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 64 decoding_embedding_size = 64 # Learning Rate learning_rate = 0.001 # Dropout Keep Probability keep_probability = 0.5 DON'T MODIFY ANYTHING IN THIS CELL save_path = 'checkpoints/dev' (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess() max_source_sentence_length = max([len(sentence) for sentence in source_int_text]) train_graph = tf.Graph() with train_graph.as_default(): input_data, targets, lr, keep_prob = model_inputs() sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length') input_shape = tf.shape(input_data) train_logits, inference_logits = seq2seq_model( tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int), encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int) tf.identity(inference_logits, 'logits') with tf.name_scope("optimization"): # Loss function cost = tf.contrib.seq2seq.sequence_loss( train_logits, targets, tf.ones([input_shape[0], sequence_length])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) DON'T MODIFY ANYTHING IN THIS CELL import time def get_accuracy(target, logits): Calculate accuracy max_seq = max(target.shape[1], logits.shape[1]) if max_seq - target.shape[1]: target = np.pad( target, [(0,0),(0,max_seq - target.shape[1])], 'constant') if max_seq - logits.shape[1]: logits = np.pad( logits, [(0,0),(0,max_seq - logits.shape[1]), (0,0)], 'constant') return np.mean(np.equal(target, np.argmax(logits, 2))) train_source = source_int_text[batch_size:] train_target = target_int_text[batch_size:] valid_source = helper.pad_sentence_batch(source_int_text[:batch_size]) valid_target = helper.pad_sentence_batch(target_int_text[:batch_size]) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch) in enumerate( helper.batch_data(train_source, train_target, batch_size)): start_time = time.time() _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate, sequence_length: target_batch.shape[1], keep_prob: keep_probability}) batch_train_logits = sess.run( inference_logits, {input_data: source_batch, 
keep_prob: 1.0}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_source, keep_prob: 1.0}) train_acc = get_accuracy(target_batch, batch_train_logits) valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits) end_time = time.time() print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}' .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_path) print('Model Trained and Saved') DON'T MODIFY ANYTHING IN THIS CELL # Save parameters for checkpoint helper.save_params(save_path) DON'T MODIFY ANYTHING IN THIS CELL import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess() load_path = helper.load_params() def sentence_to_seq(sentence, vocab_to_int): Convert a sentence to a sequence of ids :param sentence: String :param vocab_to_int: Dictionary to go from the words to an id :return: List of word ids # TODO: Implement Function lower_word_array = sentence.lower().split(' ') lower_word_array_int = [vocab_to_int[word] if vocab_to_int.get(word, -1) >= 0 else vocab_to_int['<UNK>'] for word in lower_word_array] return lower_word_array_int DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE tests.test_sentence_to_seq(sentence_to_seq) translate_sentence = 'he saw a old yellow truck .' DON'T MODIFY ANYTHING IN THIS CELL translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_path + '.meta') loader.restore(sess, load_path) input_data = loaded_graph.get_tensor_by_name('input:0') logits = loaded_graph.get_tensor_by_name('logits:0') keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0] print('Input') print(' Word Ids: {}'.format([i for i in translate_sentence])) print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)])) print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)])) <END_TASK>
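A quick numpy illustration of what process_decoding_input does above: drop the last token of each target sequence and prepend the <GO> id, so the decoder learns to predict token t from the tokens before it (the ids here are made up for the example):

import numpy as np

GO_ID = 1
target_batch = np.array([[4, 5, 6, 3],    # 3 == <EOS>
                         [7, 8, 3, 0]])   # 0 == <PAD>
sliced = target_batch[:, :-1]             # the tf.strided_slice step
dec_input = np.concatenate(
    [np.full((target_batch.shape[0], 1), GO_ID), sliced], axis=1)
print(dec_input)                          # [[1 4 5 6] [1 7 8 3]]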
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The easiest way to demonstrate how clustering works is to simply generate some data and show them in action. Step2: To generate our data, we're going to pick 6 random points, which we'll call centroids, and for each point we're going to generate 250 random points about it. Step3: Below we can see each centroid marked with an X, and the coloring associated with each respective cluster. Step4: K means Step5: Initial "random" guesses, based on the data. Step6: Updated centroids after one iteration. Step7: The result is centroids that have minimized the total distance between all points and their centroids; the centroids are "optimal" in this sense. Step8: In our implementation, we choose the bandwidth to be 2.5. Step9: We can see that mean shift clustering has almost reproduced our original clustering. The one exception is the very close clusters, but if we really wanted to differentiate them we could lower the bandwidth. Step10: We should be able to accelerate this algorithm with a GPU. Step11: The advantage of pytorch is that it's very similar to numpy. Step12: Torch does not support broadcasting, so Jeremy has replaced the distance subtraction line with a subtraction function from his custom pytorch broadcasting library. Step13: This implementation actually takes longer. Step14: All the computation is happening in the for loop, which isn't accelerated by pytorch. Step15: GPU Step16: Although each iteration still has to launch a new cuda kernel, there are now fewer iterations, and the acceleration from updating a batch of points more than makes up for it. Step17: LSH
<ASSISTANT_TASK:> Python Code: import kmeans; reload(kmeans) from kmeans import Kmeans n_clusters=6 n_samples =250 centroids = np.random.uniform(-35, 35, (n_clusters, 2)) slices = [np.random.multivariate_normal(centroids[i], np.diag([5., 5.]), n_samples) for i in range(n_clusters)] data = np.concatenate(slices).astype(np.float32) kmeans.plot_data(centroids, data, n_samples) k = Kmeans(data, n_clusters) sess = tf.InteractiveSession() tf.global_variables_initializer().run() initial_centroids = k.find_initial_centroids(n_clusters).eval() kmeans.plot_data(initial_centroids, data, n_samples) curr_centroids = tf.Variable(initial_centroids) nearest_indices = k.assign_to_nearest(curr_centroids) updated_centroids = k.update_centroids(nearest_indices) tf.global_variables_initializer().run() kmeans.plot_data(updated_centroids.eval(), data, n_samples) curr_centroids.assign(updated_centroids) with tf.Session().as_default(): new_centroids = k.run() kmeans.plot_data(new_centroids, data, n_samples) def gaussian(d, bw): return np.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi)) def meanshift(data): X = np.copy(data) for it in range(5): for i, x in enumerate(X): dist = np.sqrt(((x-X)**2).sum(1)) weight = gaussian(dist, 2.5) X[i] = (np.expand_dims(weight,1)*X).sum(0) / weight.sum() return X %time X=meanshift(data) kmeans.plot_data(centroids+2, X, n_samples) import torch_utils; reload(torch_utils) from torch_utils import * def gaussian(d, bw): return torch.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi)) # - Original # def meanshift(data): # X = torch.FloatTensor(np.copy(data)) # for it in range(5): # for i, x in enumerate(X): # dist = torch.sqrt((sub(x, X)**2).sum(1)) # weight = gaussian(dist, 3) # num = mul(weight, X).sum(0) # X[i] = num / weight.sum() # return X # - Modified def meanshift(data): X = torch.FloatTensor(np.copy(data)) for it in range(5): for i, x in enumerate(X): dist = torch.sqrt((sub(x, X)**2).sum(1)) weight = gaussian(dist, 3).unsqueeze(1) num = mul(weight, X).sum(0).unsqueeze(0) X[i] = num / weight.sum() return X %time X = meanshift(data).numpy() X kmeans.plot_data(centroids+2, X, n_samples) def dist_b(a,b): return torch.sqrt((sub(a.unsqueeze(0),b.unsqueeze(1))**2).sum(2)) a=torch.rand(2,2) b=torch.rand(3,2) # dist_b(b, a).squeeze(2) # - not sure why .squeeze(2) dist_b(b, a) def gaussian(d, bw): return torch.exp(-0.5*((d/bw))**2) / (bw*math.sqrt(2*math.pi)) def sum_sqz(a,axis): return a.sum(axis).squeeze(axis) # - Original # def meanshift(data, bs=500): # n = len(data) # X = torch.FloatTensor(np.copy(data)).cuda() # for it in range(5): # for i in range(0,n,bs): # s = slice(i,min(n,i+bs)) # weight = gaussian(dist_b(X, X[s]), 2) # num = sum_sqz(mul(weight, X), 1) # X[s] = div(num, sum_sqz(weight, 1)) # return X # - Modified def meanshift(data, bs=500): n = len(data) X = torch.FloatTensor(np.copy(data)).cuda() for it in range(5): for i in range(0,n,bs): s = slice(i,min(n,i+bs)) weight = gaussian(dist_b(X, X[s]), 2).unsqueeze(2) # - Adapted to obtain working shape num = sum_sqz(mul(weight, X), 1) X[s] = div(num, sum_sqz(weight, 1).unsqueeze(1)) # - Adapted to obtain working shape return X %time X = meanshift(data).cpu().numpy() kmeans.plot_data(centroids+2, X, n_samples) from sklearn.neighbors import LSHForest, KDTree, BallTree n_clusters=6 n_samples =2500 centroids = np.random.uniform(-35, 35, (n_clusters, 2)) slices = [np.random.multivariate_normal(centroids[i], np.diag([5., 5.]), n_samples) for i in range(n_clusters)] data = np.concatenate(slices).astype(np.float32) nn = KDTree(data) 
nearest = nn.query(data[:10], 3, False); nearest nn = BallTree(data) nearest = nn.query(data[:10], 3, False); nearest kmeans.plot_data(centroids, data, n_samples) def index_b(a,idxs): ir, ic = idxs.size() ar, ac = a.size() return a[idxs.view(ir*ic)].view(ir,ic,ac) a = FT([[1,2],[3.,4],[5,6]]) b = torch.LongTensor([[0,1], [1,2]]) exp = FT([[[1,2], [3,4.]], [[3,4], [5,6]]]) assert(torch.equal(index_b(a,b), exp)) def dist_b_n(a,b,pts): dists = sub(pts,b.unsqueeze(1))**2 return torch.sqrt(dists.sum(2)) # - Original # def meanshift(data, bs=512): # n = len(data) # X = torch.FloatTensor(np.copy(data)).cuda() # for it in range(5): # d = X.cpu().numpy() # nn = BallTree(d) # for i in range(0,n,bs): # s = slice(i,min(n,i+bs)) # nearest = torch.LongTensor(nn.query(d[s], 50, False)).cuda() # pts = index_b(X, nearest) # weight = gaussian(dist_b_n(X, X[s], pts), 2) # num = sum_sqz(mul(weight, pts), 1) # X[s] = div(num, sum_sqz(weight, 1)) # return X # - Modified def meanshift(data, bs=512): n = len(data) X = torch.FloatTensor(np.copy(data)).cuda() for it in range(5): d = X.cpu().numpy() nn = BallTree(d) for i in range(0,n,bs): s = slice(i,min(n,i+bs)) nearest = torch.LongTensor(nn.query(d[s], 50, False)).cuda() pts = index_b(X, nearest) weight = gaussian(dist_b_n(X, X[s], pts), 2).unsqueeze(2) num = sum_sqz(mul(weight, pts), 1) X[s] = div(num, sum_sqz(weight, 1).unsqueeze(1)) return X %time data = meanshift(data).cpu().numpy() kmeans.plot_data(centroids+1, data, n_samples) <END_TASK>
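One mean-shift update on a toy array, with the same gaussian() kernel as above, makes the weighting concrete: each point moves to the kernel-weighted mean of all points, so near neighbors dominate and far points contribute almost nothing:

import math
import numpy as np

def gaussian(d, bw):
    return np.exp(-0.5 * (d / bw) ** 2) / (bw * math.sqrt(2 * math.pi))

X = np.array([[0.0, 0.0], [1.0, 0.0], [10.0, 10.0]])
x = X[0]
dist = np.sqrt(((x - X) ** 2).sum(1))     # distances from x to every point
weight = gaussian(dist, 2.5)
print((weight[:, None] * X).sum(0) / weight.sum())  # ~[0.48, 0]: pulled toward [1, 0]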
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: I hadn't bothered to check whether the relevant scikit-learn functions actually accepted sparse matrices when I was just playing, so I did the row normalization myself by hand. Step2: Again with the hand-rolled normalisation. It was not hard in this case. Step3: Instead of LargeVis we can just use t-SNE. Some caveats Step4: Clustering looks pretty much the same as it did in the final version. I played with parameters a little here, and also looked at leaf clustering as the cluster extraction method. In practice, however, the standard Excess of Mass approach was more than adequate. Step5: Onto the Bokeh plotting. This was still just experimenting with mapping and clustering, so I hadn't honed the plot code much. I don't do nice colormapping, for instance, but instead plot the noise and cluster points separately. There is also no adjustment of alpha channels based on zoom levels. It was good enough to view the map and mouse over regions to see how well things worked. Step6: The final real test was simply to print out the contents of the clusters and see if they made sense. For the most part they are pretty good, but they are less good than what LargeVis provided, with more clusters for which there aren't clear topics. Feel free to do exactly this for the LargeVis version and you'll see what I mean.
<ASSISTANT_TASK:> Python Code: import pandas as pd import scipy.sparse as ss import numpy as np from sklearn.decomposition import TruncatedSVD import sklearn.manifold import tsne import re raw_data = pd.read_csv('subreddit-overlap') raw_data.head() subreddit_popularity = raw_data.groupby('t2_subreddit')['NumOverlaps'].sum() subreddits = np.array(subreddit_popularity.sort_values(ascending=False).index) index_map = dict(np.vstack([subreddits, np.arange(subreddits.shape[0])]).T) count_matrix = ss.coo_matrix((raw_data.NumOverlaps, (raw_data.t2_subreddit.map(index_map), raw_data.t1_subreddit.map(index_map))), shape=(subreddits.shape[0], subreddits.shape[0]), dtype=np.float64) count_matrix conditional_prob_matrix = count_matrix.tocsr() row_sums = np.array(conditional_prob_matrix.sum(axis=1))[:,0] row_indices, col_indices = conditional_prob_matrix.nonzero() conditional_prob_matrix.data /= row_sums[row_indices] reduced_vectors = TruncatedSVD(n_components=500, random_state=0).fit_transform(conditional_prob_matrix) reduced_vectors /= np.sqrt((reduced_vectors**2).sum(axis=1))[:, np.newaxis] seed_state = np.random.RandomState(0) subreddit_map = tsne.bh_sne(reduced_vectors[:10000], perplexity=50.0, random_state=seed_state) subreddit_map_df = pd.DataFrame(subreddit_map, columns=('x', 'y')) subreddit_map_df['subreddit'] = subreddits[:10000] subreddit_map_df.head() import hdbscan clusterer = hdbscan.HDBSCAN(min_samples=5, min_cluster_size=20).fit(subreddit_map) cluster_ids = clusterer.labels_ subreddit_map_df['cluster'] = cluster_ids from bokeh.plotting import figure, show, output_notebook, output_file from bokeh.models import HoverTool, ColumnDataSource, value from bokeh.models.mappers import LinearColorMapper from bokeh.palettes import viridis from collections import OrderedDict output_notebook() color_mapper = LinearColorMapper(palette=viridis(256), low=0, high=cluster_ids.max()) color_dict = {'field': 'cluster', 'transform': color_mapper} plot_data_clusters = ColumnDataSource(subreddit_map_df[subreddit_map_df.cluster >= 0]) plot_data_noise = ColumnDataSource(subreddit_map_df[subreddit_map_df.cluster < 0]) tsne_plot = figure(title=u'A Map of Subreddits', plot_width = 700, plot_height = 700, tools= (u'pan, wheel_zoom, box_zoom,' u'box_select, resize, reset'), active_scroll=u'wheel_zoom') tsne_plot.add_tools( HoverTool(tooltips = OrderedDict([('subreddit', '@subreddit'), ('cluster', '@cluster')]))) # draw clusters tsne_plot.circle(u'x', u'y', source=plot_data_clusters, fill_color=color_dict, line_alpha=0.002, fill_alpha=0.1, size=10, hover_line_color=u'black') # draw noise tsne_plot.circle(u'x', u'y', source=plot_data_noise, fill_color=u'gray', line_alpha=0.002, fill_alpha=0.05, size=10, hover_line_color=u'black') # configure visual elements of the plot tsne_plot.title.text_font_size = value(u'16pt') tsne_plot.xaxis.visible = False tsne_plot.yaxis.visible = False tsne_plot.grid.grid_line_color = None tsne_plot.outline_line_color = None show(tsne_plot); def is_nsfw(subreddit): return re.search(r'(nsfw|gonewild)', subreddit) for cid in range(cluster_ids.max() + 1): subreddits = subreddit_map_df.subreddit[cluster_ids == cid] if np.any(subreddits.map(is_nsfw)): subreddits = ' ... Censored ...' else: subreddits = subreddits.values print '\nCluster {}:\n{}\n'.format(cid, subreddits) <END_TASK>
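On that first point: sklearn's normalize() does in fact accept scipy sparse matrices, so the hand-rolled CSR normalization above could be a single call. A quick check on a toy matrix (assumes scikit-learn is installed):

import numpy as np
import scipy.sparse as ss
from sklearn.preprocessing import normalize

m = ss.csr_matrix(np.array([[1.0, 3.0], [2.0, 2.0]]))
print(normalize(m, norm='l1', axis=1).toarray())  # each row now sums to 1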
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: For convenience, the entropy value for these two cases is defined as 0.0. Step2: In the first two cases, entropia(0.0) and entropia(1.0), the function will check that p contains one of the special values and will simply return zero. Step3: Exercise 02 Step4: Exercise 03 Step5: Exercise 04 Step7: Let's now create the Rock-Paper-Scissors game. Step8: The text written right after the function definition, wrapped in a sequence of 3 double quotes, is called a docstring. Step9: Exercise 05 Step11: Exercise 06
<ASSISTANT_TASK:> Python Code: from math import log def entropia(p): return -p*log(p,2) - (1.0-p)*log(1.0-p,2) print(entropia(1.0), entropia(0.0)) def entropia(p): if p == 0 or p == 1: return 0.0 else: return -p*log(p,2) - (1.0-p)*log(1.0-p,2) print(entropia(0.0), entropia(1.0), entropia(0.5)) # create the function here: # test cases print(MediaPonderada(10, 9, 0.3)) # answer = 9.3 print(MediaPonderada(5,7,1.0)) # answer = 5 print(MediaPonderada(3,7,2.0)) # answer = 5 # create the function here corte = 8 media = MediaPonderada(10, 9, 0.3) print (media, Conceito(media, corte)) # 9.3, A media = MediaPonderada(5,7,0.3) print (media, Conceito(media, corte)) # 6.4, F # create the function here def Faltas(faltas, limiar): corte = 14 faltas = Faltas(10, corte) print (faltas) # NO faltas = Faltas(20, corte) print (faltas) # O # create the function here media = MediaPonderada(10, 9, 0.3) conceito = Conceito(media, 8) faltas = Faltas(10, 14) print(ConceitoFinal(conceito, faltas)) # A conceito = Conceito(0.0, 8) print(ConceitoFinal(conceito,faltas)) # F faltas = Faltas(20, 14) print(ConceitoFinal(conceito,faltas)) # O def PedraPapelTesoura(J1, J2): Determines the winner of a rock-paper-scissors match. Returns -1 if player 1 won, 0 for a tie, and +1 if player 2 wins print(PedraPapelTesoura("pedra", "tesoura"), PedraPapelTesoura("pedra", "pedra"), PedraPapelTesoura("tesoura", "pedra")) help(PedraPapelTesoura) import random def CaraCoroa(palpite): numero = random.random() print(CaraCoroa("cara")) print(CaraCoroa("cara")) print(CaraCoroa("cara")) print(CaraCoroa("cara")) def CaraCoroa(palpite): numero = random.random() # using only one if-else print(CaraCoroa("cara")) print(CaraCoroa("cara")) print(CaraCoroa("cara")) print(CaraCoroa("cara")) def PedraPapelTesoura(J1): Determines the winner of a rock-paper-scissors match. Returns -1 if player 1 won, 0 for a tie, and +1 if player 2 wins J2 = random.choice([ "pedra", "papel", "tesoura"]) print(PedraPapelTesoura("pedra")) print(PedraPapelTesoura("pedra")) print(PedraPapelTesoura("pedra")) <END_TASK>
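A quick numerical check of the guarded definition above: binary entropy is 0 at the endpoints, symmetric, and peaks at p = 0.5 with 1 bit:

from math import log

def entropia(p):
    if p in (0.0, 1.0):   # the convention adopted in the task
        return 0.0
    return -p * log(p, 2) - (1.0 - p) * log(1.0 - p, 2)

for p in (0.0, 0.1, 0.5, 0.9, 1.0):
    print(p, entropia(p))   # 0.0, ~0.469, 1.0, ~0.469, 0.0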
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: This is what the general algorithm for star produces, given an automaton A and a new state s. Step2: Examples Step3: General
<ASSISTANT_TASK:> Python Code: import vcsn %%automaton a context = "lan_char, b" $ -> s s -> A \e A -> s \e s -> $ ctx = vcsn.context('lal_char, q') aut = lambda e: ctx.expression(e).standard() aut('a+b').star("standard") aut('a+b').star("general") <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Neural Network Step2: Initialize Weights Step3: As you can see, the accuracy is close to guessing for both zeros and ones, around 10%. Step4: The histogram used 500 buckets for the 1000 values. Since the chance for any single bucket is the same, there should be around 2 values for each bucket. That's exactly what we see with the histogram. Some buckets have more and some have less, but they trend around 2. Step5: The loss graph shows the neural network is learning, which it didn't with all zeros or all ones. We're headed in the right direction. Step6: We're going in the right direction; the accuracy and loss are better with [-1, 1). We still want smaller weights. How far can we go before it's too small? Step7: It looks like anything [-0.01, 0.01) or smaller is too small. Let's compare this to our typical rule of using the range $y=1/\sqrt{n}$. Step8: The range we found and $y=1/\sqrt{n}$ are really close. Step9: Let's compare the normal distribution against the previous uniform distribution. Step10: The normal distribution gave a slight increase in accuracy and loss. Let's move closer to 0 and drop picked numbers that are more than x standard deviations away. This distribution is called the Truncated Normal Distribution. Step11: Again, let's compare the previous results with the new distribution. Step12: There's no difference between the two, but that's because the neural network we're using is too small. A larger neural network will pick more points on the normal distribution, increasing the likelihood that its choices are larger than 2 standard deviations.
<ASSISTANT_TASK:> Python Code: %matplotlib inline import tensorflow as tf import helper from tensorflow.examples.tutorials.mnist import input_data print('Getting MNIST Dataset...') mnist = input_data.read_data_sets("MNIST_data/", one_hot=True) print('Data Extracted.') # Save the shapes of weights for each layer layer_1_weight_shape = (mnist.train.images.shape[1], 256) layer_2_weight_shape = (256, 128) layer_3_weight_shape = (128, mnist.train.labels.shape[1]) print(mnist.train.images.shape) all_zero_weights = [ tf.Variable(tf.zeros(layer_1_weight_shape)), tf.Variable(tf.zeros(layer_2_weight_shape)), tf.Variable(tf.zeros(layer_3_weight_shape)) ] all_one_weights = [ tf.Variable(tf.ones(layer_1_weight_shape)), tf.Variable(tf.ones(layer_2_weight_shape)), tf.Variable(tf.ones(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'All Zeros vs All Ones', [ (all_zero_weights, 'All Zeros'), (all_one_weights, 'All Ones')]) helper.hist_dist('Random Uniform (minval=-3, maxval=3)', tf.random_uniform([1000], -3, 3)) # Default for tf.random_uniform is minval=0 and maxval=1 basline_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape)), tf.Variable(tf.random_uniform(layer_2_weight_shape)), tf.Variable(tf.random_uniform(layer_3_weight_shape)) ] helper.compare_init_weights( mnist, 'Baseline', [(basline_weights, 'tf.random_uniform [0, 1)')]) uniform_neg1to1_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1, 1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1, 1)) ] helper.compare_init_weights( mnist, '[0, 1) vs [-1, 1)', [ (basline_weights, 'tf.random_uniform [0, 1)'), (uniform_neg1to1_weights, 'tf.random_uniform [-1, 1)')]) uniform_neg01to01_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.1, 0.1)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.1, 0.1)) ] uniform_neg001to001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.01, 0.01)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.01, 0.01)) ] uniform_neg0001to0001_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_2_weight_shape, -0.001, 0.001)), tf.Variable(tf.random_uniform(layer_3_weight_shape, -0.001, 0.001)) ] helper.compare_init_weights( mnist, '[-1, 1) vs [-0.1, 0.1) vs [-0.01, 0.01) vs [-0.001, 0.001)', [ (uniform_neg1to1_weights, '[-1, 1)'), (uniform_neg01to01_weights, '[-0.1, 0.1)'), (uniform_neg001to001_weights, '[-0.01, 0.01)'), (uniform_neg0001to0001_weights, '[-0.001, 0.001)')], plot_n_batches=None) import numpy as np general_rule_weights = [ tf.Variable(tf.random_uniform(layer_1_weight_shape, -1/np.sqrt(layer_1_weight_shape[0]), 1/np.sqrt(layer_1_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_2_weight_shape, -1/np.sqrt(layer_2_weight_shape[0]), 1/np.sqrt(layer_2_weight_shape[0]))), tf.Variable(tf.random_uniform(layer_3_weight_shape, -1/np.sqrt(layer_3_weight_shape[0]), 1/np.sqrt(layer_3_weight_shape[0]))) ] helper.compare_init_weights( mnist, '[-0.1, 0.1) vs General Rule', [ (uniform_neg01to01_weights, '[-0.1, 0.1)'), (general_rule_weights, 'General Rule')], plot_n_batches=None) helper.hist_dist('Random Normal (mean=0.0, stddev=1.0)', tf.random_normal([1000])) normal_01_weights = [ tf.Variable(tf.random_normal(layer_1_weight_shape, stddev=0.1)), 
tf.Variable(tf.random_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.random_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Uniform [-0.1, 0.1) vs Normal stddev 0.1', [ (uniform_neg01to01_weights, 'Uniform [-0.1, 0.1)'), (normal_01_weights, 'Normal stddev 0.1')]) helper.hist_dist('Truncated Normal (mean=0.0, stddev=1.0)', tf.truncated_normal([1000])) trunc_normal_01_weights = [ tf.Variable(tf.truncated_normal(layer_1_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_2_weight_shape, stddev=0.1)), tf.Variable(tf.truncated_normal(layer_3_weight_shape, stddev=0.1)) ] helper.compare_init_weights( mnist, 'Normal vs Truncated Normal', [ (normal_01_weights, 'Normal'), (trunc_normal_01_weights, 'Truncated Normal')]) helper.compare_init_weights( mnist, 'Baseline vs Truncated Normal', [ (basline_weights, 'Baseline'), (trunc_normal_01_weights, 'Truncated Normal')]) <END_TASK>
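The "general rule" range the experiments converge on is easy to eyeball in numpy; a small sketch, independent of TensorFlow, for a layer with n inputs (a uniform draw on [-a, a) has standard deviation a/sqrt(3)):

import numpy as np

n_inputs = 784                      # e.g. flattened MNIST images
limit = 1 / np.sqrt(n_inputs)
w = np.random.uniform(-limit, limit, size=(n_inputs, 256))
print(limit, w.std())               # w.std() comes out close to limit / sqrt(3)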
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 4.2 Policy Improvement Step2: 4.3 Policy Iteration Step3: 4.4 Value Iteration Step4: 4.5 Asynchronous Dynamic Programming
<ASSISTANT_TASK:> Python Code: Image('./res/iterative_policy_evaluation.png') Image('./res/ex4_1.png') class Action(enum.Enum): EAST = enum.auto() WEST = enum.auto() SOUTH = enum.auto() NORTH = enum.auto() @staticmethod def move(x, y, action): if action == Action.EAST: return x, y - 1 elif action == Action.WEST: return x, y + 1 elif action == Action.SOUTH: return x + 1, y elif action == Action.NORTH: return x - 1, y class GridWorld(object): def move(self, s, action): if s == 0 or s == 15: return s, 0 elif 0 < s < 15: x = s // 4 y = s % 4 x1, y1 = Action.move(x, y, action) if 0 <= x1 < 4 and 0 <= y1 < 4: s1 = x1 * 4 + y1 return s1, -1 else: return s, -1 else: raise ValueError('s {} must be in [0, 15]'.format(s)) class RandomPolicy(object): def __init__(self, grid_world): self._grid_world = grid_world self._v = np.zeros((4, 4)) self._v_flatten = self._v.ravel() self._delta = 0 def iterate(self): v = self._v.copy() for s in range(0, 16): self.update_value(s) self._delta = max(self._delta, np.sum(np.abs(v - self._v))) return self._v.copy() def get_pi(self, s): return [(0.25, (s, a)) for a in [Action.EAST, Action.WEST, Action.SOUTH, Action.NORTH]] def update_value(self, s): # V(s) = \sum_a \pi(a | s) \sum 1 * (r + 1 * V(s1)) vs = [] for (prob, (s, a)) in self.get_pi(s): s1, r = self._grid_world.move(s, a) vs.append(prob * (r + self._v_flatten[s1])) logger.debug('vs: {}'.format(vs)) self._v_flatten[s] = np.sum(vs) # logger.setLevel(logging.DEBUG) r = RandomPolicy(GridWorld()) for _ in range(100): r.iterate() pd.DataFrame(np.round(r.iterate())) Image('./res/fig4_1.png') Image('./res/policy_iteration.png') Image('./res/fig4_2.png') Image('./res/value_iteration.png') Image('./res/gpi.png') <END_TASK>
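Section 4.4 above is shown only through figures; a minimal value-iteration sketch for the same 4x4 gridworld (gamma = 1, deterministic moves, states 0 and 15 terminal) fills in the update V(s) <- max_a [r + V(s')]:

import numpy as np

def step(s, a):
    # Same dynamics as GridWorld.move above; terminals absorb with reward 0.
    if s in (0, 15):
        return s, 0
    x, y = divmod(s, 4)
    dx, dy = {'E': (0, -1), 'W': (0, 1), 'S': (1, 0), 'N': (-1, 0)}[a]
    x1, y1 = x + dx, y + dy
    if 0 <= x1 < 4 and 0 <= y1 < 4:
        return x1 * 4 + y1, -1
    return s, -1

V = np.zeros(16)
for _ in range(100):
    for s in range(16):
        V[s] = max(r + V[s1] for s1, r in (step(s, a) for a in 'EWSN'))
print(V.reshape(4, 4))  # minus the number of steps to the nearest terminal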
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Define some common variables Step2: 1. Comon Queries for Clinical Analysis Step3: Proband information Step4: Check the interpretation id of a case Step5: Inspect the Interpretation object Step6: Check Reported pathogenic variants in a case interpretation and list the variant tier Step7: Retrieve the annotation for the reported variants Step8: PanelApp panels applied in the original analysis Step9: 2. Use Case Step10: Now you can select one random case id for the subsequent analysis Step11: 2. Retrieve the interpretation id/s from the seleted case Step12: 3. Retrieve reported variants and the annotation, including tiering Step13: Obtain annotation from variants reported in a interpretation from a case as a JSON object Step14: Obtain tiering
<ASSISTANT_TASK:> Python Code: ## Step 1. Import pyopencga dependecies from pyopencga.opencga_config import ClientConfiguration # import configuration module from pyopencga.opencga_client import OpencgaClient # import client module from pprint import pprint from IPython.display import JSON import matplotlib.pyplot as plt import seaborn as sns import pandas as pd ## Step 2. User credentials user = 'demouser' #################################### ## Step 3. Create the ClientConfiguration dict host = 'http://bioinfo.hpc.cam.ac.uk/opencga-prod' config_dict = {'rest': { 'host': host } } ## Step 4. Create the ClientConfiguration and OpenCGA client config = ClientConfiguration(config_dict) oc = OpencgaClient(config) ## Step 5. Login to OpenCGA using the OpenCGA client- add password when prompted oc.login(user) print('Logged succesfuly to {}, your token is: {} well done!'.format(host, oc.token)) # Define the study id study = 'reanalysis:rd38' # Define a clinicalCaseId case_id = 'OPA-10044-1' # Define a interpretationId interpretation_id = 'OPA-10044-1__2' ## Query using the clinical search web service cases_search = oc.clinical.search(study=study, include='id,type,proband,description,panels,interpretation', limit=5) cases_search.print_results(title='Cases found for study {}'.format(study), fields='id,type,proband.id,panels.id,interpretation.id') ## Uncomment next line to display an interactive JSON viewer # JSON(cases_search.get_results()) ## Query using the clinical info web service disorder_search = oc.clinical.search(study=study, include='id,type,proband', limit=5) disorder_search.print_results(title='Disorders and phenotypes', fields='id,type,proband.id') disorder_object = disorder_search.get_results()[0]['proband'] ## Uncomment next line to display an interactive JSON viewer # JSON(disorder_object) # Query using the clinical info web service clinical_info = oc.clinical.info(clinical_analysis=case_id, study=study) clinical_info.print_results(fields='id,interpretation.id,type,proband.id') ## Uncomment next line to display an interactive JSON viewer # JSON(clinical_info.get_results()[0]['interpretation']) ## Query using the clinical info_interpretation web service interpretation_object = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', study=study).get_results() ## Uncomment next line to display an interactive JSON viewer # JSON(interpretation_object) ## Query using the clinical info_interpretation web service interpretation_stats = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='stats', study=study).get_results()[0]['stats']['primaryFindings'] ## Uncomment next line to display an interactive JSON viewer # JSON(interpretation_stats) ## Query using the clinical info_interpretation web service variant_annotation = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='primaryFindings.annotation', study=study).get_results()[0]['primaryFindings'] ## Uncomment next line to display an interactive JSON viewer # JSON(variant_annotation) cases_search = oc.clinical.search(study=study, include='id,panels', limit= 5) cases_search.print_results(title='Cases found for study {}'.format(study), fields='id,panels.id') ## Uncomment next line to display an interactive JSON viewer # JSON(cases_search.get_results()) ## Search the cases cases_search = oc.clinical.search(study=study, limit=3) ## Uncomment next line to display an interactive JSON viewer # JSON(cases_search.get_results()) ## Define an empty list to keep the case ids: case_ids = [] ## Iterate 
over the cases and retrieve the ids: for case in oc.clinical.search(study=study, include='id').result_iterator(): case_ids.append(case['id']) ## Uncomment for printing the list with all the case ids # print(case_ids) ## Select a random case from the list import random if case_ids != []: print('There are {} cases in study {}'.format(len(case_ids), study)) selected_case = random.choice(case_ids) print('Case selected for analysis is {}'.format(selected_case)) else: print('There are no cases in the study', study) ## Query using the clinical info web service interpretation_info = oc.clinical.info(clinical_analysis=selected_case, study=study) interpretation_info.print_results(fields='id,interpretation.id,type,proband.id') ## Select interpretation object interpretation_object = interpretation_info.get_results()[0]['interpretation'] ## Select interpretation id interpretation_id = interpretation_info.get_results()[0]['interpretation']['id'] ## Uncomment next line to display an interactive JSON viewer # JSON(interpretation_object) print('The interpretation id for case {} is {}'.format(selected_case, interpretation_object['id'] )) ## Query using the clinical info_interpretation web service interpretation_stats = oc.clinical.info_interpretation(interpretations=interpretation_id, include='stats', study=study).get_results()[0]['stats']['primaryFindings'] ## Uncomment next line to display an interactive JSON viewer # JSON(interpretation_stats) ## Query using the clinical info_interpretation web service primary_findings = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study).get_results()[0]['primaryFindings'] ## Uncomment next line to display an interactive JSON viewer # JSON(primary_findings) ## Perform the query variants_reported = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study) ## Define empty list to store the variants, genes and the tiering variant_list = [] gene_id_list=[] genename_list=[] tier_list =[] for variant in variants_reported.get_results()[0]['primaryFindings']: variant_id = variant['id'] variant_list.append(variant_id) gene_id = variant['evidences'][0]['genomicFeature']['id'] gene_id_list.append(gene_id) gene_name = variant['evidences'][0]['genomicFeature']['geneName'] genename_list.append(gene_name) tier = variant['evidences'][0]['classification']['tier'] tier_list.append(tier) ## Construct a Dataframe and return the first 5 rows df = pd.DataFrame(data = {'variant_id':variant_list, 'gene_id':gene_id_list, 'gene_name':genename_list, 'tier': tier_list}) df.head() <END_TASK>
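A small hedged addendum to the case-analysis task above: matplotlib was imported at the top, so the tier distribution of the reported variants can be inspected directly from the final DataFrame.

## Quick look at how the reported variants distribute across tiers
df['tier'].value_counts().plot(kind='bar', title='Reported variants per tier')
plt.show()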
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Example Step2: Inline visualisation Step3: DFT example - persistent connection, checkpointing Step4: File-based interfaces vs. Native interfaces Step5: Wrapping Castep with f90wrap Step6: Single point calculation Step7: Interactive introspection Step8: Visualise charge density isosurfaces on-the-fly Step9: Postprocessing/steering of running calculations Step10: So far this is just analysis/post-processing, but could easily go beyond this and steer calculations based on results of e.g. Bader analysis. Step11: Example - geometry optimisation Step12: Developing and testing new high-level algorithms
<ASSISTANT_TASK:> Python Code: %pylab inline import numpy as np from chemview import enable_notebook from matscipy.visualise import view enable_notebook() from ase.lattice import bulk from ase.optimize import LBFGSLineSearch from quippy.potential import Potential si = bulk('Si', a=5.44, cubic=True) sw_pot = Potential('IP SW') # call into Fortran code si.set_calculator(sw_pot) e_bulk_per_atom = si.get_potential_energy()/len(si) vac = si * (3, 3, 3) del vac[len(vac)/2] vac.set_calculator(sw_pot) p0 = vac.get_positions() opt = LBFGSLineSearch(vac) opt.run(fmax=1e-3) p1 = vac.get_positions() u = p1 - p0 e_vac = vac.get_potential_energy() print 'SW vacancy formation energy', e_vac - e_bulk_per_atom*len(vac), 'eV' view(vac, np.sqrt(u**2).sum(axis=1), bonds=False) import distutils.spawn as spawn from matscipy.socketcalc import SocketCalculator, VaspClient from matscipy.checkpoint import CheckpointCalculator from ase.lattice import bulk from ase.optimize import FIRE mpirun = spawn.find_executable('mpirun') vasp = spawn.find_executable('vasp') vasp_client = VaspClient(client_id=0, npj=2, ppn=12, exe=vasp, mpirun=mpirun, parmode='mpi', lwave=False, lcharg=False, ibrion=13, xc='PBE', kpts=[2,2,2]) vasp = SocketCalculator(vasp_client) chk_vasp = CheckpointCalculator(vasp, 'vasp_checkpoint.db') si = bulk('Si', a=5.44, cubic=True) si.set_calculator(chk_vasp) e_bulk_per_atom = si.get_potential_energy()/len(si) vac3 = si.copy() del vac3[0] vac3.set_calculator(chk_vasp) opt = FIRE(vac3) opt.run(fmax=1e-3) e_vac3 = vac3.get_potential_energy() print 'VASP vacancy formation energy', e_vac3 - e_bulk_per_atom*len(vac3), 'eV' from gpaw import restart si, gpaw = restart('si-vac.gpw') rho = gpaw.get_pseudo_density() atom = 5 plot(si.positions[:, 0], si.positions[:, 1], 'k.', ms=20) plot(si.positions[atom, 0], si.positions[5, 1], 'g.', ms=20) imshow(rho[:,:,0], extent=[0, si.cell[0,0], 0, si.cell[1,1]]) import bader bdr = bader.bader(si, rho) bdr.nvols # collect Bader volumes associated with atom #5 mask = np.zeros_like(rho, dtype=bool) for v in (bdr.nnion == atom+1).nonzero()[0]: mask[bdr.volnum == v+1] = True plot(si.positions[:, 0], si.positions[:, 1], 'k.', ms=20) plot(si.positions[atom, 0], si.positions[5, 1], 'g.', ms=20) imshow(rho[:,:,0], extent=[0, si.cell[0,0], 0, si.cell[1,1]]) imshow(mask[:,:,0], extent=[0, si.cell[0,0], 0, si.cell[1,1]], alpha=.6) import castep #castep. #castep.cell.Unit_Cell. castep.model.model_wave_read? from ase.lattice.cubic import Diamond atoms = Diamond('Si') calc = castep.calculator.CastepCalculator(atoms=atoms) atoms.set_calculator(calc) e = atoms.get_potential_energy() f = atoms.get_forces() print 'Energy', e, 'eV' print 'Forces (eV/A):' print f #calc.model.eigenvalues #calc.model.wvfn.coeffs #calc.model.cell.ionic_positions.T #calc.model.wvfn. 
#calc.parameters.cut_off_energy figsize(8,6) plot(castep.ion.get_array_core_radial_charge()) plot(castep.ion.get_array_atomic_radial_charge()) ylim(-0.5,0.5) # grid points, in Angstrom real_grid = (castep.basis.get_array_r_real_grid()* castep.io.io_atomic_to_unit(1.0, 'ang')) resolution = [castep.basis.get_ngx(), castep.basis.get_ngy(), castep.basis.get_ngz()] origin = np.array([real_grid[i, :].min() for i in range(3)]) extent = np.array([real_grid[i, :].max() for i in range(3)]) - origin # charge density resulting from SCF den = calc.model.den.real_charge.copy() den3 = (den.reshape(resolution, order='F') / castep.basis.get_total_fine_grid_points()) # visualise system with isosurface of charge density at 0.002 viewer = view(atoms) viewer.add_isosurface_grid_data(den3, origin, extent, resolution, isolevel=0.002, color=0x0000ff, style='solid') viewer from display import ListTable from bader import bader bdr = bader(atoms, den3) rows = ListTable() rows.append(['<b>{0}</b>'.format(hd) for hd in ['Ion', 'Charge', 'Volume']]) for i, (chg, vol) in enumerate(zip(bdr.ionchg, bdr.ionvol)): rows.append(['{0:.2f}'.format(d) for d in [i, chg, vol] ]) rows castep.wave.wave_orthogonalise? from ase.optimize import LBFGS atoms.rattle(0.01) opt = LBFGS(atoms) opt.run(fmax=0.1) from ase.lattice import bulk import castep import preconpy.lbfgs as lbfgs import preconpy.precon as precon from preconpy.utils import LoggingCalculator atoms = bulk('Si', cubic=True) s = atoms.get_scaled_positions() s[:, 0] *= 0.98 atoms.set_scaled_positions(s) initial_atoms = atoms log_calc = LoggingCalculator(None) for precon, label in zip([None, precon.Exp(A=3, use_pyamg=False)], ['No preconditioner', 'Exp preconditioner']): print label atoms = initial_atoms.copy() calc = castep.calculator.CastepCalculator(atoms=atoms) log_calc.calculator = calc log_calc.label = label atoms.set_calculator(log_calc) opt = lbfgs.LBFGS(atoms, precon=precon, use_line_search=False) opt.run(fmax=1e-2) <END_TASK>
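A minimal hedged addendum to the task above: persisting the optimised geometry with plain ASE I/O. ase.io.write is standard ASE; the filename is an arbitrary choice, not from the original notebook.

from ase.io import write
# Save the final structure from the preconditioned optimisation for later inspection.
write('si_relaxed.xyz', atoms)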
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load Data Step2: Define a function for modeling and cross-validation Step3: Step 1- Find the number of estimators for a high learning rate Step4: Step 2- Tune max_depth and min_child_weight Step5: Step 3- Tune gamma Step6: Step 4- Tune subsample and colsample_bytree Step7: Step 5- Tune the regularization parameter reg_alpha Step8: Step 6- Reduce the learning rate and increase the number of estimators
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np import xgboost as xgb from xgboost.sklearn import XGBClassifier from sklearn import cross_validation, metrics from sklearn.grid_search import GridSearchCV import matplotlib.pylab as plt %matplotlib inline from matplotlib.pylab import rcParams rcParams['figure.figsize'] = 12, 4 data_path = '/Users/wy/notebook/dataset/Parameter_Tuning_XGBoost_with_Example/' train = pd.read_csv(data_path+'train_modified.csv') test = pd.read_csv(data_path+'test_modified.csv') train.shape, test.shape target='Disbursed' IDcol = 'ID' train['Disbursed'].value_counts() def modelfit(alg, dtrain, predictors,useTrainCV=True, cv_folds=5, early_stopping_rounds=50): if useTrainCV: xgb_param = alg.get_xgb_params() xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values) cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds, metrics='auc', early_stopping_rounds=early_stopping_rounds) alg.set_params(n_estimators=cvresult.shape[0]) #Fit the algorithm on the data alg.fit(dtrain[predictors], dtrain['Disbursed'],eval_metric='auc') #Predict training set: dtrain_predictions = alg.predict(dtrain[predictors]) dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1] #Print model report: print "\nModel Report" print "Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions) print "AUC Score (Train): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob) feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False) feat_imp.plot(kind='bar', title='Feature Importances') plt.ylabel('Feature Importance Score') #Choose all predictors except target & IDcols predictors = [x for x in train.columns if x not in [target, IDcol]] xgb1 = XGBClassifier( learning_rate =0.1, n_estimators=1000, max_depth=5, min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27) modelfit(xgb1, train, predictors) param_test1 = { 'max_depth':range(3,10,2), 'min_child_weight':range(1,6,2) } gsearch1 = GridSearchCV( estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5, min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27), param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5 ) gsearch1.fit(train[predictors],train[target]) gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_ param_test2 = { 'max_depth':[4,5,6], 'min_child_weight':[4,5,6] } gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5, min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch2.fit(train[predictors],train[target]) gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_ param_test2b = { 'min_child_weight':[6,8,10,12] } gsearch2b = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=4, min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test2b, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch2b.fit(train[predictors],train[target]) gsearch2b.grid_scores_, gsearch2.best_params_, gsearch2.best_score_ param_test3 = { 'gamma':[i/10.0 for i in 
range(0,5)] } gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch3.fit(train[predictors],train[target]) gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_ xgb2 = XGBClassifier( learning_rate =0.1, n_estimators=1000, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27) modelfit(xgb2, train, predictors) param_test4 = { 'subsample':[i/10.0 for i in range(6,10)], 'colsample_bytree':[i/10.0 for i in range(6,10)] } gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch4.fit(train[predictors],train[target]) gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_ param_test5 = { 'subsample':[i/100.0 for i in range(75,90,5)], 'colsample_bytree':[i/100.0 for i in range(75,90,5)] } gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch5.fit(train[predictors],train[target]) param_test6 = { 'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100] } gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4, min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch6.fit(train[predictors],train[target]) gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_ param_test7 = { 'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05] } gsearch7 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4, min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27), param_grid = param_test7, scoring='roc_auc',n_jobs=4,iid=False, cv=5) gsearch7.fit(train[predictors],train[target]) gsearch7.grid_scores_, gsearch7.best_params_, gsearch7.best_score_ xgb3 = XGBClassifier( learning_rate =0.1, n_estimators=1000, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, reg_alpha=0.005, objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27) modelfit(xgb3, train, predictors) xgb4 = XGBClassifier( learning_rate =0.01, n_estimators=5000, max_depth=4, min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8, reg_alpha=0.005, objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27) modelfit(xgb4, train, predictors) <END_TASK>
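A hedged addendum to the tuning walkthrough above: scoring the final tuned model on the held-out test file loaded at the top. This assumes test_modified.csv carries the same predictor columns; the public test set may not include the Disbursed label, so only the predicted probabilities are shown.

# Predict probabilities on the test set with the final tuned model (xgb4 was fit by modelfit above).
test_predprob = xgb4.predict_proba(test[predictors])[:, 1]
test_predprob[:5]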
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Multi-task recommenders Step2: Preparing the dataset Step3: And repeat our preparations for building vocabularies and splitting the data into a train and a test set Step4: A multi-task model Step5: Rating-specialized model Step6: The model does OK on predicting ratings (with an RMSE of around 1.11), but performs poorly at predicting which movies will be watched or not Step7: We get the opposite result Step8: The result is a model that performs roughly as well on both tasks as each specialized model.
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. !pip install -q tensorflow-recommenders !pip install -q --upgrade tensorflow-datasets import os import pprint import tempfile from typing import Dict, Text import numpy as np import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_recommenders as tfrs ratings = tfds.load('movielens/100k-ratings', split="train") movies = tfds.load('movielens/100k-movies', split="train") # Select the basic features. ratings = ratings.map(lambda x: { "movie_title": x["movie_title"], "user_id": x["user_id"], "user_rating": x["user_rating"], }) movies = movies.map(lambda x: x["movie_title"]) # Randomly shuffle data and split between train and test. tf.random.set_seed(42) shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False) train = shuffled.take(80_000) test = shuffled.skip(80_000).take(20_000) movie_titles = movies.batch(1_000) user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"]) unique_movie_titles = np.unique(np.concatenate(list(movie_titles))) unique_user_ids = np.unique(np.concatenate(list(user_ids))) class MovielensModel(tfrs.models.Model): def __init__(self, rating_weight: float, retrieval_weight: float) -> None: # We take the loss weights in the constructor: this allows us to instantiate # several model objects with different loss weights. super().__init__() embedding_dimension = 32 # User and movie models. self.movie_model: tf.keras.layers.Layer = tf.keras.Sequential([ tf.keras.layers.StringLookup( vocabulary=unique_movie_titles, mask_token=None), tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension) ]) self.user_model: tf.keras.layers.Layer = tf.keras.Sequential([ tf.keras.layers.StringLookup( vocabulary=unique_user_ids, mask_token=None), tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension) ]) # A small model to take in user and movie embeddings and predict ratings. # We can make this as complicated as we want as long as we output a scalar # as our prediction. self.rating_model = tf.keras.Sequential([ tf.keras.layers.Dense(256, activation="relu"), tf.keras.layers.Dense(128, activation="relu"), tf.keras.layers.Dense(1), ]) # The tasks. self.rating_task: tf.keras.layers.Layer = tfrs.tasks.Ranking( loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.RootMeanSquaredError()], ) self.retrieval_task: tf.keras.layers.Layer = tfrs.tasks.Retrieval( metrics=tfrs.metrics.FactorizedTopK( candidates=movies.batch(128).map(self.movie_model) ) ) # The loss weights. self.rating_weight = rating_weight self.retrieval_weight = retrieval_weight def call(self, features: Dict[Text, tf.Tensor]) -> tf.Tensor: # We pick out the user features and pass them into the user model. user_embeddings = self.user_model(features["user_id"]) # And pick out the movie features and pass them into the movie model. 
movie_embeddings = self.movie_model(features["movie_title"]) return ( user_embeddings, movie_embeddings, # We apply the multi-layered rating model to a concatentation of # user and movie embeddings. self.rating_model( tf.concat([user_embeddings, movie_embeddings], axis=1) ), ) def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor: ratings = features.pop("user_rating") user_embeddings, movie_embeddings, rating_predictions = self(features) # We compute the loss for each task. rating_loss = self.rating_task( labels=ratings, predictions=rating_predictions, ) retrieval_loss = self.retrieval_task(user_embeddings, movie_embeddings) # And combine them using the loss weights. return (self.rating_weight * rating_loss + self.retrieval_weight * retrieval_loss) model = MovielensModel(rating_weight=1.0, retrieval_weight=0.0) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) cached_train = train.shuffle(100_000).batch(8192).cache() cached_test = test.batch(4096).cache() model.fit(cached_train, epochs=3) metrics = model.evaluate(cached_test, return_dict=True) print(f"Retrieval top-100 accuracy: {metrics['factorized_top_k/top_100_categorical_accuracy']:.3f}.") print(f"Ranking RMSE: {metrics['root_mean_squared_error']:.3f}.") model = MovielensModel(rating_weight=0.0, retrieval_weight=1.0) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) metrics = model.evaluate(cached_test, return_dict=True) print(f"Retrieval top-100 accuracy: {metrics['factorized_top_k/top_100_categorical_accuracy']:.3f}.") print(f"Ranking RMSE: {metrics['root_mean_squared_error']:.3f}.") model = MovielensModel(rating_weight=1.0, retrieval_weight=1.0) model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1)) model.fit(cached_train, epochs=3) metrics = model.evaluate(cached_test, return_dict=True) print(f"Retrieval top-100 accuracy: {metrics['factorized_top_k/top_100_categorical_accuracy']:.3f}.") print(f"Ranking RMSE: {metrics['root_mean_squared_error']:.3f}.") trained_movie_embeddings, trained_user_embeddings, predicted_rating = model({ "user_id": np.array(["42"]), "movie_title": np.array(["Dances with Wolves (1990)"]) }) print("Predicted rating:") print(predicted_rating) <END_TASK>
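A hedged addendum to the multi-task model above (the original ends at the joint rating prediction): the standard TFRS pattern for serving top-K retrieval is a brute-force index over all candidate embeddings.

# Build a brute-force retrieval index from the trained user and movie towers.
index = tfrs.layers.factorized_top_k.BruteForce(model.user_model)
index.index_from_dataset(
    tf.data.Dataset.zip((movies.batch(100), movies.batch(100).map(model.movie_model))))

# Retrieve the top recommendations for user 42.
_, titles = index(tf.constant(["42"]))
print(f"Top 3 recommendations for user 42: {titles[0, :3]}")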
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Data Step2: First models Step3: For the model, simply copy and paste the code written in the file lasso_random_forest_regressor.py. Step4: The model reduced the number of trees. Step5: Grid Search Step6: The best parameters are the following Step7: And the model kept a reduced number of trees Step8: Evolution of the performance as a function of the parameters
<ASSISTANT_TASK:> Python Code: from jyquickhelper import add_notebook_menu add_notebook_menu() %matplotlib inline from sklearn.datasets import load_boston from sklearn.model_selection import train_test_split data = load_boston() X, y = data.data, data.target X_train, X_test, y_train, y_test = train_test_split(X, y) from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import r2_score rf = RandomForestRegressor() rf.fit(X_train, y_train) r2_score(y_test, rf.predict(X_test)) from ensae_teaching_cs.ml.lasso_random_forest_regressor import LassoRandomForestRegressor lrf = LassoRandomForestRegressor() lrf.fit(X_train, y_train) r2_score(y_test, lrf.predict(X_test)) len(lrf.estimators_) lrf.get_params() params = { 'lasso_estimator__alpha': [0.25, 0.5, 0.75, 1., 1.25, 1.5], 'rf_estimator__n_estimators': [20, 40, 60, 80, 100, 120] } from sklearn.exceptions import ConvergenceWarning from sklearn.model_selection import GridSearchCV import warnings warnings.filterwarnings("ignore", category=ConvergenceWarning) grid = GridSearchCV(estimator=LassoRandomForestRegressor(), param_grid=params, verbose=1) grid.fit(X_train, y_train) grid.best_params_ len(grid.best_estimator_.estimators_) r2_score(y_test, grid.predict(X_test)) grid.cv_results_ import numpy from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt fig = plt.figure(figsize=(14, 6)) ax = fig.add_subplot(131, projection='3d') xs = numpy.array([el['lasso_estimator__alpha'] for el in grid.cv_results_['params']]) ys = numpy.array([el['rf_estimator__n_estimators'] for el in grid.cv_results_['params']]) zs = numpy.array(grid.cv_results_['mean_test_score']) ax.scatter(xs, ys, zs) ax.set_title("3D...") ax = fig.add_subplot(132) for x in sorted(set(xs)): y2 = ys[xs == x] z2 = zs[xs == x] ax.plot(y2, z2, label="alpha=%1.2f" % x, lw=x*2) ax.legend(); ax = fig.add_subplot(133) for y in sorted(set(ys)): x2 = xs[ys == y] z2 = zs[ys == y] ax.plot(x2, z2, label="n_estimators=%d" % y, lw=y/40) ax.legend(); <END_TASK>
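A hedged addendum to the grid search above: the raw cv_results_ dict is easier to scan as a sorted DataFrame. pandas is not imported in the original cells, hence the extra import.

import pandas
# One row per parameter combination, best mean test score first.
df_cv = pandas.DataFrame(grid.cv_results_)
df_cv.sort_values('mean_test_score', ascending=False).head()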
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: First, just checking that the flattening and reshaping work as expected
<ASSISTANT_TASK:> Python Code: import numpy as np import pandas as pd import matplotlib.pyplot as plt test = np.random.randn(11,11,4,100) test.shape test_flat = test.flatten() test_flat.shape np.savetxt('test.txt', test_flat) test_back = np.loadtxt('test.txt').reshape((11,11,4,100)) test_back.shape np.mean(test - test_back) <END_TASK>
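A hedged alternative to the text round-trip above: numpy's binary format preserves shape and dtype directly, with no manual flatten/reshape and no precision loss from the text encoding.

np.save('test.npy', test)  # .npy keeps shape and dtype
test_back2 = np.load('test.npy')
print(test_back2.shape, np.mean(test - test_back2))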
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Connect Step2: Create and access a database Step3: Collections Step4: Insert data Step5: You can put anything in Step6: A practical example Step8: Checking out the data Step9: Or say we wanted just the "description" field Step10: Define the database Step11: Define the collection Step12: Insert data Step13: Querying Step14: Querying with .find( ) Step15: Searching Step16: 1117 is probably more than we want to print out in a Jupyter Notebook... Step17: Since there's only two, let's check them out Step18: If you already know SQL... Step19: Full text search with a text index Step20: To test our newly created text index on the description field, we will search documents using the $text operator. Let's start by looking for all the documents that have the word 'precipitation' in their description field. Step21: If we want to create a new text index, we can do so by first dropping the first text index Step22: We can also create a wildcard text index for scenarios where we want any text fields in the records to be searchable. In such scenarios you can index all the string fields of your document using the $** wildcard specifier. Step23: Projections Step24: Limit Step25: Aggregate Step26: Or we can combine $match and $group to aggregate the titles of just the public access records that match the word 'Soviet' Step27: The aggregation pipeline Step28: Removing data Step29: Now let's delete our records collection and check again to see what collections are in our database Step30: We can also just drop a database. First let's determine what databases we have Step31: Now let's remove the earthwindfire database Step32: Nice work! Step33: collStats returns a variety of storage statistics for a given collection. Let's try it out for our NOAA records collection
<ASSISTANT_TASK:> Python Code: import json import pymongo from pprint import pprint conn = pymongo.MongoClient() db = conn.mydb conn.database_names() collection = db.my_collection db.collection_names() doc = {"class":"xbus-502","date":"03-05-2016","instructor":"bengfort","classroom":"C222","roster_count":"25"} collection.insert_one(doc) doc = {"class":"xbus-502","date":"03-05-2016","teaching_assistant":"bilbro", "sauce": "awesome"} collection.insert_one(doc) with open("data_sample.json") as data_file: noaa = json.load(data_file) len(noaa) pprint(noaa[0]) pprint(noaa[0]['description']) db = conn.earthwindfire records = db.records # What data fields seem important to you? Add them below following the examples: def insert(metadata): for dataset in metadata: data = {} data["title"] = dataset["title"] data["description"] = dataset["description"] data["keywords"] = dataset["keyword"] data["accessLevel"] = dataset["accessLevel"] data["lang"] = dataset["language"] # choose your own # choose your own # choose your own # choose your own records.insert_one(data) insert(noaa) # Check to make sure they're all in there records.count() records.find_one() for rec in records.find()[:2]: pprint(rec) records.find({"keywords": "NESDIS"}).count() records.find({"keywords": {"$all": ["NESDIS", "Russia"]}, "accessLevel": "public"}).count() for r in records.find({"keywords": {"$all": ["NESDIS", "Russia"]}, "accessLevel": "public"}): pprint(r) cursor = db.records.find({"$where": "this.keywords.length > 100"}).limit(2) for rec in cursor: pprint(rec) db.records.create_index([('description', 'text')]) cursor = db.records.find({'$text': {'$search': 'precipitation'}}) for rec in cursor: print(rec) cursor = db.records.find({'$text': {'$search': 'fire'}}) cursor.count() db.records.drop_index("description_text") db.records.create_index([("$**","text")]) cursor = db.records.find({'$text': {'$search': "Russia"}}) for rec in cursor: pprint(rec) cursor = db.records.find({'$text': {'$search': "Russia"}}, {"title": 1,"_id":0 }) for rec in cursor: print(rec) cursor = db.records.find({'$text': {'$search': "Russia"}}, {"title": 1,"_id":0 }).limit(2) for rec in cursor: print(rec) cursor = db.records.aggregate( [ {"$group": {"_id": "$lang", "count": {"$sum": 1}}} ] ) for document in cursor: pprint(document) cursor = db.records.aggregate( [ {"$match": {'$text': {'$search': "Soviet"}, "accessLevel": "public"}}, {"$group": {"_id": "$title"}} ] ) for document in cursor: pprint(document) from IPython.display import Image Image(filename='images/mongodb_pipeline.png', width=600, height=300) conn.earthwindfire.collection_names() conn.earthwindfire.drop_collection("records") conn.earthwindfire.collection_names() conn.database_names() conn.drop_database("earthwindfire") conn.database_names() db = conn.mydb collection = db.my_collection db.command({'dbstats': 1}) db.command({'collstats': 'my_collection', 'verbose': 'true' }) <END_TASK>
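A hedged addendum to the walkthrough above: updating documents in place with update_one and the $set operator. The filter below is hypothetical; run it before the records collection is dropped, with a title that actually exists in your data.

# Update a single matching document; modified_count reports whether anything changed.
result = records.update_one(
    {"title": "Some existing record title"},  # hypothetical filter
    {"$set": {"accessLevel": "restricted"}})
result.modified_count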
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: List all Positions for an Account. Step2: List all open Positions for an Account. Step3: Get the details of a single instrumentโ€™s position in an Account Step4: Closeout the open Position regarding instrument in an Account.
<ASSISTANT_TASK:> Python Code: import pandas as pd import oandapyV20 import oandapyV20.endpoints.positions as positions import configparser config = configparser.ConfigParser() config.read('../config/config_v20.ini') accountID = config['oanda']['account_id'] access_token = config['oanda']['api_key'] client = oandapyV20.API(access_token=access_token) r = positions.PositionList(accountID=accountID) client.request(r) print(r.response) r = positions.OpenPositions(accountID=accountID) client.request(r) instrument = "AUD_USD" r = positions.PositionDetails(accountID=accountID, instrument=instrument) client.request(r) data = { "longUnits": "ALL" } r = positions.PositionClose(accountID=accountID, instrument=instrument, data=data) client.request(r) <END_TASK>
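A hedged addendum to the task above: pandas is imported but never used in the original cells, and it makes the open positions easier to scan. client.request() returns the response as a dict; pd.json_normalize assumes a reasonably recent pandas (older versions expose it as pandas.io.json.json_normalize).

# Flatten the positions list into a DataFrame for quick inspection.
r = positions.PositionList(accountID=accountID)
rv = client.request(r)
df = pd.json_normalize(rv.get('positions', []))
df.head()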
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: NOTE Step2: Subplots Step3: Reading a CSV file and plotting the data. Step4: Simple plot Step5: Plotting with default settings Step6: Instantiating defaults Step7: We can also change the attributes of the graph Step8: Setting limits Step9: Setting ticks Step10: Adding a legend Step11: Figures, Subplots, Axes and Ticks Step12: Subplots Step13: Set up a subplot grid that has height 2 and width 1, and set the first such subplot as active Step14: Axes Step15: Plots with fill Step16: Scatter Plots Step17: Bar Plots Step18: Contour Plots
<ASSISTANT_TASK:> Python Code: import numpy as np import matplotlib.pyplot as plt ################## %matplotlib inline # Compute the x and y coordinates for points on a sine curve x = np.arange(0, 3 * np.pi, 0.1) y = np.sin(x) # Plot the points using matplotlib plt.plot(x, y) plt.show() a = np.array([1, 4, 5, 66, 77, 334], int) plt.plot(a) plt.show() # Let's add more details to the graphs. y_cos = np.cos(x) y_sin = np.sin(x) # Plot the points using matplotlib plt.plot(x, y_sin) plt.plot(x, y_cos) plt.xlabel('x axis label') plt.ylabel('y axis label') plt.title('Sine and Cosine Graph') plt.legend(['Sine', 'Cosine']) plt.show() # Compute the x and y coordinates for points on sine and cosine curves x = np.arange(0, 3 * np.pi, 0.1) y_sin = np.sin(x) y_cos = np.cos(x) # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 1, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 1, 2) plt.plot(x, y_cos) plt.title('Cosine') # Show the figure. plt.show() # # import numpy as np # data = np.genfromtxt('Metadata_Indicator_API_IND_DS2_en_csv_v2.csv', delimiter=',', # names=['INDICATOR_CODE', 'INDICATOR_NAME', 'SOURCE_NOTE', 'SOURCE_ORGANIZATION']) # plt.plot(data['INDICATOR_CODE'], data['INDICATOR_NAME'], color='r', label='the data') # plt.show() import numpy as np X = np.linspace(-np.pi, np.pi, 256, endpoint=True) C, S = np.cos(X), np.sin(X) print(C) print(S) import numpy as np import matplotlib.pyplot as plt X = np.linspace(-np.pi, np.pi, 256, endpoint=True) C, S = np.cos(X), np.sin(X) plt.plot(X, C) plt.plot(X, S) plt.show() import numpy as np import matplotlib.pyplot as plt # Create a figure of size 8x6 inches, 80 dots per inch plt.figure(figsize=(8, 6), dpi=80) # Create a new subplot from a grid of 1x1 plt.subplot(1, 1, 1) X = np.linspace(-np.pi, np.pi, 256, endpoint=True) C, S = np.cos(X), np.sin(X) # Plot cosine with a blue continuous line of width 1 (pixels) plt.plot(X, C, color="blue", linewidth=1.0, linestyle="-") # Plot sine with a green continuous line of width 1 (pixels) plt.plot(X, S, color="green", linewidth=1.0, linestyle="-") # Set x limits plt.xlim(-4.0, 4.0) # Set x ticks plt.xticks(np.linspace(-4, 4, 9, endpoint=True)) # Set y limits plt.ylim(-1.0, 1.0) # Set y ticks plt.yticks(np.linspace(-1, 1, 5, endpoint=True)) # Save figure using 72 dots per inch # plt.savefig("exercise_2.png", dpi=72) # Show result on screen plt.show() plt.figure(figsize=(10, 6), dpi=80) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-.") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-") plt.xlim(X.min() * 1.1, X.max() * 1.1) plt.ylim(C.min() * 1.1, C.max() * 1.1) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-") plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi]) plt.yticks([-1, 0, +1]) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-") t = 2 * np.pi / 3 plt.plot([t, t], [0, np.cos(t)], color='blue', linewidth=2.5, linestyle="--") plt.scatter([t, ], [np.cos(t), ], 50, color='blue') plt.annotate(r'$cos(\frac{2\pi}{3})=-\frac{1}{2}$', xy=(t, np.cos(t)), xycoords='data', xytext=(-90, -50), textcoords='offset points', fontsize=16, arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2")) plt.plot([t, t],[0, np.sin(t)], color='red', linewidth=2.5, linestyle="--") plt.scatter([t, ],[np.sin(t), ], 50, color='red')
plt.annotate(r'$sin(\frac{2\pi}{3})=\frac{\sqrt{3}}{2}$', xy=(t, np.sin(t)), xycoords='data', xytext=(+10, +30), textcoords='offset points', fontsize=16, arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2")) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-") ax = plt.gca() # gca stands for 'get current axis' ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.spines['bottom'].set_position(('data',0)) ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0)) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-.") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-") ax = plt.gca() # gca stands for 'get current axis' ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.xaxis.set_ticks_position('bottom') ax.spines['bottom'].set_position(('data',0)) ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data',0)) plt.plot(X, C, color="blue", linewidth=2.5, linestyle="-.", label="cosine") plt.plot(X, S, color="red", linewidth=2.5, linestyle="-", label="sine") plt.legend(loc='upper left') plt.close(1) x = np.arange(0, 3 * np.pi, 0.1) y_sin = np.sin(x) y_cos = np.cos(x) # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 1, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 1, 2) plt.plot(x, y_cos) plt.title('Cosine') # Show the figure. plt.show() # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 1, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 1, 2) plt.plot(x, y_cos) plt.title('Cosine') # Show the figure. plt.show() # Set up a subplot grid that has height 2 and width 1, # and set the first such subplot as active. plt.subplot(2, 2, 1) # Make the first plot plt.plot(x, y_sin) plt.title('Sine') # Set the second subplot as active, and make the second plot. plt.subplot(2, 2, 2) plt.plot(x, y_cos) plt.title('Cosine') a = np.linspace(-1, 1, num=100) y_arcsin = np.arcsin(a) y_arccos = np.arccos(a) plt.subplot(2, 2, 3) plt.plot(a, y_arcsin) plt.title('arcsin') plt.subplot(2, 2, 4) plt.plot(a, y_arccos) plt.title('arccos') # Show the figure. 
plt.show() n = 256 X = np.linspace(-np.pi, np.pi, n, endpoint=True) Y = np.sin(2 * X) plt.plot(X, Y + 1, color='blue', alpha=1.00) plt.plot(X, Y - 1, color='blue', alpha=1.00) x = np.arange(0.0, 2, 0.01) y1 = np.sin(2*np.pi*x) y2 = 1.2*np.sin(4*np.pi*x) fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True) ax1.fill_between(x, 0, y1) ax1.set_ylabel('between y1 and 0') ax2.fill_between(x, y1, 1) ax2.set_ylabel('between y1 and 1') ax3.fill_between(x, y1, y2) ax3.set_ylabel('between y1 and y2') ax3.set_xlabel('x') plt.show() n = 1024 X = np.random.normal(0,1,n) Y = np.random.normal(0,1,n) plt.scatter(X,Y) # with colors import numpy as np import matplotlib.pyplot as plt n = 1024 X = np.random.normal(0, 1, n) Y = np.random.normal(0, 1, n) T = np.arctan2(Y, X) print(T) plt.axes([0.025, 0.025, 0.95, 0.95]) plt.scatter(X, Y, s=75, c=T, alpha=.5) plt.xlim(-1.5, 1.5) plt.xticks(()) plt.ylim(-1.5, 1.5) plt.yticks(()) plt.show() import numpy as np import matplotlib.pyplot as plt n = 12 X = np.arange(n) Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 0.7, n) Y2 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n) plt.axes([0.025, 0.025, 0.95, 0.95]) plt.bar(X, +Y1, facecolor='#9999ff', edgecolor='white') plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white') for x, y in zip(X, Y1): plt.text(x + 0.4, y + 0.05, '%.2f' % y, ha='center', va='bottom') for x, y in zip(X, Y2): plt.text(x + 0.4, -y - 0.05, '%.2f' % y, ha='center', va='top') plt.xlim(-.5, n) plt.xticks(()) plt.ylim(-1.25, 1.25) plt.yticks(()) plt.show() import numpy as np import matplotlib.pyplot as plt def f(x,y): return (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 -y**2) n = 256 x = np.linspace(-3, 3, n) y = np.linspace(-3, 3, n) X,Y = np.meshgrid(x, y) plt.axes([0.025, 0.025, 0.95, 0.95]) plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot) C = plt.contour(X, Y, f(X, Y), 8, colors='black', linewidths=.5) plt.clabel(C, inline=1, fontsize=10) plt.xticks(()) plt.yticks(()) plt.show() <END_TASK>
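A hedged extra in the same spirit as the plots above: displaying the same f(x, y) field as an image. It assumes f, X, and Y from the contour cell just above.

# Render the scalar field with imshow and a colorbar.
plt.axes([0.025, 0.025, 0.95, 0.95])
plt.imshow(f(X, Y), cmap='bone', origin='lower', extent=[-3, 3, -3, 3])
plt.colorbar(shrink=.92)
plt.xticks(())
plt.yticks(())
plt.show()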
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 1.3. Model Type Step7: 1.4. Elemental Stoichiometry Step8: 1.5. Elemental Stoichiometry Details Step9: 1.6. Prognostic Variables Step10: 1.7. Diagnostic Variables Step11: 1.8. Damping Step12: 2. Key Properties --&gt; Time Stepping Framework --&gt; Passive Tracers Transport Step13: 2.2. Timestep If Not From Ocean Step14: 3. Key Properties --&gt; Time Stepping Framework --&gt; Biology Sources Sinks Step15: 3.2. Timestep If Not From Ocean Step16: 4. Key Properties --&gt; Transport Scheme Step17: 4.2. Scheme Step18: 4.3. Use Different Scheme Step19: 5. Key Properties --&gt; Boundary Forcing Step20: 5.2. River Input Step21: 5.3. Sediments From Boundary Conditions Step22: 5.4. Sediments From Explicit Model Step23: 6. Key Properties --&gt; Gas Exchange Step24: 6.2. CO2 Exchange Type Step25: 6.3. O2 Exchange Present Step26: 6.4. O2 Exchange Type Step27: 6.5. DMS Exchange Present Step28: 6.6. DMS Exchange Type Step29: 6.7. N2 Exchange Present Step30: 6.8. N2 Exchange Type Step31: 6.9. N2O Exchange Present Step32: 6.10. N2O Exchange Type Step33: 6.11. CFC11 Exchange Present Step34: 6.12. CFC11 Exchange Type Step35: 6.13. CFC12 Exchange Present Step36: 6.14. CFC12 Exchange Type Step37: 6.15. SF6 Exchange Present Step38: 6.16. SF6 Exchange Type Step39: 6.17. 13CO2 Exchange Present Step40: 6.18. 13CO2 Exchange Type Step41: 6.19. 14CO2 Exchange Present Step42: 6.20. 14CO2 Exchange Type Step43: 6.21. Other Gases Step44: 7. Key Properties --&gt; Carbon Chemistry Step45: 7.2. PH Scale Step46: 7.3. Constants If Not OMIP Step47: 8. Tracers Step48: 8.2. Sulfur Cycle Present Step49: 8.3. Nutrients Present Step50: 8.4. Nitrous Species If N Step51: 8.5. Nitrous Processes If N Step52: 9. Tracers --&gt; Ecosystem Step53: 9.2. Upper Trophic Levels Treatment Step54: 10. Tracers --&gt; Ecosystem --&gt; Phytoplankton Step55: 10.2. Pft Step56: 10.3. Size Classes Step57: 11. Tracers --&gt; Ecosystem --&gt; Zooplankton Step58: 11.2. Size Classes Step59: 12. Tracers --&gt; Disolved Organic Matter Step60: 12.2. Lability Step61: 13. Tracers --&gt; Particules Step62: 13.2. Types If Prognostic Step63: 13.3. Size If Prognostic Step64: 13.4. Size If Discrete Step65: 13.5. Sinking Speed If Prognostic Step66: 14. Tracers --&gt; Dic Alkalinity Step67: 14.2. Abiotic Carbon Step68: 14.3. Alkalinity
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'thu', 'sandbox-2', 'ocnbgchem') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.model_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Geochemical" # "NPZD" # "PFT" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Fixed" # "Variable" # "Mix of both" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.damping') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "use ocean model transport time step" # "use specific time step" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline" # "Online" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Use that of ocean model" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Atmospheric Chemistry model" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "from file (climatology)" # "from file (interannual variations)" # "from Land Surface model" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OMIP protocol" # "Other protocol" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Sea water" # "Free" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrogen (N)" # "Phosphorous (P)" # "Silicium (S)" # "Iron (Fe)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Nitrates (NO3)" # "Amonium (NH4)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Dentrification" # "N fixation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "PFT including size based (specify both below)" # "Size based only (specify below)" # "PFT only (specify below)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Diatoms" # "Nfixers" # "Calcifiers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microphytoplankton" # "Nanophytoplankton" # "Picophytoplankton" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Generic" # "Size based (specify below)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Microzooplankton" # "Mesozooplankton" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Labile" # "Semi-labile" # "Refractory" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diagnostic" # "Diagnostic (Martin profile)" # "Diagnostic (Balast)" # "Prognostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "POC" # "PIC (calcite)" # "PIC (aragonite" # "BSi" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "No size spectrum used" # "Full size spectrum" # "Discrete size classes (specify which below)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Function of particule size" # "Function of particule type (balast)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "C13" # "C14)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Prognostic" # "Diagnostic)" # TODO - please enter value(s) <END_TASK>
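For reference, a minimal sketch of how one of the cells above is typically completed once values are chosen — the property IDs are copied verbatim from the cells above, while the chosen values are purely illustrative and do not describe a real model:
# DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
DOC.set_value(True)  # boolean and numeric values are passed unquoted
# DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
DOC.set_value("OMIP protocol")  # enumeration values are passed as quoted strings taken from the Valid Choices list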
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Set parameters Step2: Read epochs for the channel of interest Step3: Load FieldTrip neighbor definition to set up sensor connectivity Step4: Compute permutation statistic Step5: Note. The same functions work with source estimate. The only differences are the origin of the data, the size, and the connectivity definition.
<ASSISTANT_TASK:> Python Code: # Authors: Denis Engemann <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from mne.viz import plot_topomap import mne from mne.stats import spatio_temporal_cluster_test from mne.datasets import sample from mne.channels import read_ch_connectivity print(__doc__) data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' event_id = {'Aud_L': 1, 'Aud_R': 2, 'Vis_L': 3, 'Vis_R': 4} tmin = -0.2 tmax = 0.5 # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) raw.filter(1, 30, l_trans_bandwidth='auto', h_trans_bandwidth='auto', filter_length='auto', phase='zero') events = mne.read_events(event_fname) picks = mne.pick_types(raw.info, meg='mag', eog=True) reject = dict(mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=None, reject=reject, preload=True) epochs.drop_channels(['EOG 061']) epochs.equalize_event_counts(event_id, copy=False) condition_names = 'Aud_L', 'Aud_R', 'Vis_L', 'Vis_R' X = [epochs[k].get_data() for k in condition_names] # as 3D matrix X = [np.transpose(x, (0, 2, 1)) for x in X] # transpose for clustering connectivity, ch_names = read_ch_connectivity('neuromag306mag') print(type(connectivity)) # it's a sparse matrix! plt.imshow(connectivity.toarray(), cmap='gray', origin='lower', interpolation='nearest') plt.xlabel('{} Magnetometers'.format(len(ch_names))) plt.ylabel('{} Magnetometers'.format(len(ch_names))) plt.title('Between-sensor adjacency') # set cluster threshold threshold = 50.0 # very high, but the test is quite sensitive on this data # set family-wise p-value p_accept = 0.001 cluster_stats = spatio_temporal_cluster_test(X, n_permutations=1000, threshold=threshold, tail=1, n_jobs=1, connectivity=connectivity) T_obs, clusters, p_values, _ = cluster_stats good_cluster_inds = np.where(p_values < p_accept)[0] # configure variables for visualization times = epochs.times * 1e3 colors = 'r', 'r', 'steelblue', 'steelblue' linestyles = '-', '--', '-', '--' # grand average as numpy array grand_ave = np.array(X).mean(axis=1) # get sensor positions via layout pos = mne.find_layout(epochs.info).pos # loop over significant clusters for i_clu, clu_idx in enumerate(good_cluster_inds): # unpack cluster information, get unique indices time_inds, space_inds = np.squeeze(clusters[clu_idx]) ch_inds = np.unique(space_inds) time_inds = np.unique(time_inds) # get topography for F stat f_map = T_obs[time_inds, ...].mean(axis=0) # get signals at significant sensors signals = grand_ave[..., ch_inds].mean(axis=-1) sig_times = times[time_inds] # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) mask[ch_inds, :] = True # initialize figure fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) title = 'Cluster #{0}'.format(i_clu + 1) fig.suptitle(title, fontsize=14) # plot average test statistic and mark significant sensors image, _ = plot_topomap(f_map, pos, mask=mask, axes=ax_topo, cmap='Reds', vmin=np.min, vmax=np.max) # advanced matplotlib for showing image with figure and colorbar # in one plot divider = make_axes_locatable(ax_topo) # add axes for colorbar ax_colorbar = divider.append_axes('right', size='5%', pad=0.05) plt.colorbar(image, cax=ax_colorbar) ax_topo.set_xlabel('Averaged F-map ({:0.1f} - {:0.1f} ms)'.format( *sig_times[[0, -1]] )) #
add new axis for time courses and plot time courses ax_signals = divider.append_axes('right', size='300%', pad=1.2) for signal, name, col, ls in zip(signals, condition_names, colors, linestyles): ax_signals.plot(times, signal, color=col, linestyle=ls, label=name) # add information ax_signals.axvline(0, color='k', linestyle=':', label='stimulus onset') ax_signals.set_xlim([times[0], times[-1]]) ax_signals.set_xlabel('time [ms]') ax_signals.set_ylabel('evoked magnetic fields [fT]') # plot significant time range ymin, ymax = ax_signals.get_ylim() ax_signals.fill_betweenx((ymin, ymax), sig_times[0], sig_times[-1], color='orange', alpha=0.3) ax_signals.legend(loc='lower right') ax_signals.set_ylim(ymin, ymax) # clean up viz mne.viz.tight_layout(fig=fig) fig.subplots_adjust(bottom=.05) plt.show() <END_TASK>
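One detail worth unpacking from the code above: threshold=50.0 is a hand-picked cluster-forming F-value. A minimal sketch of deriving it from the F distribution instead, reusing the X list built above (scipy.stats.f is a real API; the 0.001 significance level is an illustrative choice):
import scipy.stats
dfn = len(X) - 1                       # numerator degrees of freedom: conditions - 1
dfd = sum(len(x) for x in X) - len(X)  # denominator degrees of freedom: observations - conditions
threshold = scipy.stats.f.ppf(1.0 - 0.001, dfn, dfd)  # F-value at the p < 0.001 point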
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 1.3. Model Family Step7: 1.4. Basic Approximations Step8: 2. Key Properties --> Resolution Step9: 2.2. Canonical Horizontal Resolution Step10: 2.3. Range Horizontal Resolution Step11: 2.4. Number Of Vertical Levels Step12: 2.5. High Top Step13: 3. Key Properties --> Timestepping Step14: 3.2. Timestep Shortwave Radiative Transfer Step15: 3.3. Timestep Longwave Radiative Transfer Step16: 4. Key Properties --> Orography Step17: 4.2. Changes Step18: 5. Grid --> Discretisation Step19: 6. Grid --> Discretisation --> Horizontal Step20: 6.2. Scheme Method Step21: 6.3. Scheme Order Step22: 6.4. Horizontal Pole Step23: 6.5. Grid Type Step24: 7. Grid --> Discretisation --> Vertical Step25: 8. Dynamical Core Step26: 8.2. Name Step27: 8.3. Timestepping Type Step28: 8.4. Prognostic Variables Step29: 9. Dynamical Core --> Top Boundary Step30: 9.2. Top Heat Step31: 9.3. Top Wind Step32: 10. Dynamical Core --> Lateral Boundary Step33: 11. Dynamical Core --> Diffusion Horizontal Step34: 11.2. Scheme Method Step35: 12. Dynamical Core --> Advection Tracers Step36: 12.2. Scheme Characteristics Step37: 12.3. Conserved Quantities Step38: 12.4. Conservation Method Step39: 13. Dynamical Core --> Advection Momentum Step40: 13.2. Scheme Characteristics Step41: 13.3. Scheme Staggering Type Step42: 13.4. Conserved Quantities Step43: 13.5. Conservation Method Step44: 14. Radiation Step45: 15. Radiation --> Shortwave Radiation Step46: 15.2. Name Step47: 15.3. Spectral Integration Step48: 15.4. Transport Calculation Step49: 15.5. Spectral Intervals Step50: 16. Radiation --> Shortwave GHG Step51: 16.2. ODS Step52: 16.3. Other Flourinated Gases Step53: 17. Radiation --> Shortwave Cloud Ice Step54: 17.2. Physical Representation Step55: 17.3. Optical Methods Step56: 18. Radiation --> Shortwave Cloud Liquid Step57: 18.2. Physical Representation Step58: 18.3. Optical Methods Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity Step60: 20. Radiation --> Shortwave Aerosols Step61: 20.2. Physical Representation Step62: 20.3. Optical Methods Step63: 21. Radiation --> Shortwave Gases Step64: 22. Radiation --> Longwave Radiation Step65: 22.2. Name Step66: 22.3. Spectral Integration Step67: 22.4. Transport Calculation Step68: 22.5. Spectral Intervals Step69: 23. Radiation --> Longwave GHG Step70: 23.2. ODS Step71: 23.3. Other Flourinated Gases Step72: 24. Radiation --> Longwave Cloud Ice Step73: 24.2. Physical Reprenstation Step74: 24.3. Optical Methods Step75: 25. Radiation --> Longwave Cloud Liquid Step76: 25.2. Physical Representation Step77: 25.3. Optical Methods Step78: 26. Radiation --> Longwave Cloud Inhomogeneity Step79: 27. Radiation --> Longwave Aerosols Step80: 27.2. Physical Representation Step81: 27.3. Optical Methods Step82: 28. Radiation --> Longwave Gases Step83: 29. Turbulence Convection Step84: 30. Turbulence Convection --> Boundary Layer Turbulence Step85: 30.2. Scheme Type Step86: 30.3. Closure Order Step87: 30.4. Counter Gradient Step88: 31. Turbulence Convection --> Deep Convection Step89: 31.2. Scheme Type Step90: 31.3. Scheme Method Step91: 31.4. Processes Step92: 31.5. Microphysics Step93: 32.
Turbulence Convection --> Shallow Convection Step94: 32.2. Scheme Type Step95: 32.3. Scheme Method Step96: 32.4. Processes Step97: 32.5. Microphysics Step98: 33. Microphysics Precipitation Step99: 34. Microphysics Precipitation --> Large Scale Precipitation Step100: 34.2. Hydrometeors Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics Step102: 35.2. Processes Step103: 36. Cloud Scheme Step104: 36.2. Name Step105: 36.3. Atmos Coupling Step106: 36.4. Uses Separate Treatment Step107: 36.5. Processes Step108: 36.6. Prognostic Scheme Step109: 36.7. Diagnostic Scheme Step110: 36.8. Prognostic Variables Step111: 37. Cloud Scheme --> Optical Cloud Properties Step112: 37.2. Cloud Inhomogeneity Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution Step114: 38.2. Function Name Step115: 38.3. Function Order Step116: 38.4. Convection Coupling Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution Step118: 39.2. Function Name Step119: 39.3. Function Order Step120: 39.4. Convection Coupling Step121: 40. Observation Simulation Step122: 41. Observation Simulation --> Isscp Attributes Step123: 41.2. Top Height Direction Step124: 42. Observation Simulation --> Cosp Attributes Step125: 42.2. Number Of Grid Points Step126: 42.3. Number Of Sub Columns Step127: 42.4. Number Of Levels Step128: 43. Observation Simulation --> Radar Inputs Step129: 43.2. Type Step130: 43.3. Gas Absorption Step131: 43.4. Effective Radius Step132: 44. Observation Simulation --> Lidar Inputs Step133: 44.2. Overlap Step134: 45. Gravity Waves Step135: 45.2. Sponge Layer Step136: 45.3. Background Step137: 45.4. Subgrid Scale Orography Step138: 46. Gravity Waves --> Orographic Gravity Waves Step139: 46.2. Source Mechanisms Step140: 46.3. Calculation Method Step141: 46.4. Propagation Scheme Step142: 46.5. Dissipation Scheme Step143: 47. Gravity Waves --> Non Orographic Gravity Waves Step144: 47.2. Source Mechanisms Step145: 47.3. Calculation Method Step146: 47.4. Propagation Scheme Step147: 47.5. Dissipation Scheme Step148: 48. Solar Step149: 49. Solar --> Solar Pathways Step150: 50. Solar --> Solar Constant Step151: 50.2. Fixed Value Step152: 50.3. Transient Characteristics Step153: 51. Solar --> Orbital Parameters Step154: 51.2. Fixed Reference Date Step155: 51.3. Transient Method Step156: 51.4. Computation Method Step157: 52. Solar --> Insolation Ozone Step158: 53. Volcanos Step159: 54. Volcanos --> Volcanoes Treatment
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'awi', 'awi-cm-1-0-lr', 'atmos') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "AGCM" # "ARCM" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "primitive equations" # "non-hydrostatic" # "anelastic" # "Boussinesq" # "hydrostatic" # "quasi-hydrostatic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.high_top') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "modified" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.key_properties.orography.changes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "related to ice sheets" # "related to tectonics" # "modified mean" # "modified variance if taken into account in model (cf gravity waves)" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "spectral" # "fixed grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "finite elements" # "finite volumes" # "finite difference" # "centered finite difference" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "second" # "third" # "fourth" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "filter" # "pole rotation" # "artificial island" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Gaussian" # "Latitude-Longitude" # "Cubed-Sphere" # "Icosahedral" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "isobaric" # "sigma" # "hybrid sigma-pressure" # "hybrid pressure" # "vertically lagrangian" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Adams-Bashforth" # "explicit" # "implicit" # "semi-implicit" # "leap frog" # "multi-step" # "Runge Kutta fifth order" # "Runge Kutta second order" # "Runge Kutta third order" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "surface pressure" # "wind components" # "divergence/curl" # "temperature" # "potential temperature" # "total water" # "water vapour" # "water liquid" # "water ice" # "total water moments" # "clouds" # "radiation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "iterated Laplacian" # "bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heun" # "Roe and VanLeer" # "Roe and Superbee" # "Prather" # "UTOPIA" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Eulerian" # "modified Euler" # "Lagrangian" # "semi-Lagrangian" # "cubic semi-Lagrangian" # "quintic semi-Lagrangian" # "mass-conserving" # "finite volume" # "flux-corrected" # "linear" # "quadratic" # "quartic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "dry mass" # "tracer mass" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Priestley algorithm" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "VanLeer" # "Janjic" # "SUPG (Streamline Upwind Petrov-Galerkin)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "2nd order" # "4th order" # "cell-centred" # "staggered grid" # "semi-staggered grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa D-grid" # "Arakawa E-grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Angular momentum" # "Horizontal momentum" # "Enstrophy" # "Mass" # "Total energy" # "Vorticity" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.aerosols') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "sulphate" # "nitrate" # "sea salt" # "dust" # "ice" # "organic" # "BC (black carbon / soot)" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "polar stratospheric ice" # "NAT (nitric acid trihydrate)" # "NAD (nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particle)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Mellor-Yamada" # "Holtslag-Boville" # "EDMF" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "TKE prognostic" # "TKE diagnostic" # "TKE coupled with water" # "vertical profile of Kz" # "non-local diffusion" # "Monin-Obukhov similarity" # "Coastal Buddy Scheme" # "Coupled with convection" # "Coupled with gravity waves" # "Depth capped at cloud base" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "adjustment" # "plume ensemble" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CAPE" # "bulk" # "ensemble" # "CAPE/WFN based" # "TKE/CIN based" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vertical momentum transport" # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "updrafts" # "downdrafts" # "radiative effect of anvils" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "cumulus-capped boundary layer" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "same as deep (unified)" # "included in boundary layer turbulence" # "separate diagnosis" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "liquid rain" # "snow" # "hail" # "graupel" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mixed phase" # "cloud droplets" # "cloud ice" # "ice nucleation" # "water vapour deposition" # "effect of raindrops" # "effect of snow" # "effect of graupel" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "atmosphere_radiation" # "atmosphere_microphysics_precipitation" # "atmosphere_turbulence_convection" # "atmosphere_gravity_waves" # "atmosphere_solar" # "atmosphere_volcano" # "atmosphere_cloud_simulator" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "entrainment" # "detrainment" # "bulk cloud" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud amount" # "liquid" # "ice" # "rain" # "snow" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "random" # "maximum" # "maximum-random" # "exponential" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "no adjustment" # "IR brightness" # "visible optical depth" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "lowest altitude level" # "highest altitude level" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Inline" # "Offline" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "surface" # "space borne" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "ice spheres" # "ice non-spherical" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "max" # "random" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Rayleigh friction" # "Diffusive sponge layer" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.gravity_waves.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "continuous spectrum" # "discrete spectrum" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "effect on drag" # "effect on lifting" # "enhanced topography" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "linear mountain waves" # "hydraulic jump" # "envelope orography" # "low level flow blocking" # "statistical sub-grid scale variance" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "non-linear calculation" # "more than two cardinal directions" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "includes boundary layer ducting" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convection" # "precipitation" # "background spectrum" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "spatially dependent" # "temporally dependent" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.solar.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SW radiation" # "precipitating energetic particles" # "cosmic rays" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Berger 1978" # "Laskar 2004" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "high frequency solar constant anomaly" # "stratospheric aerosols optical thickness" # "Other: [Please specify]" # TODO - please enter value(s) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Descriptive statistics Step2: Interval estimation, correlation measures, and statistical tests Step3: Distribution fitting Step4: Distances Step5: The output from the next commands might differ from that shown in the book, as the main matrix elements are obtained randomly Step6: Clustering Step7: Hierarchical clustering
<ASSISTANT_TASK:> Python Code: %matplotlib inline import scipy.misc from scipy.stats import signaltonoise from scipy.stats import norm # Gaussian distribution lena=scipy.misc.lena().astype(float) lena+= norm.rvs(loc=0,scale=16,size=lena.shape) signaltonoise(lena,axis=None) import numpy from scipy.stats import pareto import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (15.0, 5.0) x=numpy.linspace(1,10,1000) plt.subplot(131); plt.plot(pareto.pdf(x,5)) plt.subplot(132); plt.plot(pareto.cdf(x,5)) plt.subplot(133); plt.plot(pareto.rvs(5,size=1000)) plt.show() import numpy data = numpy.array([[113,105,130,101,138,118,87,116,75,96, \ 122,103,116,107,118,103,111,104,111,89,78,100,89,85,88], \ [137,105,133,108,115,170,103,145,78,107, \ 84,148,147,87,166,146,123,135,112,93,76,116,78,101,123]]) dataDiff = data[1,:]-data[0,:] dataDiff.mean(), dataDiff.std() import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (15.0, 5.0) plt.hist(dataDiff) plt.show() from scipy.stats import ttest_1samp t_stat,p_value=ttest_1samp(dataDiff,0.0) print (p_value/2.0) from scipy.stats import norm # Gaussian distribution mean,std=norm.fit(dataDiff) plt.hist(dataDiff, normed=1) x=numpy.linspace(dataDiff.min(),dataDiff.max(),1000) pdf=norm.pdf(x,mean,std) plt.plot(x,pdf) from scipy.stats import gaussian_kde pdf = gaussian_kde(dataDiff) pdf = pdf.evaluate(x) plt.hist(dataDiff, normed=1) plt.plot(x,pdf,'k') plt.savefig("hist2.png") plt.show() plt.hist(dataDiff, normed=1) plt.plot(x,pdf,'k.-',label='Kernel fit') plt.plot(x,norm.pdf(x,mean,std),'r',label='Normal fit') plt.legend() plt.savefig("hist3.png") plt.show() import numpy from scipy.spatial.distance import minkowski Square=numpy.mgrid[-1.1:1.1:512j,-1.1:1.1:512j] X=Square[0]; Y=Square[1] f=lambda x,y,p: minkowski([x,y],[0.0,0.0],p)<=1.0 Ball=lambda p:numpy.vectorize(f)(X,Y,p) import matplotlib.pylab as plt plt.imshow(Ball(3), cmap = plt.cm.gray) plt.axis('off') plt.subplots_adjust(left=0.0127,bottom=0.0164,right=0.987,top=0.984) plt.show() # IT WILL TAKE SOME TIME FOR THE PLOT TO SHOW UP import scipy.stats from scipy.spatial.distance import pdist V=scipy.stats.randint.rvs(0.4,3,size=(5,4))-1 print (V) pdist(V,metric='cityblock') from scipy.spatial.distance import cdist V=scipy.stats.randint.rvs(0.4, 2, size=(3,4)).astype(bool) W=scipy.stats.randint.rvs(0.4, 3, size=(2,4)).astype(bool) cdist(V,W,'jaccard') from scipy.spatial import KDTree data=scipy.stats.randint.rvs(0.4,10,size=(10,4)) print (data) tree=KDTree(data) tree.query([0,0,0,0]) import numpy from scipy.stats import norm from numpy import array,vstack data=norm.rvs(0,0.3,size=(10000,2)) inside_ball=numpy.hypot(data[:,0],data[:,1])<1.0 data=data[inside_ball] data = vstack((data, data+array([1,1]),data+array([-1,1]))) from scipy.cluster.vq import * centroids, distortion = kmeans(data,3) cluster_assignment, distances = vq(data,centroids) import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (8.0, 6.0) plt.plot(data[cluster_assignment==0,0], data[cluster_assignment==0,1], 'ro') plt.plot(data[cluster_assignment==1,0], data[cluster_assignment==1,1], 'b+') plt.plot(data[cluster_assignment==2,0], data[cluster_assignment==2,1], 'k.') plt.show() import numpy file=open("dentitio.dat","r") # open the file lines=file.readlines() # read each line in memory file.close() # close the file mammals=[] # this stores the names dataset=numpy.zeros((len(lines),8)) # this stores the data for index,line in enumerate(lines): mammals.append( line[0:27].rstrip(" ").capitalize() ) for tooth in range(8): 
dataset[index,tooth]=int(line[27+tooth]) import matplotlib.pyplot as plt plt.rcParams['figure.figsize'] = (10.0, 20.0) from scipy.cluster.hierarchy import linkage, dendrogram Z=linkage(dataset) dendrogram(Z, labels=mammals, orientation="right") plt.show() <END_TASK>
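A minimal, self-contained sketch of the same linkage/clustering idea on synthetic data, for readers without the book's dentitio.dat file; the two-cluster layout and the seed are illustrative:

import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.RandomState(0)
# two well-separated synthetic clusters in the plane
pts = np.vstack([rng.normal(0.0, 0.3, (5, 2)),
                 rng.normal(3.0, 0.3, (5, 2))])
Z = linkage(pts)                                  # single-linkage by default
labels = fcluster(Z, t=2, criterion='maxclust')   # cut the tree into 2 clusters
print(labels)                                     # two groups of five points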
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Display the figures inline with IPython's Image display.
<ASSISTANT_TASK:> Python Code: from IPython.display import Image Image(filename='images/06_03.jpg', width=1000) Image(filename='images/07_01.png', width=500) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: The basic workflow that takes place above can be summarized as Step3: The first argument to this function is the Pyomo model. The second argument is the name to use for the NL file. Along with the NL file, another file with the suffix ".symbol_map.pickle" will be created that contains information that can be used to efficiently rebuild the symbol map for any Pyomo model with component names matching those used to build the NL file. Additional options can be passed to the NL writer as keywords to this function. These include Step5: Loading the SOL File
<ASSISTANT_TASK:> Python Code: # %load script.py from pyomo.environ import * from pyomo.opt import SolverFactory, TerminationCondition def create_model(): model = ConcreteModel() model.x = Var() model.o = Objective(expr=model.x) model.c = Constraint(expr=model.x >= 1) model.x.set_value(1.0) return model if __name__ == "__main__": with SolverFactory("ipopt") as opt: model = create_model() results = opt.solve(model, load_solutions=False) if results.solver.termination_condition != TerminationCondition.optimal: raise RuntimeError('Solver did not report optimality:\n%s' % (results.solver)) model.solutions.load_from(results) print("Objective: %s" % (model.o())) # %load write.py import pyomo.environ from pyomo.core import ComponentUID from pyomo.opt import ProblemFormat # use fast version of pickle (python 2 or 3) from six.moves import cPickle as pickle def write_nl(model, nl_filename, **kwds): Writes a Pyomo model in NL file format and stores information about the symbol map that allows it to be recovered at a later time for a Pyomo model with matching component names. symbol_map_filename = nl_filename+".symbol_map.pickle" # write the model and obtain the symbol_map _, smap_id = model.write(nl_filename, format=ProblemFormat.nl, io_options=kwds) symbol_map = model.solutions.symbol_map[smap_id] # save a persistent form of the symbol_map (using pickle) by # storing the NL file label with a ComponentUID, which is # an efficient lookup code for model components (created # by John Siirola) tmp_buffer = {} # this makes the process faster symbol_cuid_pairs = tuple( (symbol, ComponentUID(var_weakref(), cuid_buffer=tmp_buffer)) for symbol, var_weakref in symbol_map.bySymbol.items()) with open(symbol_map_filename, "wb") as f: pickle.dump(symbol_cuid_pairs, f) return symbol_map_filename if __name__ == "__main__": from script import create_model model = create_model() nl_filename = "example.nl" symbol_map_filename = write_nl(model, nl_filename) print(" NL File: %s" % (nl_filename)) print("Symbol Map File: %s" % (symbol_map_filename)) %%bash ipopt -s example.nl # %load read.py import pyomo.environ from pyomo.core import SymbolMap from pyomo.opt import (ReaderFactory, ResultsFormat) # use fast version of pickle (python 2 or 3) from six.moves import cPickle as pickle def read_sol(model, sol_filename, symbol_map_filename, suffixes=[".*"]): Reads the solution from the SOL file and generates a results object with an appropriate symbol map for loading it into the given Pyomo model. By default all suffixes found in the NL file will be extracted. This can be overridden using the suffixes keyword, which should be a list of suffix names or regular expressions (or None). 
if suffixes is None: suffixes = [] # parse the SOL file with ReaderFactory(ResultsFormat.sol) as reader: results = reader(sol_filename, suffixes=suffixes) # regenerate the symbol_map for this model with open(symbol_map_filename, "rb") as f: symbol_cuid_pairs = pickle.load(f) symbol_map = SymbolMap() symbol_map.addSymbols((cuid.find_component(model), symbol) for symbol, cuid in symbol_cuid_pairs) # tag the results object with the symbol_map results._smap = symbol_map return results if __name__ == "__main__": from pyomo.opt import TerminationCondition from script import create_model model = create_model() sol_filename = "example.sol" symbol_map_filename = "example.nl.symbol_map.pickle" results = read_sol(model, sol_filename, symbol_map_filename) if results.solver.termination_condition != \ TerminationCondition.optimal: raise RuntimeError("Solver did not terminate with status = optimal") model.solutions.load_from(results) print("Objective: %s" % (model.o())) <END_TASK>
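The pickled symbol map works because ComponentUID resolves components by name on any structurally matching model; a tiny sketch of that round-trip (the model and variable names here are illustrative):

from pyomo.environ import ConcreteModel, Var
from pyomo.core import ComponentUID

m1 = ConcreteModel()
m1.x = Var()
cuid = ComponentUID(m1.x)    # name-based locator, independent of m1 itself

m2 = ConcreteModel()         # a different model instance with a matching name
m2.x = Var()
assert cuid.find_component(m2) is m2.x   # the UID resolves against any matching model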
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: And many sources Step2: For this example, we will load the X_morg and the y sources for all the sets. These are circular fingerprints, and the target labels (in this case, whether the molecule was a mutagen). Step3: The requested data is loaded as nested tuples, sorted first by set, and then by source, which can easily be unpacked as above. Step4: The raw data is loaded as numpy arrays Step5: Which should be ready to use as fuel for modelling! Step6: Target variables under 'targets' Step7: Set membership masks under 'indices' Step8: Finally, molecules are accessible via 'structure'
<ASSISTANT_TASK:> Python Code: skchem.data.BursiAmes.available_sets() skchem.data.BursiAmes.available_sources() kws = {'sets': ('train', 'valid', 'test'), 'sources':('X_morg', 'y')} (X_train, y_train), (X_valid, y_valid), (X_test, y_test) = skchem.data.BursiAmes.load_data(**kws) print('train shapes:', X_train.shape, y_train.shape) print('valid shapes:', X_valid.shape, y_valid.shape) print('test shapes:', X_test.shape, y_test.shape) X_train y_train skchem.data.BursiAmes.read_frame('feats/X_morg') skchem.data.BursiAmes.read_frame('targets/y') skchem.data.BursiAmes.read_frame('indices/train') skchem.data.BursiAmes.read_frame('structure') <END_TASK>
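As a quick check that these arrays really are model-ready, a sketch fitting a scikit-learn classifier on the train split; it assumes scikit-chem can fetch or has cached the dataset, and the estimator choice is arbitrary:

import skchem
from sklearn.ensemble import RandomForestClassifier

kws = {'sets': ('train', 'valid', 'test'), 'sources': ('X_morg', 'y')}
(X_train, y_train), (X_valid, y_valid), _ = skchem.data.BursiAmes.load_data(**kws)

clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(X_train, y_train)                  # circular fingerprints in, mutagenicity out
print('validation accuracy:', clf.score(X_valid, y_valid))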
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Choose Platform and Product Step2: Choose the platforms and products Step3: Define the Extents of the Analysis Step4: Visualize the selected area Step5: Load Data from the Data Cube Step6: Show RGB Representation of the Area Step7: Urbanization Analysis Step8: Merge into one Dataset Step9: Building a False Color Composite Step10: Analyze The False Color Image
<ASSISTANT_TASK:> Python Code: import sys import os sys.path.append(os.environ.get('NOTEBOOK_ROOT')) import matplotlib.pyplot as plt import xarray as xr from utils.data_cube_utilities.dc_display_map import display_map from utils.data_cube_utilities.dc_rgb import rgb from utils.data_cube_utilities.urbanization import NDBI from utils.data_cube_utilities.vegetation import NDVI from utils.data_cube_utilities.dc_water_classifier import NDWI from datacube.utils.aws import configure_s3_access configure_s3_access(requester_pays=True) import datacube dc = datacube.Datacube() # Get available products products_info = dc.list_products() print("LANDSAT 7 Products:") products_info[["platform", "name"]][products_info.platform == "LANDSAT_7"] print("LANDSAT 8 Products:") products_info[["platform", "name"]][products_info.platform == "LANDSAT_8"] # These are the platforms (satellites) and products (datacube sets) # used for this demonstration. platform = 'LANDSAT_8' product = 'ls8_usgs_sr_scene' collection = 'c1' level = 'l2' # Kumasi, Ghana # lat = (6.597724,6.781856) # lon = (-1.727843,-1.509147) # Accra, Ghana lat = (5.5162, 5.6338) lon = (-0.2657, -0.1373) time_range = ("2019-01-01", "2019-12-31") display_map(lat, lon) desired_bands = ['red','green','nir','swir1', 'swir2', 'pixel_qa'] # needed by ndvi, ndwi, ndbi and cloud masking desired_bands = desired_bands + ['blue'] # blue is needed for a true color visualization purposes landsat_ds = dc.load(product = product, platform = platform, lat = lat, lon = lon, time = time_range, measurements = desired_bands, dask_chunks={'time':1, 'latitude':1000, 'longitude':1000}) from utils.data_cube_utilities.clean_mask import landsat_clean_mask_full clean_mask = landsat_clean_mask_full(dc, landsat_ds, product=product, platform=platform, collection=collection, level=level) landsat_ds = landsat_ds.where(clean_mask) median_composite = landsat_ds.median('time') plt.figure(figsize=(8,8)) median_composite[['red', 'green', 'blue']].to_array().plot.imshow(vmin=0, vmax=2500) plt.show() ndbi = NDBI(median_composite) # Urbanization ndvi = NDVI(median_composite) # Dense Vegetation ndwi = NDWI(median_composite) # High Concentrations of Water plt.figure(figsize=(8,8)) ndvi.plot(cmap = "Greens") plt.show() plt.figure(figsize=(8,8)) ndwi.plot(cmap = "Blues") plt.show() plt.figure(figsize=(8,8)) ndbi.plot(cmap = "Reds") plt.show() urbanization_dataset = xr.merge((ndvi.rename('NDVI'), ndwi.rename('NDWI'), ndbi.rename('NDBI'))) urbanization_dataset plt.figure(figsize=(8,8)) urbanization_dataset[["NDBI", "NDVI", "NDWI"]].to_array().plot.imshow(vmin=0, vmax=1) plt.show() display_map(latitude = lat ,longitude = lon) <END_TASK>
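For reference, the index itself is simple; a standalone sketch of the NDBI formula and a naive built-up mask on synthetic band values (the threshold of 0 is a common but illustrative choice):

import numpy as np
import xarray as xr

# synthetic stand-ins for the swir1 and nir bands loaded above
swir1 = xr.DataArray(np.array([[3000.0, 1200.0], [2500.0, 900.0]]))
nir = xr.DataArray(np.array([[2000.0, 2200.0], [1800.0, 2400.0]]))

ndbi = (swir1 - nir) / (swir1 + nir)   # NDBI: positive over built-up surfaces
built_up = ndbi > 0                    # naive urbanization mask
print(ndbi.values)
print(built_up.values)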
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Loading the Corpus Step2: Running a CorpusParser Step3: We can then use simple database queries (written in the syntax of SQLAlchemy, which Snorkel uses) to check how many documents and sentences were parsed Step4: Generating Candidates Step5: Writing a basic CandidateExtractor Step6: Next, we'll split up the documents into train, development, and test splits; and collect the associated sentences. Step7: Finally, we'll apply the candidate extractor to the three sets of sentences. The results will be persisted in the database backend. Step8: Loading Gold Labels
<ASSISTANT_TASK:> Python Code: %load_ext autoreload %autoreload 2 %matplotlib inline import os # Connect to the database backend and initalize a Snorkel session from lib.init import * # Here, we just set how many documents we'll process for automatic testing- you can safely ignore this! n_docs = 1000 if 'CI' in os.environ else 2591 from snorkel.parser import TSVDocPreprocessor doc_preprocessor = TSVDocPreprocessor('data/articles.tsv', max_docs=n_docs) from snorkel.parser.spacy_parser import Spacy from snorkel.parser import CorpusParser corpus_parser = CorpusParser(parser=Spacy()) %time corpus_parser.apply(doc_preprocessor, count=n_docs, parallelism=1) from snorkel.models import Document, Sentence print("Documents:", session.query(Document).count()) print("Sentences:", session.query(Sentence).count()) from snorkel.models import candidate_subclass Spouse = candidate_subclass('Spouse', ['person1', 'person2']) from snorkel.candidates import Ngrams, CandidateExtractor from snorkel.matchers import PersonMatcher ngrams = Ngrams(n_max=7) person_matcher = PersonMatcher(longest_match_only=True) cand_extractor = CandidateExtractor(Spouse, [ngrams, ngrams], [person_matcher, person_matcher], symmetric_relations=False) from snorkel.models import Document from lib.util import number_of_people docs = session.query(Document).order_by(Document.name).all() train_sents = set() dev_sents = set() test_sents = set() for i, doc in enumerate(docs): for s in doc.sentences: if number_of_people(s) <= 5: if i % 10 == 8: dev_sents.add(s) elif i % 10 == 9: test_sents.add(s) else: train_sents.add(s) %%time for i, sents in enumerate([train_sents, dev_sents, test_sents]): cand_extractor.apply(sents, split=i, parallelism=1) print("Number of candidates:", session.query(Spouse).filter(Spouse.split == i).count()) from lib.util import load_external_labels %time load_external_labels(session, Spouse, annotator_name='gold') <END_TASK>
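A small sketch for inspecting what the extractor produced; it assumes the session and Spouse subclass above, and the get_span() accessor follows the usage in the Snorkel intro tutorials:

# look at a few training-split candidates (split == 0)
train_cands = session.query(Spouse).filter(Spouse.split == 0).limit(5).all()
for c in train_cands:
    # each candidate carries the two person spans declared in candidate_subclass
    print(c.person1.get_span(), '--', c.person2.get_span())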
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: A stateful RNN model to generate sequences Step3: Generate a fake dataset [WORK REQUIRED] Step4: Hyperparameters Step5: Visualize training sequences Step6: The model [WORK REQUIRED] Step7: The training loop Step8: You can re-execute this cell to continue training Step9: Inference
<ASSISTANT_TASK:> Python Code: # using Tensorflow 2 %tensorflow_version 2.x import math import numpy as np from matplotlib import pyplot as plt import tensorflow as tf print("Tensorflow version: " + tf.__version__) #@title Data formatting and display utilites [RUN ME] def dumb_minibatch_sequencer(data, batch_size, sequence_size, nb_epochs): Divides the data into batches of sequences in the simplest way: sequentially. :param data: the training sequence :param batch_size: the size of a training minibatch :param sequence_size: the unroll size of the RNN :param nb_epochs: number of epochs to train on :return: x: one batch of training sequences y: one batch of target sequences, i.e. training sequences shifted by 1 data_len = data.shape[0] nb_batches = data_len // (batch_size * sequence_size) rounded_size = nb_batches * batch_size * sequence_size xdata = data[:rounded_size] ydata = np.roll(data, -1)[:rounded_size] xdata = np.reshape(xdata, [nb_batches, batch_size, sequence_size]) ydata = np.reshape(ydata, [nb_batches, batch_size, sequence_size]) for epoch in range(nb_epochs): for batch in range(nb_batches): yield xdata[batch,:,:], ydata[batch,:,:] def rnn_minibatch_sequencer(data, batch_size, sequence_size, nb_epochs): Divides the data into batches of sequences so that all the sequences in one batch continue in the next batch. This is a generator that will keep returning batches until the input data has been seen nb_epochs times. Sequences are continued even between epochs, apart from one, the one corresponding to the end of data. The remainder at the end of data that does not fit in an full batch is ignored. :param data: the training sequence :param batch_size: the size of a training minibatch :param sequence_size: the unroll size of the RNN :param nb_epochs: number of epochs to train on :return: x: one batch of training sequences y: one batch of target sequences, i.e. training sequences shifted by 1 data_len = data.shape[0] # using (data_len-1) because we must provide for the sequence shifted by 1 too nb_batches = (data_len - 1) // (batch_size * sequence_size) assert nb_batches > 0, "Not enough data, even for a single batch. Try using a smaller batch_size." rounded_data_len = nb_batches * batch_size * sequence_size xdata = np.reshape(data[0:rounded_data_len], [batch_size, nb_batches * sequence_size]) ydata = np.reshape(data[1:rounded_data_len + 1], [batch_size, nb_batches * sequence_size]) whole_epochs = math.floor(nb_epochs) frac_epoch = nb_epochs - whole_epochs last_nb_batch = math.floor(frac_epoch * nb_batches) for epoch in range(whole_epochs+1): for batch in range(nb_batches if epoch < whole_epochs else last_nb_batch): x = xdata[:, batch * sequence_size:(batch + 1) * sequence_size] y = ydata[:, batch * sequence_size:(batch + 1) * sequence_size] x = np.roll(x, -epoch, axis=0) # to continue the sequence from epoch to epoch (do not reset rnn state!) 
y = np.roll(y, -epoch, axis=0) yield x, y plt.rcParams['figure.figsize']=(16.8,6.0) plt.rcParams['axes.grid']=True plt.rcParams['axes.linewidth']=0 plt.rcParams['grid.color']='#DDDDDD' plt.rcParams['axes.facecolor']='white' plt.rcParams['xtick.major.size']=0 plt.rcParams['ytick.major.size']=0 plt.rcParams['axes.titlesize']=15.0 def display_lr(lr_schedule, nb_epochs): x = np.arange(nb_epochs) y = [lr_schedule(i) for i in x] plt.figure(figsize=(9,5)) plt.plot(x,y) plt.title("Learning rate schedule\nmax={:.2e}, min={:.2e}".format(np.max(y), np.min(y)), y=0.85) plt.show() def display_loss(history, full_history, nb_epochs): plt.figure() plt.plot(np.arange(0, len(full_history['loss']))/steps_per_epoch, full_history['loss'], label='detailed loss') plt.plot(np.arange(1, nb_epochs+1), history['loss'], color='red', linewidth=3, label='average loss per epoch') plt.ylim(0,3*max(history['loss'][1:])) plt.xlabel('EPOCH') plt.ylabel('LOSS') plt.xlim(0, nb_epochs+0.5) plt.legend() for epoch in range(nb_epochs//2+1): plt.gca().axvspan(2*epoch, 2*epoch+1, alpha=0.05, color='grey') plt.show() def picture_this_7(features): subplot = 231 for i in range(6): plt.subplot(subplot) plt.plot(features[i]) subplot += 1 plt.show() def picture_this_8(data, prime_data, results, offset, primelen, runlen, rmselen): disp_data = data[offset:offset+primelen+runlen] colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] plt.subplot(211) plt.xlim(0, disp_data.shape[0]) plt.text(primelen,2.5,"DATA |", color=colors[1], horizontalalignment="right") plt.text(primelen,2.5,"| PREDICTED", color=colors[0], horizontalalignment="left") displayresults = np.ma.array(np.concatenate((np.zeros([primelen]), results))) displayresults = np.ma.masked_where(displayresults == 0, displayresults) plt.plot(displayresults) displaydata = np.ma.array(np.concatenate((prime_data, np.zeros([runlen])))) displaydata = np.ma.masked_where(displaydata == 0, displaydata) plt.plot(displaydata) plt.subplot(212) plt.xlim(0, disp_data.shape[0]) plt.text(primelen,2.5,"DATA |", color=colors[1], horizontalalignment="right") plt.text(primelen,2.5,"| +PREDICTED", color=colors[0], horizontalalignment="left") plt.plot(displayresults) plt.plot(disp_data) plt.axvspan(primelen, primelen+rmselen, color='grey', alpha=0.1, ymin=0.05, ymax=0.95) plt.show() rmse = math.sqrt(np.mean((data[offset+primelen:offset+primelen+rmselen] - results[:rmselen])**2)) print("RMSE on {} predictions (shaded area): {}".format(rmselen, rmse)) WAVEFORM_SELECT = 0 # select 0, 1 or 2 def create_time_series(datalen): # good waveforms frequencies = [(0.2, 0.15), (0.35, 0.3), (0.6, 0.55)] freq1, freq2 = frequencies[WAVEFORM_SELECT] noise = [np.random.random()*0.1 for i in range(datalen)] x1 = np.sin(np.arange(0,datalen) * freq1) + noise x2 = np.sin(np.arange(0,datalen) * freq2) + noise x = x1 + x2 return x.astype(np.float32) DATA_LEN = 1024*128+1 data = create_time_series(DATA_LEN) plt.plot(data[:512]) plt.show() RNN_CELLSIZE = 80 # size of the RNN cells SEQLEN = 32 # unrolled sequence length BATCHSIZE = 30 # mini-batch size DROPOUT = 0.3 # dropout regularization: probability of neurons being dropped. Should be between 0 and 0.5 # The function dumb_minibatch_sequencer splits the data into batches of sequences sequentially. 
for features, labels in dumb_minibatch_sequencer(data, BATCHSIZE, SEQLEN, nb_epochs=1): break print("Features shape: " + str(features.shape)) print("Labels shape: " + str(labels.shape)) print("Excerpt from first batch:") picture_this_7(features) def keras_model(batchsize, seqlen): model = tf.keras.Sequential([ # # YOUR MODEL HERE # This is a dummy model that always predicts 1 # tf.keras.layers.Lambda(lambda x: tf.ones([batchsize,seqlen]), input_shape=[seqlen,]) ]) # to finalize the model, specify the loss, the optimizer and metrics model.compile( loss = 'mean_squared_error', optimizer = 'adam', metrics = ['RootMeanSquaredError']) return model # Keras model callbacks # This callback records a per-step loss history instead of the average loss per # epoch that Keras normally reports. It allows you to see more problems. class LossHistory(tf.keras.callbacks.Callback): def on_train_begin(self, logs={}): self.history = {'loss': []} def on_batch_end(self, batch, logs={}): self.history['loss'].append(logs.get('loss')) # This callback resets the RNN state at each epoch class ResetStateCallback(tf.keras.callbacks.Callback): def on_epoch_begin(self, batch, logs={}): self.model.reset_states() print('reset state') reset_state = ResetStateCallback() # learning rate decay callback def lr_schedule(epoch): return 0.01 #def lr_schedule(epoch): return 0.0001 + 0.01 * math.pow(0.65, epoch) lr_decay = tf.keras.callbacks.LearningRateScheduler(lr_schedule, verbose=True) # Execute this cell to reset the model NB_EPOCHS = 8 model = keras_model(BATCHSIZE, SEQLEN) # this prints a description of the model model.summary() display_lr(lr_schedule, NB_EPOCHS) # You can re-execute this cell to continue training steps_per_epoch = (DATA_LEN-1) // SEQLEN // BATCHSIZE #generator = rnn_minibatch_sequencer(data, BATCHSIZE, SEQLEN, NB_EPOCHS) generator = dumb_minibatch_sequencer(data, BATCHSIZE, SEQLEN, NB_EPOCHS) full_history = LossHistory() history = model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=NB_EPOCHS, callbacks=[full_history, lr_decay]) display_loss(history.history, full_history.history, NB_EPOCHS) # Inference from stateful model def keras_prediction_run(model, prime_data, run_length): model.reset_states() data_len = prime_data.shape[0] #prime_data = np.expand_dims(prime_data, axis=0) # single batch with everything prime_data = np.expand_dims(prime_data, axis=-1) # each sequence is of size 1 # prime the state from data for i in range(data_len - 1): # keep last sample to serve as the input sequence for predictions model.predict(np.expand_dims(prime_data[i], axis=0)) # prediction run results = [] Yout = prime_data[-1] # start predicting from the last element of the prime_data sequence for i in range(run_length+1): Yout = model.predict(Yout) results.append(Yout[0,0]) # Yout shape is [1,1] i.e one sequence of one element return np.array(results) PRIMELEN=256 RUNLEN=512 OFFSET=20 RMSELEN=128 prime_data = data[OFFSET:OFFSET+PRIMELEN] # For inference, we need a single RNN cell (no unrolling) # Create a new model that takes a single sequence of a single value (i.e. just one RNN cell) inference_model = keras_model(1, 1) # Copy the trained weights into it inference_model.set_weights(model.get_weights()) results = keras_prediction_run(inference_model, prime_data, RUNLEN) picture_this_8(data, prime_data, results, OFFSET, PRIMELEN, RUNLEN, RMSELEN) <END_TASK>
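One possible completion of the [WORK REQUIRED] model: a sketch, not the course's official solution. The two stateful GRU layers and the final squeeze are choices made here to match the [BATCHSIZE, SEQLEN] label shape:

import tensorflow as tf

RNN_CELLSIZE = 80   # matches the hyperparameter cell above

def keras_model_sketch(batchsize, seqlen):
    model = tf.keras.Sequential([
        # [BATCHSIZE, SEQLEN] -> [BATCHSIZE, SEQLEN, 1]: RNN layers need a features axis
        tf.keras.layers.Reshape((seqlen, 1), batch_input_shape=[batchsize, seqlen]),
        # stateful=True so the hidden state carries across consecutive batches
        tf.keras.layers.GRU(RNN_CELLSIZE, return_sequences=True, stateful=True),
        tf.keras.layers.GRU(RNN_CELLSIZE, return_sequences=True, stateful=True),
        # one scalar prediction per time step, then drop the trailing axis
        tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1)),
        tf.keras.layers.Lambda(lambda x: tf.squeeze(x, axis=-1)),
    ])
    model.compile(loss='mean_squared_error', optimizer='adam',
                  metrics=['RootMeanSquaredError'])
    return model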
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Data preparation Step2: Defining the cost function Step3: Defining gradient descent Step4: Now, let's plot the fitted line on the training data
<ASSISTANT_TASK:> Python Code: import pandas as pd import matplotlib.pyplot as plt #shape = (97, 2) data = pd.read_csv('ex1data1.txt', header=None) plt.scatter(data[0], data[1]) plt.xlabel('population') plt.ylabel('profit') plt.close() import numpy as np # Now we want to have our hypothesis function: h_theta = theta' * x #creating a cols of ones ones = np.ones((len(data[0]), 1), float) #input X = pd.concat([pd.DataFrame(ones), pd.DataFrame(data[0])], axis=1).values #label y = data[1].values def computeCost(X, y, theta): m = X.shape[0] h = X.dot(theta) J = (1/(2*m)) * (np.sum(np.square(h-y))) return J theta = np.zeros(2) cost = computeCost(X, y, theta) print(cost) # run gradient descent def gradientDescent(X, y, theta, alpha, iterations): m = X.shape[0] J_history = np.zeros(iterations) for iter in np.arange(iterations): h = X.dot(theta) theta = theta - alpha * (1/m) * X.T.dot(h-y) J_history[iter] = computeCost(X, y, theta) return (theta, J_history) theta = np.zeros(2) iterations = 2000 alpha = 0.01 theta, J_history = gradientDescent(X, y, theta, alpha, iterations) plt.xlim(0,iterations) plt.plot(J_history) plt.ylabel('Cost J') plt.xlabel('Iterations') plt.show() print(theta) xs = np.arange(1,25) ones = np.ones(xs.shape, float) inputXs = pd.concat([pd.DataFrame(ones),pd.DataFrame(xs)], axis=1).values outputYs = inputXs.dot(theta) #trying to compare with Scikit-learn from sklearn.linear_model import LinearRegression clf = LinearRegression() clf.fit(X, y) outputSkLearn = clf.predict(inputXs) plt.xlim(4,24) plt.plot(inputXs, outputSkLearn, c='b', label='scikit-learn') plt.plot(inputXs, outputYs, c='r', label='gradient descent') plt.legend() plt.scatter(data[0], data[1]) plt.xlabel('population') plt.ylabel('profit') plt.show() print('Looks great :D') <END_TASK>
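As a cross-check on the learned theta, the closed-form normal equation gives the exact least-squares solution; a short sketch that reuses the X and y arrays built above:

import numpy as np

# theta* = (X^T X)^(-1) X^T y, solved without forming an explicit inverse
theta_exact = np.linalg.solve(X.T.dot(X), X.T.dot(y))
print(theta_exact)   # should closely match the theta found by gradientDescent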
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 2. Key Properties --> Flux Correction Step7: 3. Key Properties --> Genealogy Step8: 3.2. CMIP3 Parent Step9: 3.3. CMIP5 Parent Step10: 3.4. Previous Name Step11: 4. Key Properties --> Software Properties Step12: 4.2. Code Version Step13: 4.3. Code Languages Step14: 4.4. Components Structure Step15: 4.5. Coupler Step16: 5. Key Properties --> Coupling Step17: 5.2. Atmosphere Double Flux Step18: 5.3. Atmosphere Fluxes Calculation Grid Step19: 5.4. Atmosphere Relative Winds Step20: 6. Key Properties --> Tuning Applied Step21: 6.2. Global Mean Metrics Used Step22: 6.3. Regional Metrics Used Step23: 6.4. Trend Metrics Used Step24: 6.5. Energy Balance Step25: 6.6. Fresh Water Balance Step26: 7. Key Properties --> Conservation --> Heat Step27: 7.2. Atmos Ocean Interface Step28: 7.3. Atmos Land Interface Step29: 7.4. Atmos Sea-ice Interface Step30: 7.5. Ocean Seaice Interface Step31: 7.6. Land Ocean Interface Step32: 8. Key Properties --> Conservation --> Fresh Water Step33: 8.2. Atmos Ocean Interface Step34: 8.3. Atmos Land Interface Step35: 8.4. Atmos Sea-ice Interface Step36: 8.5. Ocean Seaice Interface Step37: 8.6. Runoff Step38: 8.7. Iceberg Calving Step39: 8.8. Endoreic Basins Step40: 8.9. Snow Accumulation Step41: 9. Key Properties --> Conservation --> Salt Step42: 10. Key Properties --> Conservation --> Momentum Step43: 11. Radiative Forcings Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2 Step45: 12.2. Additional Information Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4 Step47: 13.2. Additional Information Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O Step49: 14.2. Additional Information Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3 Step51: 15.2. Additional Information Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3 Step53: 16.2. Additional Information Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC Step55: 17.2. Equivalence Concentration Step56: 17.3. Additional Information Step57: 18. Radiative Forcings --> Aerosols --> SO4 Step58: 18.2. Additional Information Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon Step60: 19.2. Additional Information Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon Step62: 20.2. Additional Information Step63: 21. Radiative Forcings --> Aerosols --> Nitrate Step64: 21.2. Additional Information Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect Step66: 22.2. Aerosol Effect On Ice Clouds Step67: 22.3. Additional Information Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect Step69: 23.2. Aerosol Effect On Ice Clouds Step70: 23.3. RFaci From Sulfate Only Step71: 23.4. Additional Information Step72: 24. Radiative Forcings --> Aerosols --> Dust Step73: 24.2. Additional Information Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation Step76: 25.3. Future Explosive Volcanic Aerosol Implementation Step77: 25.4. Additional Information Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation Step80: 26.3. Future Explosive Volcanic Aerosol Implementation Step81: 26.4. Additional Information Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt Step83: 27.2. Additional Information Step84: 28. Radiative Forcings --> Other --> Land Use Step85: 28.2. Crop Change Only Step86: 28.3. Additional Information Step87: 29. Radiative Forcings --> Other --> Solar Step88: 29.2. Additional Information
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'nims-kma', 'sandbox-3', 'toplevel') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) <END_TASK>
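For orientation, this is how one of the TODO cells looks once filled in; the chosen value is purely hypothetical and must come from that property's valid-choices list:

# Illustration only: not a statement about any real model configuration.
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
DOC.set_value("OASIS3-MCT")   # one of the valid choices listed for this property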
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The number of repetitions is quite high. Step2: Let's display the generated points and the line. Step3: Run the code block above several times and notice how the equation of the line changes.
<ASSISTANT_TASK:> Python Code: # Python packages used import numpy as np # for array operations from matplotlib import pyplot as plt # for graphic output from math import sqrt # parameters tolerance = 2.5 # max distance from the line to accept a point rep = 1000 # number of repetitions n = 100 # number of inliers k = 200 # number of outliers coord_range = 100.0 # range of x, y coordinates l = [0.451, -1.0, 2.0] # line equation ax + by + c = 0 x = np.zeros(n+k) y = np.zeros(n+k) # points near to the line x[:n] = np.random.rand(n) * coord_range y[:n] = -l[0] / l[1] * x[:n] - l[2] / l[1] + (np.random.rand(n) * 2 * tolerance - tolerance) # outlier points (noise) x[n:] = np.random.rand(k) * coord_range y[n:] = np.random.rand(k) * coord_range points = np.c_[x, y, np.full(n+k, 1.0)] # put together inliers and outliers fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot() ax.scatter(x, y) ax.plot([0,100], [-l[2] / l[1], -l[0] / l[1] * 100 - l[2] / l[1]], 'r', label='original line') _ = ax.set_title('Points and the line') best_n = 0 # number of points on the best fit line so far best_i = 0 # iteration index of best fit line so far best_inliers = np.array([]) # indices of inliers of the best fit line so far for i in np.arange(rep): # select two random points p = [] # list of random indices for points while len(p) != 2: p = list(set(np.random.randint(n+k, size=2))) # remove repeated random integers p1 = points[p] # randomly selected points x1 = p1[:,0] # x coordinates y1 = p1[:,1] # y coordinates # line equation from the two points using homogeneous coordinates l1 = np.array([y1[0] - y1[1], x1[1] - x1[0], x1[0] * y1[1] - x1[1] * y1[0]]) l1 = l1 / sqrt(l1[0]**2 + l1[1]**2) # normalize # select close points inliers = points[np.abs(np.dot(points, l1)) < tolerance] if inliers.shape[0] > best_n: # better solution found best_n = inliers.shape[0] best_i = i best_inliers = inliers.copy() best_line = l1.copy() print(f'Best solution after {best_i} repetitions, number of points on the line: {best_n}, line: {best_line}') fig = plt.figure(figsize=(8, 8)) ax = fig.add_subplot() ax.scatter(x, y) ax.scatter(best_inliers[:,0], best_inliers[:,1], c='g') ax.plot([0,100], [-l[2] / l[1], -l[0] / l[1] * 100 - l[2] / l[1]], 'g', label='original line') ax.plot([0,100], [-best_line[2] / best_line[1], -best_line[0] / best_line[1] * 100 - best_line[2] / best_line[1]], 'r', label='best fit') ax.legend() _ = ax.set_title('RANSAC line') <END_TASK>
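A common refinement step after RANSAC is to re-fit a least-squares line through the accepted inliers; a sketch that reuses the best_inliers array produced by the loop above:

import numpy as np

# least-squares y = m*x + c through the inliers (columns 0 and 1 hold x and y)
A = np.c_[best_inliers[:, 0], np.ones(best_inliers.shape[0])]
m_fit, c_fit = np.linalg.lstsq(A, best_inliers[:, 1], rcond=None)[0]
print('refined line: y = %.4f * x + %.4f' % (m_fit, c_fit))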
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Spearman Rank Correlation Step2: Let's take a look at the distribution of measured correlation coefficients and compare the Spearman with the regular metric. Step3: Now let's see how the Spearman rank and regular coefficients cope when we add more noise to the situation. Step4: We can see that the Spearman rank correlation copes with the non-linear relationship much better at most levels of noise. Interestingly, at very high levels, it seems to do worse than regular correlation. Step5: Sure enough, the relationship is not detected. It is important when using both regular and Spearman correlation to check for lagged relationships by offsetting your data and testing for different offset values. Step6: We now have ourselves an $r_S$, but how do we interpret it? It's positive, so we know that the variables are not anticorrelated. It's not very large, so we know they aren't perfectly positively correlated, but it's hard to say from a glance just how significant the correlation is. Luckily, spearmanr also computes the p-value for this coefficient and sample size for us. We can see that the p-value here is above 0.05; therefore, we cannot claim that $X$ and $Y$ are correlated. Step7: Our p-value is below the cutoff, which means we accept the hypothesis that the two are correlated. The negative coefficient indicates that there is a negative correlation, and that more expensive mutual funds have worse Sharpe ratios. However, there is some weird clustering in the data; it seems there are expensive groups with low Sharpe ratios, and a main group whose Sharpe ratio is unrelated to the expense. Further analysis would be required to understand what's going on here.
<ASSISTANT_TASK:>
Python Code:

import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import math
# Example of ranking data
l = [10, 9, 5, 7, 5]
print 'Raw data: ', l
print 'Ranking: ', list(stats.rankdata(l, method='average'))
## Let's see an example of this
n = 100
def compare_correlation_and_spearman_rank(n, noise):
    X = np.random.poisson(size=n)
    Y = np.exp(X) + noise * np.random.normal(size=n)
    Xrank = stats.rankdata(X, method='average')
    Yrank = stats.rankdata(Y, method='average')
    diffs = Xrank - Yrank # order doesn't matter since we'll be squaring these values
    r_s = 1 - 6*sum(diffs*diffs)/(n*(n**2 - 1))
    c_c = np.corrcoef(X, Y)[0,1]
    return r_s, c_c
experiments = 1000
spearman_dist = np.ndarray(experiments)
correlation_dist = np.ndarray(experiments)
for i in range(experiments):
    r_s, c_c = compare_correlation_and_spearman_rank(n, 1.0)
    spearman_dist[i] = r_s
    correlation_dist[i] = c_c
print 'Spearman Rank Coefficient: ' + str(np.mean(spearman_dist))
# Compare to the regular correlation coefficient
print 'Correlation coefficient: ' + str(np.mean(correlation_dist))
plt.hist(spearman_dist, bins=50, alpha=0.5)
plt.hist(correlation_dist, bins=50, alpha=0.5)
plt.legend(['Spearman Rank', 'Regular Correlation'])
plt.xlabel('Correlation Coefficient')
plt.ylabel('Frequency');
n = 100
noises = np.linspace(0, 3, 30)
experiments = 100
spearman = np.ndarray(len(noises))
correlation = np.ndarray(len(noises))
for i in range(len(noises)):
    # Run many experiments for each noise setting
    rank_coef = 0.0
    corr_coef = 0.0
    noise = noises[i]
    for j in range(experiments):
        r_s, c_c = compare_correlation_and_spearman_rank(n, noise)
        rank_coef += r_s
        corr_coef += c_c
    spearman[i] = rank_coef/experiments
    correlation[i] = corr_coef/experiments
plt.scatter(noises, spearman, color='r')
plt.scatter(noises, correlation)
plt.legend(['Spearman Rank', 'Regular Correlation'])
plt.xlabel('Amount of Noise')
plt.ylabel('Average Correlation Coefficient')
n = 100
X = np.random.rand(n)
Xrank = stats.rankdata(X, method='average')
# Lag X by two elements: pad with two dummy values and drop the last two, i.e. X[:(n-2)]
Yrank = stats.rankdata([1,1] + list(X[:(n-2)]), method='average')
diffs = Xrank - Yrank # order doesn't matter since we'll be squaring these values
r_s = 1 - 6*sum(diffs*diffs)/(n*(n**2 - 1))
print r_s
# Generate two random data sets
np.random.seed(161)
X = np.random.rand(10)
Y = np.random.rand(10)
r_s = stats.spearmanr(X, Y)
print 'Spearman Rank Coefficient: ', r_s[0]
print 'p-value: ', r_s[1]
mutual_fund_data = local_csv('mutual_fund_data.csv')
expense = mutual_fund_data['Annual Expense Ratio'].values
sharpe = mutual_fund_data['Three Year Sharpe Ratio'].values
plt.scatter(expense, sharpe)
plt.xlabel('Expense Ratio')
plt.ylabel('Sharpe Ratio')
r_S = stats.spearmanr(expense, sharpe)
print 'Spearman Rank Coefficient: ', r_S[0]
print 'p-value: ', r_S[1]
symbol_list = ['A', 'AA', 'AAC', 'AAL', 'AAMC', 'AAME', 'AAN', 'AAOI', 'AAON', 'AAP', 'AAPL',
               'AAT', 'AAU', 'AAV', 'AAVL', 'AAWW', 'AB', 'ABAC', 'ABAX', 'ABB', 'ABBV', 'ABC',
               'ABCB', 'ABCD', 'ABCO', 'ABCW', 'ABDC', 'ABEV', 'ABG', 'ABGB']
# Get the returns over the lookback window
start = '2014-12-01'
end = '2015-01-01'
historical_returns = get_pricing(symbol_list, fields='price', start_date=start, end_date=end).pct_change()[1:]
# Compute our stock score
scores = np.mean(historical_returns)
print 'Our Scores\n'
print scores
print '\n'
start = '2015-01-01'
end = '2015-02-01'
walk_forward_returns = get_pricing(symbol_list, fields='price', start_date=start,
end_date=end).pct_change()[1:] walk_forward_returns = np.mean(walk_forward_returns) print 'The Walk Forward Returns\n' print walk_forward_returns print '\n' plt.scatter(scores, walk_forward_returns) plt.xlabel('Scores') plt.ylabel('Walk Forward Returns') r_s = stats.spearmanr(scores, walk_forward_returns) print 'Correlation Coefficient: ' + str(r_s[0]) print 'p-value: ' + str(r_s[1]) <END_TASK>
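The lag check mentioned in the description can be scripted directly. Here is a small self-contained sketch (synthetic data, scipy's spearmanr) that scans a few candidate offsets; the two-element pad and the lag range are illustrative choices, not values from the original notebook.

import numpy as np
import scipy.stats as stats

np.random.seed(0)
n = 100
X = np.random.rand(n)
Y = np.concatenate(([0.5, 0.5], X[:n - 2]))   # Y lags X by two observations

# Scan candidate offsets; the true lag shows up as the peak rank correlation
for lag in range(5):
    r, p = stats.spearmanr(X[:n - lag], Y[lag:])
    print 'lag=%d  r_s=%.3f  p=%.2e' % (lag, r, p)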
<SYSTEM_TASK:>
 Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read it
Step2: Read it
Step3: 1
Step4: Create a folder to which we export the pages of the PDF as images.
Step5: 2
Step6: 3
Step7: Reading in the recognized text
Step8: Splitting the lines on newline characters \n
Step9: Keep only the lines that start with a digit
Step10: Fixing typical misrecognized characters
Step11: Further manual filtering of faulty lines
Step12: Processing the data
Step13: extra
<ASSISTANT_TASK:>
Python Code:

!pip install Pillow
!pip install pdf2image
!pip install pytesseract
!pip install opencv-python
from PIL import Image
import sys
from pdf2image import convert_from_path
import os
# Path of the PDF
PDF_file = "Lista inscrisi Admitere Licenta sept 11.09.2015.pdf"
if not os.path.exists('pdf/'+PDF_file+'/'):
    os.makedirs('pdf/'+PDF_file+'/')
# Store all the pages of the PDF in a variable
# the second argument is the DPI resolution; try values between 300 and 600
pages = convert_from_path(PDF_file, 300)
# Counter used to name the image of each PDF page
image_counter = 1
# Iterate through all the pages stored above
for page in pages:
    # Declaring filename for each page of PDF as JPG
    # For each page, filename will be:
    # PDF page 1 -> page_1.jpg
    # PDF page 2 -> page_2.jpg
    # PDF page 3 -> page_3.jpg
    # ....
    # PDF page n -> page_n.jpg
    filename = 'pdf/'+PDF_file+"/page_"+str(image_counter)+".jpg"
    print(image_counter, 'page done..')
    # Save the image of the page in system
    page.save(filename, 'JPEG')
    # Increment the counter to update filename
    image_counter = image_counter + 1
import pytesseract
# Variable to get count of total number of pages
filelimit = image_counter-1
# Creating a text file to write the output
outfile = 'pdf/'+PDF_file+"/text.txt"
# Open the file in append mode so that
# all contents of all images are added to the same file
f = open(outfile, "a")
# Iterate from 1 to total number of pages
for i in range(1, filelimit + 1):
    # Set filename to recognize text from
    # Again, these files will be:
    # page_1.jpg
    # page_2.jpg
    # ....
    # page_n.jpg
    filename = 'pdf/'+PDF_file+"/page_"+str(i)+".jpg"
    # Recognize the text in the image as a string using pytesseract
    text = str(pytesseract.image_to_string(Image.open(filename)))
    print(i, 'page done..')
    # The recognized text is stored in variable text
    # Any string processing may be applied on text
    # Here, basic formatting has been done:
    # In many PDFs, at a line ending, if a word can't
    # be written fully, a hyphen is added and the rest
    # of the word is written on the next line.
    # To undo this, we replace every '-\n' with ''.
    text = text.replace('-\n', '')
    # Finally, write the processed text to the file.
    f.write(text)
# Close the file after writing all the text.
f.close()
import pandas as pd
pages=open(outfile,'r').read()
lines=[i for i in pages.split('\n') if i]
good_lines=[line for line in lines if line[0].isdigit()]
good_lines=[line.replace('_',' ').replace('. ',' ').replace('-',' ').replace('—',' ')\
    .replace('~',' ').replace('=',' ').replace('  ',' ').replace('  ',' ')\
    .replace('»',' ') for line in good_lines]
good_lines[:10]
for i,l in enumerate(good_lines):
    print(str(i)+':::'+l)
# manual fixes for lines the OCR mangled (indices found by inspecting the printout above)
good_lines[126]=good_lines[126].split(',ve Nr Leg')[0]
good_lines[146]=good_lines[146].replace('7 AL','7,41')
helper=good_lines[195].split('199 4514')
good_lines[195]=helper[0]
good_lines.append('199 4514'+helper[1])
clean_lines=[]
for line in good_lines:
    values=line.split(' ')
    try:
        # repair letters misread as digits in the ID/name columns
        for k in [2,3,4]:
            values[k]=values[k].replace('1','I').replace('l','I')\
                .replace('|','I').replace('7','I').replace('0','O')
        counter=0
        name_start=100
        name_end=0
        while counter<len(values)-1:
            counter+=1
            try:
                if not values[counter][0].isdigit():
                    name_start=min(name_start,counter)
                    if values[counter+1][0].isdigit():
                        name_end=counter+1
                        counter=len(values)
            except:
                print('ERROR: '+line)
        clean_values=values[:name_start]+[' '.join(values[name_start:name_end])]+values[name_end:]
        clean_values=[szo for szo in clean_values if szo]   # szo = word (identifier kept from the original)
        clean_lines.append(clean_values)
    except:
        print('ERROR: '+line)
df=pd.DataFrame(clean_lines)
df=df[range(6)]
df.columns=['ID','Leg','Nume','Admitere','Bac 2','Lb. Mat.']
df.head()
df.to_excel('pdf/'+PDF_file+'/data.xlsx')
for line in clean_lines:
    try:
        # jegy = grade (identifier kept from the original); repair digits misread as letters
        for i in [3,4,5]:
            jegy=line[i]
            jegy=jegy.replace('.',',')
            jegy=jegy.replace('I','1').replace('l','1')\
                .replace('|','1').replace('A','4').replace('O','0')
            if jegy[0]==',':jegy=jegy[1:]
            if jegy[-1]==',':jegy=jegy[:-1]
            if jegy!='10,00':
                if jegy!='1000':
                    if len(jegy)<4:
                        jegy=jegy[0]+','+jegy[1:]
                    jegy=float(jegy.replace(',','.'))
                else:
                    jegy=10
            else:
                jegy=10
            line[i]=jegy
    except:
        print(line)
df=pd.DataFrame(clean_lines)
df=df[range(6)]
df.columns=['ID','Leg','Nume','Admitere','Bac 2','Lb. Mat.']
df.head()
df['Mean']=df.mean(axis=1)
df
<END_TASK>
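The record above installs opencv-python but never uses it; one natural use is to binarize the page images before OCR, which often improves recognition of scanned tables. The helper below is a sketch under that assumption — the function name ocr_with_threshold and the --psm 6 page-segmentation choice are mine, not part of the original code.

import cv2
import pytesseract
from PIL import Image

def ocr_with_threshold(image_path):
    # Load the exported page, convert to grayscale, and binarize with Otsu's method
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # --psm 6 assumes a single uniform block of text, a reasonable guess for tables
    return pytesseract.image_to_string(Image.fromarray(binary), config='--psm 6')

# Example call on the first exported page (path follows the naming used above)
# print(ocr_with_threshold('pdf/' + PDF_file + '/page_1.jpg'))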
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Hat potential Step2: Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$ Step3: Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$.
<ASSISTANT_TASK:>
Python Code:

%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
def hat(x,a,b):
    return -1*a*(x**2) + b*(x**4)
assert hat(0.0, 1.0, 1.0)==0.0
assert hat(1.0, 10.0, 1.0)==-9.0
a = 5.0
b = 1.0
x = np.linspace(-3,3,100)
v = hat(x,a,b)
graph = plt.plot(x,v)
assert True # leave this to grade the plot
f = lambda g: hat(g,a,b)
x1 = float(opt.minimize(f, -2).x)
x2 = float(opt.minimize(f, 2).x)
print(x1)
print(x2)
graph = plt.plot(x,v)
plt.plot([x1,x2],[f(x1),f(x2)],'ro')
assert True # leave this for grading the plot
<END_TASK>
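Worth noting: for this quartic the minima are available in closed form, which gives a cross-check on the numerical result above. The short sketch below is self-contained (it redefines hat) and simply restates the derivative condition V'(x) = -2*a*x + 4*b*x**3 = 0.

from math import sqrt

def hat(x, a, b):
    return -a * x**2 + b * x**4

a, b = 5.0, 1.0
x_min = sqrt(a / (2.0 * b))       # nonzero roots of the derivative: x = ±sqrt(a/(2b))
print(-x_min, x_min)              # approximately -1.5811 and 1.5811
print(hat(x_min, a, b))           # minimum value -a**2/(4*b) = -6.25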
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Python built-in string methods Step3: Regular expression methods
<ASSISTANT_TASK:> Python Code: import pandas as pd import numpy as np import re val = 'a, b, guido , bajo' print(val) # splitting the data by , and strip the whitespace val2 = [x.strip() for x in val.split(',')] val2 # tuple assignment first, second, third, four = val2 first + "::" + second + "::" + third # practical method is join "::".join(val2) # checking if guido is in val2 'guido' in val2 # searching in string print("index",val.index(',')) print("find",val.find(',')) print("find",val.find(':')) # find and index behave same if string is available # print("index",val.index(':')) # index throws an exception where find returns -1 # get string counts print(", -- ", val.count(','),"\n" "a --", val.count('a')) # replace will substitute occurrences of one pattern for another. This is commonly used # to delete patterns, too, by passing an empty string: val.replace(',', '::') val.replace(',', '') import re text = "foo bar\t baz \tqux" text re.split('\s+', text) # compiled version of regex regex = re.compile('\s+') regex.split(text) # to get a list of all patterns matching the regex, you can use the findall method: regex.findall(text) # match and search are closely related to findall. While findall returns all matches in a # string, search returns only the first match. More rigidly, match only matches at the # beginning of the string. text = Dave dave@google.com Steve steve@gmail.com Rob rob@gmail.com Ryan ryan@yahoo.com pattern = r'[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}' # re.IGNORECASE makes the regex case-insensitive regex = re.compile(pattern, flags=re.IGNORECASE) regex.findall(text) # search returns a special match object for the first email address in the text. For the # above regex, the match object can only tell us the start and end position of the pattern # in the string: m = regex.search(text) m text[m.start():m.end()] # regex.match returns None, as it only will match if the pattern occurs at the start of the string: print(regex.match(text)) # sub will return a new string with occurrences of the pattern replaced by the a new string: print (regex.sub('REDACTED', text)) # to find email addresses and simultaneously segment each address # into its 3 components: username, domain name, and domain suffix. # To do this, put parentheses around the parts of the pattern to segment: pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})' regex = re.compile(pattern, flags=re.IGNORECASE) # A match object produced by this modified regex returns a tuple of the pattern components # with its groups method m = regex.match('wesm@bright.net') m.groups() # findall returns a list of tuples when the pattern has groups: regex.findall(text) # sub also has access to groups in each match using special symbols like \1, \2, etc.: print (regex.sub(r'Username: \1, Domain: \2, Suffix: \3', text)) data = {'Dave': 'dave@google.com', 'Steve': 'steve@gmail.com', 'Rob': 'rob@gmail.com', 'Wes': np.nan} data = pd.Series(data) data # String and regular expression methods can be applied (passing a lambda or other function) # to each value using data.map, but it will fail on the NA. To cope with this, Series # has concise methods for string operations that skip NA values. These are accessed # through Seriesโ€™s str attribute; for example, we could check whether each email address # has 'gmail' in it with str.contains: data.str.contains('gmail') pattern = '([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\\.([A-Z]{2,4})' data.str.findall(pattern, flags=re.IGNORECASE) # There are a couple of ways to do vectorized element retrieval. 
Either use str.get or # index into the str attribute: matches = data.str.match(pattern, flags=re.IGNORECASE) matches matches.str.get(1) matches.str[0] data.str[:5] <END_TASK>
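One modern pandas shortcut for the same task: Series.str.extract applies the regex once and returns one column per capture group, with NaN rows where the pattern (or the value) is missing. A small self-contained sketch; the column names are illustrative.

import re
import numpy as np
import pandas as pd

data = pd.Series({'Dave': 'dave@google.com', 'Steve': 'steve@gmail.com',
                  'Rob': 'rob@gmail.com', 'Wes': np.nan})
pattern = r'([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})'
parts = data.str.extract(pattern, flags=re.IGNORECASE)   # one column per group
parts.columns = ['username', 'domain', 'suffix']
print(parts)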
<SYSTEM_TASK:>
 Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: WARNING
Step3: (1b) Sparse vectors
Step4: (1c) OHE features as sparse vectors
Step7: Define an OHE function
Step8: (1e) Apply OHE to a dataset
Step9: Part 2
Step10: (2b) OHE dictionary from distinct features
Step12: (2c) Automated creation of an OHE dictionary
Step13: Part 3
Step14: (3a) Loading and splitting the data
Step16: (3b) Extract features
Step17: (3c) Create an OHE dictionary from the dataset
Step19: (3d) Apply OHE to the dataset
Step22: Visualization 1
Step24: (3e) Handling unseen features
Step25: Part 4
Step27: (4b) Log loss
Step28: (4c) Baseline log loss
Step30: (4d) Predicted probability
Step32: (4e) Evaluate the model
Step33: (4f) Validation log loss
Step34: Visualization 2
Step36: Part 5
Step38: (5b) Creating hashed features
Step40: (5c) Sparsity
<ASSISTANT_TASK:> Python Code: labVersion = 'cs190.1x-lab4-1.0.4' print labVersion # Data for manual OHE # Note: the first data point does not include any value for the optional third feature sampleOne = [(0, 'mouse'), (1, 'black')] sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')] sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')] sampleDataRDD = sc.parallelize([sampleOne, sampleTwo, sampleThree]) print sampleDataRDD.count() # print sampleDataRDD.take(5) # TODO: Replace <FILL IN> with appropriate code sampleOHEDictManual = {} sampleOHEDictManual[(0,'bear')] = 0 sampleOHEDictManual[(0,'cat')] = 1 sampleOHEDictManual[(0,'mouse')] = 2 sampleOHEDictManual[(1, 'black')] = 3 sampleOHEDictManual[(1, 'tabby')] = 4 sampleOHEDictManual[(2, 'mouse')] = 5 sampleOHEDictManual[(2, 'salmon')] = 6 print len(sampleOHEDictManual) # TEST One-hot-encoding (1a) from test_helper import Test Test.assertEqualsHashed(sampleOHEDictManual[(0,'bear')], 'b6589fc6ab0dc82cf12099d1c2d40ab994e8410c', "incorrect value for sampleOHEDictManual[(0,'bear')]") Test.assertEqualsHashed(sampleOHEDictManual[(0,'cat')], '356a192b7913b04c54574d18c28d46e6395428ab', "incorrect value for sampleOHEDictManual[(0,'cat')]") Test.assertEqualsHashed(sampleOHEDictManual[(0,'mouse')], 'da4b9237bacccdf19c0760cab7aec4a8359010b0', "incorrect value for sampleOHEDictManual[(0,'mouse')]") Test.assertEqualsHashed(sampleOHEDictManual[(1,'black')], '77de68daecd823babbb58edb1c8e14d7106e83bb', "incorrect value for sampleOHEDictManual[(1,'black')]") Test.assertEqualsHashed(sampleOHEDictManual[(1,'tabby')], '1b6453892473a467d07372d45eb05abc2031647a', "incorrect value for sampleOHEDictManual[(1,'tabby')]") Test.assertEqualsHashed(sampleOHEDictManual[(2,'mouse')], 'ac3478d69a3c81fa62e60f5c3696165a4e5e6ac4', "incorrect value for sampleOHEDictManual[(2,'mouse')]") Test.assertEqualsHashed(sampleOHEDictManual[(2,'salmon')], 'c1dfd96eea8cc2b62785275bca38ac261256e278', "incorrect value for sampleOHEDictManual[(2,'salmon')]") Test.assertEquals(len(sampleOHEDictManual.keys()), 7, 'incorrect number of keys in sampleOHEDictManual') import numpy as np from pyspark.mllib.linalg import SparseVector # TODO: Replace <FILL IN> with appropriate code aDense = np.array([0., 3., 0., 4.]) aSparse = SparseVector(len(aDense), range(0,len(aDense)), aDense) bDense = np.array([0., 0., 0., 1.]) bSparse = SparseVector(len(bDense), range(0,len(bDense)), bDense) w = np.array([0.4, 3.1, -1.4, -.5]) print aDense.dot(w) print aSparse.dot(w) print bDense.dot(w) print bSparse.dot(w) print aDense print bDense print aSparse print bSparse # TEST Sparse Vectors (1b) Test.assertTrue(isinstance(aSparse, SparseVector), 'aSparse needs to be an instance of SparseVector') Test.assertTrue(isinstance(bSparse, SparseVector), 'aSparse needs to be an instance of SparseVector') Test.assertTrue(aDense.dot(w) == aSparse.dot(w), 'dot product of aDense and w should equal dot product of aSparse and w') Test.assertTrue(bDense.dot(w) == bSparse.dot(w), 'dot product of bDense and w should equal dot product of bSparse and w') # Reminder of the sample features # sampleOne = [(0, 'mouse'), (1, 'black')] # sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')] # sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')] # TODO: Replace <FILL IN> with appropriate code sampleOneOHEFeatManual = SparseVector(7, [2,3], np.array([1.0,1.0])) sampleTwoOHEFeatManual = SparseVector(7, [1,4,5], np.array([1.0,1.0,1.0])) sampleThreeOHEFeatManual = SparseVector(7, [0,3,6], np.array([1.0,1.0,1.0])) print sampleOneOHEFeatManual 
print sampleTwoOHEFeatManual print sampleThreeOHEFeatManual # TEST OHE Features as sparse vectors (1c) Test.assertTrue(isinstance(sampleOneOHEFeatManual, SparseVector), 'sampleOneOHEFeatManual needs to be a SparseVector') Test.assertTrue(isinstance(sampleTwoOHEFeatManual, SparseVector), 'sampleTwoOHEFeatManual needs to be a SparseVector') Test.assertTrue(isinstance(sampleThreeOHEFeatManual, SparseVector), 'sampleThreeOHEFeatManual needs to be a SparseVector') Test.assertEqualsHashed(sampleOneOHEFeatManual, 'ecc00223d141b7bd0913d52377cee2cf5783abd6', 'incorrect value for sampleOneOHEFeatManual') Test.assertEqualsHashed(sampleTwoOHEFeatManual, '26b023f4109e3b8ab32241938e2e9b9e9d62720a', 'incorrect value for sampleTwoOHEFeatManual') Test.assertEqualsHashed(sampleThreeOHEFeatManual, 'c04134fd603ae115395b29dcabe9d0c66fbdc8a7', 'incorrect value for sampleThreeOHEFeatManual') # TODO: Replace <FILL IN> with appropriate code def oneHotEncoding_old(rawFeats, OHEDict, numOHEFeats): Produce a one-hot-encoding from a list of features and an OHE dictionary. Note: You should ensure that the indices used to create a SparseVector are sorted. Args: rawFeats (list of (int, str)): The features corresponding to a single observation. Each feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne) OHEDict (dict): A mapping of (featureID, value) to unique integer. numOHEFeats (int): The total number of unique OHE features (combinations of featureID and value). Returns: SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique identifiers for the (featureID, value) combinations that occur in the observation and with values equal to 1.0. newFeats = [] idx = [] for k,i in sorted(OHEDict.items(), key=lambda x: x[1]): if k in rawFeats: newFeats += [1.0] idx += [i] return SparseVector(numOHEFeats, idx, np.array(newFeats)) # TODO: Replace <FILL IN> with appropriate code def oneHotEncoding(rawFeats, OHEDict, numOHEFeats): Produce a one-hot-encoding from a list of features and an OHE dictionary. Note: You should ensure that the indices used to create a SparseVector are sorted. Args: rawFeats (list of (int, str)): The features corresponding to a single observation. Each feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne) OHEDict (dict): A mapping of (featureID, value) to unique integer. numOHEFeats (int): The total number of unique OHE features (combinations of featureID and value). Returns: SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique identifiers for the (featureID, value) combinations that occur in the observation and with values equal to 1.0. 
newFeats = [] idx = [] for f in rawFeats: if f in OHEDict: newFeats += [1.0] idx += [OHEDict[f]] return SparseVector(numOHEFeats, sorted(idx), np.array(newFeats)) # Calculate the number of features in sampleOHEDictManual numSampleOHEFeats = len(sampleOHEDictManual) # Run oneHotEnoding on sampleOne sampleOneOHEFeat = oneHotEncoding(sampleOne,sampleOHEDictManual,numSampleOHEFeats) print sampleOneOHEFeat # TEST Define an OHE Function (1d) Test.assertTrue(sampleOneOHEFeat == sampleOneOHEFeatManual, 'sampleOneOHEFeat should equal sampleOneOHEFeatManual') Test.assertEquals(sampleOneOHEFeat, SparseVector(7, [2,3], [1.0,1.0]), 'incorrect value for sampleOneOHEFeat') Test.assertEquals(oneHotEncoding([(1, 'black'), (0, 'mouse')], sampleOHEDictManual, numSampleOHEFeats), SparseVector(7, [2,3], [1.0,1.0]), 'incorrect definition for oneHotEncoding') # TODO: Replace <FILL IN> with appropriate code def toOHE(row): return oneHotEncoding(row,sampleOHEDictManual,numSampleOHEFeats) sampleOHEData = sampleDataRDD.map(toOHE) print sampleOHEData.collect() # TEST Apply OHE to a dataset (1e) sampleOHEDataValues = sampleOHEData.collect() Test.assertTrue(len(sampleOHEDataValues) == 3, 'sampleOHEData should have three elements') Test.assertEquals(sampleOHEDataValues[0], SparseVector(7, {2: 1.0, 3: 1.0}), 'incorrect OHE for first sample') Test.assertEquals(sampleOHEDataValues[1], SparseVector(7, {1: 1.0, 4: 1.0, 5: 1.0}), 'incorrect OHE for second sample') Test.assertEquals(sampleOHEDataValues[2], SparseVector(7, {0: 1.0, 3: 1.0, 6: 1.0}), 'incorrect OHE for third sample') flat = sampleDataRDD.flatMap(lambda r: r).distinct() print flat.count() for i in flat.take(8): print i # TODO: Replace <FILL IN> with appropriate code sampleDistinctFeats = (sampleDataRDD.flatMap(lambda r: r).distinct()) # TEST Pair RDD of (featureID, category) (2a) Test.assertEquals(sorted(sampleDistinctFeats.collect()), [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'), (1, 'tabby'), (2, 'mouse'), (2, 'salmon')], 'incorrect value for sampleDistinctFeats') # TODO: Replace <FILL IN> with appropriate code sampleOHEDict = sampleDistinctFeats.zipWithIndex().collectAsMap() print sampleOHEDict # TEST OHE Dictionary from distinct features (2b) Test.assertEquals(sorted(sampleOHEDict.keys()), [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'), (1, 'tabby'), (2, 'mouse'), (2, 'salmon')], 'sampleOHEDict has unexpected keys') Test.assertEquals(sorted(sampleOHEDict.values()), range(7), 'sampleOHEDict has unexpected values') # TODO: Replace <FILL IN> with appropriate code def createOneHotDict(inputData): Creates a one-hot-encoder dictionary based on the input data. Args: inputData (RDD of lists of (int, str)): An RDD of observations where each observation is made up of a list of (featureID, value) tuples. Returns: dict: A dictionary where the keys are (featureID, value) tuples and map to values that are unique integers. 
flat = inputData.flatMap(lambda r: r).distinct() return flat.zipWithIndex().collectAsMap() sampleOHEDictAuto = createOneHotDict(sampleDataRDD) print sampleOHEDictAuto # TEST Automated creation of an OHE dictionary (2c) Test.assertEquals(sorted(sampleOHEDictAuto.keys()), [(0, 'bear'), (0, 'cat'), (0, 'mouse'), (1, 'black'), (1, 'tabby'), (2, 'mouse'), (2, 'salmon')], 'sampleOHEDictAuto has unexpected keys') Test.assertEquals(sorted(sampleOHEDictAuto.values()), range(7), 'sampleOHEDictAuto has unexpected values') import os.path baseDir = os.path.join('/Users/bill.walrond/Documents/dsprj/data') inputPath = os.path.join('CS190_Mod4', 'dac_sample.txt') fileName = os.path.join(baseDir, inputPath) if os.path.isfile(fileName): rawData = (sc .textFile(fileName, 2) .map(lambda x: x.replace('\t', ','))) # work with either ',' or '\t' separated data print rawData.take(1) print rawData.count() else: print 'Couldn\'t find filename: %s' % fileName # TODO: Replace <FILL IN> with appropriate code import glob from io import BytesIO import os.path import tarfile import urllib import urlparse # Paste in url, url should end with: dac_sample.tar.gz url = '<FILL IN>' url = url.strip() if 'rawData' in locals(): print 'rawData already loaded. Nothing to do.' elif not url.endswith('dac_sample.tar.gz'): print 'Check your download url. Are you downloading the Sample dataset?' else: try: tmp = BytesIO() urlHandle = urllib.urlopen(url) tmp.write(urlHandle.read()) tmp.seek(0) tarFile = tarfile.open(fileobj=tmp) dacSample = tarFile.extractfile('dac_sample.txt') dacSample = [unicode(x.replace('\n', '').replace('\t', ',')) for x in dacSample] rawData = (sc .parallelize(dacSample, 1) # Create an RDD .zipWithIndex() # Enumerate lines .map(lambda (v, i): (i, v)) # Use line index as key .partitionBy(2, lambda i: not (i < 50026)) # Match sc.textFile partitioning .map(lambda (i, v): v)) # Remove index print 'rawData loaded from url' print rawData.take(1) except IOError: print 'Unable to unpack: {0}'.format(url) # TODO: Replace <FILL IN> with appropriate code weights = [.8, .1, .1] seed = 42 # Use randomSplit with weights and seed rawTrainData, rawValidationData, rawTestData = rawData.randomSplit(weights, seed) # Cache the data rawTrainData.cache() rawValidationData.cache() rawTestData.cache() nTrain = rawTrainData.count() nVal = rawValidationData.count() nTest = rawTestData.count() print nTrain, nVal, nTest, nTrain + nVal + nTest print rawTrainData.take(1) # TEST Loading and splitting the data (3a) Test.assertTrue(all([rawTrainData.is_cached, rawValidationData.is_cached, rawTestData.is_cached]), 'you must cache the split data') Test.assertEquals(nTrain, 79911, 'incorrect value for nTrain') Test.assertEquals(nVal, 10075, 'incorrect value for nVal') Test.assertEquals(nTest, 10014, 'incorrect value for nTest') # TODO: Replace <FILL IN> with appropriate code def parsePoint(point): Converts a comma separated string into a list of (featureID, value) tuples. Note: featureIDs should start at 0 and increase to the number of features - 1. Args: point (str): A comma separated string where the first value is the label and the rest are features. Returns: list: A list of (featureID, value) tuples. 
# make a list of (featureID, value) tuples, skipping the first element (the label) return [(k,v) for k,v in enumerate(point[2:].split(','))] parsedTrainFeat = rawTrainData.map(parsePoint) print parsedTrainFeat.count() numCategories = (parsedTrainFeat .flatMap(lambda x: x) .distinct() .map(lambda x: (x[0], 1)) .reduceByKey(lambda x, y: x + y) .sortByKey() .collect()) print numCategories[2][1] # TEST Extract features (3b) Test.assertEquals(numCategories[2][1], 855, 'incorrect implementation of parsePoint') Test.assertEquals(numCategories[32][1], 4, 'incorrect implementation of parsePoint') # TODO: Replace <FILL IN> with appropriate code ctrOHEDict = createOneHotDict(parsedTrainFeat) print 'Len of ctrOHEDict: {0}'.format(len(ctrOHEDict)) numCtrOHEFeats = len(ctrOHEDict.keys()) print numCtrOHEFeats print ctrOHEDict.has_key((0, '')) theItems = ctrOHEDict.items() for i in range(0,9): print theItems[i] # TEST Create an OHE dictionary from the dataset (3c) Test.assertEquals(numCtrOHEFeats, 233286, 'incorrect number of features in ctrOHEDict') Test.assertTrue((0, '') in ctrOHEDict, 'incorrect features in ctrOHEDict') from pyspark.mllib.regression import LabeledPoint print rawTrainData.count() r = rawTrainData.first() l = parsePoint(r) print 'Length of parsed list: %d' % len(l) print 'Here\'s the list ...' print l sv = oneHotEncoding(l, ctrOHEDict, numCtrOHEFeats) print 'Here\'s the sparsevector ...' print sv lp = LabeledPoint(float(r[:1]), sv) print 'Here\'s the labeledpoint ...' print lp # TODO: Replace <FILL IN> with appropriate code def parseOHEPoint(point, OHEDict, numOHEFeats): Obtain the label and feature vector for this raw observation. Note: You must use the function `oneHotEncoding` in this implementation or later portions of this lab may not function as expected. Args: point (str): A comma separated string where the first value is the label and the rest are features. OHEDict (dict of (int, str) to int): Mapping of (featureID, value) to unique integer. numOHEFeats (int): The number of unique features in the training dataset. Returns: LabeledPoint: Contains the label for the observation and the one-hot-encoding of the raw features based on the provided OHE dictionary. # first, get the label label = float(point[:1]) parsed = parsePoint(point) features = oneHotEncoding(parsed, OHEDict, numOHEFeats) # return parsed return LabeledPoint(label,features) def toOHEPoint(point): return parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats) sc.setLogLevel("INFO") rawTrainData = rawTrainData.repartition(8) rawTrainData.cache() OHETrainData = rawTrainData.map(toOHEPoint) OHETrainData.cache() print OHETrainData.take(1) # Check that oneHotEncoding function was used in parseOHEPoint backupOneHot = oneHotEncoding oneHotEncoding = None withOneHot = False try: parseOHEPoint(rawTrainData.take(1)[0], ctrOHEDict, numCtrOHEFeats) except TypeError: withOneHot = True oneHotEncoding = backupOneHot # TEST Apply OHE to the dataset (3d) numNZ = sum(parsedTrainFeat.map(lambda x: len(x)).take(5)) numNZAlt = sum(OHETrainData.map(lambda lp: len(lp.features.indices)).take(5)) Test.assertEquals(numNZ, numNZAlt, 'incorrect implementation of parseOHEPoint') Test.assertTrue(withOneHot, 'oneHotEncoding not present in parseOHEPoint') x = sc.parallelize([("a", 1), ("b", 1), ("a", 1), ("a", 1),("b", 1), ("b", 1), ("b", 1), ("b", 1)], 3) y = x.reduceByKey(lambda accum, n: accum + n) y.collect() def bucketFeatByCount(featCount): Bucket the counts by powers of two. 
for i in range(11): size = 2 ** i if featCount <= size: return size return -1 featCounts = (OHETrainData .flatMap(lambda lp: lp.features.indices) .map(lambda x: (x, 1)) .reduceByKey(lambda x, y: x + y)) featCountsBuckets = (featCounts .map(lambda x: (bucketFeatByCount(x[1]), 1)) .filter(lambda (k, v): k != -1) .reduceByKey(lambda x, y: x + y) .collect()) print featCountsBuckets import matplotlib.pyplot as plt %matplotlib inline x, y = zip(*featCountsBuckets) x, y = np.log(x), np.log(y) def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0): Template for generating the plot layout. plt.close() fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white') ax.axes.tick_params(labelcolor='#999999', labelsize='10') for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]: axis.set_ticks_position('none') axis.set_ticks(ticks) axis.label.set_color('#999999') if hideLabels: axis.set_ticklabels([]) plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-') map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right']) return fig, ax # generate layout and plot data fig, ax = preparePlot(np.arange(0, 10, 1), np.arange(4, 14, 2)) ax.set_xlabel(r'$\log_e(bucketSize)$'), ax.set_ylabel(r'$\log_e(countInBucket)$') plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75) # display(fig) plt.show() pass # TODO: Replace <FILL IN> with appropriate code def oneHotEncoding(rawFeats, OHEDict, numOHEFeats): Produce a one-hot-encoding from a list of features and an OHE dictionary. Note: If a (featureID, value) tuple doesn't have a corresponding key in OHEDict it should be ignored. Args: rawFeats (list of (int, str)): The features corresponding to a single observation. Each feature consists of a tuple of featureID and the feature's value. (e.g. sampleOne) OHEDict (dict): A mapping of (featureID, value) to unique integer. numOHEFeats (int): The total number of unique OHE features (combinations of featureID and value). Returns: SparseVector: A SparseVector of length numOHEFeats with indices equal to the unique identifiers for the (featureID, value) combinations that occur in the observation and with values equal to 1.0. newFeats = [] idx = [] for f in rawFeats: if f in OHEDict: newFeats += [1.0] idx += [OHEDict[f]] return SparseVector(numOHEFeats, sorted(idx), np.array(newFeats)) OHEValidationData = rawValidationData.map(lambda point: parseOHEPoint(point, ctrOHEDict, numCtrOHEFeats)) OHEValidationData.cache() print OHEValidationData.take(1) # TEST Handling unseen features (3e) numNZVal = (OHEValidationData .map(lambda lp: len(lp.features.indices)) .sum()) Test.assertEquals(numNZVal, 372080, 'incorrect number of features') from pyspark.mllib.classification import LogisticRegressionWithSGD # fixed hyperparameters numIters = 50 stepSize = 10. 
regParam = 1e-6 regType = 'l2' includeIntercept = True # TODO: Replace <FILL IN> with appropriate code model0 = LogisticRegressionWithSGD.train(OHETrainData, iterations=numIters, step=stepSize, regParam=regParam, regType=regType, intercept=includeIntercept) sortedWeights = sorted(model0.weights) print sortedWeights[:5], model0.intercept # TEST Logistic regression (4a) Test.assertTrue(np.allclose(model0.intercept, 0.56455084025), 'incorrect value for model0.intercept') Test.assertTrue(np.allclose(sortedWeights[0:5], [-0.45899236853575609, -0.37973707648623956, -0.36996558266753304, -0.36934962879928263, -0.32697945415010637]), 'incorrect value for model0.weights') # TODO: Replace <FILL IN> with appropriate code from math import log def computeLogLoss(p, y): Calculates the value of log loss for a given probabilty and label. Note: log(0) is undefined, so when p is 0 we need to add a small value (epsilon) to it and when p is 1 we need to subtract a small value (epsilon) from it. Args: p (float): A probabilty between 0 and 1. y (int): A label. Takes on the values 0 and 1. Returns: float: The log loss value. epsilon = 10e-12 if p not in [0.0,1.0]: logeval = p elif p == 0: logeval = p+epsilon else: logeval = p-epsilon if y == 1: return (-log(logeval)) elif y == 0: return (-log(1-logeval)) print computeLogLoss(.5, 1) print computeLogLoss(.5, 0) print computeLogLoss(.99, 1) print computeLogLoss(.99, 0) print computeLogLoss(.01, 1) print computeLogLoss(.01, 0) print computeLogLoss(0, 1) print computeLogLoss(1, 1) print computeLogLoss(1, 0) # TEST Log loss (4b) Test.assertTrue(np.allclose([computeLogLoss(.5, 1), computeLogLoss(.01, 0), computeLogLoss(.01, 1)], [0.69314718056, 0.0100503358535, 4.60517018599]), 'computeLogLoss is not correct') Test.assertTrue(np.allclose([computeLogLoss(0, 1), computeLogLoss(1, 1), computeLogLoss(1, 0)], [25.3284360229, 1.00000008275e-11, 25.3284360229]), 'computeLogLoss needs to bound p away from 0 and 1 by epsilon') # TODO: Replace <FILL IN> with appropriate code # Note that our dataset has a very high click-through rate by design # In practice click-through rate can be one to two orders of magnitude lower classOneFracTrain = OHETrainData.map(lambda p: p.label).mean() print classOneFracTrain logLossTrBase = OHETrainData.map(lambda p: computeLogLoss(classOneFracTrain,p.label) ).mean() print 'Baseline Train Logloss = {0:.6f}\n'.format(logLossTrBase) # TEST Baseline log loss (4c) Test.assertTrue(np.allclose(classOneFracTrain, 0.22717773523), 'incorrect value for classOneFracTrain') Test.assertTrue(np.allclose(logLossTrBase, 0.535844), 'incorrect value for logLossTrBase') # TODO: Replace <FILL IN> with appropriate code from math import exp # exp(-t) = e^-t def getP(x, w, intercept): Calculate the probability for an observation given a set of weights and intercept. Note: We'll bound our raw prediction between 20 and -20 for numerical purposes. Args: x (SparseVector): A vector with values of 1.0 for features that exist in this observation and 0.0 otherwise. w (DenseVector): A vector of weights (betas) for the model. intercept (float): The model's intercept. Returns: float: A probability between 0 and 1. 
rawPrediction = w.dot(x) + intercept # Bound the raw prediction value rawPrediction = min(rawPrediction, 20) rawPrediction = max(rawPrediction, -20) return ( 1 / (1 + exp(-1*rawPrediction)) ) trainingPredictions = OHETrainData.map(lambda p: getP(p.features,model0.weights, model0.intercept)) print trainingPredictions.take(5) # TEST Predicted probability (4d) Test.assertTrue(np.allclose(trainingPredictions.sum(), 18135.4834348), 'incorrect value for trainingPredictions') a = OHETrainData.map(lambda p: (getP(p.features, model0.weights, model0.intercept), p.label)) print a.count() print a.take(5) b = a.map(lambda lp: computeLogLoss(lp[0],lp[1])) print b.count() print b.take(5) # TODO: Replace <FILL IN> with appropriate code def evaluateResults(model, data): Calculates the log loss for the data given the model. Args: model (LogisticRegressionModel): A trained logistic regression model. data (RDD of LabeledPoint): Labels and features for each observation. Returns: float: Log loss for the data. # Run a map to create an RDD of (prediction, label) tuples preds_labels = data.map(lambda p: (getP(p.features, model.weights, model.intercept), p.label)) return preds_labels.map(lambda lp: computeLogLoss(lp[0], lp[1])).mean() logLossTrLR0 = evaluateResults(model0, OHETrainData) print ('OHE Features Train Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.6f}' .format(logLossTrBase, logLossTrLR0)) # TEST Evaluate the model (4e) Test.assertTrue(np.allclose(logLossTrLR0, 0.456903), 'incorrect value for logLossTrLR0') # TODO: Replace <FILL IN> with appropriate code logLossValBase = OHEValidationData.map(lambda p: computeLogLoss(classOneFracTrain, p.label)).mean() logLossValLR0 = evaluateResults(model0, OHEValidationData) print ('OHE Features Validation Logloss:\n\tBaseline = {0:.3f}\n\tLogReg = {1:.6f}' .format(logLossValBase, logLossValLR0)) # TEST Validation log loss (4f) Test.assertTrue(np.allclose(logLossValBase, 0.527603), 'incorrect value for logLossValBase') Test.assertTrue(np.allclose(logLossValLR0, 0.456957), 'incorrect value for logLossValLR0') labelsAndScores = OHEValidationData.map(lambda lp: (lp.label, getP(lp.features, model0.weights, model0.intercept))) labelsAndWeights = labelsAndScores.collect() labelsAndWeights.sort(key=lambda (k, v): v, reverse=True) labelsByWeight = np.array([k for (k, v) in labelsAndWeights]) length = labelsByWeight.size truePositives = labelsByWeight.cumsum() numPositive = truePositives[-1] falsePositives = np.arange(1.0, length + 1, 1.) - truePositives truePositiveRate = truePositives / numPositive falsePositiveRate = falsePositives / (length - numPositive) # Generate layout and plot data fig, ax = preparePlot(np.arange(0., 1.1, 0.1), np.arange(0., 1.1, 0.1)) ax.set_xlim(-.05, 1.05), ax.set_ylim(-.05, 1.05) ax.set_ylabel('True Positive Rate (Sensitivity)') ax.set_xlabel('False Positive Rate (1 - Specificity)') plt.plot(falsePositiveRate, truePositiveRate, color='#8cbfd0', linestyle='-', linewidth=3.) plt.plot((0., 1.), (0., 1.), linestyle='--', color='#d6ebf2', linewidth=2.) # Baseline model # display(fig) plt.show() pass from collections import defaultdict import hashlib def hashFunction(numBuckets, rawFeats, printMapping=False): Calculate a feature dictionary for an observation's features based on hashing. Note: Use printMapping=True for debug purposes and to better understand how the hashing works. Args: numBuckets (int): Number of buckets to use as features. rawFeats (list of (int, str)): A list of features for an observation. Represented as (featureID, value) tuples. 
printMapping (bool, optional): If true, the mappings of featureString to index will be printed. Returns: dict of int to float: The keys will be integers which represent the buckets that the features have been hashed to. The value for a given key will contain the count of the (featureID, value) tuples that have hashed to that key. mapping = {} for ind, category in rawFeats: featureString = category + str(ind) mapping[featureString] = int(int(hashlib.md5(featureString).hexdigest(), 16) % numBuckets) if(printMapping): print mapping sparseFeatures = defaultdict(float) for bucket in mapping.values(): sparseFeatures[bucket] += 1.0 return dict(sparseFeatures) # Reminder of the sample values: # sampleOne = [(0, 'mouse'), (1, 'black')] # sampleTwo = [(0, 'cat'), (1, 'tabby'), (2, 'mouse')] # sampleThree = [(0, 'bear'), (1, 'black'), (2, 'salmon')] # TODO: Replace <FILL IN> with appropriate code # Use four buckets sampOneFourBuckets = hashFunction(4, sampleOne, True) sampTwoFourBuckets = hashFunction(4, sampleTwo, True) sampThreeFourBuckets = hashFunction(4, sampleThree, True) # Use one hundred buckets sampOneHundredBuckets = hashFunction(100, sampleOne, True) sampTwoHundredBuckets = hashFunction(100, sampleTwo, True) sampThreeHundredBuckets = hashFunction(100, sampleThree, True) print '\t\t 4 Buckets \t\t\t 100 Buckets' print 'SampleOne:\t {0}\t\t {1}'.format(sampOneFourBuckets, sampOneHundredBuckets) print 'SampleTwo:\t {0}\t\t {1}'.format(sampTwoFourBuckets, sampTwoHundredBuckets) print 'SampleThree:\t {0}\t {1}'.format(sampThreeFourBuckets, sampThreeHundredBuckets) # TEST Hash function (5a) Test.assertEquals(sampOneFourBuckets, {2: 1.0, 3: 1.0}, 'incorrect value for sampOneFourBuckets') Test.assertEquals(sampThreeHundredBuckets, {72: 1.0, 5: 1.0, 14: 1.0}, 'incorrect value for sampThreeHundredBuckets') feats = [(k,v) for k,v in enumerate(rawTrainData.take(1)[0][2:].split(','))] print feats hashDict = hashFunction(2 ** 15, feats) print hashDict print len(hashDict) print 2**15 # TODO: Replace <FILL IN> with appropriate code def parseHashPoint(point, numBuckets): Create a LabeledPoint for this observation using hashing. Args: point (str): A comma separated string where the first value is the label and the rest are features. numBuckets: The number of buckets to hash to. Returns: LabeledPoint: A LabeledPoint with a label (0.0 or 1.0) and a SparseVector of hashed features. 
label = float(point[:1]) rawFeats = [(k,v) for k,v in enumerate(point[2:].split(','))] hashDict = hashFunction(numBuckets, rawFeats) return LabeledPoint(label,SparseVector(len(hashDict), sorted(hashDict.keys()), hashDict.values())) numBucketsCTR = 2 ** 15 hashTrainData = rawTrainData.map(lambda r: parseHashPoint(r,numBucketsCTR)) hashTrainData.cache() hashValidationData = rawValidationData.map(lambda r: parseHashPoint(r,numBucketsCTR)) hashValidationData.cache() hashTestData = rawTestData.map(lambda r: parseHashPoint(r,numBucketsCTR)) hashTestData.cache() a = hashTrainData.take(1) print a # TEST Creating hashed features (5b) hashTrainDataFeatureSum = sum(hashTrainData .map(lambda lp: len(lp.features.indices)) .take(20)) print hashTrainDataFeatureSum hashTrainDataLabelSum = sum(hashTrainData .map(lambda lp: lp.label) .take(100)) print hashTrainDataLabelSum hashValidationDataFeatureSum = sum(hashValidationData .map(lambda lp: len(lp.features.indices)) .take(20)) hashValidationDataLabelSum = sum(hashValidationData .map(lambda lp: lp.label) .take(100)) hashTestDataFeatureSum = sum(hashTestData .map(lambda lp: len(lp.features.indices)) .take(20)) hashTestDataLabelSum = sum(hashTestData .map(lambda lp: lp.label) .take(100)) Test.assertEquals(hashTrainDataFeatureSum, 772, 'incorrect number of features in hashTrainData') Test.assertEquals(hashTrainDataLabelSum, 24.0, 'incorrect labels in hashTrainData') Test.assertEquals(hashValidationDataFeatureSum, 776, 'incorrect number of features in hashValidationData') Test.assertEquals(hashValidationDataLabelSum, 16.0, 'incorrect labels in hashValidationData') Test.assertEquals(hashTestDataFeatureSum, 774, 'incorrect number of features in hashTestData') Test.assertEquals(hashTestDataLabelSum, 23.0, 'incorrect labels in hashTestData') s = sum(hashTrainData.map(lambda lp: len(lp.features.indices) / float(numBucketsCTR) ).collect()) / nTrain # ratios.count() s # TODO: Replace <FILL IN> with appropriate code def computeSparsity(data, d, n): Calculates the average sparsity for the features in an RDD of LabeledPoints. Args: data (RDD of LabeledPoint): The LabeledPoints to use in the sparsity calculation. d (int): The total number of features. n (int): The number of observations in the RDD. Returns: float: The average of the ratio of features in a point to total features. return sum(hashTrainData.map(lambda lp: len(lp.features.indices) / float(d) ).collect()) / n averageSparsityHash = computeSparsity(hashTrainData, numBucketsCTR, nTrain) averageSparsityOHE = computeSparsity(OHETrainData, numCtrOHEFeats, nTrain) print 'Average OHE Sparsity: {0:.7e}'.format(averageSparsityOHE) print 'Average Hash Sparsity: {0:.7e}'.format(averageSparsityHash) # TEST Sparsity (5c) Test.assertTrue(np.allclose(averageSparsityOHE, 1.6717677e-04), 'incorrect value for averageSparsityOHE') Test.assertTrue(np.allclose(averageSparsityHash, 1.1805561e-03), 'incorrect value for averageSparsityHash') sc.stop() <END_TASK>
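A back-of-envelope check on the hashing trick used in Part 5: with 2**15 buckets and the 233,286 distinct OHE features found in (3c), collisions are unavoidable, and the standard occupancy estimate quantifies this. The sketch below (Python 2 style, matching the lab) is an illustrative aside, not part of the original exercises.

import math
import hashlib

num_buckets = 2 ** 15
n_features = 233286   # distinct (featureID, value) pairs reported in part (3c)

def bucket_of(feature_string, num_buckets):
    # same md5-mod scheme as hashFunction above, for one feature string
    return int(int(hashlib.md5(feature_string).hexdigest(), 16) % num_buckets)

# Expected number of distinct buckets hit by m features over d buckets:
# d * (1 - (1 - 1/d)**m) ~= d * (1 - exp(-m/d))
expected_hit = num_buckets * (1 - math.exp(-float(n_features) / num_buckets))
print expected_hit   # ~32742 of 32768, i.e. essentially every bucket is shared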
<SYSTEM_TASK:>
 Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example of use in v3, based on the do_state_tomo_analysis function in pycqedscripts.scripts.characterization.state_tomo
Step2: Load two experiment results, one for the v2 test and one for the v3 test
Step3: Analysis_v3 example
Step4: We can also manually set the maximum number of iterations and the minimum tolerance for the change between consecutive steps. In this example the limit will be set by the iterations before the tolerance is reached.
Step5: After testing, a good default value for the tolerance seemed to be -15 for 2 qubits and -18 for 3 qubits. Iterations are set to 1000 by default to give preference to the tolerance limit; a tolerance of -18 corresponds to on the order of 100 iterations on average.
Step6: Analysis_v2 example
Step7: Unlike v3, the method is specified with a keyword argument 'name_of_method'=True. The hierarchy of preference among the methods is 'pauli_values' > 'pauli_raw'* > 'imle' > 'mle'; this order is also specified in the code.
Step8: Bluefors1
Step9: Analysis on 2 qubits
Step10: Load the parameters needed for the v2 analysis. These can be found and extracted from the hdf measurement file if they are not known. I also define the target state.
Step11: Examples with the IMLE method on n of the 6 loaded examples
Step12: We can also set arbitrary tolerance and iteration limits like this (the effect and default values are explained in the XLD block).
Step13: Examples with pauli_values
Step14: Analysis on 3 qubits
Step15: Load the parameters needed for the v2 analysis. These can be found and extracted from the hdf measurement file if they are not known.
Step16: This reads the prepared states from the experiment files, which correspond to ground states of a quantum phase recognition experiment and are not as simple to write down as the 2-qubit Bell state.
Step17: IMLE and Pauli tests on n of the 9 states of the experiment.
<ASSISTANT_TASK:> Python Code: from pycqedscripts.init.xld.virtual_ATC75_M136_S17HW02_PQSC import * import warnings warnings.filterwarnings("ignore") # If running into problems with the initalization, it could be the AWG wave loading lines at the end of the init file. # Commenting out: # for AWG in AWGs: # pulsar.set(f'{AWG.name}_use_placeholder_waves', True) # pulsar.use_sequence_cache(True) # in lines 1096-1098 (on the last revision in 04/2021) should solve the issue import traceback import qutip as qtp import collections odict = collections.OrderedDict from pycqed.analysis_v3 import * def do_state_tomo_analysis(timestamp, meas_obj_names=None, rho_target=None, estimation_types=None, iterations=None, tolerance=None, save_processed_data=True, save_figures=True): meas_obj_names: list of qb names. If not provided, will be taken from task_list pp = pp_mod.ProcessingPipeline(add_param_method='replace') try: if estimation_types is None: estimation_types=('least_squares', 'max_likelihood') task_list = hlp_mod.get_param_from_metadata_group(timestamp, 'task_list') if rho_target is None: rho_target = qtp.Qobj(task_list[0]['rho_target']) if meas_obj_names is None: meas_obj_names = task_list[0]['qubits'] movnm = hlp_mod.get_param_from_metadata_group(timestamp, 'meas_obj_value_names_map') final_rots_basis = hlp_mod.get_param_from_metadata_group(timestamp, 'final_rots_basis') params_dict = {} params_dict.update({f'{qbn}.state_prob_mtx': f'Instrument settings.{qbn}.acq_state_prob_mtx' for qbn in meas_obj_names}) pp.add_node('extract_data_hdf', timestamps=timestamp, params_dict=params_dict) pp.add_node('do_postselection_f_level', keys_in='raw', num_keys_out=1, meas_obj_names=meas_obj_names) pp.add_node('calculate_flat_multiqubit_shots', keys_in='raw', joint_processing=True, do_preselection=True, meas_obj_names=meas_obj_names) pp.add_node('average_data', keys_in='previous calculate_flat_multiqubit_shots', averaging_axis=0, joint_processing=True, meas_obj_names=meas_obj_names) pp.add_node('correct_readout', keys_in='previous average_data', joint_processing=True, meas_obj_names=meas_obj_names) pp.add_node('extract_leakage_classified_shots', keys_in='previous correct_readout', joint_processing=True, meas_obj_names=meas_obj_names) pp.add_node('state_tomography_analysis', keys_in='previous do_postselection_f_level', meas_obj_names=meas_obj_names, keys_in_leakage=['previous extract_leakage_classified_shots'], joint_processing=True, basis_rots=final_rots_basis, do_preselection=True, rho_target=rho_target, iterations=iterations, tolerance=tolerance, estimation_types=estimation_types) pp.resolve(movnm) pp() pp.save(save_processed_data=save_processed_data, save_figures=save_figures) return pp except Exception: traceback.print_exc() return pp timestamp = '20210131_201258' a_tools.datadir = r'Q:\USERS\SergiM\MLE' MC.datadir(r'Q:\USERS\SergiM\MLE') for qb in qubits: gen.load_settings(qb, timestamp=timestamp) gen.load_settings(dev, timestamp=timestamp) ts_start = '20210131_201258' ts_end = '20210131_222513' timestamps_v3 = a_tools.get_timestamps_in_range(ts_start, ts_end) print(timestamps_v3) rho_phi = qtp.Qobj([[1/2,0,0,1/2],[0,0,0,0],[0,0,0,0],[1/2,0,0,1/2]]) do_state_tomo_analysis(timestamps_v3[1], rho_target=rho_phi, estimation_types=['iterative_mle']) do_state_tomo_analysis(timestamps_v3[1], rho_target=rho_phi, estimation_types=['iterative_mle'], iterations=100, tolerance=-18) do_state_tomo_analysis(timestamps_v3[1], rho_target=rho_phi, estimation_types=['pauli_values']) rots_basis=('I', 'X180', 'Y90', 'mY90', 
'X90', 'mX90') use_cal_points = True preselection = True qbs = (qb10, qb9) measure_qubits = [qb10,qb9] num_qubits = len(measure_qubits) meas_obj_names = ['qb10','qb9'] thresholds = {qb.name: qb.acq_classifier_params()['thresholds'][0] for qb in measure_qubits} for qb in measure_qubits: qb.update_detector_functions() channel_map = {qb.name: qb.int_log_det.value_names[0] + f' {qb.instr_uhf()}' for qb in measure_qubits} n_segments = len(rots_basis)**num_qubits + (2**num_qubits if use_cal_points else 0) ts = timestamps_v3[0] start_time = time.time() MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict( n_readouts=(2 if preselection else 1)*n_segments, thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]), channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]), cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i]) for qb in measure_qubits]) for i in np.arange(-2**num_qubits, 0)], data_type='singleshot', rho_target=rho_phi, basis_rots_str=rots_basis, covar_matrix=np.diag(np.ones(2**num_qubits)), imle=True, use_preselection=preselection, data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection \ else (lambda data: data[:len(rots_basis)**num_qubits]) )) print(time.time()-start_time) ts = timestamps_v3[0] start_time = time.time() MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict( n_readouts=(2 if preselection else 1)*n_segments, thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]), channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]), cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i]) for qb in measure_qubits]) for i in np.arange(-2**num_qubits, 0)], data_type='singleshot', rho_target=rho_phi, basis_rots_str=rots_basis, covar_matrix=np.diag(np.ones(2**num_qubits)), pauli_values=True, use_preselection=preselection, data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection \ else (lambda data: data[:len(rots_basis)**num_qubits]) )) print(time.time()-start_time) from pycqedscripts.init.bluefors1.ATC81_M138_S705_test import * init_dict = initialize_setup(virtual_setup=True) globals().update(**init_dict) clear_output() import csv import warnings warnings.filterwarnings("ignore") startup = True a_tools.datadir = r'Q:\USERS\SergiM\MLE' MC.datadir(r'Q:\USERS\SergiM\MLE') ts_start = '20210210_140836' ts_end = '20210210_142102' timestamps = a_tools.get_timestamps_in_range(ts_start, ts_end) for qb in qubits: gen.load_settings(qb, timestamp=timestamps[0]) if startup: dev.add_2qb_gate('CZ_nztc', 'NZTransitionControlledPulse') startup = False gen.load_settings(dev, timestamp=timestamps[0]) print(timestamps) rots_basis=('I', 'X180', 'Y90', 'mY90', 'X90', 'mX90') use_cal_points = True preselection = False qbs_list = [(qb3, qb1),(qb3, qb6),(qb4, qb1),(qb4, qb7),(qb5, qb2),(qb5, qb7)] measure_qubits_list = [[qb3, qb1],[qb3, qb6],[qb4, qb1],[qb4, qb7],[qb5, qb2],[qb5, qb7]] num_qubits = 2 thresholds = {qb.name: qb.acq_classifier_params()['thresholds'][0] for qb in qubits} for qb in qubits: qb.update_detector_functions() channel_map = {qb.name: qb.int_log_det.value_names[0] + f' {qb.instr_uhf()}' for qb in qubits} n_segments = len(rots_basis)**2 + (2**2 if use_cal_points else 0) rho_phi = qtp.Qobj([[1/2,0,0,1/2],[0,0,0,0],[0,0,0,0],[1/2,0,0,1/2]]) n = 1 for i, ts in enumerate(timestamps[:n]): print(i) measure_qubits = measure_qubits_list[i] qbs = qbs_list[i] rho_target = rho_phi start_time = time.time() num_qubits = len(measure_qubits) MA = 
tda.StateTomographyAnalysis(t_start=ts, options_dict=dict( n_readouts=(2 if preselection else 1)*n_segments, thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]), channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]), cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i]) for qb in measure_qubits]) for i in np.arange(-2**num_qubits, 0)], data_type='singleshot', rho_target=rho_target, basis_rots_str=rots_basis, imle = True, covar_matrix=np.diag(np.ones(2**num_qubits)), use_preselection=preselection, data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection \ else (lambda data: data[:len(rots_basis)**num_qubits]) )) for i, ts in enumerate(timestamps[:n]): print(i) measure_qubits = measure_qubits_list[i] qbs = qbs_list[i] rho_target = rho_phi start_time = time.time() num_qubits = len(measure_qubits) MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict( n_readouts=(2 if preselection else 1)*n_segments, thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]), channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]), cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i]) for qb in measure_qubits]) for i in np.arange(-2**num_qubits, 0)], data_type='singleshot', rho_target=rho_target, basis_rots_str=rots_basis, imle = True, iterations = 100, tolerance = -18, covar_matrix=np.diag(np.ones(2**num_qubits)), use_preselection=preselection, data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection \ else (lambda data: data[:len(rots_basis)**num_qubits]) )) for i, ts in enumerate(timestamps[:n]): print(i) measure_qubits = measure_qubits_list[i] qbs = qbs_list[i] rho_target = rho_phi start_time = time.time() num_qubits = len(measure_qubits) MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict( n_readouts=(2 if preselection else 1)*n_segments, thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]), channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]), cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i]) for qb in measure_qubits]) for i in np.arange(-2**num_qubits, 0)], data_type='singleshot', rho_target=rho_target, basis_rots_str=rots_basis, pauli_values = True, covar_matrix=np.diag(np.ones(2**num_qubits)), use_preselection=preselection, data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection \ else (lambda data: data[:len(rots_basis)**num_qubits]) )) timestamp = '20201208_140754' a_tools.datadir = r'Q:\USERS\SergiM\MLE' MC.datadir(r'Q:\USERS\SergiM\MLE') for qb in qubits: gen.load_settings(qb, timestamp=timestamp) cz_type = 'CZ_nztc' if startup: dev.add_2qb_gate('CZ_nztc', 'NZTransitionControlledPulse') startup = False gen.load_settings(dev, timestamp=timestamp) ts_start = '20201208_140754' ts_end = '20201208_144607' timestamps = a_tools.get_timestamps_in_range(ts_start, ts_end) rots_basis=('I', 'X180', 'Y90', 'mY90', 'X90', 'mX90') use_cal_points = True preselection = True qbs = (qb1, qb3, qb6) measure_qubits = [qb1, qb3, qb6] num_qubits = len(measure_qubits) meas_obj_names = ['qb1','qb3','qb6'] thresholds = {qb.name: qb.acq_classifier_params()['thresholds'][0] for qb in measure_qubits} for qb in measure_qubits: qb.update_detector_functions() channel_map = {qb.name: qb.int_log_det.value_names[0] + f' {qb.instr_uhf()}' for qb in measure_qubits} n_segments = len(rots_basis)**3 + (2**3 if use_cal_points else 0) def state_preparation(num_qubits,param_set): g_st = qtp.basis(2,0) if 
timestamp = '20201208_140754'
a_tools.datadir = r'Q:\USERS\SergiM\MLE'
MC.datadir(r'Q:\USERS\SergiM\MLE')
for qb in qubits:
    gen.load_settings(qb, timestamp=timestamp)
cz_type = 'CZ_nztc'
if startup:
    dev.add_2qb_gate('CZ_nztc', 'NZTransitionControlledPulse')
    startup = False
gen.load_settings(dev, timestamp=timestamp)

ts_start = '20201208_140754'
ts_end = '20201208_144607'
timestamps = a_tools.get_timestamps_in_range(ts_start, ts_end)

rots_basis = ('I', 'X180', 'Y90', 'mY90', 'X90', 'mX90')
use_cal_points = True
preselection = True
qbs = (qb1, qb3, qb6)
measure_qubits = [qb1, qb3, qb6]
num_qubits = len(measure_qubits)
meas_obj_names = ['qb1', 'qb3', 'qb6']
thresholds = {qb.name: qb.acq_classifier_params()['thresholds'][0] for qb in measure_qubits}
for qb in measure_qubits:
    qb.update_detector_functions()
channel_map = {qb.name: qb.int_log_det.value_names[0] + f' {qb.instr_uhf()}'
               for qb in measure_qubits}
n_segments = len(rots_basis)**3 + (2**3 if use_cal_points else 0)

def state_preparation(num_qubits, param_set):
    # Builds the target density matrix for the Ry/CZ preparation circuit.
    g_st = qtp.basis(2, 0)
    if num_qubits == 3:
        initial = qtp.tensor(g_st, g_st, g_st)
        rot1 = qtp.tensor(qtp.qip.operations.ry(param_set[0]),
                          qtp.qip.operations.ry(param_set[1]),
                          qtp.qeye(2))
        cz1 = qtp.tensor(qtp.qip.operations.csign(), qtp.qeye(2))
        rot2 = qtp.tensor(qtp.qip.operations.ry(param_set[2]),
                          qtp.qip.operations.ry(param_set[3]),
                          qtp.qip.operations.ry(param_set[4]))
        cz2 = qtp.tensor(qtp.qeye(2), qtp.qip.operations.csign())
        rot3 = qtp.tensor(qtp.qeye(2),
                          qtp.qip.operations.ry(param_set[5]),
                          qtp.qip.operations.ry(param_set[6]))
    elif num_qubits == 7:
        initial = qtp.tensor(g_st, g_st, g_st, g_st, g_st, g_st, g_st)
        rot1 = qtp.tensor(qtp.qip.operations.ry(param_set[0]),
                          qtp.qip.operations.ry(param_set[1]),
                          qtp.qip.operations.ry(param_set[2]),
                          qtp.qip.operations.ry(param_set[3]),
                          qtp.qip.operations.ry(param_set[4]),
                          qtp.qip.operations.ry(param_set[5]),
                          qtp.qeye(2))
        cz1 = qtp.tensor(qtp.qip.operations.csign(),
                         qtp.qip.operations.csign(),
                         qtp.qip.operations.csign(),
                         qtp.qeye(2))
        rot2 = qtp.tensor(qtp.qip.operations.ry(param_set[6]),
                          qtp.qip.operations.ry(param_set[7]),
                          qtp.qip.operations.ry(param_set[8]),
                          qtp.qip.operations.ry(param_set[9]),
                          qtp.qip.operations.ry(param_set[10]),
                          qtp.qip.operations.ry(param_set[11]),
                          qtp.qip.operations.ry(param_set[12]))
        cz2 = qtp.tensor(qtp.qeye(2),
                         qtp.qip.operations.csign(),
                         qtp.qip.operations.csign(),
                         qtp.qip.operations.csign())
        rot3 = qtp.tensor(qtp.qeye(2),
                          qtp.qip.operations.ry(param_set[13]),
                          qtp.qip.operations.ry(param_set[14]),
                          qtp.qip.operations.ry(param_set[15]),
                          qtp.qip.operations.ry(param_set[16]),
                          qtp.qip.operations.ry(param_set[17]),
                          qtp.qip.operations.ry(param_set[18]))
    psi_target = rot3 * cz2 * rot2 * cz1 * rot1 * initial
    rho_target = (psi_target * psi_target.dag()).full()
    return rho_target

# Read the generated states from the csv files
param_set = []
rho_target_list = []
try:
    for ts in timestamps:
        read_data = []
        with open(a_tools.data_from_time(ts) + "\\OptParams.csv") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                read_data.append(row)
        param = [float(var) for var in read_data[0]]
        param_set.append(param)
        exp_target = qtp.Qobj(state_preparation(num_qubits, param),
                              dims=[[2, 2, 2], [2, 2, 2]])
        rho_target_list.append(exp_target)
except FileNotFoundError:
    print('file not found. Using default rho comparison')
    J = 1.0
    h2_list = np.linspace(-1.6, 1.6, 9)
    h1_list = [0.1]
    fid_list = []
    rho_target_list = []
    exp_list = []
    i = 0
    for h1 in h1_list:
        for h2 in h2_list:
            # Target ground state (H_qpr_s is defined earlier in the notebook)
            psi_target = H_qpr_s(J, h1, h2).groundstate()[1]
            rho_target = psi_target * psi_target.dag()
            rho_target_list.append(rho_target)

n = 1
for j, ts in enumerate(timestamps[:n]):
    print(j)
    start_time = time.time()
    rho_target = rho_target_list[j]
    MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict(
        n_readouts=(2 if preselection else 1)*n_segments,
        thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]),
        channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]),
        cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i])
                           for qb in measure_qubits])
                    for i in np.arange(-2**num_qubits, 0)],
        data_type='singleshot',
        rho_target=rho_target,
        basis_rots_str=rots_basis,
        covar_matrix=np.diag(np.ones(2**num_qubits)),
        imle=True,
        use_preselection=preselection,
        data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection
                    else (lambda data: data[:len(rots_basis)**num_qubits])))
    print(time.time() - start_time)

for j, ts in enumerate(timestamps[:1]):
    print(j)
    start_time = time.time()
    num_qubits = 3
    rho_target = rho_target_list[j]
    MA = tda.StateTomographyAnalysis(t_start=ts, options_dict=dict(
        n_readouts=(2 if preselection else 1)*n_segments,
        thresholds=odict([(qb.name, thresholds[qb.name]) for qb in qbs]),
        channel_map=odict([(qb.name, channel_map[qb.name]) for qb in qbs]),
        cal_points=[odict([(channel_map[qb.name], [2*i+1 if preselection else i])
                           for qb in measure_qubits])
                    for i in np.arange(-2**num_qubits, 0)],
        data_type='singleshot',
        rho_target=rho_target,
        basis_rots_str=rots_basis,
        covar_matrix=np.diag(np.ones(2**num_qubits)),
        pauli_values=True,
        use_preselection=preselection,
        data_filter=(lambda data: data[1:2*len(rots_basis)**num_qubits+1:2]) if preselection
                    else (lambda data: data[:len(rots_basis)**num_qubits])))
    print(time.time() - start_time)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lesson 2
Step2: Lesson 3
Step3: Lesson 4
Step4: Lesson 5
Step7: Lesson 6
Step8: Lesson 6
<ASSISTANT_TASK:>
Python Code:
import nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
import string
punctuations = list(string.punctuation)

# read the two text files from your hard drive, assign first mystery text to
# variable 'text1' and second mystery text to variable 'text2'
text1 = open('../01-Intro-to-NLP/text1.txt').read()
text2 = open('../01-Intro-to-NLP/text2.txt').read()

### word frequencies

# tokenize texts
text1_tokens = word_tokenize(text1)
text2_tokens = word_tokenize(text2)

# pre-process for word frequency
# lowercase
text1_tokens_lc = [word.lower() for word in text1_tokens]
text2_tokens_lc = [word.lower() for word in text2_tokens]

# remove stopwords
text1_tokens_clean = [word for word in text1_tokens_lc if word not in stopwords.words('english')]
text2_tokens_clean = [word for word in text2_tokens_lc if word not in stopwords.words('english')]

# remove punctuation using the list of punctuation from the string package
text1_tokens_clean = [word for word in text1_tokens_clean if word not in punctuations]
text2_tokens_clean = [word for word in text2_tokens_clean if word not in punctuations]

# frequency distribution
text1_word_frequency = nltk.FreqDist(text1_tokens_clean)
text2_word_frequency = nltk.FreqDist(text2_tokens_clean)

print("Frequent Words for Text1")
print("________________________")
for word in text1_word_frequency.most_common(20):
    print(word[0])
print()
print("Frequent Words for Text2")
print("________________________")
for word in text2_word_frequency.most_common(20):
    print(word[0])

### Can you guess the novel from most frequent words?

# Nothing to see here, folks

import pandas
dialogue_df = pandas.read_csv('../03-Operationalizing/antigone_dialogue.csv', index_col=0)
dialogue_tokens = [character.split() for character in dialogue_df['DIALOGUE']]
dialogue_len = [len(tokens) for tokens in dialogue_tokens]
dialogue_df['WORDS_SPOKEN'] = dialogue_len
dialogue_df = dialogue_df.sort_values('WORDS_SPOKEN', ascending=False)

# Let's visualize!
# Tells Jupyter to produce images in notebook
%pylab inline
# Makes images look good
style.use('ggplot')
dialogue_df['WORDS_SPOKEN'].plot(kind='bar')

### Who is the main protagonist? Maybe not Antigone?

from sklearn.feature_extraction.text import TfidfVectorizer
df = pandas.read_csv("../04-Discriminating-Words/BDHSI2016_music_reviews.csv", sep='\t')
tfidfvec = TfidfVectorizer()
# create the dtm, but with cells weighted by the tf-idf score
dtm_tfidf_df = pandas.DataFrame(tfidfvec.fit_transform(df.body).toarray(),
                                columns=tfidfvec.get_feature_names(), index=df.index)
df_genre = df['genre'].to_frame()
merged_df = df_genre.join(dtm_tfidf_df, how='right', lsuffix='_x')

# pull out the reviews for three genres: Rap, Alternative/Indie Rock, and Jazz
dtm_rap = merged_df[merged_df['genre_x'] == "Rap"]
dtm_indie = merged_df[merged_df['genre_x'] == "Alternative/Indie Rock"]
dtm_jazz = merged_df[merged_df['genre_x'] == "Jazz"]

# print the words with the highest tf-idf scores for each genre
print("Rap Words")
print(dtm_rap.max(numeric_only=True).sort_values(ascending=False)[0:20])
print()
print("Indie Words")
print(dtm_indie.max(numeric_only=True).sort_values(ascending=False)[0:20])
print()
print("Jazz Words")
print(dtm_jazz.max(numeric_only=True).sort_values(ascending=False)[0:20])

### What words are distinct to reviews of Rap albums, Indie albums, and Jazz albums?
## Notice the word weights for the Rap albums compared to others.
## Are these reviews more different than other reviews?
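# A toy sketch of what the tf-idf weighting above does, on made-up documents
# (the three strings below are invented for illustration only):
toy_docs = ["the beat drops hard", "the sax solo swings", "the beat and the sax"]
toy_vec = TfidfVectorizer()
toy_dtm = toy_vec.fit_transform(toy_docs)
# Words shared by every document (like "the") get down-weighted relative to
# words concentrated in a single document (like "drops" or "swings").
print(pandas.DataFrame(toy_dtm.toarray(), columns=toy_vec.get_feature_names()))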
pos_sent = open("../05-Dictionary-Method/positive_words.txt").read()
neg_sent = open("../05-Dictionary-Method/negative_words.txt").read()
positive_words = pos_sent.split('\n')
negative_words = neg_sent.split('\n')

text1_pos = [word for word in text1_tokens_clean if word in positive_words]
text2_pos = [word for word in text2_tokens_clean if word in positive_words]
text1_neg = [word for word in text1_tokens_clean if word in negative_words]
text2_neg = [word for word in text2_tokens_clean if word in negative_words]

print("Positive words in Melville")
print(len(text1_pos)/len(text1_tokens))
print()
print("Negative words in Melville")
print(len(text1_neg)/len(text1_tokens))
print()
print("Positive words in Austen")
print(len(text2_pos)/len(text2_tokens))
print()
print("Negative words in Austen")
print(len(text2_neg)/len(text2_tokens))

## Who is more positive, Melville or Austen?
## Melville has a similar percentage of positive and negative words
## (a whale is a whale, neither good nor bad)
## Austen is decidedly more positive than negative (it's the gentleman thing to do)

from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
import os

review_path = '../06-Literary Distinction (Probably)/poems/reviewed/'
random_path = '../06-Literary Distinction (Probably)/poems/random/'
review_files = os.listdir(review_path)
random_files = os.listdir(random_path)
review_texts = [open(review_path + file_name).read() for file_name in review_files]
random_texts = [open(random_path + file_name).read() for file_name in random_files]
all_texts = review_texts + random_texts
all_file_names = review_files + random_files
all_labels = ['reviewed'] * len(review_texts) + ['random'] * len(random_texts)

cv = CountVectorizer(stop_words='english', min_df=180, binary=True, max_features=None)
dtm = cv.fit_transform(all_texts).toarray()

nb = MultinomialNB()
nb.fit(dtm, all_labels)

dickinson_canonic = """Because I could not stop for Death –
He kindly stopped for me –
The Carriage held but just Ourselves –
And Immortality.

We slowly drove – He knew no haste
And I had put away
My labor and my leisure too,
For His Civility –

We passed the School, where Children strove
At Recess – in the Ring –
We passed the Fields of Gazing Grain –
We passed the Setting Sun –

Or rather – He passed us –
The Dews drew quivering and chill –
For only Gossamer, my Gown –
My Tippet – only Tulle –

We paused before a House that seemed
A Swelling of the Ground –
The Roof was scarcely visible –
The Cornice – in the Ground –

Since then – ‘tis Centuries – and yet
Feels shorter than the Day
I first surmised the Horses’ Heads
Were toward Eternity –
"""

anthem_patriotic = """O! say can you see, by the dawn's early light,
What so proudly we hailed at the twilight's last gleaming,
Whose broad stripes and bright stars through the perilous fight,
O'er the ramparts we watched, were so gallantly streaming?
And the rockets' red glare, the bombs bursting in air,
Gave proof through the night that our flag was still there;
O! say does that star-spangled banner yet wave
O'er the land of the free and the home of the brave?
"""

unknown_dtm = cv.transform([dickinson_canonic, anthem_patriotic]).toarray()
nb.predict(unknown_dtm)

## Can a computer predict whether a poem would be considered 'prestigious'?
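# A sketch of peeking inside the fitted classifier: which vocabulary items
# most strongly separate 'reviewed' from 'random' poems. Assumes the `nb` and
# `cv` objects trained above are still in scope; nb.classes_ orders classes
# alphabetically, so index 0 is 'random' and index 1 is 'reviewed'.
import numpy as np
log_odds = nb.feature_log_prob_[1] - nb.feature_log_prob_[0]
vocab = np.array(cv.get_feature_names())
print("Most 'reviewed'-leaning words:", vocab[np.argsort(log_odds)[-10:]])
print("Most 'random'-leaning words:", vocab[np.argsort(log_odds)[:10]])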
import gensim
import pandas
from nltk.corpus import stopwords, words

metadata_df = pandas.read_csv('../07-Topic Modeling/txtlab_Novel150_English.csv')
fiction_path = '../07-Topic Modeling/txtalb_Novel150_English/'
novel_list = [open(fiction_path + file_name).read() for file_name in metadata_df['filename']]
novel_tokens_list = [novel.lower().split() for novel in novel_list]

dictionary = gensim.corpora.dictionary.Dictionary(novel_tokens_list)
proper_names = [word.lower() for word in words.words() if word.istitle()]
noise_tokens = [word for word in dictionary.values()
                if not word.isalpha() or len(word) <= 2]
bad_words = stopwords.words('english') + proper_names + noise_tokens
stop_ids = [_id for _id, count in dictionary.doc2bow(bad_words)]
dictionary.filter_tokens(bad_ids=stop_ids)
dictionary.filter_extremes(no_below=40)

corpus = [dictionary.doc2bow(text) for text in novel_tokens_list]
lda_model = gensim.models.LdaModel(corpus, num_topics=25, alpha='auto',
                                   id2word=dictionary, iterations=2500, passes=4)

list_of_doctopics = [lda_model.get_document_topics(text, minimum_probability=0)
                     for text in corpus]
list_of_probabilities = [[probability for label, probability in distribution]
                         for distribution in list_of_doctopics]
proba_distro_df = pandas.DataFrame(list_of_probabilities)
metadata_df = pandas.concat([metadata_df, pandas.DataFrame(list_of_probabilities)], axis=1)

annual_means_df = metadata_df.groupby('date').mean()
annual_means_df[8].plot(kind='bar', figsize=(8, 8))

lda_model.show_topic(8)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note the structure of the list
Step2: Let's get to it
<ASSISTANT_TASK:>
Python Code:
!gunzip ../data/2017-superbowl-tweets.tsv.gz
!ls ../data

tweets = []
RUTA = '../data/2017-superbowl-tweets.tsv'
for line in open(RUTA).readlines():
    tweets.append(line.split('\t'))

ultimo_tweet = tweets[-1]
print('id =>', ultimo_tweet[0])
print('fecha =>', ultimo_tweet[1])
print('autor =>', ultimo_tweet[2])
print('texto =>', ultimo_tweet[3])

from textblob import TextBlob

for tweet in tweets:
    try:
        t = TextBlob(tweet[3])  # in Python2: t = TextBlob(tweet[3].decode('utf-8'))
        if t.sentiment.polarity < -0.5:
            print(tweet[3], '-->', t.sentiment)
    except IndexError:
        pass

for tweet in tweets:
    try:
        t = TextBlob(tweet[3])  # in Python2: t = TextBlob(tweet[3].decode('utf-8'))
        print(" ".join(t.noun_phrases))
    except IndexError:
        pass

for tweet in tweets[:20]:
    try:
        t = TextBlob(tweet[3])  # in Python2: t = TextBlob(tweet[3].decode('utf-8'))
        print(t.translate(to='es'))
    except IndexError:
        pass
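# A quick sketch of what TextBlob's sentiment tuple looks like, on a made-up
# sentence (invented here for illustration; not from the tweet file):
demo = TextBlob("What a terrible, boring game.")
print(demo.sentiment)  # Sentiment(polarity=..., subjectivity=...), polarity in [-1, 1]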
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sunspots Data
Step2: Does our model obey the theory?
Step3: This indicates a lack of fit.
Step4: Exercise
Step5: Let's make sure this model is estimable.
Step6: What does this mean?
Step7: For mixed ARMA processes the autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags.
Step8: Exercise
Step9: Hint
Step10: The p-value of the unit-root test resoundingly rejects the null of a unit root.
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline

from __future__ import print_function
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot

print(sm.datasets.sunspots.NOTE)

dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]

dta.plot(figsize=(12, 8));

fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)

arma_mod20 = sm.tsa.ARMA(dta, (2, 0)).fit(disp=False)
print(arma_mod20.params)

arma_mod30 = sm.tsa.ARMA(dta, (3, 0)).fit(disp=False)
print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
print(arma_mod30.params)
print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)

sm.stats.durbin_watson(arma_mod30.resid.values)

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax = arma_mod30.resid.plot(ax=ax);

resid = arma_mod30.resid
stats.normaltest(resid)

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
fig = qqplot(resid, line='q', ax=ax, fit=True)

fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)

r, q, p = sm.tsa.acf(resid.values.squeeze(), qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))

predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)
print(predict_sunspots)

fig, ax = plt.subplots(figsize=(12, 8))
ax = dta.loc['1950':].plot(ax=ax)
fig = arma_mod30.plot_predict('1990', '2012', dynamic=True, ax=ax, plot_insample=False)

def mean_forecast_err(y, yhat):
    return y.sub(yhat).mean()

mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)

from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess

np.random.seed(1234)
# include zero-th lag
arparams = np.array([1, .75, -.65, -.55, .9])
maparams = np.array([1, .65])

arma_t = ArmaProcess(arparams, maparams)
arma_t.isinvertible
arma_t.isstationary

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.plot(arma_t.generate_sample(nsample=50));

arparams = np.array([1, .35, -.15, .55, .1])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
arma_t.isstationary

arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)

fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(211)
fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)
ax2 = fig.add_subplot(212)
fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)

arma11 = sm.tsa.ARMA(arma_rvs, (1, 1)).fit(disp=False)
resid = arma11.resid
r, q, p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))

arma41 = sm.tsa.ARMA(arma_rvs, (4, 1)).fit(disp=False)
resid = arma41.resid
r, q, p = sm.tsa.acf(resid, qstat=True)
data = np.c_[range(1, 41), r[1:], q, p]
table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
print(table.set_index('lag'))

macrodta = sm.datasets.macrodata.load_pandas().data
macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
cpi = macrodta["cpi"]

fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax = cpi.plot(ax=ax);
ax.legend();
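# A sketch of what the full ADF output contains before we print just the
# p-value below: with the default autolag setting, statsmodels' adfuller
# returns the test statistic, the p-value, the number of lags used, the
# number of observations, the critical values, and the best information
# criterion.
adf_stat, adf_pvalue, usedlag, nobs, crit_values, icbest = sm.tsa.adfuller(cpi)
print(adf_stat, crit_values)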
print(sm.tsa.adfuller(cpi)[1])
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Create a null vector Z of size 10. Don't use [0, 0, ...] notation.
Step2: 3. Create a null vector of size 10 but the fifth value which is 1
Step3: 4. Create a Numpy vector with values ranging from 10 to 49
Step4: 5. Reverse the vector from the previous task (first element becomes last)
Step5: 6. Create a 3x3 matrix with values ranging from 0 to 8
Step6: 7. Find the indices of non-zero elements from [1,2,0,0,4,0] and store the result in nz
Step7: 8. Create a 3x3x3 (i.e. three dimensions with three values each) array with random values in variable Z
Step8: 13. Create a 10x10 array with random values and find the minimum and maximum values and store them in Zmin and Zmax.
Step9: 14. Create a random vector of size 30 and find the mean value using Numpy. Store the result into mean
Step10: 15. Create a 8x8 matrix and fill it with a chessboard pattern (say, 1 == 'black' and 0 == 'white'). Use fancy indexing.
Step11: 16. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product)
Step12: 17. Given a array np.arange(11), negate all elements which are between 3 and 8, in place.
<ASSISTANT_TASK:>
Python Code:
import numpy as np
try:
    np
except NameError:
    print('Numpy not correctly imported')

Z = np.zeros(10)
print(Z)
assert type(Z).__module__ == np.__name__
assert len(Z) == 10
assert sum(Z) == 0

Z = np.zeros(10)
Z[4] = 1
print(Z)
assert type(Z).__module__ == np.__name__
assert len(Z) == 10
assert sum(Z) == 1

Z = np.arange(10, 50)
print(Z)
assert type(Z).__module__ == np.__name__
assert len(Z) == 40
assert sum(Z) == 1180

Z = Z[::-1]
print(Z)
assert type(Z).__module__ == np.__name__
assert len(Z) == 40
assert sum(Z) == 1180
assert Z[0] == 49
assert Z[-1] == 10

Z = np.arange(9).reshape(3, 3)
print(Z)
assert Z.shape == (3, 3)
assert np.all(sum(Z) == np.array([9, 12, 15]))

nz = np.array([1, 2, 0, 0, 4, 0])
nz = (nz[nz != 0])
assert np.all(nz == np.array([1, 2, 4]))

Z = np.random.random((3, 3, 3))
print(Z)
assert Z.shape == (3, 3, 3)

Z = np.random.random((10, 10))
Zmin, Zmax = Z.min(), Z.max()
print(Zmin, Zmax)
z = Z.ravel()
idx = z.argsort()
assert z[idx[0]] == Zmin
assert z[idx[-1]] == Zmax

Z = np.random.random(30)
mean = Z.mean()
print(mean)
accumulative = 0
for z in Z:
    accumulative += z
assert mean - (accumulative / len(Z)) < 0.0001

Z = np.zeros((8, 8), dtype=int)
Z[1::2, ::2] = 1
Z[::2, 1::2] = 1
print(Z)

Z = np.dot(np.ones((5, 3)), np.ones((3, 2)))
print(Z)

Z = np.arange(11)
Z[(3 < Z) & (Z <= 8)] *= -1
print(Z)
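# An alternative sketch for the chessboard exercise above: np.tile repeats a
# 2x2 block instead of using fancy indexing, and produces the same pattern.
board = np.tile(np.array([[0, 1], [1, 0]]), (4, 4))
print(board)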
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Is the distribution of body temperatures normal?
Step3: It is difficult to conclude whether this data is normally distributed from this histogram alone. A better visual would be made by using the empirical CDF and CDF of the temperature data.
Step4: The ECDF and CDF on the graph above seem to align, implying that the temperature data is likely normally distributed. We can perform a normal test to double check.
Step5: 2) Is the sample size large? Are the observations independent?
Step6: We will now perform a bootstrap hypothesis test with the following
Step7: The p value is extremely small after 100,000 replicates. This implies that the true mean is different from 98.6 degrees F
Step8: The p value is extremely small, which confirms that the true mean is likely different from 98.6. We will compare the results with the t statistic. The $t$ and $z$ values should be approximately the same.
Step9: The p value from the $t$ test is different, but it still implies that the null hypothesis is false.
Step10: The p values for the t and z tests are significantly different. This shows that if you apply the wrong test to a problem you can end up with an incorrect result. It is important to know when it is appropriate to apply the $z$ statistic and the $t$ statistic. When the sample size is less than 30, the $t$ statistic should be used.
Step11: The average temperature of all humans is estimated with 95% confidence to be 98.25 +/- 0.126, or between 98.124 and 98.376 degrees Fahrenheit. If we define an "abnormal" temperature to be outside of the range of the mean, this would include all temperatures greater than 98.376 and less than 98.124.
Step12: If we define an "abnormal" temperature to be outside the 95% confidence interval, this would include temperatures greater than 99.478 and less than 96.723.
Step13: We can see that the male and female ECDF graphs overlap, which tells us that there is a small difference between the two data sets to begin with (0.289). We can now continue with hypothesis testing to see if this difference is due to the differences in gender or by chance.
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np

df = pd.read_csv('data/human_body_temperature.csv')
df.info()
df.head()

# Plots the histogram of temperatures
import matplotlib.pyplot as plt
import seaborn as sns

temperature = df['temperature']
sns.set()
plt.hist(temperature, bins='auto', normed=True)
plt.xlabel('Temperature(F)')
plt.ylabel('Count')
plt.title('Human Body Temperature')
plt.show()

# Plots the ECDF and CDF of the human body temperatures
def ecdf(data):
    """Compute ECDF for a one-dimensional array of measurements.

    Returns tuple of arrays (x, y) that contain x and y values for ECDF.
    """
    x = np.sort(data)
    y = np.arange(1, len(x) + 1) / len(x)
    return x, y

x_ecdf, y_ecdf = ecdf(temperature)
temperature_theoretical = np.random.normal(np.mean(temperature), np.std(temperature),
                                           size=10000)
x_theoretical_cdf, y_theoretical_cdf = ecdf(temperature_theoretical)

plt.plot(x_ecdf, y_ecdf, marker='.', linestyle='none')
plt.plot(x_theoretical_cdf, y_theoretical_cdf)
plt.xlabel('Temperature(F)')
plt.ylabel('CDF')
plt.title('Human Body Temperature')
plt.legend(('ECDF', 'CDF'), loc='lower right')
plt.show()

# Performs normal test
import scipy.stats as stats

def isNormal(data):
    z, p = stats.mstats.normaltest(data)
    if (p < 0.055):
        print('The data is more likely NOT normally distributed')
    else:
        print('The data is more likely normally distributed')

isNormal(temperature)

df.describe()

# Calculates p value using 100,000 bootstrap replicates
bootstrap_replicates = np.empty(100000)
size = len(bootstrap_replicates)
for i in range(size):
    bootstrap_sample = np.random.choice(temperature, size=len(temperature))
    bootstrap_replicates[i] = np.mean(bootstrap_sample)
p = np.sum(bootstrap_replicates >= 98.6) / len(bootstrap_replicates)
print('p =', p)

# Calculates z and p values and performs z test
z = (np.mean(temperature) - 98.6) / (np.std(temperature) / np.sqrt(len(temperature)))
print('z =', z)
p_z = stats.norm.sf(abs(z)) * 2
print('p = p(z >= 5.476) + p(z <= -5.476) =', p_z)

# Performs t test
t = z
print('t =', t)
p_t = stats.t.sf(np.abs(t), len(temperature) - 1) * 2
print('p = p(t >= 5.476) + p(t <= -5.476) =', p_t)

# Draws random sample of 10
sample = np.random.choice(temperature, size=10)
sample

# Performs t test on the small sample (using the small-sample statistic t2)
t2 = (np.mean(sample) - 98.6) / (np.std(sample) / np.sqrt(len(sample)))
print('t =', t2)
p_t2 = stats.t.sf(np.abs(t2), len(sample) - 1) * 2
print('p = ', p_t2)

# Performs z test on the small sample (using the small-sample statistic z2)
z2 = (np.mean(sample) - 98.6) / (np.std(sample) / np.sqrt(len(sample)))
print('z =', z2)
p_z2 = stats.norm.sf(abs(z2)) * 2
print('p =', p_z2)

# Calculates margin of error for sample mean with 95% confidence
print('The mean temperature of the data is', np.mean(temperature))
z = 1.96  # this is the value of z for 95% confidence
error = z * np.std(temperature) / np.sqrt(len(temperature))
print('margin of error for a sample mean =', error)

# Calculates 95% confidence interval
confidence_interval = np.percentile(temperature, [2.5, 97.5])
print('We expect 95% of the temperature data to be between', confidence_interval[0],
      'and', confidence_interval[1])
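# A sketch of a bootstrap confidence interval for the *mean* itself, reusing
# the replicates computed above. Note this interval describes the sampling
# distribution of the mean, not the spread of individual temperatures:
mean_ci = np.percentile(bootstrap_replicates, [2.5, 97.5])
print('95% bootstrap CI for the mean:', mean_ci)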
# Plots the ECDF for the temperatures of males and females
male_temperature = df[df['gender'] == 'M']['temperature']
female_temperature = df[df['gender'] == 'F']['temperature']
x_male, y_male = ecdf(male_temperature)
x_female, y_female = ecdf(female_temperature)
plt.plot(x_male, y_male, marker='.', linestyle='none', color='red')
plt.plot(x_female, y_female, marker='.', linestyle='none', color='blue')
plt.xlabel('Temperature(F)')
plt.ylabel('ECDF')
plt.legend(('Male', 'Female'), loc='lower right')
plt.title('Male vs Female: Human Body Temperature')
plt.show()

male_and_female_diff = np.abs(np.mean(male_temperature) - np.mean(female_temperature))
print('The difference between the male and female mean temperatures is',
      male_and_female_diff)

permutation_replicates = np.empty(100000)
size = len(permutation_replicates)
for i in range(size):
    combined_perm_temperatures = np.random.permutation(
        np.concatenate((male_temperature, female_temperature)))
    male_permutation = combined_perm_temperatures[:len(male_temperature)]
    female_permutation = combined_perm_temperatures[len(male_temperature):]
    permutation_replicates[i] = np.abs(np.mean(male_permutation)
                                       - np.mean(female_permutation))

p_val = np.sum(permutation_replicates >= male_and_female_diff) / len(permutation_replicates)
print('p =', p_val)
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at an overview of your data by running the following line.
Step2: Exercises
Step3: Follow-up question
Step4: 2.
Step5: 3.
Step6: 4.
Step7: 5.
Step8: 6.
Step9: 7.
Step10: 8.
Step11: 9.
<ASSISTANT_TASK:>
Python Code:
import pandas as pd

reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
pd.set_option("display.max_rows", 5)

from learntools.core import binder; binder.bind(globals())
from learntools.pandas.indexing_selecting_and_assigning import *
print("Setup complete.")

reviews.head()

# Your code here
desc = ____

# Check your answer
q1.check()

#%%RM_IF(PROD)%%
desc = reviews.description.values
q1.assert_check_failed()

#%%RM_IF(PROD)%%
desc = reviews.description
q1.assert_check_passed()

#%%RM_IF(PROD)%%
desc = reviews["description"]
q1.assert_check_passed()

#_COMMENT_IF(PROD)_
q1.hint()
#_COMMENT_IF(PROD)_
q1.solution()

first_description = ____

# Check your answer
q2.check()
first_description

#%%RM_IF(PROD)%%
first_description = desc[1]
q2.assert_check_failed()

#%%RM_IF(PROD)%%
first_description = desc.iloc[0]
q2.assert_check_passed()

#%%RM_IF(PROD)%%
first_description = desc.loc[0]
q2.assert_check_passed()

#%%RM_IF(PROD)%%
first_description = desc[0]
q2.assert_check_passed()

#_COMMENT_IF(PROD)_
q2.hint()
#_COMMENT_IF(PROD)_
q2.solution()

first_row = ____

# Check your answer
q3.check()
first_row

#%%RM_IF(PROD)%%
first_row = reviews[0]  # Raises KeyError
q3.assert_check_failed()

#%%RM_IF(PROD)%%
first_row = reviews.iloc[0]
q3.assert_check_passed()

#_COMMENT_IF(PROD)_
q3.hint()
#_COMMENT_IF(PROD)_
q3.solution()

first_descriptions = ____

# Check your answer
q4.check()
first_descriptions

#%%RM_IF(PROD)%%
first_descriptions = desc.head(10)
q4.assert_check_passed()

#%%RM_IF(PROD)%%
first_descriptions = desc[:10]
q4.assert_check_passed()

#%%RM_IF(PROD)%%
first_descriptions = desc.iloc[:10]
q4.assert_check_passed()

#%%RM_IF(PROD)%%
first_descriptions = reviews.loc[:9, "description"]
q4.assert_check_passed()

#%%RM_IF(PROD)%%
first_descriptions = reviews.loc[:9, "description"]
q4.assert_check_passed()

#%%RM_IF(PROD)%%
first_descriptions = desc.iloc[1:10]
q4.assert_check_failed()

#_COMMENT_IF(PROD)_
q4.hint()
#_COMMENT_IF(PROD)_
q4.solution()

sample_reviews = ____

# Check your answer
q5.check()
sample_reviews

#%%RM_IF(PROD)%%
indices = [1, 2, 3, 5, 8]
sample_reviews = reviews.loc[indices]
q5.assert_check_passed()

#%%RM_IF(PROD)%%
indices = [1, 2, 3, 5, 8]
sample_reviews = reviews.iloc[indices]
q5.assert_check_passed()

#_COMMENT_IF(PROD)_
q5.hint()
#_COMMENT_IF(PROD)_
q5.solution()

df = ____

# Check your answer
q6.check()
df

#%%RM_IF(PROD)%%
cols = ['country', 'province', 'region_1', 'region_2']
indices = [0, 1, 10, 100]
df = reviews.loc[indices, cols]
q6.assert_check_passed()

#_COMMENT_IF(PROD)_
q6.hint()
#_COMMENT_IF(PROD)_
q6.solution()

df = ____

# Check your answer
q7.check()
df

#%%RM_IF(PROD)%%
cols = ['country', 'variety']
df = reviews.head(100).loc[:, cols]
q7.assert_check_passed()

#%%RM_IF(PROD)%%
cols = ['country', 'variety']
df = reviews.loc[:99, cols]
q7.assert_check_passed()

#%%RM_IF(PROD)%%
cols_idx = [0, 11]
df = reviews.iloc[:100, cols_idx]
q7.assert_check_passed()

#_COMMENT_IF(PROD)_
q7.hint()
#_COMMENT_IF(PROD)_
q7.solution()

italian_wines = ____

# Check your answer
q8.check()

#%%RM_IF(PROD)%%
italian_wines = reviews[reviews.country == 'Italy']
q8.assert_check_passed()

#_COMMENT_IF(PROD)_
q8.hint()
#_COMMENT_IF(PROD)_
q8.solution()
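# A short sketch of the loc/iloc distinction exercised in q7 above: .loc is
# label-based and inclusive of its endpoint, while .iloc is position-based
# and exclusive, so the two lines below select the same 100 rows (column
# positions 0 and 11 correspond to 'country' and 'variety' here).
by_label = reviews.loc[:99, ['country', 'variety']]
by_position = reviews.iloc[:100, [0, 11]]
print(by_label.shape == by_position.shape)  # True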
top_oceania_wines = ____

# Check your answer
q9.check()
top_oceania_wines

#%%RM_IF(PROD)%%
top_oceania_wines = reviews[
    (reviews.country.isin(['Australia', 'New Zealand']))
    & (reviews.points >= 95)
]
q9.assert_check_passed()

#%%RM_IF(PROD)%%
top_oceania_wines = reviews.loc[
    (reviews.country.isin(['Australia', 'New Zealand']))
    & (reviews.points >= 95)
]
q9.assert_check_passed()

#%%RM_IF(PROD)%%
top_oceania_wines = reviews[
    (reviews.country == 'Australia')
    & (reviews.points >= 95)
]
q9.assert_check_failed()

#%%RM_IF(PROD)%%
top_oceania_wines = reviews[
    (reviews.country == 'Australia')
    | (reviews.country == 'New Zealand')
    & (reviews.points >= 95)
]
q9.assert_check_failed()

#%%RM_IF(PROD)%%
top_oceania_wines = reviews[
    ((reviews.country == 'Australia') | (reviews.country == 'New Zealand'))
    & (reviews.points >= 95)
]
q9.assert_check_passed()

#_COMMENT_IF(PROD)_
q9.hint()
#_COMMENT_IF(PROD)_
q9.solution()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Customizing AdaNet
Step2: Fashion MNIST dataset
Step6: Supply the data in TensorFlow
Step7: Launch TensorBoard
Step8: Establish baselines
Step9: Let's start simple, and train a linear model
Step10: The linear model with default parameters achieves about 84.14% accuracy.
Step17: The simple_dnn AdaNet model with default parameters achieves about 85.64% accuracy.
Step21: Next, we extend an adanet.subnetwork.Generator, which defines the search space
Step22: With these defined, we pass them into a new adanet.Estimator
Step23: Our SimpleCNNGenerator code achieves 90.21% accuracy.
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#@test {"skip": true}
# If you're running this in Colab, first install the adanet package:
!pip install adanet

import functools
import os
import shutil

import adanet
from adanet.examples import simple_dnn
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf

# The random seed to use.
RANDOM_SEED = 42
LOG_DIR = '/tmp/models'

(x_train, y_train), (x_test, y_test) = (
    tf.keras.datasets.fashion_mnist.load_data())

FEATURES_KEY = "images"


def generator(images, labels):
  """Returns a generator that returns image-label pairs."""

  def _gen():
    for image, label in zip(images, labels):
      yield image, label

  return _gen


def preprocess_image(image, label):
  """Preprocesses an image for an `Estimator`."""
  # First let's scale the pixel values to be between 0 and 1.
  image = image / 255.
  # Next we reshape the image so that we can apply a 2D convolution to it.
  image = tf.reshape(image, [28, 28, 1])
  # Finally the features need to be supplied as a dictionary.
  features = {FEATURES_KEY: image}
  return features, label


def input_fn(partition, training, batch_size):
  """Generate an input_fn for the Estimator."""

  def _input_fn():
    if partition == "train":
      dataset = tf.data.Dataset.from_generator(
          generator(x_train, y_train), (tf.float32, tf.int32), ((28, 28), ()))
    elif partition == "predict":
      dataset = tf.data.Dataset.from_generator(
          generator(x_test[:10], y_test[:10]), (tf.float32, tf.int32),
          ((28, 28), ()))
    else:
      dataset = tf.data.Dataset.from_generator(
          generator(x_test, y_test), (tf.float32, tf.int32), ((28, 28), ()))

    # We call repeat after shuffling, rather than before, to prevent separate
    # epochs from blending together.
    if training:
      dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()

    dataset = dataset.map(preprocess_image).batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    features, labels = iterator.get_next()
    return features, labels

  return _input_fn


#@test {"skip": true}
get_ipython().system_raw(
    'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
    .format(LOG_DIR)
)

# Install ngrok binary.
! wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
! unzip ngrok-stable-linux-amd64.zip

# Delete old logs dir.
shutil.rmtree(LOG_DIR, ignore_errors=True)

print("Follow this link to open TensorBoard in a new tab.")
get_ipython().system_raw('./ngrok http 6006 &')
! curl -s http://localhost:4040/api/tunnels | python3 -c \
    "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"

# The number of classes.
NUM_CLASSES = 10

# A `Head` instance defines the loss function and metrics for `Estimators`.
head = tf.estimator.MultiClassHead(NUM_CLASSES)

# Some `Estimators` use feature columns for understanding their input features.
feature_columns = [
    tf.feature_column.numeric_column(FEATURES_KEY, shape=[28, 28, 1])
]


def make_config(experiment_name):
  # Estimator configuration.
  return tf.estimator.RunConfig(
      save_checkpoints_steps=1000,
      save_summary_steps=1000,
      tf_random_seed=RANDOM_SEED,
      model_dir=os.path.join(LOG_DIR, experiment_name))
#@test {"skip": true}
#@title Parameters
LEARNING_RATE = 0.001  #@param {type:"number"}
TRAIN_STEPS = 5000  #@param {type:"integer"}
BATCH_SIZE = 64  #@param {type:"integer"}

estimator = tf.estimator.LinearClassifier(
    feature_columns=feature_columns,
    n_classes=NUM_CLASSES,
    optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
    config=make_config("linear"))

tf.estimator.train_and_evaluate(
    estimator,
    train_spec=tf.estimator.TrainSpec(
        input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
        max_steps=TRAIN_STEPS),
    eval_spec=tf.estimator.EvalSpec(
        input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
        steps=None,
        start_delay_secs=1,
        throttle_secs=1,
    ))

results = estimator.evaluate(
    input_fn("test", training=False, batch_size=BATCH_SIZE), steps=None)
print("Accuracy:", results["accuracy"])
print("Loss:", results["average_loss"])

#@test {"skip": true}
#@title Parameters
LEARNING_RATE = 0.003  #@param {type:"number"}
TRAIN_STEPS = 5000  #@param {type:"integer"}
BATCH_SIZE = 64  #@param {type:"integer"}
ADANET_ITERATIONS = 2  #@param {type:"integer"}

estimator = adanet.Estimator(
    head=head,
    subnetwork_generator=simple_dnn.Generator(
        feature_columns=feature_columns,
        optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),
        seed=RANDOM_SEED),
    max_iteration_steps=TRAIN_STEPS // ADANET_ITERATIONS,
    evaluator=adanet.Evaluator(
        input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE),
        steps=None),
    config=make_config("simple_dnn"))

tf.estimator.train_and_evaluate(
    estimator,
    train_spec=tf.estimator.TrainSpec(
        input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
        max_steps=TRAIN_STEPS),
    eval_spec=tf.estimator.EvalSpec(
        input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
        steps=None,
        start_delay_secs=1,
        throttle_secs=1,
    ))

results = estimator.evaluate(
    input_fn("test", training=False, batch_size=BATCH_SIZE), steps=None)
print("Accuracy:", results["accuracy"])
print("Loss:", results["average_loss"])


class SimpleCNNBuilder(adanet.subnetwork.Builder):
  """Builds a CNN subnetwork for AdaNet."""

  def __init__(self, learning_rate, max_iteration_steps, seed):
    """Initializes a `SimpleCNNBuilder`.

    Args:
      learning_rate: The float learning rate to use.
      max_iteration_steps: The number of steps per iteration.
      seed: The random seed.

    Returns:
      An instance of `SimpleCNNBuilder`.
    """
    self._learning_rate = learning_rate
    self._max_iteration_steps = max_iteration_steps
    self._seed = seed

  def build_subnetwork(self,
                       features,
                       logits_dimension,
                       training,
                       iteration_step,
                       summary,
                       previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""
    images = list(features.values())[0]

    # Visualize some of the input images in TensorBoard.
    summary.image("images", images)

    kernel_initializer = tf.keras.initializers.he_normal(seed=self._seed)
    x = tf.keras.layers.Conv2D(
        filters=16,
        kernel_size=3,
        padding="same",
        activation="relu",
        kernel_initializer=kernel_initializer)(
            images)
    x = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)(x)
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(
        units=64, activation="relu", kernel_initializer=kernel_initializer)(
            x)

    # The `Head` passed to adanet.Estimator will apply the softmax activation.
    logits = tf.keras.layers.Dense(
        units=10, activation=None, kernel_initializer=kernel_initializer)(
            x)

    # Use a constant complexity measure, since all subnetworks have the same
    # architecture and hyperparameters.
    complexity = tf.constant(1)

    return adanet.Subnetwork(
        last_layer=x,
        logits=logits,
        complexity=complexity,
        persisted_tensors={})

  def build_subnetwork_train_op(self,
                                subnetwork,
                                loss,
                                var_list,
                                labels,
                                iteration_step,
                                summary,
                                previous_ensemble=None):
    """See `adanet.subnetwork.Builder`."""

    # Momentum optimizer with cosine learning rate decay works well with CNNs.
    learning_rate = tf.train.cosine_decay(
        learning_rate=self._learning_rate,
        global_step=iteration_step,
        decay_steps=self._max_iteration_steps)
    optimizer = tf.train.MomentumOptimizer(learning_rate, .9)
    # NOTE: The `adanet.Estimator` increments the global step.
    return optimizer.minimize(loss=loss, var_list=var_list)

  def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
                                     iteration_step, summary):
    """See `adanet.subnetwork.Builder`."""
    return tf.no_op("mixture_weights_train_op")

  @property
  def name(self):
    """See `adanet.subnetwork.Builder`."""
    return "simple_cnn"
class SimpleCNNGenerator(adanet.subnetwork.Generator):
  """Generates a `SimpleCNN` at each iteration."""

  def __init__(self, learning_rate, max_iteration_steps, seed=None):
    """Initializes a `Generator` that builds `SimpleCNNs`.

    Args:
      learning_rate: The float learning rate to use.
      max_iteration_steps: The number of steps per iteration.
      seed: The random seed.

    Returns:
      An instance of `Generator`.
    """
    self._seed = seed
    self._dnn_builder_fn = functools.partial(
        SimpleCNNBuilder,
        learning_rate=learning_rate,
        max_iteration_steps=max_iteration_steps)

  def generate_candidates(self, previous_ensemble, iteration_number,
                          previous_ensemble_reports, all_reports):
    """See `adanet.subnetwork.Generator`."""
    seed = self._seed
    # Change the seed according to the iteration so that each subnetwork
    # learns something different.
    if seed is not None:
      seed += iteration_number
    return [self._dnn_builder_fn(seed=seed)]

#@title Parameters
LEARNING_RATE = 0.05  #@param {type:"number"}
TRAIN_STEPS = 5000  #@param {type:"integer"}
BATCH_SIZE = 64  #@param {type:"integer"}
ADANET_ITERATIONS = 2  #@param {type:"integer"}

max_iteration_steps = TRAIN_STEPS // ADANET_ITERATIONS
estimator = adanet.Estimator(
    head=head,
    subnetwork_generator=SimpleCNNGenerator(
        learning_rate=LEARNING_RATE,
        max_iteration_steps=max_iteration_steps,
        seed=RANDOM_SEED),
    max_iteration_steps=max_iteration_steps,
    evaluator=adanet.Evaluator(
        input_fn=input_fn("train", training=False, batch_size=BATCH_SIZE),
        steps=None),
    adanet_loss_decay=.99,
    config=make_config("simple_cnn"))

results, _ = tf.estimator.train_and_evaluate(
    estimator,
    train_spec=tf.estimator.TrainSpec(
        input_fn=input_fn("train", training=True, batch_size=BATCH_SIZE),
        max_steps=TRAIN_STEPS),
    eval_spec=tf.estimator.EvalSpec(
        input_fn=input_fn("test", training=False, batch_size=BATCH_SIZE),
        steps=None,
        start_delay_secs=1,
        throttle_secs=1,
    ))
print("Accuracy:", results["accuracy"])
print("Loss:", results["average_loss"])

predictions = estimator.predict(
    input_fn=input_fn("predict", training=False, batch_size=1))

for i, val in enumerate(predictions):
  predicted_class = val['class_ids'][0]
  prediction_confidence = val['probabilities'][predicted_class] * 100

  # Display the image
  plt.imshow(x_test[i])
  plt.rcParams['figure.figsize'] = (1, 1)
  plt.show()
  print('Predicted class: %s, confidence: %s%%' %
        (predicted_class, round(prediction_confidence, 3)))
  print('Actual class: %s \n\n' % y_test[i])
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note how this process fluctuates around some central value. This value is the mean of our time series. As we have a constant mean throughout time and the fluctuations seem to all stray within a given distance from the mean, we might hypothesize that this series is stationary. We would want to rigorously test that in practice, which we will explore lightly in the examples at the end of this lecture. Also see the stationarity lecture from the Quantopian Lecture Series.
Step2: Estimations of Variance Will be Wrong
Step3: Now we'll run an experiment 1000 times in which we compute an AR series, then estimate the mean and take a naive 95% confidence interval around it. Then we'll check if the confidence interval contains 0, the true long-term mean of our series, and record that in our outcomes array.
Step4: Finally, let's check whether our test is calibrated correctly: if it is, the confidence interval should contain 0 about 95% of the time.
Step5: Looks like something is severely wrong. What's going on here is that the AR series moves around a lot more, but the estimate of variance assumes stationarity and doesn't take into account all of that motion. As a result the confidence intervals are way smaller than they should be and don't contain the correct value nearly enough.
Step6: Let's plot out the values now.
Step7: Statistical Testing
Step8: After getting the confidence interval data, we'll write a function to plot it.
Step9: Notice how for the PACF, the first three lags are the only ones that appear to be significantly different from $0$, which makes sense because we directly constructed an AR model of order $3$. However, these results may vary for each random series generated in this notebook.
Step10: The model object has a lot of useful information on it; use the ? notation to find out more. We'll be focusing on a few attributes, starting with model.params, the estimated parameters in the model, one for each lag, and model.bse, the estimated standard error for each of the parameters.
Step11: Choosing the Number of Lags
Step12: Our conclusion is that the AIC estimates the 4 parameter model as most likely, whereas the BIC estimates 3. Because we are always looking for reasons to knock off a parameter, we choose the 3. In this case it happened to be the exact right answer, but this will not always be the case, especially in noisy real data. Don't assume that using this method will always get you the right answer.
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import statsmodels.tsa as tsa
import matplotlib.pyplot as plt

# ensures experiment runs the same every time
np.random.seed(100)

# This function simulates an AR process, generating a new value based on
# historical values, autoregressive coefficients b1 ... bk, and some randomness.
def AR(b, X, mu, sigma):
    l = min(len(b) - 1, len(X))
    b0 = b[0]
    return b0 + np.dot(b[1:l+1], X[-l:]) + np.random.normal(mu, sigma)

b = np.array([0, 0.8, 0.1, 0.05])
X = np.array([1])

mu = 0
sigma = 1

for i in range(10000):
    X = np.append(X, AR(b, X, mu, sigma))

plt.plot(X)
plt.xlabel('Time')
plt.ylabel('AR Series Value');

def compare_tails_to_normal(X):
    # Define matrix to store comparisons
    A = np.zeros((2, 4))
    for k in range(4):
        # stores tail probabilities of the sample series vs a normal series
        A[0, k] = len(X[X > (k + 1)]) / float(len(X))  # Estimate tails of X
        A[1, k] = 1 - stats.norm.cdf(k + 1)  # Compare to Gaussian distribution
    print('Frequency of std events in X \n1: %s\t2: %s\t3: %s\t4: %s' % tuple(A[0]))
    print('Frequency of std events in a normal process \n1: %s\t2: %s\t3: %s\t4: %s' % tuple(A[1]))
    return A

compare_tails_to_normal(X);

def compute_unadjusted_interval(X):
    T = len(X)
    # Compute mu and sigma MLE
    mu = np.mean(X)
    sigma = np.std(X)
    # Compute the bounds using standard error
    lower = mu - 1.96 * (sigma / np.sqrt(T))
    upper = mu + 1.96 * (sigma / np.sqrt(T))
    return lower, upper

# We'll make a function that returns true when the computed bounds contain 0
def check_unadjusted_coverage(X):
    l, u = compute_unadjusted_interval(X)
    # Check to make sure l <= 0 <= u
    if l <= 0 and u >= 0:
        return True
    else:
        return False

def simulate_AR_process(b, T):
    X = np.array([1])
    mu = 0
    sigma = 1
    for i in range(T):
        X = np.append(X, AR(b, X, mu, sigma))
    return X

trials = 1000
outcomes = np.zeros((trials, 1))

for i in range(trials):
    # note these are the same values we used to generate the initial AR array
    Z = simulate_AR_process(np.array([0, 0.8, 0.1, 0.05]), 100)
    if check_unadjusted_coverage(Z):
        # The interval contains 0, the true value
        outcomes[i] = 1
    else:
        outcomes[i] = 0

np.sum(outcomes) / trials

from statsmodels.tsa.stattools import acf, pacf

X = simulate_AR_process(np.array([0, 0.8, 0.1, 0.05]), 1000)

# We'll choose 40 lags. This is a bit arbitrary, but you want to include all
# the lags you think might feasibly impact the current value.
nlags = 40

# Note, this will produce nlags + 1 values, as we include the autocorrelation of
# X[-1] with X[-1], which is trivially 1.
# The reason this is done is because that is the 0th spot in the array and corresponds
# to the 0th lag of X[(-1)-0].
X_acf = acf(X, nlags=nlags)
print('Autocorrelations:\n' + str(X_acf) + '\n')
X_pacf = pacf(X, nlags=nlags)
print('Partial Autocorrelations:\n' + str(X_pacf))

plt.plot(X_acf, 'ro')
plt.xlabel('Lag')
plt.ylabel('Autocorrelation')
plt.title("ACF");

plt.plot(X_pacf, 'ro')
plt.xlabel('Lag')
plt.ylabel('Autocorrelation')
plt.title("PACF");
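# A sketch comparing the sample ACF above against the process's theoretical
# ACF. For a pure AR(3) the PACF cuts off after lag 3 while the ACF decays
# gradually; note that ArmaProcess expects the AR polynomial with the lag
# coefficients' signs flipped.
from statsmodels.tsa.arima_process import ArmaProcess
theoretical = ArmaProcess(np.r_[1, -np.array([0.8, 0.1, 0.05])], [1])
plt.plot(theoretical.acf(nlags + 1), 'bo')
plt.title('Theoretical ACF');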
# We have to set a confidence level for our intervals; we choose the standard
# of 95%, corresponding with an alpha of 0.05.
X_acf, X_acf_confs = acf(X, nlags=nlags, alpha=0.05)
X_pacf, X_pacf_confs = pacf(X, nlags=nlags, alpha=0.05)

def plot_acf(X_acf, X_acf_confs, title='ACF'):
    # The confidence intervals are returned by the functions as (lower, upper)
    # The plotting function needs them in the form (x-lower, upper-x)
    errorbars = np.ndarray((2, len(X_acf)))
    errorbars[0, :] = X_acf - X_acf_confs[:, 0]
    errorbars[1, :] = X_acf_confs[:, 1] - X_acf

    plt.plot(X_acf, 'ro')
    plt.errorbar(range(len(X_acf)), X_acf, yerr=errorbars,
                 fmt='none', ecolor='gray', capthick=2)
    plt.xlabel('Lag')
    plt.ylabel('Autocorrelation')
    plt.title(title);

plot_acf(X_acf, X_acf_confs)

plot_acf(X_pacf, X_pacf_confs, title='PACF')

# Construct an unfitted model
model = tsa.api.AR(X)
# Fit it
model = model.fit()

print('Parameters')
print(model.params)
print('Standard Error')
print(model.bse)

# To plot this we'll need to format a confidence interval 2D array like the
# previous functions returned
model_confs = np.asarray((model.params - model.bse, model.params + model.bse)).T

plot_acf(model.params, model_confs, title='Model Estimated Parameters')

N = 10
AIC = np.zeros((N, 1))

for i in range(N):
    model = tsa.api.AR(X)
    model = model.fit(maxlag=(i+1))
    AIC[i] = model.aic

AIC_min = np.min(AIC)
model_min = np.argmin(AIC)

print('Relative Likelihoods')
print(np.exp((AIC_min - AIC) / 2))
print('Number of parameters in minimum AIC model %s' % (model_min + 1))

N = 10
BIC = np.zeros((N, 1))

for i in range(N):
    model = tsa.api.AR(X)
    model = model.fit(maxlag=(i+1))
    BIC[i] = model.bic

BIC_min = np.min(BIC)
model_min = np.argmin(BIC)

print('Relative Likelihoods')
print(np.exp((BIC_min - BIC) / 2))
print('Number of parameters in minimum BIC model %s' % (model_min + 1))

model = tsa.api.AR(X)
model = model.fit(maxlag=3)

from statsmodels.stats.stattools import jarque_bera

score, pvalue, _, _ = jarque_bera(model.resid)

if pvalue < 0.10:
    print('We have reason to suspect the residuals are not normally distributed.')
else:
    print('The residuals seem normally distributed.')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Homework questions
Step2: The temperature data is called air. Take a look at the details
<ASSISTANT_TASK:>
Python Code:
# Ensure compatibility with Python 2 and 3
from __future__ import print_function, division

import xarray as xr

url = "http://www.esrl.noaa.gov/psd/thredds/dodsC/Datasets/ncep.reanalysis.derived/surface_gauss/air.2m.mon.1981-2010.ltm.nc"
ncep_air2m = xr.open_dataset(url, decode_times=False)

## The NOAA ESRL server is shutdown! January 2019
#url = 'http://apdrc.soest.hawaii.edu:80/dods/public_data/Reanalysis_Data/NCEP/NCEP/clima/'
#ncep_air2m = xr.open_dataset(url + 'surface_gauss/air')

print(ncep_air2m)

print(ncep_air2m.air)
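# A sketch of pulling a single grid point out of the DataArray, assuming the
# dataset carries 'lat' and 'lon' coordinates as NCEP reanalysis files do
# (the coordinates chosen here are arbitrary examples):
point = ncep_air2m.air.sel(lat=40.0, lon=255.0, method='nearest')
print(point.values)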
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define Metric
Step2: Clip and compare
Step3: Observe how these are distinctly different objects in memory
Step4: Density Emulation
Step5: Clipped
Step6: Distances
Step7: Interactive Demonstration of compP.density
Step8: Below, we show an example of using the comparison object to get a better picture of the sets defined above, without necessarily needing to compare two measures.
<ASSISTANT_TASK:>
Python Code:
num_samples_left = 50
num_samples_right = 50
delta = 0.5  # width of measure's support per dimension

L = unit_center_set(2, num_samples_left, delta)
R = unit_center_set(2, num_samples_right, delta)

plt.scatter(L._values[:, 0], L._values[:, 1], c=L._probabilities)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.show()

plt.scatter(R._values[:, 0], R._values[:, 1], c=R._probabilities)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.show()

num_emulation_samples = 2000
mm = compP.compare(L, R, num_emulation_samples)  # initialize metric
# mm.get_left().get_values()
# mm.get_right().get_values()

# cut both sample sets in half
mc = mm.clip(num_samples_left // 2, num_samples_right // 2)
# mc.get_left().get_values()
# mc.get_right().get_values()

mm, mc

ld1, rd1 = mm.estimate_density()
I = mc.get_emulated().get_values()

plt.scatter(I[:, 0], I[:, 1], c=rd1, s=10, alpha=0.5)
plt.scatter(R._values[:, 0], R._values[:, 1], marker='o', s=50, c='k')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Right Density")
plt.show()

plt.scatter(I[:, 0], I[:, 1], c=ld1, s=10, alpha=0.5)
plt.scatter(L._values[:, 0], L._values[:, 1], marker='o', s=50, c='k')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Left Density")
plt.show()

ld2, rd2 = mc.estimate_density()

plt.scatter(I[:, 0], I[:, 1], c=rd2, s=10, alpha=0.5)
plt.scatter(mc.get_right()._values[:, 0], mc.get_right()._values[:, 1],
            marker='o', s=50, c='k')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Right Density")
plt.show()

plt.scatter(I[:, 0], I[:, 1], c=ld2, s=10, alpha=0.5)
plt.scatter(mc.get_left()._values[:, 0], mc.get_left()._values[:, 1],
            marker='o', s=50, c='k')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.title("Left Density")
plt.show()

from scipy.stats import entropy as kl_div

mm.set_left(unit_center_set(2, 1000, delta / 2))
mm.set_right(unit_center_set(2, 1000, delta))
print([mm.value(kl_div), mm.value('tv'), mm.value('totvar'),
       mm.value('mink', w=0.5, p=1), mm.value('norm'),
       mm.value('sqhell'), mm.value('hell'), mm.value('hellinger')])

import ipywidgets as wd

def show_clip(samples=100, delta=0.5):
    np.random.seed(int(121))
    S = unit_center_set(2, samples, delta)
    compP.density(S)
    plt.figure()
    plt.scatter(S._values[:, 0], S._values[:, 1], c=S._density.ravel())
    plt.show()

wd.interact(show_clip, samples=(20, 500), delta=(0.05, 1, 0.05))

import scipy.stats as sstats

def show_clipm(samples=100, delta=0.5):
    np.random.seed(int(121))
    S = unit_center_set(2, samples, delta)
    # alternative probabilities
    xprobs = sstats.distributions.norm(0.5, delta).pdf(S._values[:, 0])
    yprobs = sstats.distributions.norm(0.5, delta).pdf(S._values[:, 1])
    probs = xprobs * yprobs
    S.set_probabilities(probs * S._volumes)
    I = mm.get_emulated()
    m = compP.comparison(I, S, None)
    m.estimate_density_left()
    plt.figure()
    plt.scatter(I._values[:, 0], I._values[:, 1], c=S._emulated_density.ravel())
    plt.scatter([0.5], [0.5], marker='x')
    plt.show()

wd.interact(show_clipm, samples=(20, 500), delta=(0.1, 1, 0.05))
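# A sketch of comparing the two estimated densities directly: since ld1 and
# rd1 were evaluated on the same emulated points, scipy's entropy gives a
# discrete KL divergence between them (it normalizes its inputs itself; the
# result may be infinite if either density contains zeros).
print('KL(left || right) on emulated points:', kl_div(ld1, rd1))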
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the component using KFP SDK
Step2: Sample
Step3: Example pipeline that uses the component
Step4: Compile the pipeline
Step5: Submit the pipeline for execution
<ASSISTANT_TASK:>
Python Code:
%%capture --no-stderr

KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
!pip3 install $KFP_PACKAGE --upgrade

import kfp.components as comp

mlengine_deploy_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/01a23ae8672d3b18e88adf3036071496aca3552d/components/gcp/ml_engine/deploy/component.yaml')
help(mlengine_deploy_op)

# Required Parameters
PROJECT_ID = '<Please put your project ID here>'

# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Deploy'
TRAINED_MODEL_PATH = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/'

import kfp.dsl as dsl
import json

@dsl.pipeline(
    name='CloudML deploy pipeline',
    description='CloudML deploy pipeline'
)
def pipeline(
    model_uri = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
    project_id = PROJECT_ID,
    model_id = 'kfp_sample_model',
    version_id = '',
    runtime_version = '1.10',
    python_version = '',
    version = '',
    replace_existing_version = 'False',
    set_default = 'True',
    wait_interval = '30'):
    task = mlengine_deploy_op(
        model_uri=model_uri,
        project_id=project_id,
        model_id=model_id,
        version_id=version_id,
        runtime_version=runtime_version,
        python_version=python_version,
        version=version,
        replace_existing_version=replace_existing_version,
        set_default=set_default,
        wait_interval=wait_interval)

pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'

import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)

# Specify pipeline argument values
arguments = {}

# Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)

# Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
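# A sketch of overriding pipeline parameters at submission time instead of
# relying on the defaults above; keys must match the pipeline function's
# argument names, and the values here are illustrative only:
example_arguments = {'model_id': 'kfp_sample_model_v2', 'set_default': 'False'}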
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step4: Setup Step5: Evaluating the Hyperparameters Step6: Synchronous Evaluation of Randomly Generated Hyperparameters Step7: Launch asynchronous parallel tasks for evaluating different hyperparameters Step10: Process each hyperparameter and corresponding accuracy in the order that they complete
<ASSISTANT_TASK:> Python Code: import os import numpy as np from filelock import FileLock import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torchvision import datasets, transforms import ray ray.init() # The number of sets of random hyperparameters to try. num_evaluations = 10 # A function for generating random hyperparameters. def generate_hyperparameters(): return { "learning_rate": 10 ** np.random.uniform(-5, 1), "batch_size": np.random.randint(1, 100), "momentum": np.random.uniform(0, 1), } def get_data_loaders(batch_size): mnist_transforms = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))] ) # We add FileLock here because multiple workers will want to # download data, and this may cause overwrites since # DataLoader is not threadsafe. with FileLock(os.path.expanduser("~/data.lock")): train_loader = torch.utils.data.DataLoader( datasets.MNIST( "~/data", train=True, download=True, transform=mnist_transforms ), batch_size=batch_size, shuffle=True, ) test_loader = torch.utils.data.DataLoader( datasets.MNIST("~/data", train=False, transform=mnist_transforms), batch_size=batch_size, shuffle=True, ) return train_loader, test_loader class ConvNet(nn.Module): """Simple two layer Convolutional Neural Network.""" def __init__(self): super(ConvNet, self).__init__() self.conv1 = nn.Conv2d(1, 3, kernel_size=3) self.fc = nn.Linear(192, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 3)) x = x.view(-1, 192) x = self.fc(x) return F.log_softmax(x, dim=1) def train(model, optimizer, train_loader, device=torch.device("cpu")): """Optimize the model with one pass over the data. Cuts off at 1024 samples to simplify training.""" model.train() for batch_idx, (data, target) in enumerate(train_loader): if batch_idx * len(data) > 1024: return data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) loss = F.nll_loss(output, target) loss.backward() optimizer.step() def test(model, test_loader, device=torch.device("cpu")): """Checks the validation accuracy of the model. Cuts off at 512 samples for simplicity.""" model.eval() correct = 0 total = 0 with torch.no_grad(): for batch_idx, (data, target) in enumerate(test_loader): if batch_idx * len(data) > 512: break data, target = data.to(device), target.to(device) outputs = model(data) _, predicted = torch.max(outputs.data, 1) total += target.size(0) correct += (predicted == target).sum().item() return correct / total @ray.remote def evaluate_hyperparameters(config): model = ConvNet() train_loader, test_loader = get_data_loaders(config["batch_size"]) optimizer = optim.SGD( model.parameters(), lr=config["learning_rate"], momentum=config["momentum"] ) train(model, optimizer, train_loader) return test(model, test_loader) # Keep track of the best hyperparameters and the best accuracy. best_hyperparameters = None best_accuracy = 0 # A list holding the object refs for all of the experiments that we have # launched but have not yet been processed. remaining_ids = [] # A dictionary mapping an experiment's object ref to the # hyperparameters used for that experiment. hyperparameters_mapping = {} # Randomly generate sets of hyperparameters and launch a task to evaluate it. 
for i in range(num_evaluations): hyperparameters = generate_hyperparameters() accuracy_id = evaluate_hyperparameters.remote(hyperparameters) remaining_ids.append(accuracy_id) hyperparameters_mapping[accuracy_id] = hyperparameters # Fetch and print the results of the tasks in the order that they complete. while remaining_ids: # Use ray.wait to get the object ref of the first task that completes. done_ids, remaining_ids = ray.wait(remaining_ids) # There is only one return result by default. result_id = done_ids[0] hyperparameters = hyperparameters_mapping[result_id] accuracy = ray.get(result_id) print( """We achieve accuracy {:.3}% with learning_rate: {:.2} batch_size: {} momentum: {:.2}""".format( 100 * accuracy, hyperparameters["learning_rate"], hyperparameters["batch_size"], hyperparameters["momentum"], ) ) if accuracy > best_accuracy: best_hyperparameters = hyperparameters best_accuracy = accuracy # Record the best performing set of hyperparameters. print( """Best accuracy over {} trials was {:.3} with learning_rate: {:.2} batch_size: {} momentum: {:.2}""".format( num_evaluations, 100 * best_accuracy, best_hyperparameters["learning_rate"], best_hyperparameters["batch_size"], best_hyperparameters["momentum"], ) ) <END_TASK>
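The ray.wait pattern above is worth isolating. A self-contained toy version, with sleep tasks standing in for MNIST training (only standard Ray calls are used), makes the completion-order behaviour easy to see:

import random
import time
import ray

ray.init(ignore_reinit_error=True)

@ray.remote
def dummy_task(task_id):
    # Sleep a random amount so tasks finish out of submission order.
    time.sleep(random.uniform(0.1, 1.0))
    return task_id

pending = [dummy_task.remote(i) for i in range(5)]
while pending:
    done, pending = ray.wait(pending)  # returns as soon as one task is ready
    print("finished task", ray.get(done[0]))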
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The standard import command is used. The init_printing command looks at your system to find the clearest way of displaying the output; this isn't necessary, but is helpful for understanding the results. Step2: To declare multiple variables at once, use Step3: Note that the "name" of the variable does not need to match the symbol with which it is displayed. We have used this with z0 above Step4: Once we have variables, we can define new variables by operating on old ones Step5: In addition to variables, we can also define general functions. There is only one option for this Step6: In-built functions Step7: Note the steps taken here. c is an object, something that sympy has created. Once created it can be manipulated and simplified, using the methods on the object. It is useful to use tab completion to look at the available commands. For example, Step8: Now type d. and then tab, to inspect all the available methods. As before, we could do Step9: but there are many other options. Step10: What happened here? sympy is not smart enough to know that we wanted to solve for x! Instead, it solved for the first variable it encountered. Let us try again Step11: This is our expectation Step12: We can substitute in specific values for the parameters to find solutions Step13: We have a list of substitutions. Each substitution is given by a tuple, containing the variable to be replaced, and the expression replacing it. We do not have to substitute in numbers, as here, but could use other variables Step14: We can use similar syntax to solve systems of equations, such as Step15: Differentiation and integration Step16: A parameter can control how many times to differentiate Step17: Partial differentiation with respect to multiple variables can also be performed by increasing the number of arguments Step18: There is also a function representing an unevaluated derivative Step19: These can be useful for display, building up a calculation in stages, simplification, or when the derivative cannot be evaluated. It can be explicitly evaluated using the doit function Step20: Integration Step21: The definite integral is specified by passing a tuple, with the variable to be integrated (here x) and the lower and upper limits (which can be expressions). Step22: Multiple integration for higher dimensional integrals can be performed Step23: Again, there is an unevaluated integral Step24: Again, the doit method will explicitly evaluate the result where possible. Step25: y is a general function, and can be a function of anything at this point (any number of variables with any name). To use it consistently, we must refer to it explicitly as a function of $t$ everywhere. For example, Step26: We then define the differential equation. sympy.Eq defines the equation, and diff differentiates Step27: Here we have used diff as a method applied to the function. As sympy can't differentiate $y(t)$ (as it doesn't have an explicit value), it leaves it unevaluated. Step28: This is simple enough to solve, but we'll use symbolic methods to find the constant, by setting $t = 0$ and $y(t) = y(0) = 1$. Step29: We then find the specific solution of the ODE. Step30: Plotting Step31: We can explicitly set limits, for example Step32: We can plot the solution to the differential equation computed above Step34: This can be visually compared to the previous result. 
However, we would often like a more precise comparison, which requires numerically evaluating the solution to the ODE at specific points. Step35: We want to evaluate our sympy solution at the same points as our scipy solution, in order to do a direct comparison. In order to do that, we want to construct a function that computes our sympy power series solution, without typing it in. That is what lambdify is for Step36: Then we construct the function using lambdify Step37: The first argument to lambdify is a tuple containing the arguments of the function to be created. In this case that's just t, the time(s) at which we want to evaluate the expression. The second argument to lambdify is the expression that we want converted into a function. The third argument, which is optional, tells lambdify that where possible it should use numpy functions. This means that we can call the function using numpy arrays, and it will calculate using numpy array expressions, doing the whole calculation in a single call. Step38: And we can directly apply this function to the times at which the scipy solution is constructed, for comparison Step39: Now we can we matplotlib to plot both on the same figure Step40: We see good visual agreement everywhere. But how accurate is it?
<ASSISTANT_TASK:> Python Code: import sympy sympy.init_printing() x = sympy.Symbol('x') y, z0 = sympy.symbols(('y', 'z_0')) z0 a = x + y b = y * z0 print("a={}. b={}.".format(a, b)) a f = sympy.Function('f') c = sympy.sin(x)**2 + sympy.cos(x)**2 c c.simplify() d = sympy.cosh(x)**2 - sympy.sinh(x)**2 d.simplify() a, b, c, x = sympy.symbols(('a', 'b', 'c', 'x')) quadratic_equation = sympy.Eq(a*x**2+b*x+c, 0) sympy.solve(quadratic_equation) sympy.solve(quadratic_equation, x) roots = sympy.solve(quadratic_equation, x) xplus, xminus = sympy.symbols(('x_{+}', 'x_{-}')) xplus = roots[0] xminus = roots[1] xplus_solution = xplus.subs([(a,1), (b,2), (c,3)]) xplus_solution xminus_solution = xminus.subs([(b,a), (c,a+z0)]) xminus_solution xminus_solution.simplify() eq1 = sympy.Eq(x+2*y, 0) eq2 = sympy.Eq(x*y, z0) sympy.solve([eq1, eq2], [x, y]) expression = x**2*sympy.sin(sympy.log(x)) sympy.diff(expression, x) sympy.diff(expression, x, 3) expression2 = x*sympy.cos(y**2 + x) sympy.diff(expression2, x, 2, y, 3) sympy.Derivative(expression2, x, 2, y, 3) sympy.Derivative(expression2, x, 2, y, 3).doit() integrand=sympy.log(x)**2 sympy.integrate(integrand, x) sympy.integrate(integrand, (x, 1, 10)) sympy.integrate(sympy.exp(-x), (x, 0, sympy.oo)) sympy.integrate(sympy.exp(-(x+y))*sympy.cos(x)*sympy.sin(y), x, y) sympy.integrate(sympy.exp(-(x+y))*sympy.cos(x)*sympy.sin(y), (x, 0, sympy.pi), (y, 0, sympy.pi)) sympy.Integral(integrand, x) sympy.Integral(integrand, (x, 1, 10)) y = sympy.Function('y') t = sympy.Symbol('t') y(t) ode = sympy.Eq(y(t).diff(t), sympy.exp(-t) - y(t)) ode sympy.dsolve(ode, y(t)) general_solution = sympy.dsolve(ode, y(t)) value = general_solution.subs([(t,0), (y(0), 1)]) value ode_solution = general_solution.subs([(value.rhs,value.lhs)]) ode_solution %matplotlib inline sympy.plot(sympy.sin(x)); sympy.plot(sympy.exp(-x)*sympy.sin(x**2), (x, 0, 1)); sympy.plot(ode_solution.rhs, xlim=(0, 1), ylim=(0.7, 1.05)); from numpy import exp from scipy.integrate import odeint import numpy def dydt(y, t): Defining the ODE dy/dt = e^{-t} - y. Parameters ---------- y : real The value of y at time t (the current numerical approximation) t : real The current time t Returns ------- dydt : real The RHS function defining the ODE. return exp(-t) - y t_scipy = numpy.linspace(0.0, 1.0) y0 = [1.0] y_scipy = odeint(dydt, y0, t_scipy) ode_expression = ode_solution.rhs ode_expression from sympy.utilities.lambdify import lambdify ode_function = lambdify((t,), ode_expression, modules='numpy') print("sympy solution at t=0: {}".format(ode_function(0.0))) print("sympy solution at t=0.5: {}".format(ode_function(0.5))) y_sympy = ode_function(t_scipy) from matplotlib import pyplot pyplot.plot(t_scipy, y_scipy[:,0], 'b-', label='scipy') pyplot.plot(t_scipy, y_sympy, 'k--', label='sympy') pyplot.xlabel(r'$t$') pyplot.ylabel(r'$y$') pyplot.legend(loc='upper right') pyplot.show() pyplot.semilogy(t_scipy, numpy.abs(y_scipy[:,0]-y_sympy)) pyplot.xlabel(r'$t$') pyplot.ylabel('Difference in solutions'); <END_TASK>
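As an aside, recent SymPy versions (1.2 and later, an assumption about the environment) let dsolve apply the initial condition directly via the ics keyword, collapsing the manual constant-finding done above into a single call:

import sympy

t = sympy.Symbol('t')
y = sympy.Function('y')
ode = sympy.Eq(y(t).diff(t), sympy.exp(-t) - y(t))

# Solve with the initial condition y(0) = 1 in one step.
particular = sympy.dsolve(ode, y(t), ics={y(0): 1})
print(particular)  # expected: Eq(y(t), (t + 1)*exp(-t))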
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Let's find out how much the salinity in each grid point changed over the full period (20 years) Step2: Now we can plot the slope as a map Step3: linear_trend converts the dimension over which to integrate into logical indices, so the units of the plot above are (salinity/timestep of the original product), here PSS/month. Step4: This works in any dimension the dataset has Step5: This map shows that in lower latitudes spatial patterns of salinity are generally anticorrelated with temperature, and vice versa in the high latitudes. Step6: Taking the mean of these fields suggests that values increase in the upper-left, upper-right and lower-right quadrants, and that the missing values in the third layer distort the mean. Step7: Let's produce a mask to see where all elements along the member dimension have the same sign Step8: You could use this information to indicate, by hatching, the areas of the average where the members do not agree Step9: Masking values in the mixed layer Step10: We can remove the values in the mixed layer Step11: Or keep the mixed layer values only Step12: In this case the cell bounds are not available for the model output, but mask_mixedlayer has the option to take those into account and e.g. remove cells only if the lower boundary is within the mixed layer. All you need to do is correctly specify z_bounds with the variable/coordinate name of the cell bounds.
<ASSISTANT_TASK:> Python Code: import xarray as xr import numpy as np %matplotlib inline path = 'http://apdrc.soest.hawaii.edu:80/dods/public_data/Argo_Products/monthly_mean/monthly_mixed_layer' ds = xr.open_dataset(path, use_cftime=True) ds ds.load() from xarrayutils.utils import linear_trend # create an array salinity_regressed = linear_trend(ds.mls, 'time') salinity_regressed salinity_regressed.slope.plot(robust=True) from xarrayutils.utils import linear_trend, xr_linregress tempxsalt = xr_linregress(ds.mlt, ds.mls, dim='time') tempxsalt.r_value.plot() tempxsalt = xr_linregress(ds.mlt, ds.mls, dim='lon') tempxsalt.r_value.plot(x='time') x = np.linspace(-np.pi, np.pi, 25) y = np.linspace(-np.pi, np.pi, 25) xx, yy = np.meshgrid(x,y) data1 = np.sin(xx) data2 = np.sin(yy) data3 = np.ones_like(xx) np.fill_diagonal(data3,np.nan) np.fill_diagonal(data3[1:],np.nan) np.fill_diagonal(data3[:,1:],np.nan) np.fill_diagonal(data3[:,2:],np.nan) da = xr.DataArray(np.array([data1, data2, data3]), dims=['member','x', 'y']) da.plot(col='member') da.mean('member').plot() from xarrayutils.utils import sign_agreement sign_agreement(da, da.mean('member'), 'member', threshold=1.0).plot() da.mean('member').plot() sign_agreement( da, da.mean('member'), 'member' ).plot.contourf( colors='none', hatches=['..', None], levels=[0,0.5], add_colorbar=False ) import intake url = "https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json" col = intake.open_esm_datastore(url) cat = col.search( table_id='Omon', grid_label='gn', experiment_id='historical', member_id='r1i1p1f1', variable_id=['thetao','mlotst'],#, source_id=["ACCESS-ESM1-5"] ) ddict = cat.to_dataset_dict( zarr_kwargs={'consolidated':True, 'decode_times':True}, ) ds = ddict['CMIP.CSIRO.ACCESS-ESM1-5.historical.Omon.gn'] ds from xarrayutils.utils import mask_mixedlayer ds_wo_ml = mask_mixedlayer(ds, ds.mlotst) ds_wo_ml from xarrayutils.utils import mask_mixedlayer ds_ml_only = mask_mixedlayer(ds, ds.mlotst, mask='inside') ds_ml_only import matplotlib.pyplot as plt roi = dict(i=150, time=0, j=slice(50, 200)) plt.figure(figsize=[10, 15]) for di, (data, label) in enumerate(zip([ds, ds_wo_ml, ds_ml_only], ['full data', 'mixed layer removed', 'mixed layer only'])): plt.subplot(3,1,di+1) data.thetao.isel(lev=slice(0,20),**roi).plot(yincrease=False) ds.mlotst.isel(**roi).plot(x='j') plt.title(label) from xarrayutils.utils import remove_bottom_values ds_bot_removed = remove_bottom_values(ds) ds_bot_removed roi = dict(i=150, time=0, j=slice(50, 100)) plt.figure(figsize=[10, 15]) for di, (data, label) in enumerate(zip([ds, ds_bot_removed], ['full data', 'bottom layer removed'])): plt.subplot(2,1,di+1) data.thetao.isel(lev=slice(38, -2),**roi).plot(yincrease=False, robust=True) plt.title(label) plt.grid() <END_TASK>
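As a cross-check on linear_trend, plain xarray (0.16 and later, an assumption) can compute the same per-gridpoint slope with polyfit. A toy sketch with a known trend:

import numpy as np
import xarray as xr

# Toy dataset: linear trend of 0.1 per step plus small noise, per grid point.
time = np.arange(24)
data = 0.1 * time[:, None, None] + np.random.randn(24, 3, 4) * 0.01
da = xr.DataArray(data, dims=['time', 'lat', 'lon'], coords={'time': time})

fit = da.polyfit(dim='time', deg=1)
slope = fit.polyfit_coefficients.sel(degree=1)  # units: value per time step
print(float(slope.mean()))  # should be close to 0.1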
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step3: Download buildings data for a region in Africa [takes up to 15 minutes for large countries] Step4: Visualise the data Step5: For some countries there can be tens of millions of buildings, so we also take a random sample for doing plots. Step6: Prepare the data for mapping building statistics Step7: To calculate statistics, we need a function to convert between (longitude, latitude) coordinates in the world and (x, y) coordinates in the grid. Step8: Now we can count how many buildings there are on each cell of the grid. Step9: Plot the counts of buildings Step10: [optional] Export a GeoTIFF file Step11: Generate a map of building sizes Step12: Health facility accessibility Step13: We drop all columns not relevant to the computation of mean distance from health facilities. We also exclude all rows with empty or NaN values, select amenities captured as hospitals in the new geodata and choose values within the range of our area of interest. Step14: Have a look at the locations of health facilities compared to the locations of buildings. Step15: Next we calculate, for each building, the distance to the nearest health facility. We use the sample of the buildings data that we took earlier, so that the computations don't take too long. Step16: That has computed the distance in degrees (longitude and latitude), which is not very intuitive. We can convert this approximately to kilometers by multiplying with the distance spanned by one degree at the equator. Step17: Now we can then find the mean distance to the nearest health facility by administrative area. First, we load data on the shapes of adminstrative areas. Step18: Next, find the average distance to the nearest health facility within each area.
<ASSISTANT_TASK:> Python Code: # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License #@markdown Select a region from either the [Natural Earth low res](https://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-admin-0-countries/) (fastest), [Natural Earth high res](https://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-countries/) or [World Bank high res](https://datacatalog.worldbank.org/dataset/world-bank-official-boundaries) shapefiles: region_border_source = 'Natural Earth (Low Res 110m)' #@param ["Natural Earth (Low Res 110m)", "Natural Earth (High Res 10m)", "World Bank (High Res 10m)"] region = 'GHA (Ghana)' #@param ["", "AGO (Angola)", "BDI (Burundi)", "BEN (Benin)", "BFA (Burkina Faso)", "BWA (Botswana)", "CAF (Central African Republic)", "CIV (Cรดte d'Ivoire)", "COD (Democratic Republic of the Congo)", "COG (Republic of the Congo)", "DJI (Djibouti)", "DZA (Algeria)", "EGY (Egypt)", "ERI (Eritrea)", "ETH (Ethiopia)", "GAB (Gabon)", "GHA (Ghana)", "GIN (Guinea)", "GMB (The Gambia)", "GNB (Guinea-Bissau)", "GNQ (Equatorial Guinea)", "KEN (Kenya)", "LBR (Liberia)", "LSO (Lesotho)", "MDG (Madagascar)", "MOZ (Mozambique)", "MRT (Mauritania)", "MWI (Malawi)", "NAM (Namibia)", "NER (Niger)", "NGA (Nigeria)", "RWA (Rwanda)", "SDN (Sudan)", "SEN (Senegal)", "SLE (Sierra Leone)", "SOM (Somalia)", "SWZ (eSwatini)", "TGO (Togo)", "TUN (Tunisia)", "TZA (Tanzania)", "UGA (Uganda)", "ZAF (South Africa)", "ZMB (Zambia)", "ZWE (Zimbabwe)"] # @markdown Alternatively, specify an area of interest in [WKT format](https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry) (assumes crs='EPSG:4326'); this [tool](https://arthur-e.github.io/Wicket/sandbox-gmaps3.html) might be useful. your_own_wkt_polygon = '' #@param {type:"string"} !pip install s2geometry pygeos geopandas import functools import glob import gzip import multiprocessing import os import shutil import tempfile from typing import List, Optional, Tuple import gdal import geopandas as gpd from google.colab import files from IPython import display from mpl_toolkits.axes_grid1 import make_axes_locatable import matplotlib.pyplot as plt import numpy as np import pandas as pd import s2geometry as s2 import shapely import tensorflow as tf import tqdm.notebook BUILDING_DOWNLOAD_PATH = ('gs://open-buildings-data/v1/' 'polygons_s2_level_6_gzip_no_header') def get_filename_and_region_dataframe( region_border_source: str, region: str, your_own_wkt_polygon: str) -> Tuple[str, gpd.geodataframe.GeoDataFrame]: Returns output filename and a geopandas dataframe with one region row. 
if your_own_wkt_polygon: filename = 'open_buildings_v1_polygons_your_own_wkt_polygon.csv.gz' region_df = gpd.GeoDataFrame( geometry=gpd.GeoSeries.from_wkt([your_own_wkt_polygon]), crs='EPSG:4326') if not isinstance(region_df.iloc[0].geometry, shapely.geometry.polygon.Polygon) and not isinstance( region_df.iloc[0].geometry, shapely.geometry.multipolygon.MultiPolygon): raise ValueError("`your_own_wkt_polygon` must be a POLYGON or " "MULTIPOLYGON.") print(f'Preparing your_own_wkt_polygon.') return filename, region_df if not region: raise ValueError('Please select a region or set your_own_wkt_polygon.') if region_border_source == 'Natural Earth (Low Res 110m)': url = ('https://www.naturalearthdata.com/http//www.naturalearthdata.com/' 'download/110m/cultural/ne_110m_admin_0_countries.zip') !wget -N {url} display.clear_output() region_shapefile_path = os.path.basename(url) source_name = 'ne_110m' elif region_border_source == 'Natural Earth (High Res 10m)': url = ('https://www.naturalearthdata.com/http//www.naturalearthdata.com/' 'download/10m/cultural/ne_10m_admin_0_countries.zip') !wget -N {url} display.clear_output() region_shapefile_path = os.path.basename(url) source_name = 'ne_10m' elif region_border_source == 'World Bank (High Res 10m)': url = ('https://development-data-hub-s3-public.s3.amazonaws.com/ddhfiles/' '779551/wb_countries_admin0_10m.zip') !wget -N {url} !unzip -o {os.path.basename(url)} display.clear_output() region_shapefile_path = 'WB_countries_Admin0_10m' source_name = 'wb_10m' region_iso_a3 = region.split(' ')[0] filename = f'open_buildings_v1_polygons_{source_name}_{region_iso_a3}.csv.gz' region_df = gpd.read_file(region_shapefile_path).query( f'ISO_A3 == "{region_iso_a3}"').dissolve(by='ISO_A3')[['geometry']] print(f'Preparing {region} from {region_border_source}.') return filename, region_df def get_bounding_box_s2_covering_tokens( region_geometry: shapely.geometry.base.BaseGeometry) -> List[str]: region_bounds = region_geometry.bounds s2_lat_lng_rect = s2.S2LatLngRect_FromPointPair( s2.S2LatLng_FromDegrees(region_bounds[1], region_bounds[0]), s2.S2LatLng_FromDegrees(region_bounds[3], region_bounds[2])) coverer = s2.S2RegionCoverer() # NOTE: Should be kept in-sync with s2 level in BUILDING_DOWNLOAD_PATH. coverer.set_fixed_level(6) coverer.set_max_cells(1000000) return [cell.ToToken() for cell in coverer.GetCovering(s2_lat_lng_rect)] def s2_token_to_shapely_polygon( s2_token: str) -> shapely.geometry.polygon.Polygon: s2_cell = s2.S2Cell(s2.S2CellId_FromToken(s2_token, len(s2_token))) coords = [] for i in range(4): s2_lat_lng = s2.S2LatLng(s2_cell.GetVertex(i)) coords.append((s2_lat_lng.lng().degrees(), s2_lat_lng.lat().degrees())) return shapely.geometry.Polygon(coords) def download_s2_token( s2_token: str, region_df: gpd.geodataframe.GeoDataFrame) -> Optional[str]: Downloads the matching CSV file with polygons for the `s2_token`. NOTE: Only polygons inside the region are kept. NOTE: Passing output via a temporary file to reduce memory usage. Args: s2_token: S2 token for which to download the CSV file with building polygons. The S2 token should be at the same level as the files in BUILDING_DOWNLOAD_PATH. region_df: A geopandas dataframe with only one row that contains the region for which to keep polygons. Returns: Either filepath which contains a gzipped CSV without header for the `s2_token` subfiltered to only contain building polygons inside the region or None which means that there were no polygons inside the region for this `s2_token`. 
s2_cell_geometry = s2_token_to_shapely_polygon(s2_token) region_geometry = region_df.iloc[0].geometry prepared_region_geometry = shapely.prepared.prep(region_geometry) # If the s2 cell doesn't intersect the country geometry at all then we can # know that all rows would be dropped so instead we can just return early. if not prepared_region_geometry.intersects(s2_cell_geometry): return None try: # Using tf.io.gfile.GFile gives better performance than passing the GCS path # directly to pd.read_csv. with tf.io.gfile.GFile( os.path.join(BUILDING_DOWNLOAD_PATH, f'{s2_token}_buildings.csv.gz'), 'rb') as gf: # If the s2 cell is fully covered by country geometry then can skip # filtering as we need all rows. if prepared_region_geometry.covers(s2_cell_geometry): with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as tmp_f: shutil.copyfileobj(gf, tmp_f) return tmp_f.name # Else take the slow path. # NOTE: We read in chunks to save memory. csv_chunks = pd.read_csv( gf, chunksize=2000000, dtype=object, compression='gzip', header=None) tmp_f = tempfile.NamedTemporaryFile(mode='w+b', delete=False) tmp_f.close() for csv_chunk in csv_chunks: points = gpd.GeoDataFrame( geometry=gpd.points_from_xy(csv_chunk[1], csv_chunk[0]), crs='EPSG:4326') # sjoin 'within' was faster than using shapely's 'within' directly. points = gpd.sjoin(points, region_df, predicate='within') csv_chunk = csv_chunk.iloc[points.index] csv_chunk.to_csv( tmp_f.name, mode='ab', index=False, header=False, compression={ 'method': 'gzip', 'compresslevel': 1 }) return tmp_f.name except tf.errors.NotFoundError: return None # Clear output after pip install. display.clear_output() filename, region_df = get_filename_and_region_dataframe(region_border_source, region, your_own_wkt_polygon) # Remove any old outputs to not run out of disk. for f in glob.glob('/tmp/open_buildings_*'): os.remove(f) # Write header to the compressed CSV file. with gzip.open(f'/tmp/{filename}', 'wt') as merged: merged.write(','.join([ 'latitude', 'longitude', 'area_in_meters', 'confidence', 'geometry', 'full_plus_code' ]) + '\n') download_s2_token_fn = functools.partial(download_s2_token, region_df=region_df) s2_tokens = get_bounding_box_s2_covering_tokens(region_df.iloc[0].geometry) # Downloads CSV files for relevant S2 tokens and after filtering appends them # to the compressed output CSV file. Relies on the fact that concatenating # gzipped files produces a valid gzip file. # NOTE: Uses a pool to speed up output preparation. 
with open(f'/tmp/{filename}', 'ab') as merged: with multiprocessing.Pool(4) as e: for fname in tqdm.notebook.tqdm( e.imap_unordered(download_s2_token_fn, s2_tokens), total=len(s2_tokens)): if fname: with open(fname, 'rb') as tmp_f: shutil.copyfileobj(tmp_f, merged) os.unlink(fname) buildings = pd.read_csv( f"/tmp/{filename}", engine="c", usecols=['latitude', 'longitude', 'area_in_meters', 'confidence']) print(f"Read {len(buildings):,} records.") sample_size = 200000 #@param buildings_sample = (buildings.sample(sample_size) if len(buildings) > sample_size else buildings) plt.plot(buildings_sample.longitude, buildings_sample.latitude, 'k.', alpha=0.25, markersize=0.5) plt.gcf().set_size_inches(10, 10) plt.xlabel('Longitude') plt.ylabel('Latitude') plt.axis('equal'); max_grid_dimension = 1000 #@param confidence_threshold = 0.75 #@param buildings = buildings.query(f"confidence > {confidence_threshold}") # Create a grid covering the dataset bounds min_lon = buildings.longitude.min() max_lon = buildings.longitude.max() min_lat = buildings.latitude.min() max_lat = buildings.latitude.max() grid_density_degrees = (max(max_lon - min_lon, max_lat - min_lat) / max_grid_dimension) bounds = [min_lon, min_lat, max_lon, max_lat] xcoords = np.arange(min_lon, max_lon, grid_density_degrees) ycoords = np.arange(max_lat, min_lat, -grid_density_degrees) xv, yv = np.meshgrid(xcoords, ycoords) xy = np.stack([xv.ravel(), yv.ravel()]).transpose() print(f"Calculated grid of size {xv.shape[0]} x {xv.shape[1]}.") geotransform = (min_lon, grid_density_degrees, 0, max_lat, 0, -grid_density_degrees) def lonlat_to_xy(lon, lat, geotransform): x = int((lon - geotransform[0])/geotransform[1]) y = int((lat - geotransform[3])/geotransform[5]) return x,y counts = np.zeros(xv.shape) area_totals = np.zeros(xv.shape) for lat, lon, area in tqdm.notebook.tqdm( zip(buildings.latitude, buildings.longitude, buildings.area_in_meters)): x, y = lonlat_to_xy(lon, lat, geotransform) if x >= 0 and y >= 0 and x < len(xcoords) and y < len(ycoords): counts[y, x] += 1 area_totals[y, x] += area area_totals[counts == 0] = np.nan counts[counts == 0] = np.nan mean_area = area_totals / counts plt.imshow(np.log10(np.nan_to_num(counts) + 1.), cmap="viridis") plt.gcf().set_size_inches(15, 15) cbar = plt.colorbar(shrink=0.5) cbar.ax.set_yticklabels([f'{x:.0f}' for x in 10 ** cbar.ax.get_yticks()]) plt.title("Building counts per grid cell"); def save_geotiff(filename, values, geotransform): driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(filename, values.shape[1], values.shape[0], 1, gdal.GDT_Float32) dataset.SetGeoTransform(geotransform) band = dataset.GetRasterBand(1) band.WriteArray(values) band.SetNoDataValue(-1) dataset.FlushCache() filename = "building_counts.tiff" save_geotiff(filename, counts, geotransform) files.download(filename) # Only calculate the mean building size for grid locations with at # least a few buildings, so that we get more reliable averages. mean_area_filtered = mean_area.copy() mean_area_filtered[counts < 10] = 0 # Set a maximum value for the colour scale, to make the plot brighter. 
plt.imshow(np.nan_to_num(mean_area_filtered), vmax=250, cmap="viridis") plt.title("Mean building size (m$^2$)") plt.colorbar(shrink=0.5, extend="max") plt.gcf().set_size_inches(15, 15) health_sites = pd.read_csv("https://data.humdata.org/dataset/364c5aca-7cd7-4248-b394-335113293c7a/" "resource/b7e55f34-9e3b-417f-b329-841cff6a9554/download/ghana.csv") health_sites = gpd.GeoDataFrame( health_sites, geometry=gpd.points_from_xy(health_sites.X, health_sites.Y)) health_sites.head() health_sites = health_sites[['X', 'Y', 'amenity', 'name', 'geometry']] health_sites.dropna(axis=0, inplace=True) health_sites = health_sites[health_sites['amenity'].isin(['hospital','clinic','health_post', 'doctors'])] health_sites = health_sites.query( f'Y > {min_lat} and Y < {max_lat}' f'and X > {min_lon} and X < {max_lon}') health_sites.head() plt.plot(buildings_sample.longitude, buildings_sample.latitude, 'k.', alpha=0.25, markersize=0.5) plt.plot(health_sites.X, health_sites.Y, marker='$\\oplus$', color= 'red', alpha = 0.8, markersize=10, linestyle='None') plt.gcf().set_size_inches(10, 10) plt.xlabel('Longitude') plt.ylabel('Latitude') plt.legend(['Building', 'Health center']) plt.axis('equal'); buildings_sample = gpd.GeoDataFrame(buildings_sample, geometry=gpd.points_from_xy(buildings_sample.longitude, buildings_sample.latitude)) buildings_sample["distance_to_nearest_health_facility"] = buildings_sample.geometry.apply( lambda g: health_sites.distance(g).min()) buildings_sample.head() buildings_sample["distance_to_nearest_health_facility"] *= 111.32 !wget https://data.humdata.org/dataset/dc4c17cf-59d9-478c-b2b7-acd889241194/resource/4443ddba-eeaf-4367-9457-7820ea482f7f/download/gha_admbnda_gss_20210308_shp.zip !unzip gha_admbnda_gss_20210308_shp.zip display.clear_output() admin_areas = gpd.read_file("gha_admbnda_gss_20210308_SHP/gha_admbnda_adm2_gss_20210308.shp") # Both data frames have the same coordinate system. buildings_sample.crs = admin_areas.crs # Spatial join to find out which administrative area every building is in. points_polys = gpd.sjoin(buildings_sample, admin_areas, how="left") # Aggregate by admin area to get the average distance to nearest health facility. stats = points_polys.groupby("index_right")["distance_to_nearest_health_facility"].agg(["mean"]) admin_areas_with_distances = gpd.GeoDataFrame(stats.join(admin_areas)) admin_areas_with_distances.plot( column="mean", legend=True, legend_kwds={"shrink": 0.5}) plt.title("Average distance to the nearest health facility (km)") plt.gcf().set_size_inches(15, 15) <END_TASK>
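One caveat about the distance step above: multiplying degrees by 111.32 treats longitude and latitude degrees as equal, which overestimates east-west distances away from the equator. A generic haversine sketch (a standard formula, not part of the original notebook) gives great-circle distances in kilometres directly:

import numpy as np

def haversine_km(lon1, lat1, lon2, lat2):
    # Great-circle distance between points given in degrees.
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    return 2 * 6371.0 * np.arcsin(np.sqrt(a))

# Roughly Accra to Kumasi:
print(haversine_km(-0.19, 5.56, -1.62, 6.69))  # about 200 km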
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Results Step2: However, this method isn't very fast and can only approximate Pi, never truly compute the exact value.
<ASSISTANT_TASK:> Python Code: plot_approximation() print("Pi was approximated at %.5f, when the real value is %.5f..." % (best_pi_approximation, real_pi_value)) plot_approximation_evolution_graph() <END_TASK>
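plot_approximation and the related helpers are defined earlier in that notebook and not shown here. For reference, a self-contained sketch of the estimator presumably behind them, the classic quarter-circle Monte Carlo method (an assumption; only the convergence behaviour is illustrated):

import numpy as np

def approximate_pi(n_samples):
    # Throw n_samples random darts at the unit square; the fraction
    # landing inside the quarter circle x^2 + y^2 <= 1 estimates pi/4.
    x = np.random.rand(n_samples)
    y = np.random.rand(n_samples)
    inside = np.count_nonzero(x * x + y * y <= 1.0)
    return 4.0 * inside / n_samples

for n in (10**3, 10**5, 10**7):
    print(n, approximate_pi(n))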
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: In the above picture you can see that there exist multiple lines that offer a solution to the problem. Is any of them better than the others? We can intuitively define a criterion to estimate the worth of the lines Step2: In machine learning, support vector machines (SVMs) are supervised learning models with associated learning algorithms that analyze data and recognize patterns, used for classification and regression analysis. Given a set of training examples, each marked for belonging to one of two categories, an SVM training algorithm builds a model that assigns new examples into one category or the other, making it a non-probabilistic binary linear classifier. An SVM model is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall on. Step3: First we'll start by importing the data set: the Iris Data Set we are already very familiar with from last lecture Step4: Now we will import the SVC (Support Vector Classification) from the SVM library of Sci Kit Learn; I encourage you to check out the other types of SVM options in the Sci Kit Learn Documentation! Step5: Now we will split the data into a training set and a testing set and then train our model. Step6: Now we'll go ahead and see how well our model did! Step7: Looks like we have achieved a 100% accuracy with Support Vector Classification! Step8: The four methods we will explore are two linear models, a Gaussian Radial Basis Function, and an SVC with a polynomial (3rd Degree) kernel. Step9: Now that we have fitted the four models, we will go ahead and begin the process of setting up the visual plots. Note Step10: Now the plot titles Step11: Finally we will go through each model, set its position as a subplot, then scatter the data points and draw a contour of the decision boundaries.
<ASSISTANT_TASK:> Python Code: from IPython.display import Image Image(url="http://docs.opencv.org/2.4/_images/separating-lines.png") Image(url="http://docs.opencv.org/2.4/_images/optimal-hyperplane.png") #Imports import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn import datasets # load the iris datasets iris = datasets.load_iris() # Grab features (X) and the Target (Y) X = iris.data Y = iris.target # Show the Built-in Data Description print iris.DESCR # Support Vector Machine Imports from sklearn.svm import SVC # Fit a SVM model to the data model = SVC() from sklearn.cross_validation import train_test_split # Split the data into Trainging and Testing sets X_train, X_test, Y_train, Y_test = train_test_split(X, Y) # Fit the model model.fit(X_train,Y_train) from sklearn import metrics # Get predictions predicted = model.predict(X_test) expected = Y_test # Compare results print metrics.accuracy_score(expected,predicted) # Kernel Trick for the Feature Space from IPython.display import Image url='http://i.imgur.com/WuxyO.png' Image(url) # Kernel Trick Visualization from IPython.display import YouTubeVideo YouTubeVideo('3liCbRZPrZA') # Import all SVM from sklearn import svm # We'll use all the data and not bother with a split between training and testing. We'll also only use two features. X = iris.data[:,:2] Y = iris.target # SVM regularization parameter C = 1.0 # SVC with a Linear Kernel (our original example) svc = svm.SVC(kernel='linear', C=C).fit(X, Y) # Gaussian Radial Bassis Function rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y) # SVC with 3rd degree poynomial poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y) # SVC Linear lin_svc = svm.LinearSVC(C=C).fit(X,Y) # Set the step size h = 0.02 # X axis min and max x_min=X[:, 0].min() - 1 x_max =X[:, 0].max() + 1 # Y axis min and max y_min = X[:, 1].min() - 1 y_max = X[:, 1].max() + 1 # Finally, numpy can create a meshgrid xx, yy = np.meshgrid(np.arange(x_min, x_max, h),np.arange(y_min, y_max, h)) # title for the plots titles = ['SVC with linear kernel', 'LinearSVC (linear kernel)', 'SVC with RBF kernel', 'SVC with polynomial (degree 3) kernel'] # Use enumerate for a count for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.figure(figsize=(15,15)) # Set the subplot position (Size = 2 by 2, position deifined by i count plt.subplot(2, 2, i + 1) # SUbplot spacing plt.subplots_adjust(wspace=0.4, hspace=0.4) # Define Z as the prediction, not the use of ravel to format the arrays Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) # Contour plot (filled with contourf) plt.contourf(xx, yy, Z, cmap=plt.cm.terrain, alpha=0.5) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Dark2) # Labels and Titles plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title(titles[i]) plt.show() <END_TASK>
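The C and gamma values above are fixed by hand. As an optional extension (standard scikit-learn, not part of the original lecture), a grid search can pick them by cross-validation:

from sklearn import datasets, svm
from sklearn.model_selection import GridSearchCV

iris = datasets.load_iris()
param_grid = {'C': [0.1, 1, 10], 'gamma': [0.1, 0.7, 1.0]}
search = GridSearchCV(svm.SVC(kernel='rbf'), param_grid, cv=5)
search.fit(iris.data, iris.target)
print(search.best_params_, search.best_score_)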
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Please set the variable parent_directory below to change the directory to which the repositories are cloned. Step2: You might need to install an updated version of cmake to be able to compile the source code. Please make sure that the binary poincare_embedding has been created before proceeding by verifying the above cell does not raise an error. Step3: 2. Training Step6: 2.2 Training C++ embeddings Step8: 2.3 Training numpy embeddings (non-gensim) Step10: 2.4 Training gensim embeddings Step17: 3. Loading the embeddings Step20: 4. Evaluation Step21: 4.1 WordNet reconstruction Step24: Results from the paper - Step25: 4.2.2 Training models Step26: 4.2.3 Evaluating models Step27: Results from the paper -
<ASSISTANT_TASK:> Python Code: %cd ../.. # Some libraries need to be installed that are not part of Gensim ! pip install click>=6.7 nltk>=3.2.5 prettytable>=0.7.2 pygtrie>=2.2 import csv from collections import OrderedDict from IPython.display import display, HTML import logging import os import pickle import random import re import click from gensim.models.poincare import PoincareModel, PoincareRelations, \ ReconstructionEvaluation, LinkPredictionEvaluation, \ LexicalEntailmentEvaluation, PoincareKeyedVectors from gensim.utils import check_output import nltk from prettytable import PrettyTable from smart_open import smart_open logging.basicConfig(level=logging.INFO) nltk.download('wordnet') %cd docs/notebooks/ current_directory = os.getcwd() # Change this variable to `False` to not remove and re-download repos for external implementations force_setup = False # The poincare datasets, models and source code for external models are downloaded to this directory parent_directory = os.path.join(current_directory, 'poincare') ! mkdir -p {parent_directory} %cd {parent_directory} # Clone repos np_repo_name = 'poincare-np-embedding' if force_setup and os.path.exists(np_repo_name): ! rm -rf {np_repo_name} clone_np_repo = not os.path.exists(np_repo_name) if clone_np_repo: ! git clone https://github.com/nishnik/poincare_embeddings.git {np_repo_name} cpp_repo_name = 'poincare-cpp-embedding' if force_setup and os.path.exists(cpp_repo_name): ! rm -rf {cpp_repo_name} clone_cpp_repo = not os.path.exists(cpp_repo_name) if clone_cpp_repo: ! git clone https://github.com/TatsuyaShirakawa/poincare-embedding.git {cpp_repo_name} patches_applied = False # Apply patches if clone_cpp_repo and not patches_applied: %cd {cpp_repo_name} ! git apply ../poincare_burn_in_eps.patch if clone_np_repo and not patches_applied: %cd ../{np_repo_name} ! git apply ../poincare_numpy.patch patches_applied = True # Compile the code for the external c++ implementation into a binary %cd {parent_directory}/{cpp_repo_name} !mkdir -p work %cd work !cmake .. !make %cd {current_directory} cpp_binary_path = os.path.join(parent_directory, cpp_repo_name, 'work', 'poincare_embedding') assert(os.path.exists(cpp_binary_path)), 'Binary file doesnt exist at %s' % cpp_binary_path # These directories are auto created in the current directory for storing poincare datasets and models data_directory = os.path.join(parent_directory, 'data') models_directory = os.path.join(parent_directory, 'models') # Create directories ! mkdir -p {data_directory} ! mkdir -p {models_directory} # Prepare the WordNet data # Can also be downloaded directly from - # https://github.com/jayantj/gensim/raw/wordnet_data/docs/notebooks/poincare/data/wordnet_noun_hypernyms.tsv wordnet_file = os.path.join(data_directory, 'wordnet_noun_hypernyms.tsv') if not os.path.exists(wordnet_file): ! python {parent_directory}/{cpp_repo_name}/scripts/create_wordnet_noun_hierarchy.py {wordnet_file} # Prepare the HyperLex data hyperlex_url = "http://people.ds.cam.ac.uk/iv250/paper/hyperlex/hyperlex-data.zip" ! wget {hyperlex_url} -O {data_directory}/hyperlex-data.zip if os.path.exists(os.path.join(data_directory, 'hyperlex')): ! rm -r {data_directory}/hyperlex ! 
unzip {data_directory}/hyperlex-data.zip -d {data_directory}/hyperlex/ hyperlex_file = os.path.join(data_directory, 'hyperlex', 'nouns-verbs', 'hyperlex-nouns.txt') def train_cpp_model( binary_path, data_file, output_file, dim, epochs, neg, num_threads, epsilon, burn_in, seed=0): Train a poincare embedding using the c++ implementation Args: binary_path (str): Path to the compiled c++ implementation binary data_file (str): Path to tsv file containing relation pairs output_file (str): Path to output file containing model dim (int): Number of dimensions of the trained model epochs (int): Number of epochs to use neg (int): Number of negative samples to use num_threads (int): Number of threads to use for training the model epsilon (float): Constant used for clipping below a norm of one burn_in (int): Number of epochs to use for burn-in init (0 means no burn-in) Notes: If `output_file` already exists, skips training if os.path.exists(output_file): print('File %s exists, skipping' % output_file) return args = { 'dim': dim, 'max_epoch': epochs, 'neg_size': neg, 'num_thread': num_threads, 'epsilon': epsilon, 'burn_in': burn_in, 'learning_rate_init': 0.1, 'learning_rate_final': 0.0001, } cmd = [binary_path, data_file, output_file] for option, value in args.items(): cmd.append("--%s" % option) cmd.append(str(value)) return check_output(args=cmd) model_sizes = [5, 10, 20, 50, 100, 200] default_params = { 'neg': 20, 'epochs': 50, 'threads': 8, 'eps': 1e-6, 'burn_in': 0, 'batch_size': 10, 'reg': 0.0 } non_default_params = { 'neg': [10], 'epochs': [200], 'burn_in': [10] } def cpp_model_name_from_params(params, prefix): param_keys = ['burn_in', 'epochs', 'neg', 'eps', 'threads'] name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)] return '%s_%s' % (prefix, '_'.join(name)) def train_model_with_params(params, train_file, model_sizes, prefix, implementation): Trains models with given params for multiple model sizes using the given implementation Args: params (dict): parameters to train the model with train_file (str): Path to tsv file containing relation pairs model_sizes (list): list of dimension sizes (integer) to train the model with prefix (str): prefix to use for the saved model filenames implementation (str): whether to use the numpy or c++ implementation, allowed values: 'numpy', 'c++' Returns: tuple (model_name, model_files) model_files is a dict of (size, filename) pairs Example: ('cpp_model_epochs_50', {5: 'models/cpp_model_epochs_50_dim_5'}) files = {} if implementation == 'c++': model_name = cpp_model_name_from_params(params, prefix) elif implementation == 'numpy': model_name = np_model_name_from_params(params, prefix) elif implementation == 'gensim': model_name = gensim_model_name_from_params(params, prefix) else: raise ValueError('Given implementation %s not found' % implementation) for model_size in model_sizes: output_file_name = '%s_dim_%d' % (model_name, model_size) output_file = os.path.join(models_directory, output_file_name) print('Training model %s of size %d' % (model_name, model_size)) if implementation == 'c++': out = train_cpp_model( cpp_binary_path, train_file, output_file, model_size, params['epochs'], params['neg'], params['threads'], params['eps'], params['burn_in'], seed=0) elif implementation == 'numpy': train_external_numpy_model( python_script_path, train_file, output_file, model_size, params['epochs'], params['neg'], seed=0) elif implementation == 'gensim': train_gensim_model( train_file, output_file, model_size, params['epochs'], params['neg'], 
params['burn_in'], params['batch_size'], params['reg'], seed=0) else: raise ValueError('Given implementation %s not found' % implementation) files[model_size] = output_file return (model_name, files) model_files = {} model_files['c++'] = {} # Train c++ models with default params model_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'cpp_model', 'c++') model_files['c++'][model_name] = {} for dim, filepath in files.items(): model_files['c++'][model_name][dim] = filepath # Train c++ models with non-default params for param, values in non_default_params.items(): params = default_params.copy() for value in values: params[param] = value model_name, files = train_model_with_params(params, wordnet_file, model_sizes, 'cpp_model', 'c++') model_files['c++'][model_name] = {} for dim, filepath in files.items(): model_files['c++'][model_name][dim] = filepath python_script_path = os.path.join(parent_directory, np_repo_name, 'poincare.py') def np_model_name_from_params(params, prefix): param_keys = ['neg', 'epochs'] name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)] return '%s_%s' % (prefix, '_'.join(name)) def train_external_numpy_model( script_path, data_file, output_file, dim, epochs, neg, seed=0): Train a poincare embedding using an external numpy implementation Args: script_path (str): Path to the Python training script data_file (str): Path to tsv file containing relation pairs output_file (str): Path to output file containing model dim (int): Number of dimensions of the trained model epochs (int): Number of epochs to use neg (int): Number of negative samples to use Notes: If `output_file` already exists, skips training if os.path.exists(output_file): print('File %s exists, skipping' % output_file) return args = { 'input-file': data_file, 'output-file': output_file, 'dimensions': dim, 'epochs': epochs, 'learning-rate': 0.01, 'num-negative': neg, } cmd = ['python', script_path] for option, value in args.items(): cmd.append("--%s" % option) cmd.append(str(value)) return check_output(args=cmd) model_files['numpy'] = {} # Train models with default params model_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'np_model', 'numpy') model_files['numpy'][model_name] = {} for dim, filepath in files.items(): model_files['numpy'][model_name][dim] = filepath def gensim_model_name_from_params(params, prefix): param_keys = ['neg', 'epochs', 'burn_in', 'batch_size', 'reg'] name = ['%s_%s' % (key, params[key]) for key in sorted(param_keys)] return '%s_%s' % (prefix, '_'.join(name)) def train_gensim_model( data_file, output_file, dim, epochs, neg, burn_in, batch_size, reg, seed=0): Train a poincare embedding using gensim implementation Args: data_file (str): Path to tsv file containing relation pairs output_file (str): Path to output file containing model dim (int): Number of dimensions of the trained model epochs (int): Number of epochs to use neg (int): Number of negative samples to use burn_in (int): Number of epochs to use for burn-in initialization batch_size (int): Size of batch to use for training reg (float): Coefficient used for l2-regularization while training Notes: If `output_file` already exists, skips training if os.path.exists(output_file): print('File %s exists, skipping' % output_file) return train_data = PoincareRelations(data_file) model = PoincareModel(train_data, size=dim, negative=neg, burn_in=burn_in, regularization_coeff=reg) model.train(epochs=epochs, batch_size=batch_size) model.save(output_file) 
non_default_params_gensim = [ {'neg': 10,}, {'burn_in': 10,}, {'batch_size': 50,}, {'neg': 10, 'reg': 1, 'burn_in': 10, 'epochs': 200}, ] model_files['gensim'] = {} # Train models with default params model_name, files = train_model_with_params(default_params, wordnet_file, model_sizes, 'gensim_model', 'gensim') model_files['gensim'][model_name] = {} for dim, filepath in files.items(): model_files['gensim'][model_name][dim] = filepath # Train models with non-default params for new_params in non_default_params_gensim: params = default_params.copy() params.update(new_params) model_name, files = train_model_with_params(params, wordnet_file, model_sizes, 'gensim_model', 'gensim') model_files['gensim'][model_name] = {} for dim, filepath in files.items(): model_files['gensim'][model_name][dim] = filepath def transform_cpp_embedding_to_kv(input_file, output_file, encoding='utf8'): Given a C++ embedding tsv filepath, converts it to a KeyedVector-supported file with smart_open(input_file, 'rb') as f: lines = [line.decode(encoding) for line in f] if not len(lines): raise ValueError("file is empty") first_line = lines[0] parts = first_line.rstrip().split("\t") model_size = len(parts) - 1 vocab_size = len(lines) with smart_open(output_file, 'w') as f: f.write('%d %d\n' % (vocab_size, model_size)) for line in lines: f.write(line.replace('\t', ' ')) def transform_numpy_embedding_to_kv(input_file, output_file, encoding='utf8'): Given a numpy poincare embedding pkl filepath, converts it to a KeyedVector-supported file np_embeddings = pickle.load(open(input_file, 'rb')) random_embedding = np_embeddings[list(np_embeddings.keys())[0]] model_size = random_embedding.shape[0] vocab_size = len(np_embeddings) with smart_open(output_file, 'w') as f: f.write('%d %d\n' % (vocab_size, model_size)) for key, vector in np_embeddings.items(): vector_string = ' '.join('%.6f' % value for value in vector) f.write('%s %s\n' % (key, vector_string)) def load_poincare_cpp(input_filename): Load embedding trained via C++ Poincare model. Parameters ---------- filepath : str Path to tsv file containing embedding. Returns ------- PoincareKeyedVectors instance. keyed_vectors_filename = input_filename + '.kv' transform_cpp_embedding_to_kv(input_filename, keyed_vectors_filename) embedding = PoincareKeyedVectors.load_word2vec_format(keyed_vectors_filename) os.unlink(keyed_vectors_filename) return embedding def load_poincare_numpy(input_filename): Load embedding trained via Python numpy Poincare model. Parameters ---------- filepath : str Path to pkl file containing embedding. Returns: PoincareKeyedVectors instance. keyed_vectors_filename = input_filename + '.kv' transform_numpy_embedding_to_kv(input_filename, keyed_vectors_filename) embedding = PoincareKeyedVectors.load_word2vec_format(keyed_vectors_filename) os.unlink(keyed_vectors_filename) return embedding def load_poincare_gensim(input_filename): Load embedding trained via Gensim PoincareModel. Parameters ---------- filepath : str Path to model file. Returns: PoincareKeyedVectors instance. model = PoincareModel.load(input_filename) return model.kv def load_model(implementation, model_file): Convenience function over functions to load models from different implementations. Parameters ---------- implementation : str Implementation used to create model file ('c++'/'numpy'/'gensim'). model_file : str Path to model file. 
    Returns
    -------
    PoincareKeyedVectors instance

    Notes
    -----
    Raises ValueError in case of invalid value for `implementation`

    """
    if implementation == 'c++':
        return load_poincare_cpp(model_file)
    elif implementation == 'numpy':
        return load_poincare_numpy(model_file)
    elif implementation == 'gensim':
        return load_poincare_gensim(model_file)
    else:
        raise ValueError('Invalid implementation %s' % implementation)


def display_results(task_name, results):
    """Display evaluation results of multiple embeddings on a single task in a tabular format

    Args:
        task_name (str): name the task being evaluated
        results (dict): mapping between embeddings and corresponding results

    """
    result_table = PrettyTable()
    result_table.field_names = ["Model Description", "Metric"] + [str(dim) for dim in sorted(model_sizes)]
    for model_name, model_results in results.items():
        metrics = [metric for metric in model_results.keys()]
        dims = sorted([dim for dim in model_results[metrics[0]].keys()])
        description = model_description_from_name(model_name)
        row = [description, '\n'.join(metrics) + '\n']
        for dim in dims:
            scores = ['%.2f' % model_results[metric][dim] for metric in metrics]
            row.append('\n'.join(scores))
        result_table.add_row(row)
    result_table.align = 'r'
    result_html = result_table.get_html_string()
    search = "<table>"
    insert_at = result_html.index(search) + len(search)
    new_row = """
    <tr>
        <th colspan="1" style="text-align:left">%s</th>
        <th colspan="1"></th>
        <th colspan="%d" style="text-align:center">Dimensions</th>
    </tr>
    """ % (task_name, len(model_sizes))
    result_html = result_html[:insert_at] + new_row + result_html[insert_at:]
    display(HTML(result_html))


def model_description_from_name(model_name):
    if model_name.startswith('gensim'):
        implementation = 'Gensim'
    elif model_name.startswith('cpp'):
        implementation = 'C++'
    elif model_name.startswith('np'):
        implementation = 'Numpy'
    else:
        raise ValueError('Unsupported implementation for model: %s' % model_name)
    description = []
    for param_key in sorted(default_params.keys()):
        pattern = '%s_([^_]*)_?' % param_key
        match = re.search(pattern, model_name)
        if match:
            description.append("%s=%s" % (param_key, match.groups()[0]))
    return "%s: %s" % (implementation, ", ".join(description))

reconstruction_results = OrderedDict()
metrics = ['mean_rank', 'MAP']
for implementation, models in sorted(model_files.items()):
    for model_name, files in models.items():
        if model_name in reconstruction_results:
            continue
        reconstruction_results[model_name] = OrderedDict()
        for metric in metrics:
            reconstruction_results[model_name][metric] = {}
        for model_size, model_file in files.items():
            print('Evaluating model %s of size %d' % (model_name, model_size))
            embedding = load_model(implementation, model_file)
            eval_instance = ReconstructionEvaluation(wordnet_file, embedding)
            eval_result = eval_instance.evaluate(max_n=1000)
            for metric in metrics:
                reconstruction_results[model_name][metric][model_size] = eval_result[metric]

display_results('WordNet Reconstruction', reconstruction_results)


def train_test_split(data_file, test_ratio=0.1):
    """Creates train and test files from given data file, returns train/test file names

    Args:
        data_file (str): path to data file for which train/test split is to be created
        test_ratio (float): fraction of lines to be used for test data

    Returns:
        (train_file, test_file): tuple of strings with train file and test file paths
    """
    train_filename = data_file + '.train'
    test_filename = data_file + '.test'
    if os.path.exists(train_filename) and os.path.exists(test_filename):
        print('Train and test files already exist, skipping')
        return (train_filename, test_filename)
    root_nodes, leaf_nodes = get_root_and_leaf_nodes(data_file)
    test_line_candidates = []
    line_count = 0
    all_nodes = set()
    with smart_open(data_file, 'rb') as f:
        for i, line in enumerate(f):
            node_1, node_2 = line.split()
            all_nodes.update([node_1, node_2])
            if (
                node_1 not in leaf_nodes
                and node_2 not in leaf_nodes
                and node_1 not in root_nodes
                and node_2 not in root_nodes
                and node_1 != node_2
            ):
                test_line_candidates.append(i)
            line_count += 1

    num_test_lines = int(test_ratio * line_count)
    if num_test_lines > len(test_line_candidates):
        raise ValueError('Not enough candidate relations for test set')
    print('Choosing %d test lines from %d candidates' % (num_test_lines, len(test_line_candidates)))
    test_line_indices = set(random.sample(test_line_candidates, num_test_lines))
    train_line_indices = set(l for l in range(line_count) if l not in test_line_indices)

    train_set_nodes = set()
    with smart_open(data_file, 'rb') as f:
        train_file = smart_open(train_filename, 'wb')
        test_file = smart_open(test_filename, 'wb')
        for i, line in enumerate(f):
            if i in train_line_indices:
                train_set_nodes.update(line.split())
                train_file.write(line)
            elif i in test_line_indices:
                test_file.write(line)
            else:
                raise AssertionError('Line %d not present in either train or test line indices' % i)
        train_file.close()
        test_file.close()
    assert len(train_set_nodes) == len(all_nodes), 'Not all nodes from dataset present in train set relations'
    return (train_filename, test_filename)


def get_root_and_leaf_nodes(data_file):
    """Return keys of root and leaf nodes from a file with transitive closure relations

    Args:
        data_file(str): file path containing transitive closure relations

    Returns:
        (root_nodes, leaf_nodes) - tuple containing keys of root and leaf nodes
    """
    root_candidates = set()
    leaf_candidates = set()
    with smart_open(data_file, 'rb') as f:
        for line in f:
            nodes = line.split()
            root_candidates.update(nodes)
            leaf_candidates.update(nodes)
    with smart_open(data_file, 'rb') as f:
        for line in f:
            node_1, node_2 = line.split()
            if node_1 == node_2:
                continue
            leaf_candidates.discard(node_1)
            root_candidates.discard(node_2)
    return (leaf_candidates, root_candidates)

wordnet_train_file, wordnet_test_file = train_test_split(wordnet_file)

# Training models for link prediction
lp_model_files = {}

lp_model_files['c++'] = {}
# Train c++ models with default params
model_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'cpp_lp_model', 'c++')
lp_model_files['c++'][model_name] = {}
for dim, filepath in files.items():
    lp_model_files['c++'][model_name][dim] = filepath
# Train c++ models with non-default params
for param, values in non_default_params.items():
    params = default_params.copy()
    for value in values:
        params[param] = value
        model_name, files = train_model_with_params(params, wordnet_train_file, model_sizes, 'cpp_lp_model', 'c++')
        lp_model_files['c++'][model_name] = {}
        for dim, filepath in files.items():
            lp_model_files['c++'][model_name][dim] = filepath

lp_model_files['numpy'] = {}
# Train numpy models with default params
model_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'np_lp_model', 'numpy')
lp_model_files['numpy'][model_name] = {}
for dim, filepath in files.items():
    lp_model_files['numpy'][model_name][dim] = filepath

lp_model_files['gensim'] = {}
# Train models with default params
model_name, files = train_model_with_params(default_params, wordnet_train_file, model_sizes, 'gensim_lp_model', 'gensim')
lp_model_files['gensim'][model_name] = {}
for dim, filepath in files.items():
    lp_model_files['gensim'][model_name][dim] = filepath
# Train models with non-default params
for new_params in non_default_params_gensim:
    params = default_params.copy()
    params.update(new_params)
    model_name, files = train_model_with_params(params, wordnet_file, model_sizes, 'gensim_lp_model', 'gensim')
    lp_model_files['gensim'][model_name] = {}
    for dim, filepath in files.items():
        lp_model_files['gensim'][model_name][dim] = filepath

lp_results = OrderedDict()
metrics = ['mean_rank', 'MAP']
for implementation, models in sorted(lp_model_files.items()):
    for model_name, files in models.items():
        lp_results[model_name] = OrderedDict()
        for metric in metrics:
            lp_results[model_name][metric] = {}
        for model_size, model_file in files.items():
            print('Evaluating model %s of size %d' % (model_name, model_size))
            embedding = load_model(implementation, model_file)
            eval_instance = LinkPredictionEvaluation(wordnet_train_file, wordnet_test_file, embedding)
            eval_result = eval_instance.evaluate(max_n=1000)
            for metric in metrics:
                lp_results[model_name][metric][model_size] = eval_result[metric]

display_results('WordNet Link Prediction', lp_results)

entailment_results = OrderedDict()
eval_instance = LexicalEntailmentEvaluation(hyperlex_file)
for implementation, models in sorted(model_files.items()):
    for model_name, files in models.items():
        if model_name in entailment_results:
            continue
        entailment_results[model_name] = OrderedDict()
        entailment_results[model_name]['spearman'] = {}
        for model_size, model_file in files.items():
            print('Evaluating model %s of size %d' % (model_name, model_size))
            embedding = load_model(implementation, model_file)
            entailment_results[model_name]['spearman'][model_size] = eval_instance.evaluate_spearman(embedding)

display_results('Lexical Entailment (HyperLex)', entailment_results)
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Authenticate your GCP account Step2: Create a Cloud Storage bucket Step3: Only if your bucket doesn't already exist Step4: Finally, validate access to your Cloud Storage bucket by examining its contents Step5: Import libraries and define constants Step6: Create a dataset Step7: Cloud Run Step8: Local training snippet Step9: Cleaning up
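A minimal sketch of the Step3 guard, before the full solution below — this is an editor's illustration, not part of the original notebook; it assumes BUCKET_NAME and REGION are set as in the code that follows, and uses gsutil's exit code to decide whether the bucket must be created.

import subprocess

def ensure_bucket(bucket_name, region):
    # 'gsutil ls -b' exits non-zero when the bucket does not exist (or is not accessible)
    probe = subprocess.run(['gsutil', 'ls', '-b', 'gs://%s' % bucket_name],
                           capture_output=True)
    if probe.returncode != 0:
        # create the bucket only when the probe failed
        subprocess.run(['gsutil', 'mb', '-l', region, 'gs://%s' % bucket_name], check=True)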
<ASSISTANT_TASK:> Python Code: PROJECT_ID = "[your-project-id]" #@param {type:"string"} ! gcloud config set project $PROJECT_ID import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. if 'google.colab' in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. else: %env GOOGLE_APPLICATION_CREDENTIALS '' BUCKET_NAME = "[your-bucket-name]" #@param {type:"string"} REGION = 'us-central1' #@param {type:"string"} ! gsutil mb -l $REGION gs://$BUCKET_NAME ! gsutil ls -al gs://$BUCKET_NAME from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time import pandas as pd import tensorflow as tf from IPython.core.display import HTML bh = tf.keras.datasets.boston_housing (X_train, y_train), (X_eval, y_eval) = bh.load_data() training = pd.DataFrame(X_train) training['target'] = y_train validation = pd.DataFrame(X_eval) validation['target'] = y_eval print('Data head:') display(training.head(2)) data = os.path.join('gs://', BUCKET_NAME, 'data.csv') print('Copy the data in bucket ...') with tf.io.gfile.GFile(data, 'w') as f: training.append(validation).to_csv(f, index=False) output_location = os.path.join('gs://', BUCKET_NAME, 'output') job_name = "data_inspection_{}".format(time.strftime("%Y%m%d%H%M%S")) !gcloud ai-platform jobs submit training $job_name \ --master-image-uri gcr.io/aihub-c2t-containers/kfp-components/oob_algorithm/tabular_data_inspection:latest \ --region $REGION \ --scale-tier CUSTOM \ --master-machine-type standard \ -- \ --output-location {output_location} \ --data {data} \ --data-type csv if not tf.io.gfile.exists(os.path.join(output_location, 'report.html')): raise RuntimeError('The file report.html was not found. Did the training job finish?') with tf.io.gfile.GFile(os.path.join(output_location, 'report.html')) as f: display(HTML(f.read())) # If training job is still running, cancel it ! gcloud ai-platform jobs cancel $job_name --quiet # Delete Cloud Storage objects that were created ! gsutil -m rm -r $BUCKET_NAME <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Loading data from files Step2: Plot the data Step3: The iris dataset consists of 50 samples each from three species of Iris (*Iris setosa*, *Iris virginica* and *Iris versicolor*), for 150 total samples. We have four features (sepal length & width, petal length & width) and three unique labels Step4: NOTE Step5: NOTE Step6: Once we have a dataset we can wrap it with a DataLoader. This gives us a powerful sampler that provides single- or multi-process iterators over the dataset.
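As a quick illustration of the single- vs multi-process iteration mentioned in Step6 (an editor's sketch; `iris` is the TensorDataset constructed in the code below, and the worker count is an arbitrary assumption):

from torch.utils.data import DataLoader

# num_workers > 0 spawns worker processes that load batches in parallel
loader = DataLoader(iris, batch_size=50, shuffle=True, num_workers=2)
for X_batch, y_batch in loader:
    print(X_batch.shape, y_batch.shape)  # e.g. torch.Size([50, 4]) torch.Size([50])
    break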
<ASSISTANT_TASK:> Python Code: import torch import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline df = pd.read_csv('../Data/iris.csv') df.head() df.shape fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10,7)) fig.tight_layout() plots = [(0,1),(2,3),(0,2),(1,3)] colors = ['b', 'r', 'g'] labels = ['Iris setosa','Iris virginica','Iris versicolor'] for i, ax in enumerate(axes.flat): for j in range(3): x = df.columns[plots[i][0]] y = df.columns[plots[i][1]] ax.scatter(df[df['target']==j][x], df[df['target']==j][y], color=colors[j]) ax.set(xlabel=x, ylabel=y) fig.legend(labels=labels, loc=3, bbox_to_anchor=(1.0,0.85)) plt.show() from sklearn.model_selection import train_test_split train_X, test_X, train_y, test_y = train_test_split(df.drop('target',axis=1).values, df['target'].values, test_size=0.2, random_state=33) X_train = torch.FloatTensor(train_X) X_test = torch.FloatTensor(test_X) y_train = torch.LongTensor(train_y).reshape(-1, 1) y_test = torch.LongTensor(test_y).reshape(-1, 1) print(f'Training size: {len(y_train)}') labels, counts = y_train.unique(return_counts=True) print(f'Labels: {labels}\nCounts: {counts}') X_train.size() y_train.size() from torch.utils.data import TensorDataset, DataLoader data = df.drop('target',axis=1).values labels = df['target'].values iris = TensorDataset(torch.FloatTensor(data),torch.LongTensor(labels)) len(iris) type(iris) for i in iris: print(i) iris_loader = DataLoader(iris, batch_size=105, shuffle=True) for i_batch, sample_batched in enumerate(iris_loader): print(i_batch, sample_batched) list(iris_loader)[0][1].bincount() next(iter(iris_loader)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Document Authors Step2: Document Contributors Step3: Document Publication Step4: Document Table of Contents Step5: 1.2. Model Name Step6: 1.3. Description Step7: 1.4. Land Atmosphere Flux Exchanges Step8: 1.5. Atmospheric Coupling Treatment Step9: 1.6. Land Cover Step10: 1.7. Land Cover Change Step11: 1.8. Tiling Step12: 2. Key Properties --&gt; Conservation Properties Step13: 2.2. Water Step14: 2.3. Carbon Step15: 3. Key Properties --&gt; Timestepping Framework Step16: 3.2. Time Step Step17: 3.3. Timestepping Method Step18: 4. Key Properties --&gt; Software Properties Step19: 4.2. Code Version Step20: 4.3. Code Languages Step21: 5. Grid Step22: 6. Grid --&gt; Horizontal Step23: 6.2. Matches Atmosphere Grid Step24: 7. Grid --&gt; Vertical Step25: 7.2. Total Depth Step26: 8. Soil Step27: 8.2. Heat Water Coupling Step28: 8.3. Number Of Soil layers Step29: 8.4. Prognostic Variables Step30: 9. Soil --&gt; Soil Map Step31: 9.2. Structure Step32: 9.3. Texture Step33: 9.4. Organic Matter Step34: 9.5. Albedo Step35: 9.6. Water Table Step36: 9.7. Continuously Varying Soil Depth Step37: 9.8. Soil Depth Step38: 10. Soil --&gt; Snow Free Albedo Step39: 10.2. Functions Step40: 10.3. Direct Diffuse Step41: 10.4. Number Of Wavelength Bands Step42: 11. Soil --&gt; Hydrology Step43: 11.2. Time Step Step44: 11.3. Tiling Step45: 11.4. Vertical Discretisation Step46: 11.5. Number Of Ground Water Layers Step47: 11.6. Lateral Connectivity Step48: 11.7. Method Step49: 12. Soil --&gt; Hydrology --&gt; Freezing Step50: 12.2. Ice Storage Method Step51: 12.3. Permafrost Step52: 13. Soil --&gt; Hydrology --&gt; Drainage Step53: 13.2. Types Step54: 14. Soil --&gt; Heat Treatment Step55: 14.2. Time Step Step56: 14.3. Tiling Step57: 14.4. Vertical Discretisation Step58: 14.5. Heat Storage Step59: 14.6. Processes Step60: 15. Snow Step61: 15.2. Tiling Step62: 15.3. Number Of Snow Layers Step63: 15.4. Density Step64: 15.5. Water Equivalent Step65: 15.6. Heat Content Step66: 15.7. Temperature Step67: 15.8. Liquid Water Content Step68: 15.9. Snow Cover Fractions Step69: 15.10. Processes Step70: 15.11. Prognostic Variables Step71: 16. Snow --&gt; Snow Albedo Step72: 16.2. Functions Step73: 17. Vegetation Step74: 17.2. Time Step Step75: 17.3. Dynamic Vegetation Step76: 17.4. Tiling Step77: 17.5. Vegetation Representation Step78: 17.6. Vegetation Types Step79: 17.7. Biome Types Step80: 17.8. Vegetation Time Variation Step81: 17.9. Vegetation Map Step82: 17.10. Interception Step83: 17.11. Phenology Step84: 17.12. Phenology Description Step85: 17.13. Leaf Area Index Step86: 17.14. Leaf Area Index Description Step87: 17.15. Biomass Step88: 17.16. Biomass Description Step89: 17.17. Biogeography Step90: 17.18. Biogeography Description Step91: 17.19. Stomatal Resistance Step92: 17.20. Stomatal Resistance Description Step93: 17.21. Prognostic Variables Step94: 18. Energy Balance Step95: 18.2. Tiling Step96: 18.3. Number Of Surface Temperatures Step97: 18.4. Evaporation Step98: 18.5. Processes Step99: 19. Carbon Cycle Step100: 19.2. Tiling Step101: 19.3. Time Step Step102: 19.4. Anthropogenic Carbon Step103: 19.5. Prognostic Variables Step104: 20. Carbon Cycle --&gt; Vegetation Step105: 20.2. Carbon Pools Step106: 20.3. Forest Stand Dynamics Step107: 21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis Step108: 22. 
Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration Step109: 22.2. Growth Respiration Step110: 23. Carbon Cycle --&gt; Vegetation --&gt; Allocation Step111: 23.2. Allocation Bins Step112: 23.3. Allocation Fractions Step113: 24. Carbon Cycle --&gt; Vegetation --&gt; Phenology Step114: 25. Carbon Cycle --&gt; Vegetation --&gt; Mortality Step115: 26. Carbon Cycle --&gt; Litter Step116: 26.2. Carbon Pools Step117: 26.3. Decomposition Step118: 26.4. Method Step119: 27. Carbon Cycle --&gt; Soil Step120: 27.2. Carbon Pools Step121: 27.3. Decomposition Step122: 27.4. Method Step123: 28. Carbon Cycle --&gt; Permafrost Carbon Step124: 28.2. Emitted Greenhouse Gases Step125: 28.3. Decomposition Step126: 28.4. Impact On Soil Properties Step127: 29. Nitrogen Cycle Step128: 29.2. Tiling Step129: 29.3. Time Step Step130: 29.4. Prognostic Variables Step131: 30. River Routing Step132: 30.2. Tiling Step133: 30.3. Time Step Step134: 30.4. Grid Inherited From Land Surface Step135: 30.5. Grid Description Step136: 30.6. Number Of Reservoirs Step137: 30.7. Water Re Evaporation Step138: 30.8. Coupled To Atmosphere Step139: 30.9. Coupled To Land Step140: 30.10. Quantities Exchanged With Atmosphere Step141: 30.11. Basin Flow Direction Map Step142: 30.12. Flooding Step143: 30.13. Prognostic Variables Step144: 31. River Routing --&gt; Oceanic Discharge Step145: 31.2. Quantities Transported Step146: 32. Lakes Step147: 32.2. Coupling With Rivers Step148: 32.3. Time Step Step149: 32.4. Quantities Exchanged With Rivers Step150: 32.5. Vertical Grid Step151: 32.6. Prognostic Variables Step152: 33. Lakes --&gt; Method Step153: 33.2. Albedo Step154: 33.3. Dynamics Step155: 33.4. Dynamic Lake Extent Step156: 33.5. Endorheic Basins Step157: 34. Lakes --&gt; Wetlands
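The code below is an auto-generated questionnaire in which every property follows the same set_id/set_value pattern; as a hedged illustration of what a completed entry would look like (the value here is hypothetical and not taken from the source):

# Hypothetical example only -- the real answer must come from the model documentation
DOC.set_id('cmip6.land.key_properties.model_name')
DOC.set_value('JULES')  # assumed value, for illustration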
<ASSISTANT_TASK:> Python Code: # DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-hm', 'land') # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "water" # "energy" # "carbon" # "nitrogen" # "phospherous" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.land_cover') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bare soil" # "urban" # "lake" # "land ice" # "lake ice" # "vegetated" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.land_cover_change') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.energy') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.water') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.horizontal.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.vertical.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.vertical.total_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_water_coupling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.number_of_soil layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.texture') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.organic_matter') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.water_table') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.soil_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.functions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation type" # "soil humidity" # "vegetation state" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "distinction between direct and diffuse albedo" # "no distinction between direct and diffuse albedo" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "perfect connectivity" # "Darcian flow" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Bucket" # "Force-restore" # "Choisnel" # "Explicit diffusion" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.drainage.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.drainage.types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Gravity drainage" # "Horton mechanism" # "topmodel-based" # "Dunne mechanism" # "Lateral subsurface flow" # "Baseflow from groundwater" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.heat_treatment.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Force-restore" # "Explicit diffusion" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "soil moisture freeze-thaw" # "coupling with snow temperature" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.number_of_snow_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.density') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "constant" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.water_equivalent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.heat_content') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.temperature') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.liquid_water_content') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.snow_cover_fractions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "ground snow fraction" # "vegetation snow fraction" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "snow interception" # "snow melting" # "snow freezing" # "blowing snow" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.snow.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.snow_albedo.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "prescribed" # "constant" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.snow_albedo.functions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation type" # "snow age" # "snow density" # "snow grain type" # "aerosol deposition" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.dynamic_vegetation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation types" # "biome types" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "broadleaf tree" # "needleleaf tree" # "C3 grass" # "C4 grass" # "vegetated" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biome_types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "evergreen needleleaf forest" # "evergreen broadleaf forest" # "deciduous needleleaf forest" # "deciduous broadleaf forest" # "mixed forest" # "woodland" # "wooded grassland" # "closed shrubland" # "opne shrubland" # "grassland" # "cropland" # "wetlands" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_time_variation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed (not varying)" # "prescribed (varying from files)" # "dynamical (varying from simulation)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_map') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.interception') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.phenology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic (vegetation map)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.vegetation.phenology_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.leaf_area_index') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prescribed" # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.leaf_area_index_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biomass') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biomass_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biogeography') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biogeography_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.stomatal_resistance') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "light" # "temperature" # "water availability" # "CO2" # "O3" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.evaporation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "alpha" # "beta" # "combined" # "Monteith potential evaporation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "transpiration" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.carbon_cycle.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "grand slam protocol" # "residence time" # "decay time" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "leaves + stems + roots" # "leaves + stems + roots (leafy + woody)" # "leaves + fine roots + coarse roots + stems" # "whole plant (no distinction)" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "function of vegetation type" # "function of plant allometry" # "explicitly calculated" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.grid_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.water_re_evaporation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "flood plains" # "irrigation" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.coupled_to_land') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "adapted for other periods" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.flooding') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "direct (large rivers)" # "diffuse" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.coupling_with_rivers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.vertical_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.lakes.method.ice_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.dynamics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "No lake dynamics" # "vertical" # "horizontal" # "Other: [Please specify]" # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.endorheic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.wetlands.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: The data were collected with an Elekta Neuromag VectorView system at 1000 Hz Step2: The data channel array consisted of 204 MEG planar gradiometers, Step3: The data have strong line frequency (60 Hz and harmonics) and cHPI coil Step4: Let's use Maxwell filtering to clean the data a bit. Step5: We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter. Step6: Now we epoch our data, average it, and look at the first dipole response. Step7: Let's do some dipole fits. The phantom is properly modeled by a single-shell Step8: Now we can compare to the actual locations, taking the difference in mm
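Step3 flags strong line-frequency noise; the solution below handles it with Maxwell filtering plus a low-pass, but as a hedged aside (an editor's sketch, not part of the original tutorial) notch filtering the 60 Hz harmonics directly would be another standard option, assuming `raw` is loaded as in the code that follows:

import numpy as np
# remove 60 Hz line noise and its harmonics up to 240 Hz
raw.notch_filter(np.arange(60., 241., 60.))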
<ASSISTANT_TASK:> Python Code:

# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)

import os.path as op
import numpy as np

import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif

print(__doc__)

data_path = bst_phantom_elekta.data_path()

raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)

events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG2421']

raw.plot_psd(tmax=60.)

raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))

raw.filter(None, 40., h_trans_bandwidth='auto', filter_length='auto',
           phase='zero', fir_window='hamming')
raw.plot(events=events)

tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
                    decim=5, preload=True)
epochs['1'].average().plot()

t_peak = 60e-3  # ~60 ms at largest peak
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
cov = mne.compute_covariance(epochs, tmax=0)
data = []
for ii in range(1, 33):
    evoked = epochs[str(ii)].average().crop(t_peak, t_peak)
    data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs, raw
dip = fit_dipole(evoked, cov, sphere, n_jobs=1)[0]

actual_pos = mne.dipole.get_phantom_dipoles()[0]
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('Differences (mm):\n%s' % diffs[:, np.newaxis])
print('μ = %s' % (np.mean(diffs),))
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: A few examples of the handwritten words are shown below. Note that the first capitalized letter has been removed. Step3: Define Factor Types and Build Factor Graphs Step5: Next, we write a function to construct the factor graphs and prepare labels for training. For each factor graph instance, the structure is a chain, but the number of nodes and edges depends on the number of letters: unary factors are added for each letter and pairwise factors for each pair of neighboring letters. In addition, the first and last letters each get an extra bias factor. Step6: An example of the graph structure is visualized below, which should give a better sense of how a factor graph is built. Note that different colors are used to represent different factor types. Step7: Training Step8: In Shogun, we implemented several batch solvers and online solvers. Let's first try to train the model using a batch solver. We choose the dual bundle method solver (BMRM). Step9: Let's check the duality gap to see if the training has converged. We aim at minimizing the primal problem while maximizing the dual problem. By the weak duality theorem, the optimal value of the primal problem is always greater than or equal to that of the dual problem, so we can expect the duality gap to decrease over time. A relatively small and stable duality gap may indicate convergence. In fact, the gap doesn't have to become zero, since we know it is not far away from the local minima. Step10: There are other statistics that may also help check whether the solution is good, such as the number of cutting planes, which gives a sense of how tight the piecewise lower bound is. In general, the number of cutting planes should be much less than the dimension of the parameter vector. Step11: In our case, we have 101 active cutting planes, which is much less than 4082, i.e. the number of parameters. We can expect a good model by looking at these statistics. Now we come to the online solvers. Unlike the cutting plane algorithms, which re-optimize over all the previously added dual variables, an online solver updates the solution based on a single point. This difference results in a faster convergence rate, i.e. fewer oracle calls; please refer to Table 1 in [4] for more detail. Here, we use the stochastic subgradient descent (SGD) solver. Step12: We compare SGD and BMRM in terms of the primal objectives versus effective passes. We first plot the training progress (until both algorithms converge) and then zoom in to check the first 100 passes. In order to make a fair comparison, we set the regularization constant to 1e-2 for both algorithms. Step13: As shown above, the SGD solver uses fewer oracle calls to converge. Note that the timing is about 2 times slower than actually needed, since there are additional computations of the primal objective and training error in each pass. The training errors of both algorithms for each pass are shown below. Step15: Interestingly, the training errors of the SGD solver are lower than BMRM's in the first 100 passes, but in the end the BMRM solver obtains a better training performance. A probable explanation is that BMRM uses a very limited number of cutting planes at the beginning, which forms a poor approximation of the objective function. As the number of cutting planes increases, we get a tighter piecewise lower bound and thus better performance. In addition, we would like to show the pairwise weights, which may learn important co-occurrences of letters. The Hinton diagram is a wonderful tool for visualizing 2D data, in which positive and negative values are represented by white and black squares, respectively, and the size of each square represents the magnitude of each value. In our case, a smaller (more negative) value, i.e. a larger black square, indicates that the two letters tend to coincide. Step16: Inference Step17: Evaluation
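To make the Hinton-diagram reading concrete, here is a hedged sketch (an editor's addition; `w_p` is the 26x26 pairwise weight matrix computed near the end of the code below, where smaller values indicate letter pairs that tend to co-occur):

import numpy as np
import string

# indices of the five smallest pairwise weights, i.e. the strongest couplings
flat = np.argsort(w_p, axis=None)[:5]
for i, j in zip(*np.unravel_index(flat, w_p.shape)):
    print('%s-%s: %.3f' % (string.ascii_lowercase[i], string.ascii_lowercase[j], w_p[i, j]))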
<ASSISTANT_TASK:> Python Code: %pylab inline %matplotlib inline import os SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data') import numpy as np import scipy.io dataset = scipy.io.loadmat(os.path.join(SHOGUN_DATA_DIR, 'ocr/ocr_taskar.mat')) # patterns for training p_tr = dataset['patterns_train'] # patterns for testing p_ts = dataset['patterns_test'] # labels for training l_tr = dataset['labels_train'] # labels for testing l_ts = dataset['labels_test'] # feature dimension n_dims = p_tr[0,0].shape[0] # number of states n_stats = 26 # number of training samples n_tr_samples = p_tr.shape[1] # number of testing samples n_ts_samples = p_ts.shape[1] import matplotlib.pyplot as plt def show_word(patterns, index): show a word with padding plt.rc('image', cmap='binary') letters = patterns[0,index][:128,:] n_letters = letters.shape[1] for l in range(n_letters): lett = np.transpose(np.reshape(letters[:,l], (8,16))) lett = np.hstack((np.zeros((16,1)), lett, np.zeros((16,1)))) lett = np.vstack((np.zeros((1,10)), lett, np.zeros((1,10)))) subplot(1,n_letters,l+1) imshow(lett) plt.xticks(()) plt.yticks(()) plt.tight_layout() show_word(p_tr, 174) show_word(p_tr, 471) show_word(p_tr, 57) from shogun import TableFactorType # unary, type_id = 0 cards_u = np.array([n_stats], np.int32) w_gt_u = np.zeros(n_stats*n_dims) fac_type_u = TableFactorType(0, cards_u, w_gt_u) # pairwise, type_id = 1 cards = np.array([n_stats,n_stats], np.int32) w_gt = np.zeros(n_stats*n_stats) fac_type = TableFactorType(1, cards, w_gt) # first bias, type_id = 2 cards_s = np.array([n_stats], np.int32) w_gt_s = np.zeros(n_stats) fac_type_s = TableFactorType(2, cards_s, w_gt_s) # last bias, type_id = 3 cards_t = np.array([n_stats], np.int32) w_gt_t = np.zeros(n_stats) fac_type_t = TableFactorType(3, cards_t, w_gt_t) # all initial parameters w_all = [w_gt_u,w_gt,w_gt_s,w_gt_t] # all factor types ftype_all = [fac_type_u,fac_type,fac_type_s,fac_type_t] def prepare_data(x, y, ftype, num_samples): prepare FactorGraphFeatures and FactorGraphLabels from shogun import Factor, TableFactorType, FactorGraph from shogun import FactorGraphObservation, FactorGraphLabels, FactorGraphFeatures samples = FactorGraphFeatures(num_samples) labels = FactorGraphLabels(num_samples) for i in range(num_samples): n_vars = x[0,i].shape[1] data = x[0,i].astype(np.float64) vc = np.array([n_stats]*n_vars, np.int32) fg = FactorGraph(vc) # add unary factors for v in range(n_vars): datau = data[:,v] vindu = np.array([v], np.int32) facu = Factor(ftype[0], vindu, datau) fg.add_factor(facu) # add pairwise factors for e in range(n_vars-1): datap = np.array([1.0]) vindp = np.array([e,e+1], np.int32) facp = Factor(ftype[1], vindp, datap) fg.add_factor(facp) # add bias factor to first letter datas = np.array([1.0]) vinds = np.array([0], np.int32) facs = Factor(ftype[2], vinds, datas) fg.add_factor(facs) # add bias factor to last letter datat = np.array([1.0]) vindt = np.array([n_vars-1], np.int32) fact = Factor(ftype[3], vindt, datat) fg.add_factor(fact) # add factor graph samples.add_sample(fg) # add corresponding label states_gt = y[0,i].astype(np.int32) states_gt = states_gt[0,:]; # mat to vector loss_weights = np.array([1.0/n_vars]*n_vars) fg_obs = FactorGraphObservation(states_gt, loss_weights) labels.add_label(fg_obs) return samples, labels # prepare training pairs (factor graph, node states) n_tr_samples = 350 # choose a subset of training data to avoid time out on buildbot samples, labels = prepare_data(p_tr, l_tr, ftype_all, n_tr_samples) try: import networkx 
as nx  # pip install networkx
except ImportError:
    import pip
    pip.main(['install', '--user', 'networkx'])
    import networkx as nx
import matplotlib.pyplot as plt
# create a graph
G = nx.Graph()
node_pos = {}
# add variable nodes, assuming there are 3 letters
G.add_nodes_from(['v0','v1','v2'])
for i in range(3):
    node_pos['v%d' % i] = (2*i,1)
# add factor nodes
G.add_nodes_from(['F0','F1','F2','F01','F12','Fs','Ft'])
for i in range(3):
    node_pos['F%d' % i] = (2*i,1.006)
for i in range(2):
    node_pos['F%d%d' % (i,i+1)] = (2*i+1,1)
node_pos['Fs'] = (-1,1)
node_pos['Ft'] = (5,1)
# add edges to connect variable nodes and factor nodes
G.add_edges_from([('v%d' % i,'F%d' % i) for i in range(3)])
G.add_edges_from([('v%d' % i,'F%d%d' % (i,i+1)) for i in range(2)])
G.add_edges_from([('v%d' % (i+1),'F%d%d' % (i,i+1)) for i in range(2)])
G.add_edges_from([('v0','Fs'),('v2','Ft')])
# draw graph
fig, ax = plt.subplots(figsize=(6,2))
nx.draw_networkx_nodes(G,node_pos,nodelist=['v0','v1','v2'],node_color='white',node_size=700,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['F0','F1','F2'],node_color='yellow',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['F01','F12'],node_color='blue',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['Fs'],node_color='green',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_nodes(G,node_pos,nodelist=['Ft'],node_color='purple',node_shape='s',node_size=300,ax=ax)
nx.draw_networkx_edges(G,node_pos,alpha=0.7)
plt.axis('off')
plt.tight_layout()
from shogun import FactorGraphModel, TREE_MAX_PROD
# create model and register factor types
model = FactorGraphModel(samples, labels, TREE_MAX_PROD)
model.add_factor_type(ftype_all[0])
model.add_factor_type(ftype_all[1])
model.add_factor_type(ftype_all[2])
model.add_factor_type(ftype_all[3])
from shogun import DualLibQPBMSOSVM
from shogun import BmrmStatistics
import pickle
import time
# create bundle method SOSVM; there are a few variants to choose from:
# BMRM, Proximal Point BMRM, Proximal Point P-BMRM, NCBM
# usually the default one, i.e. BMRM, is good enough
# lambda is set to 1e-2
bmrm = DualLibQPBMSOSVM(model, labels, 0.01)
bmrm.put('m_TolAbs', 20.0)
bmrm.put('verbose', True)
bmrm.set_store_train_info(True)
# train
t0 = time.time()
bmrm.train()
t1 = time.time()
w_bmrm = bmrm.get_real_vector('m_w')
print("BMRM took", t1 - t0, "seconds.")
import matplotlib.pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
primal_bmrm = bmrm.get_helper().get_real_vector('primal')
dual_bmrm = bmrm.get_result().get_hist_Fd_vector()
len_iter = min(primal_bmrm.size, dual_bmrm.size)
primal_bmrm = primal_bmrm[1:len_iter]
dual_bmrm = dual_bmrm[1:len_iter]
# plot duality gaps
xs = range(dual_bmrm.size)
axes[0].plot(xs, (primal_bmrm-dual_bmrm), label='duality gap')
axes[0].set_xlabel('iteration')
axes[0].set_ylabel('duality gap')
axes[0].legend(loc=1)
axes[0].set_title('duality gaps'); axes[0].grid(True)
# plot primal and dual values
xs = range(dual_bmrm.size-1)
axes[1].plot(xs, primal_bmrm[1:], label='primal')
axes[1].plot(xs, dual_bmrm[1:], label='dual')
axes[1].set_xlabel('iteration')
axes[1].set_ylabel('objective')
axes[1].legend(loc=1)
axes[1].set_title('primal vs dual'); axes[1].grid(True)
# statistics
bmrm_stats = bmrm.get_result()
nCP = bmrm_stats.nCP
nzA = bmrm_stats.nzA
print('number of cutting planes: %d' % nCP)
print('number of active cutting planes: %d' % nzA)
from shogun import StochasticSOSVM
# the 3rd parameter is do_weighted_averaging; by turning this on,
# a possibly faster convergence rate may be achieved.
# the 4th parameter controls output of verbose training information
sgd = StochasticSOSVM(model, labels, True, True)
sgd.put('num_iter', 100)
sgd.put('lambda', 0.01)
# train
t0 = time.time()
sgd.train()
t1 = time.time()
w_sgd = sgd.get_real_vector('m_w')
print("SGD took", t1 - t0, "seconds.")
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
primal_sgd = sgd.get_helper().get_real_vector('primal')
xs = range(dual_bmrm.size-1)
axes[0].plot(xs, primal_bmrm[1:], label='BMRM')
axes[0].plot(range(99), primal_sgd[1:100], label='SGD')
axes[0].set_xlabel('effective passes')
axes[0].set_ylabel('primal objective')
axes[0].set_title('whole training progress')
axes[0].legend(loc=1)
axes[0].grid(True)
axes[1].plot(range(99), primal_bmrm[1:100], label='BMRM')
axes[1].plot(range(99), primal_sgd[1:100], label='SGD')
axes[1].set_xlabel('effective passes')
axes[1].set_ylabel('primal objective')
axes[1].set_title('first 100 effective passes')
axes[1].legend(loc=1)
axes[1].grid(True)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,4))
terr_bmrm = bmrm.get_helper().get_real_vector('train_error')
terr_sgd = sgd.get_helper().get_real_vector('train_error')
xs = range(terr_bmrm.size-1)
axes[0].plot(xs, terr_bmrm[1:], label='BMRM')
axes[0].plot(range(99), terr_sgd[1:100], label='SGD')
axes[0].set_xlabel('effective passes')
axes[0].set_ylabel('training error')
axes[0].set_title('whole training progress')
axes[0].legend(loc=1)
axes[0].grid(True)
axes[1].plot(range(99), terr_bmrm[1:100], label='BMRM')
axes[1].plot(range(99), terr_sgd[1:100], label='SGD')
axes[1].set_xlabel('effective passes')
axes[1].set_ylabel('training error')
axes[1].set_title('first 100 effective passes')
axes[1].legend(loc=1)
axes[1].grid(True)
def hinton(matrix, max_weight=None, ax=None):
    """Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca() if not max_weight: max_weight = 2**np.ceil(np.log(np.abs(matrix).max())/np.log(2)) ax.patch.set_facecolor('gray') ax.set_aspect('equal', 'box') ax.xaxis.set_major_locator(plt.NullLocator()) ax.yaxis.set_major_locator(plt.NullLocator()) for (x,y),w in np.ndenumerate(matrix): color = 'white' if w > 0 else 'black' size = np.sqrt(np.abs(w)) rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, edgecolor=color) ax.add_patch(rect) ax.autoscale_view() ax.invert_yaxis() # get pairwise parameters, also accessible from # w[n_dims*n_stats:n_dims*n_stats+n_stats*n_stats] model.w_to_fparams(w_sgd) # update factor parameters w_p = ftype_all[1].get_w() w_p = np.reshape(w_p,(n_stats,n_stats)) hinton(w_p) # get testing data samples_ts, labels_ts = prepare_data(p_ts, l_ts, ftype_all, n_ts_samples) from shogun import FactorGraphFeatures, FactorGraphObservation, TREE_MAX_PROD, MAPInference # get a factor graph instance from test data fg0 = samples_ts.get_sample(100) fg0.compute_energies() fg0.connect_components() # create a MAP inference using tree max-product infer_met = MAPInference(fg0, TREE_MAX_PROD) infer_met.inference() # get inference results y_pred = infer_met.get_structured_outputs() y_truth = FactorGraphObservation.obtain_from_generic(labels_ts.get_label(100)) print(y_pred.get_data()) print(y_truth.get_data()) from shogun import SOSVMHelper # training error of BMRM method bmrm.put('m_w', w_bmrm) model.w_to_fparams(w_bmrm) lbs_bmrm = bmrm.apply() acc_loss = 0.0 ave_loss = 0.0 for i in range(n_tr_samples): y_pred = lbs_bmrm.get_label(i) y_truth = labels.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss = acc_loss / n_tr_samples print('BMRM: Average training error is %.4f' % ave_loss) # training error of stochastic method print('SGD: Average training error is %.4f' % SOSVMHelper.average_loss(w_sgd, model)) # testing error bmrm.set_features(samples_ts) bmrm.set_labels(labels_ts) lbs_bmrm_ts = bmrm.apply() acc_loss = 0.0 ave_loss_ts = 0.0 for i in range(n_ts_samples): y_pred = lbs_bmrm_ts.get_label(i) y_truth = labels_ts.get_label(i) acc_loss = acc_loss + model.delta_loss(y_truth, y_pred) ave_loss_ts = acc_loss / n_ts_samples print('BMRM: Average testing error is %.4f' % ave_loss_ts) # testing error of stochastic method print('SGD: Average testing error is %.4f' % SOSVMHelper.average_loss(sgd.get_real_vector('m_w'), model)) <END_TASK>
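Note: the MAP inference above uses tree max-product; on a chain like this OCR word model it reduces to Viterbi decoding. A minimal standalone NumPy sketch of that idea (toy log-potentials, not Shogun's API):
import numpy as np
def chain_map_inference(unary, pairwise):
    # unary: (T, S) per-node log-potentials; pairwise: (S, S) transition log-potentials
    T, S = unary.shape
    score = unary[0].copy()                    # best score of partial paths ending in each state
    backptr = np.zeros((T, S), dtype=int)
    for t in range(1, T):
        cand = score[:, None] + pairwise       # (S_prev, S_next) candidate scores
        backptr[t] = cand.argmax(axis=0)
        score = cand.max(axis=0) + unary[t]
    states = [int(score.argmax())]
    for t in range(T - 1, 0, -1):              # backtrace the best path
        states.append(int(backptr[t, states[-1]]))
    return states[::-1]
rng = np.random.default_rng(0)
print(chain_map_inference(rng.normal(size=(4, 26)), rng.normal(size=(26, 26))))  # toy 4-letter word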
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now check out what our data consists of by using the .head() method on our DataFrame. By default, this will show the top 5 rows.
Step2: One of the first steps you should take is to get an overview of what kind of data we have by running the .info() method. Please see the documentation for more info (no pun intended).
Step3: We can see that the column date_published is being interpreted as an object and not a datetime. Let's change that by using the pandas.to_datetime() function.
Step4: Number of Authors
Step5: We can now take a look at the summary statistics of any numeric columns by running the .describe() method.
Step6: Number of Unique Authors
Step7: If we also wanted the number of times a particular author was involved in writing an article, we could leverage the power of Counters from the collections library. Refer to the documentation for more information.
Step8: Let's say we wanted to now subset down to the articles which Ari Shapiro worked on. There are a variety of ways we could do this, but I will demo one possible avenue.
Step9: Most popular sections
Step10: When we first were looking at our DataFrame, you may have noticed that there are quite a few rows missing author information. Maybe we have a hypothesis that there are certain sections that systematically weren't attaching author information. Let's dive deeper to try and prove/disprove this hypothesis...
Step11: As we can see, there are clearly sections that are consistently not attaching author information, as well as many that are hit or miss with the author information.
Step12: Below we see how we could use the resample function to find the number of articles published per day.
Step13: There are, of course, many different offset aliases for passing to resample. For more options see this page.
Step14: What hour is the most popular time for publishing articles?
Step15: We were able to run the above command because that particular column contains a datetime object. From there we can run .dt and then extract any aspect of that datetime (e.g. .dt.hour, .dt.second, .dt.month, .dt.quarter)
Step16: By default, the .hist method is going to plot 10 bins. Let's up that to 24 bins so we have a bin for each hour in the day...
Step17: Selecting Particular Dates
Step18: Length of Articles (# Words)
Step19: Let's create a histogram of the length of different articles...
Step20: Clearly there are some outliers in this data. Let's subset what we are plotting to cut out the top 2% of articles in terms of article length and see what the resulting histogram looks like...
Step21: Only rows that contain 'Obama' in the Headline
Step22: Looking at Average Hour Published by Section
<ASSISTANT_TASK:> Python Code: df = pd.read_csv('npr_articles.csv') df.head() df.info() df['date_published'] = pd.to_datetime(df['date_published']) df.info() # Let's create a mask for all rows that have a non-null value mask = df['author'].notnull() # When the data was saved to a csv, these lists were converted into strings, we can convert # them back like so from ast import literal_eval df.loc[mask, 'author'] = df.loc[mask, 'author'].map(literal_eval) # Initialize column with NaN's and then fill in the respective values df['num_authors'] = np.nan df.loc[mask, 'num_authors'] = df.loc[mask, 'author'].map(len) df.describe() df.head() # Create a set to hold our authors authors = set() for lst in df.loc[mask, 'author']: # For every row, update the authors set with those contained in that row authors.update(lst) # Print out the total authors seen print(len(authors)) from collections import Counter authors = df.loc[mask, 'author'].map(Counter).sum() authors authors.most_common() authors['Ari Shapiro'] # Because some rows have NaN's in them, we need to get clever with how we # create our mask mask = df['author'].map(lambda x: 'Ari Shapiro' in x if isinstance(x, list) else False) df.loc[mask, 'headline'] # Here is another way we could acheive this mask = df.loc[df['author'].notnull(), 'author'].map(lambda x: 'Ari Shapiro' in x) df.loc[df['author'].notnull()].loc[mask, 'headline'] df['section'].value_counts(dropna=False)[:5] # Let's create a new column that indicates whether the author attribute was null or not # This helps with the groupby below df['author_null'] = df['author'].isnull() # Get the mean amount of nulls for each section and sort descending # NOTE: 1.0 indicates ALL Nulls df.groupby('section')['author_null'].mean().sort_values(ascending=False) # Create a pandas Series with 1's as the values and the date as the index s = pd.Series([1], index=df['date_published']) s[:10] # Let's resample that Series and sum the values to find the number of articles by Day s.resample('D').sum() plt.plot(s.resample('D').sum()) plt.title('Article Count By Day') plt.ylabel('Number of Articles') plt.xlabel('Date') locs, labels = plt.xticks() plt.setp(labels, rotation=-45); plt.plot(s.resample('W').sum()) plt.title('Article Count By Week') plt.ylabel('Number of Articles') plt.xlabel('date') locs, labels = plt.xticks() plt.setp(labels, rotation=-45); df['hour_published'] = df['date_published'].dt.hour df['hour_published'].hist() plt.ylabel('Number of Articles Published') plt.xlabel('Hour Published (24Hr)'); # Let's force the plot to split into 24 bins, one for each hour df['hour_published'].hist(bins=24) plt.ylabel('Number of Articles Published') plt.xlabel('Hour Published (24Hr)'); # Let's extract the relative frequency rather than the raw counts df['hour_published'].hist(bins=24, normed=True, alpha=0.75) plt.ylabel('Freq. of Articles Published') plt.xlabel('Hour Published (24Hr)'); # We can also grab this information without plotting it using .value_counts df['hour_published'].value_counts() df['hour_published'].value_counts(normalize=True) # Or we could leave them in the order of a day df['hour_published'].value_counts().sort_index() mask = ((df['date_published'] >= '2016-12-24 10:00:00') & (df['date_published'] <= '2016-12-24 14:00:00')) df.loc[mask, :] # Or we could reset or index and do it that way... 
df2 = df.set_index('date_published') df2.loc['2016-12-24 10:00:00': '2016-12-24 14:00:00', :] df['num_words'] = df['article_text'].map(lambda x: len(x.split())) df['num_words'].describe() df['num_words'].hist(bins=20, alpha=0.75) plt.ylabel('Number of Articles Published') plt.xlabel('Length of Article'); cutoff = np.percentile(df['num_words'], 98) df.loc[df['num_words'] <= cutoff, 'num_words'].hist(bins=20, alpha=0.75) plt.ylabel('Number of Articles Published') plt.xlabel('Length of Article'); df.loc[df['headline'].str.contains('Obama'), 'headline'].head() # Let's subset to just the 10 most popular sections top_sections = df['section'].value_counts()[:10].index df_sub = df.loc[df['section'].isin(top_sections), :] # We are now grouping by the section and extracting the mean hour that articles were published df_sub.groupby('section')['hour_published'].mean() <END_TASK>
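Note: the .map(Counter).sum() trick used above works because Counter objects support '+', so pandas can reduce a Series of Counters into one merged count. A standalone toy illustration (hypothetical author lists, not the article dataset):
from collections import Counter
import pandas as pd
s = pd.Series([['Ari Shapiro', 'Sam Sanders'], ['Ari Shapiro']])
author_counts = s.map(Counter).sum()    # Counters add element-wise, so .sum() merges them
print(author_counts.most_common())      # [('Ari Shapiro', 2), ('Sam Sanders', 1)]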
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Create Function To Visualize Classification Regions Step2: Generate Data Step3: Classify Using a Linear Kernel Step4: Classify Using a RBF Kernel Step5: Gamma = 1.0 Step6: Gamma = 10.0 Step7: Gamma = 100.0 Step8: C - The Penalty Parameter Step9: C = 10 Step10: C = 1000 Step11: C = 10000 Step12: C = 100000
<ASSISTANT_TASK:> Python Code: # Import packages to visualize the classifer from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt import warnings # Import packages to do the classifying import numpy as np from sklearn.svm import SVC def versiontuple(v): return tuple(map(int, (v.split(".")))) def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl) # highlight test samples if test_idx: # plot all samples if not versiontuple(np.__version__) >= versiontuple('1.9.0'): X_test, y_test = X[list(test_idx), :], y[list(test_idx)] warnings.warn('Please update to NumPy 1.9.0 or newer') else: X_test, y_test = X[test_idx, :], y[test_idx] plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidths=1, marker='o', s=55, label='test set') np.random.seed(0) X_xor = np.random.randn(200, 2) y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0) y_xor = np.where(y_xor, 1, -1) plt.scatter(X_xor[y_xor == 1, 0], X_xor[y_xor == 1, 1], c='b', marker='x', label='1') plt.scatter(X_xor[y_xor == -1, 0], X_xor[y_xor == -1, 1], c='r', marker='s', label='-1') plt.xlim([-3, 3]) plt.ylim([-3, 3]) plt.legend(loc='best') plt.tight_layout() plt.show() # Create a SVC classifier using a linear kernel svm = SVC(kernel='linear', C=1, random_state=0) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=1) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=1, C=1) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=10, C=1) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=100, C=1) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=1) # Train the classifier svm.fit(X_xor, y_xor) # 
Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=10) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=1000) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=10000) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() # Create a SVC classifier using an RBF kernel svm = SVC(kernel='rbf', random_state=0, gamma=.01, C=100000) # Train the classifier svm.fit(X_xor, y_xor) # Visualize the decision boundaries plot_decision_regions(X_xor, y_xor, classifier=svm) plt.legend(loc='upper left') plt.tight_layout() plt.show() <END_TASK>
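Note: the cells above sweep gamma and C by hand and judge the fit visually. A sketch of automating the same sweep with cross-validation (reuses the X_xor/y_xor arrays defined above; the grid values and 5-fold CV are assumptions, not part of the original notebook):
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
param_grid = {'gamma': [0.01, 0.1, 1, 10, 100], 'C': [1, 10, 100, 1000]}
search = GridSearchCV(SVC(kernel='rbf', random_state=0), param_grid, cv=5)
search.fit(X_xor, y_xor)                          # data generated in the cells above
print(search.best_params_, search.best_score_)    # gamma/C pair with the best CV accuracy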
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 1. Training a perceptron via scikit-learn Step2: 2. Training a logistic regression model with scikit-learn Step3: 3. Training a support vector machine (SVM) with scikit-learn Step4: Training a kernel SVM with scikit-learn Step5: Since we chose a relatively small value for gamma, the resulting decision boundary of the RBF kernel SVM model will be relatively soft. When we increase gamma from 0.2 to 100, we observe the effect on the decision boundary below. Although the model fits the training dataset very well, such a classifier is likely to overfit and have a high generalization error on unseen data. Step6: 4. Training a decision tree classifier with scikit-learn Step7: 5. Training a random forest with scikit-learn Step8: 6. Training a KNN with scikit-learn
<ASSISTANT_TASK:> Python Code: from sklearn import datasets import numpy as np iris = datasets.load_iris() X = iris.data[:, [2, 3]] y = iris.target print('Class labels:', np.unique(y)) # Splitting data into 70% training and 30% test data: from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=0) # Standardizing the features: from sklearn.preprocessing import StandardScaler sc = StandardScaler() sc.fit(X_train) X_train_std = sc.transform(X_train) X_test_std = sc.transform(X_test) from sklearn.linear_model import Perceptron ppn = Perceptron(n_iter=40, eta0=0.1, random_state=0) ppn.fit(X_train_std, y_train) y_pred = ppn.predict(X_test_std) print('Misclassified sample: %d' % (y_test != y_pred).sum()) from sklearn.metrics import accuracy_score print('Accuracy: %.2f' % accuracy_score(y_test, y_pred)) from matplotlib.colors import ListedColormap import matplotlib.pyplot as plt def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl) # highlight test samples if test_idx: # plot all samples X_test, y_test = X[test_idx, :], y[test_idx] plt.scatter(X_test[:, 0], X_test[:, 1], c='', alpha=1.0, linewidths=1, marker='o', s=55, label='test set') X_combined_std = np.vstack((X_train_std, X_test_std)) y_combined = np.hstack((y_train, y_test)) plot_decision_regions(X_combined_std, y_combined, classifier=ppn, test_idx=range(105, 150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() from sklearn.linear_model import LogisticRegression lr = LogisticRegression(C=1000, random_state=0) lr.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=lr, test_idx=range(105,150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() lr.predict_proba(X_test_std[0, :].reshape(1,-1)) from sklearn.svm import SVC svm = SVC(kernel='linear', C=1.0, random_state=0) svm.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() svm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0) svm.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() svm = SVC(kernel='rbf', random_state=0, gamma=100.0, C=1.0) svm.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105, 150)) plt.xlabel('petal length 
[standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() from sklearn.tree import DecisionTreeClassifier tree = DecisionTreeClassifier(criterion='entropy', max_depth=3, random_state=0) tree.fit(X_train, y_train) X_combined = np.vstack((X_train, X_test)) y_combined = np.hstack((y_train, y_test)) plot_decision_regions(X_combined, y_combined, classifier=tree, test_idx=range(105, 150)) plt.xlabel('petal length [cm]') plt.ylabel('petal width [cm]') plt.legend(loc='upper left') plt.show() from sklearn.ensemble import RandomForestClassifier forest = RandomForestClassifier(criterion='entropy', n_estimators=10, random_state=1, n_jobs=2) forest.fit(X_train, y_train) plot_decision_regions(X_combined, y_combined, classifier=forest, test_idx=range(105, 150)) plt.xlabel('petal length [cm]') plt.ylabel('petal width [cm]') plt.legend(loc='upper left') plt.show() from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski') knn.fit(X_train_std, y_train) plot_decision_regions(X_combined_std, y_combined, classifier=knn, test_idx=range(105, 150)) plt.xlabel('petal length [standardized]') plt.ylabel('petal width [standardized]') plt.legend(loc='upper left') plt.show() <END_TASK>
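Note: each classifier above is assessed only by its decision-region plot. A small sketch comparing their test accuracy in one pass (assumes the fitted objects from the cells above, each holding whatever the last fit left in it; the tree and forest were fit on unstandardized features):
from sklearn.metrics import accuracy_score
fitted = {'perceptron': (ppn, X_test_std), 'logistic regression': (lr, X_test_std),
          'SVM': (svm, X_test_std), 'decision tree': (tree, X_test),
          'random forest': (forest, X_test), 'kNN': (knn, X_test_std)}
for name, (clf, X_eval) in fitted.items():
    print('%s: %.3f' % (name, accuracy_score(y_test, clf.predict(X_eval))))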
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup Authorization
Step3: Unique PR Creators
Step4: Number of PRs
Step6: Release stats per release (quarter)
Step8: Get a list of distinct actions
Step10: New Issues Last 28 Days
Step12: Get Some Sample Issue Events
Step14: Get some sample pull request events
Step16: Get Distinct Types
<ASSISTANT_TASK:> Python Code: import os import subprocess if os.path.exists("/var/run/secrets/kubernetes.io/serviceaccount"): subprocess.check_call(["pip", "install", "--user", "-r", "requirements.txt"], stderr=subprocess.STDOUT, bufsize=1) # NOTE: The RuntimeWarnings (if any) are harmless. See ContinuumIO/anaconda-issues#6678. import altair as alt from pandas.io import gbq import pandas as pd import numpy as np from importlib import reload import itertools import getpass import subprocess # Configuration Variables. Modify as desired. PROJECT = subprocess.check_output(["gcloud", "config", "get-value", "project"]).strip().decode() #matplotlib import datetime month = datetime.datetime.now().month year = datetime.datetime.now().year num_months = 12 months = [] for i in range(num_months): months.append("\"{0}{1:02}\"".format(year, month)) month -= 1 if month == 0: month = 12 year -=1 query = SELECT DATE(created_at) AS pr_date, actor.id, actor.login, JSON_EXTRACT(payload, '$.pull_request.user.id') as user_id, JSON_EXTRACT(payload, '$.pull_request.id') as pr_id, JSON_EXTRACT(payload, '$.pull_request.merged') as merged FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND type = 'PullRequestEvent' AND org.login = 'kubeflow' AND JSON_EXTRACT(payload, '$.action') IN ('"closed"') .format(",".join(months)) all_prs=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) # Filter PRs to merged PRs v=all_prs["merged"].values == 'true' merged_all_prs = all_prs.iloc[v] p=pd.Series(data=merged_all_prs["user_id"].values,index=merged_all_prs["pr_date"]) p=p.sort_index() # Some solutions here: https://stackoverflow.com/questions/46470743/how-to-efficiently-compute-a-rolling-unique-count-in-a-pandas-time-series # Need to figure out how to do a time based window # TODO(jlewi): Is there a bug in the rolling window computation? creators ends up having the same number # of rows as p; so we end up with multiple datapoints for each day; but the values aren't the same for # each day. What is causing this effect? creators = p.rolling('28d').apply(lambda arr: pd.Series(arr).nunique()) # We need to group the days. 
Rolling window will create a point for each data point creators_df = pd.DataFrame({"day": creators.index, "num_authors": creators.values}) creators_df = creators_df.groupby("day", as_index=False).max() import altair as alt chart = alt.Chart(creators_df, title= "Unique PR Authors (Last 28 Days)") line = chart.mark_line().encode( x= alt.X('day', title = "Day"), y=alt.Y("num_authors", title="# Unique Authors"), ) point = line + line.mark_point() point.interactive() pr_impulse=pd.Series(data=merged_all_prs["pr_id"].values,index=merged_all_prs["pr_date"]) pr_impulse=pr_impulse.sort_index() unique_prs = pr_impulse.rolling('28d').apply(lambda arr: pd.Series(arr).nunique()) prs_df = pd.DataFrame({"day": unique_prs.index, "num_prs": unique_prs.values}) prs_df = prs_df.groupby("day", as_index=False).max() chart = alt.Chart(prs_df, title= "Merged PRs (Last 28 Days)") line = chart.mark_line().encode( x= alt.X('day', title = "Day"), y=alt.Y("num_prs", title="# PRs"), ) point = line + line.mark_point() point.interactive() release_months = [] year = 2019 for month in range(8, 11): release_months.append("\"{0}{1:02}\"".format(year, month)) query = SELECT DATE(created_at) AS pr_date, actor.id, actor.login, JSON_EXTRACT(payload, '$.pull_request.merged') as merged, JSON_EXTRACT(payload, '$.pull_request.id') as pr_id, JSON_EXTRACT(payload, '$.pull_request.url') as pr_url, JSON_EXTRACT(payload, '$.pull_request.user.id') as user_id FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND type = 'PullRequestEvent' AND org.login = 'kubeflow' AND JSON_EXTRACT(payload, '$.action') IN ('"closed"') .format(",".join(release_months)) prs=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) # Filter PRs to merged PRs v=prs["merged"].values == 'true' merged_prs = prs.iloc[v] unique_pr_logins = prs["user_id"].unique() unique_prs = prs["pr_id"].unique() merged_unique_logins = merged_prs["user_id"].unique() merged_unique_prs = merged_prs["pr_id"].unique() print("Number of unique pr authors (merged & unmerged) {0}".format(unique_pr_logins.shape)) print("Number of unique prs (merged & unmerged) {0}".format(unique_prs.shape)) print("Number of unique pr authors (merged) {0}".format(merged_unique_logins.shape)) print("Number of unique prs (merged) {0}".format(merged_unique_prs.shape)) query = SELECT distinct JSON_EXTRACT(payload, '$.action') FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) .format(",".join(months)) actions=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) actions query = SELECT DATE(created_at) AS issue_date, actor.id, actor.login, JSON_EXTRACT(payload, '$.pull_request.id') as issue_id, JSON_EXTRACT(payload, '$.pull_request.url') as issue_url FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND type = 'IssuesEvent' AND org.login = 'kubeflow' AND JSON_EXTRACT(payload, '$.action') IN ('"opened"') .format(",".join(months)) issues=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) issue_counts=issues["issue_date"].value_counts() issue_counts=issue_counts.sort_index() rolling_issue_count = issue_counts.rolling('28d').sum() issues_df = pd.DataFrame({"day": rolling_issue_count.index, "num_issues": rolling_issue_count.values}) issues_df = issues_df.groupby("day", as_index=False).max() # Truncate the first 28 days because it will be a windowing effect. 
chart = alt.Chart(issues_df[28:], title= "New Issues (Last 28 Days)") line = chart.mark_line().encode( x= alt.X('day', title = "Day"), y=alt.Y("num_issues", title="# issues"), ) point = line + line.mark_point() point.interactive() import matplotlib from matplotlib import pylab matplotlib.rcParams.update({'font.size': 22}) hf = pylab.figure() hf.set_size_inches(18.5, 10.5) pylab.plot(rolling_issue_count, linewidth=5) ha = pylab.gca() ha.set_title("New Kubeflow Issues (28 Days)") ha.set_xlabel("Date") ha.set_ylabel("# Of Issues") query = SELECT * FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND type = 'IssuesEvent' AND org.login = 'kubeflow' limit 20 .format(",".join(months)) events=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) events query = SELECT * FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND type = 'PullRequestEvent' AND org.login = 'kubeflow' limit 20 .format(",".join(months)) events=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) import pprint import json data = json.loads(events["payload"].values[3]) pprint.pprint(data) data["pull_request"]["id"] query = SELECT distinct type FROM `githubarchive.month.*` WHERE _TABLE_SUFFIX IN ({0}) AND org.login = 'kubeflow' limit 20 .format(",".join(months)) events=gbq.read_gbq(str(query), dialect='standard', project_id=PROJECT) events <END_TASK>
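Note: the rolling-unique pattern above (rolling('28d').apply(nunique) followed by a per-day groupby max) is subtle, since the rolling window emits one value per observation rather than per day. A standalone toy illustration (hypothetical author ids, not GitHub data):
import pandas as pd
dates = pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-02', '2019-02-15'])
s = pd.Series([10.0, 10.0, 11.0, 10.0], index=dates)            # hypothetical user ids
uniq = s.rolling('28d').apply(lambda arr: pd.Series(arr).nunique())
print(uniq.groupby(uniq.index).max())                           # collapse duplicate days, as the notebook does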
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use the following function to visualize data
Step2: <a id="ref1"></a>
Step3: Load the testing dataset by setting the parameters train <code>False</code> and convert it to a tensor by placing a transform object in the argument <code>transform</code>.
Step4: Note that the data type is long
Step5: Data Visualization
Step6: Plot the 3rd sample
Step7: You see it's a 1. Now, plot the second sample
Step8: The Softmax function requires vector inputs. If you see the vector shape, you'll note it's 28x28.
Step9: Flatten the tensor as shown in this image
Step10: Double-click here for the solution.
Step11: Convert the model parameters for each class into a rectangular grid
Step12: Optimizer class
Step13: Define the dataset loader
Step14: Train the model and determine validation accuracy
Step15: <a id="ref3"></a>
Step16: Plot the first five misclassified samples
<ASSISTANT_TASK:> Python Code: !conda install -y torchvision import torch import torch.nn as nn import torchvision.transforms as transforms import torchvision.datasets as dsets import matplotlib.pylab as plt import numpy as np def show_data(data_sample): plt.imshow(data_sample[0].numpy().reshape(28,28),cmap='gray') #print(data_sample[1].item()) plt.title('y= '+ str(data_sample[1].item())) train_dataset=dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor()) train_dataset validation_dataset=dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor()) validation_dataset train_dataset[0][1].type() train_dataset[3][1] show_data(train_dataset[3]) show_data(train_dataset[2]) train_dataset[0][0].shape input_dim=28*28 output_dim=10 input_dim print('W:',list(model.parameters())[0].size()) print('b',list(model.parameters())[1].size()) criterion=nn.CrossEntropyLoss() learning_rate=0.1 optimizer=torch.optim.SGD(model.parameters(), lr=learning_rate) train_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=100) validation_loader=torch.utils.data.DataLoader(dataset=validation_dataset,batch_size=5000) n_epochs=10 loss_list=[] accuracy_list=[] N_test=len(validation_dataset) #n_epochs for epoch in range(n_epochs): for x, y in train_loader: #clear gradient optimizer.zero_grad() #make a prediction z=model(x.view(-1,28*28)) # calculate loss loss=criterion(z,y) # calculate gradients of parameters loss.backward() # update parameters optimizer.step() correct=0 #perform a prediction on the validation data for x_test, y_test in validation_loader: z=model(x_test.view(-1,28*28)) _,yhat=torch.max(z.data,1) correct+=(yhat==y_test).sum().item() accuracy=correct/N_test accuracy_list.append(accuracy) loss_list.append(loss.data) accuracy_list.append(accuracy) fig, ax1 = plt.subplots() color = 'tab:red' ax1.plot(loss_list,color=color) ax1.set_xlabel('epoch',color=color) ax1.set_ylabel('total loss',color=color) ax1.tick_params(axis='y', color=color) ax2 = ax1.twinx() color = 'tab:blue' ax2.set_ylabel('accuracy', color=color) ax2.plot( accuracy_list, color=color) ax2.tick_params(axis='y', labelcolor=color) fig.tight_layout() count=0 for x,y in validation_dataset: z=model(x.reshape(-1,28*28)) _,yhat=torch.max(z,1) if yhat!=y: show_data((x,y)) plt.show() print("yhat:",yhat) count+=1 if count>=5: break <END_TASK>
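Note: the code above calls model(...) but the cell that defines model is missing — it was the hidden cell behind "Double-click here for the solution" in the original notebook. A minimal definition consistent with the surrounding code (the printed W shape of [10, 784] matches this) would be something like:
import torch.nn as nn
model = nn.Linear(input_dim, output_dim)   # one linear layer; nn.CrossEntropyLoss applies log-softmax itself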
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 2. free wifi Step2: DataFrame Step3: <br/><br>
<ASSISTANT_TASK:> Python Code: m n # m grid_plot.sort_values(ascending=False, by='uni').head() # n freeGrid2.sort_values(ascending=False, by='median level').head() check1 = grid_bssid.dropna(subset=['unique_bssid_list']).sort_values(ascending=False, by='uni').head().iloc[0,3] len(check1) <END_TASK>
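Note: the .iloc[0,3] lookup above selects row 0 and column 3 purely by position. If column 3 is indeed unique_bssid_list (as the dropna subset suggests), a less fragile by-name equivalent, assuming the grid_bssid frame from the preceding cells, is:
top = grid_bssid.dropna(subset=['unique_bssid_list']).sort_values(by='uni', ascending=False)
check1 = top.iloc[0]['unique_bssid_list']   # same cell as .head().iloc[0, 3], selected by name
print(len(check1))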
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's start by examining the current state of the dataset. source_sentences contains the entire input sequence file as text delimited by newline symbols.
Step2: target_sentences contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line from source_sentences. target_sentences contains the sorted characters of the line.
Step3: Preprocess
Step4: The last step in the preprocessing stage is to determine the longest sequence size in the dataset we'll be using, then pad all the sequences to that length.
Step5: This is the final shape we need them to be in. We can now proceed to building the model.
Step6: Hyperparameters
Step7: Input
Step8: Sequence to Sequence
Step9: Process Decoding Input
Step10: Decoding
Step11: Decoder During Training
Step12: Decoder During Inference
Step13: Optimization
Step14: Train
Step15: Prediction
<ASSISTANT_TASK:> Python Code: import helper source_path = 'data/letters_source.txt' target_path = 'data/letters_target.txt' source_sentences = helper.load_data(source_path) target_sentences = helper.load_data(target_path) source_sentences[:50].split('\n') target_sentences[:50].split('\n') def extract_character_vocab(data): special_words = ['<pad>', '<unk>', '<s>', '<\s>'] set_words = set([character for line in data.split('\n') for character in line]) int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))} vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()} return int_to_vocab, vocab_to_int # Build int2letter and letter2int dicts source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences) target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences) # Convert characters to ids source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<unk>']) for letter in line] for line in source_sentences.split('\n')] target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<unk>']) for letter in line] for line in target_sentences.split('\n')] print("Example source sequence") print(source_letter_ids[:3]) print("\n") print("Example target sequence") print(target_letter_ids[:3]) def pad_id_sequences(source_ids, source_letter_to_int, target_ids, target_letter_to_int, sequence_length): new_source_ids = [sentence + [source_letter_to_int['<pad>']] * (sequence_length - len(sentence)) \ for sentence in source_ids] new_target_ids = [sentence + [target_letter_to_int['<pad>']] * (sequence_length - len(sentence)) \ for sentence in target_ids] return new_source_ids, new_target_ids # Use the longest sequence as sequence length sequence_length = max( [len(sentence) for sentence in source_letter_ids] + [len(sentence) for sentence in target_letter_ids]) # Pad all sequences up to sequence length source_ids, target_ids = pad_id_sequences(source_letter_ids, source_letter_to_int, target_letter_ids, target_letter_to_int, sequence_length) print("Sequence Length") print(sequence_length) print("\n") print("Input sequence example") print(source_ids[:3]) print("\n") print("Target sequence example") print(target_ids[:3]) from distutils.version import LooseVersion import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Number of Epochs epochs = 60 # Batch Size batch_size = 128 # RNN Size rnn_size = 50 # Number of Layers num_layers = 2 # Embedding Size encoding_embedding_size = 13 decoding_embedding_size = 13 # Learning Rate learning_rate = 0.001 input_data = tf.placeholder(tf.int32, [batch_size, sequence_length]) targets = tf.placeholder(tf.int32, [batch_size, sequence_length]) lr = tf.placeholder(tf.float32) source_vocab_size = len(source_letter_to_int) # Encoder embedding enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size) # Encoder enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers) _, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, dtype=tf.float32) import numpy as np # Process the input we'll feed to the decoder ending = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1]) dec_input = tf.concat([tf.fill([batch_size, 1], target_letter_to_int['<s>']), ending], 1) demonstration_outputs = np.reshape(range(batch_size * 
sequence_length), (batch_size, sequence_length)) sess = tf.InteractiveSession() print("Targets") print(demonstration_outputs[:2]) print("\n") print("Processed Decoding Input") print(sess.run(dec_input, {targets: demonstration_outputs})[:2]) target_vocab_size = len(target_letter_to_int) # Decoder Embedding dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size])) dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input) # Decoder RNNs dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers) with tf.variable_scope("decoding") as decoding_scope: # Output Layer output_fn = lambda x: tf.contrib.layers.fully_connected(x, target_vocab_size, None, scope=decoding_scope) with tf.variable_scope("decoding") as decoding_scope: # Training Decoder train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(enc_state) train_pred, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder( dec_cell, train_decoder_fn, dec_embed_input, sequence_length, scope=decoding_scope) # Apply output function train_logits = output_fn(train_pred) with tf.variable_scope("decoding", reuse=True) as decoding_scope: # Inference Decoder infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference( output_fn, enc_state, dec_embeddings, target_letter_to_int['<s>'], target_letter_to_int['<\s>'], sequence_length - 1, target_vocab_size) inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope) # Loss function cost = tf.contrib.seq2seq.sequence_loss( train_logits, targets, tf.ones([batch_size, sequence_length])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None] train_op = optimizer.apply_gradients(capped_gradients) import numpy as np train_source = source_ids[batch_size:] train_target = target_ids[batch_size:] valid_source = source_ids[:batch_size] valid_target = target_ids[:batch_size] sess.run(tf.global_variables_initializer()) for epoch_i in range(epochs): for batch_i, (source_batch, target_batch) in enumerate( helper.batch_data(train_source, train_target, batch_size)): _, loss = sess.run( [train_op, cost], {input_data: source_batch, targets: target_batch, lr: learning_rate}) batch_train_logits = sess.run( inference_logits, {input_data: source_batch}) batch_valid_logits = sess.run( inference_logits, {input_data: valid_source}) train_acc = np.mean(np.equal(target_batch, np.argmax(batch_train_logits, 2))) valid_acc = np.mean(np.equal(valid_target, np.argmax(batch_valid_logits, 2))) print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}' .format(epoch_i, batch_i, len(source_ids) // batch_size, train_acc, valid_acc, loss)) input_sentence = 'hello' input_sentence = [source_letter_to_int.get(word, source_letter_to_int['<unk>']) for word in input_sentence.lower()] input_sentence = input_sentence + [0] * (sequence_length - len(input_sentence)) batch_shell = np.zeros((batch_size, sequence_length)) batch_shell[0] = input_sentence chatbot_logits = sess.run(inference_logits, {input_data: batch_shell})[0] print('Input') print(' Word Ids: {}'.format([i for i in input_sentence])) print(' Input Words: {}'.format([source_int_to_letter[i] for i in input_sentence])) print('\nPrediction') print(' Word Ids: {}'.format([i for i in np.argmax(chatbot_logits, 1)])) print(' Chatbot Answer Words: 
{}'.format([target_int_to_letter[i] for i in np.argmax(chatbot_logits, 1)])) <END_TASK>
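Note: a quick standalone check of the pad_id_sequences helper defined above (toy ids; a pad id of 0 is assumed for both vocabularies):
src = [[5, 6], [7]]
tgt = [[6, 5], [7]]
padded_src, padded_tgt = pad_id_sequences(src, {'<pad>': 0}, tgt, {'<pad>': 0}, 4)
print(padded_src)   # [[5, 6, 0, 0], [7, 0, 0, 0]]
print(padded_tgt)   # [[6, 5, 0, 0], [7, 0, 0, 0]]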
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: How many unique species have occurrence records with latitude/longitude? Step2: Best to take into account all observations which have either "year" or "eventdate" present. (or both) Let's group them by species name, and count the number of observation records. Step3: How many unique species HAVE records with latitude/longitude, AND date of event (at least year) Step4: How many unique species with latitude/longitude, AND event date after 1990? Step5: I guess we should keep only observations of type 'OBSERVATION', 'MACHINE_OBSERVATION' and 'HUMAN_OBSERVATION'? Step6: 2. Create a bias grid at a resolution of 5arcmin (for the moment it's just counts of observations per pixel) Step7: Normalize using log10? Step8: Standardize? Step9: Min-Max scaling? Step10: numpy array memory map Step11: Can read any tif file as memory map??
<ASSISTANT_TASK:> Python Code: result_with_lat_long = result_with_lat_long[result_with_lat_long.decimallatitude.notnull() & result_with_lat_long.decimallongitude.notnull()] result_with_lat_long['species'].unique().size grouped_lat_long_year_or_eventdate = pd.DataFrame() grouped_lat_long_year_or_eventdate['count'] = result_with_lat_long[result_with_lat_long.eventdate.notnull() | result_with_lat_long.year.notnull()].groupby(['species']).apply(lambda x: x['species'].count()) grouped_lat_long_year_or_eventdate.head(10) # peak at the top 10 only result_with_lat_long['species'].unique().size year_or_eventdate_1990 = result_with_lat_long[['species', 'year', 'eventdate', 'basisofrecord', 'decimallatitude', 'decimallongitude']][(result_with_lat_long.year>1990) | (result_with_lat_long.eventdate>"1990")] grouped_year_or_eventdate_1990 = pd.DataFrame() grouped_year_or_eventdate_1990['numobservations'] = year_or_eventdate_1990.groupby(['species']).apply(lambda x: x['species'].count()) grouped_year_or_eventdate_1990.shape[0] year_or_eventdate_1990.basisofrecord.unique() final_selection = year_or_eventdate_1990[(year_or_eventdate_1990.basisofrecord=='OBSERVATION') | (year_or_eventdate_1990.basisofrecord=='HUMAN_OBSERVATION') | (year_or_eventdate_1990.basisofrecord=='MACHINE_OBSERVATION')] final_selection.species.unique().shape final_selection from iSDM.species import GBIFSpecies all_species = GBIFSpecies(name_species='All') all_species.set_data(final_selection) all_species.get_data().species.unique().shape # these many different species all_species.get_data().shape[0] # 1939675? this many observations satisfying our criteria (after 1990, with the correct observation type) year_or_eventdate_1990.shape[0] # total number, before filtering out observations that match our criteria all_species.geometrize() all_species.get_data().species.unique().shape final_observations = all_species.get_data()[['species', 'year','eventdate', 'basisofrecord','geometry']] final_observations.to_file("../data/bias_grid/final_observations", driver="ESRI Shapefile") import gc gc.collect() from geopandas import GeoDataFrame final_observations = GeoDataFrame.from_file("../data/bias_grid/final_observations/") final_observations.head() x_min, y_min, x_max, y_max = -180, -90, 180, 90 pixel_size = 0.0083333333 # changed from 0.083333333 to 30arcsec x_res = int((x_max - x_min) / pixel_size) y_res = int((y_max - y_min) / pixel_size) bias_grid=np.zeros(shape=(y_res, x_res)).astype('int32') def increase_pixel_value(row): bias_grid[np.abs(int((row.y - 90) / pixel_size)), np.abs(int((row.x + 180) / pixel_size))]+=1 here = final_observations.geometry.apply(lambda row: increase_pixel_value(row)) bias_grid.max() bias_grid.std() bias_grid.sum() # is the sum of the bias grid equal to the total number of observations? bias_grid.sum() == final_observations.shape[0] import gc gc.collect() # bias_grid_plus_1 = bias_grid + 1 bias_grid_log = np.log10(bias_grid + 1) # bias_grid_log[np.isneginf(bias_grid_log)] = 0 # is this a good idea, setting to 0? 
log10(0) = infinity otherwise bias_grid_log.max() bias_grid_log.min() bias_grid_log.std() bias_grid_standardized = (bias_grid - bias_grid.mean()) / bias_grid.std() bias_grid_standardized.max() bias_grid_standardized.min() bias_grid_standardized.std() bias_grid_minmax_scale = (bias_grid - bias_grid.min()) / (bias_grid.max() - bias_grid.min()) bias_grid_minmax_scale.max() bias_grid_minmax_scale.min() bias_grid_minmax_scale.std() import matplotlib.pyplot as plt plt.figure(figsize=(25,20)) plt.imshow(bias_grid_log, cmap="hot", interpolation="none") import pickle pickle.dump(bias_grid, open("../data/bias_grid/bias_grid_30arcsec.pkl", "wb")) np.count_nonzero(bias_grid) np.product(bias_grid.shape) 41881/9331200 bias_grid.shape import rasterio from rasterio.transform import Affine x_min, y_min, x_max, y_max = -180, -90, 180, 90 pixel_size = 0.083333333 x_res = int((x_max - x_min) / pixel_size) y_res = int((y_max - y_min) / pixel_size) crs = {'init': "EPSG:4326"} transform = Affine.translation(x_min, y_max) * Affine.scale(pixel_size, -pixel_size) with rasterio.open("../data/bias_grid/bias_grid_minmax_scale.tif", 'w', driver='GTiff', width=x_res, height=y_res, count=1, dtype=np.uint16, nodata=0, transform=transform, crs=crs) as out: out.write(bias_grid_minmax_scale.astype(np.uint16), indexes=1) out.close() pixel_size bias_grid_minmax_scale.std() bias_grid_log.shape import pickle pickle.dump(bias_grid_log, open("../data/bias_grid/bias_grid_log_30arcsec.pkl", "wb"), protocol=4) pickle.dump(bias_grid_standardized, open("../data/bias_grid/bias_grid_standardized.pkl", "wb")) pickle.dump(bias_grid_minmax_scale, open("../data/bias_grid/bias_grid_minmax_scale.pkl", "wb")) # bias_grid_mm=np.zeros(shape=(y_res, x_res)).astype('int32') bias_grid_mm = np.memmap("../data/bias_grid/bias_grid_mm.dat", dtype='int32', mode='w+', shape=(y_res,x_res)) def increase_pixel_value(row): bias_grid_mm[np.abs(int((row.y - 90) / pixel_size)), np.abs(int((row.x + 180) / pixel_size))]+=1 here = final_observations.geometry.apply(lambda row: increase_pixel_value(row)) bias_grid_mm.flush() bias_grid_mm.max() bias_grid_mm.std() del bias_grid_mm gc.collect() fpr = np.memmap("../data/bias_grid/bias_grid_mm.dat", dtype='int32', mode='r', shape=(y_res, x_res)) fpr.max() fpr.flags.writeable anything = np.memmap("/home/daniela/git/iSDM/data/GLWD/downscaled/original_corrected.tif", dtype='uint8', mode='r', shape=(y_res,x_res)) np.unique(anything) type(anything) gc.collect() del anything some_data = np.memmap("../data/bias_grid/some_data.tif", dtype='float64', mode='w+', shape=(y_res,x_res)) type(some_data) some_data.flags.writeable isinstance(some_data, np.ndarray) some_data.shape some_data[:,:] = 1 some_data._mmap some_data <END_TASK>
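Note: the lon/lat-to-pixel arithmetic above appears inline in several places. A sketch factoring it into one helper (this mirrors the notebook's own formula at the 30-arcsec resolution, including its int-truncation behaviour, rather than a general-purpose geotransform):
def to_pixel(lon, lat, pixel_size=0.0083333333):
    # row 0 is the northern edge (lat 90), column 0 the western edge (lon -180)
    row = abs(int((lat - 90) / pixel_size))
    col = abs(int((lon + 180) / pixel_size))
    return row, col
print(to_pixel(0.0, 0.0))   # roughly the centre of the 21600 x 43200 grid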
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2. Check sites against John's spreadsheet
Step2: This looks promising. With a bit of luck, I can just move the four sites listed above from the EXCLUDED project back into ICPW_TOCTRENDS_2015_US_LTM and everything will be OK...
Step3: So, there are four stations (rows 60, 69, 70 and 71 in the table above) currently included in the ICPWaters US project that should be removed. There are also four stations (rows 95 to 98 above) that are present in John's spreadsheet but not associated with the project. Of these, two of them (Middle Pond and Tunk) are already in the database but associated with the ICPW_TOCTRENDS_2015_US_LTM project only, while one of them (Sucker) has been shifted to the ICPW_TOCTRENDS_2015_US_LTM_EXCLUDED project. The remaining site (Barnes Lake) does not appear anywhere in the database as far as I can tell.
Step4: 1.3. The Mystery of Mud Pond
Step5: So, the incorrect site has been used in 16 different projects, most (all?) of which are linked to ICP Waters in some way. The only project using the correct site is ICPW_TOCTRENDS_2015_US_LTM. Before I can delete the incorrect site, I need to switch all these other projects to use station_id=37063 instead.
Step6: It looks as though there is at least some overlap here, but it's not easy to see what's going on without some plots. Let's pick a commonly measured parameter ($Cl^-$ in $mg/l$) and plot the values for each site to see whether they're actually the same.
Step7: It's pretty clear that this is the same data series (although something weird happens in 1996 - use the pan and zoom tools to see this). However, there appears to be no consistent pattern regarding which samples have been assigned to which site. The 1996 discrepancy seems pretty minor, so the best way to fix this is probably as follows
Step8: Now plot the results for chloride again to make sure it's worked.
Step9: This looks OK, so I can now delete all the remaining records associated with station_id=23709...
Step10: Note the Unicode warning here, highlighting that some matches on the name column have failed due to issues with special text characters, so the results are probably a worst case scenario. Nevertheless, some of the sites match exactly. Let's see how many matches and partial matches we have.
Step11: The script above uses RESA2 codes taken from John's spreadsheet. For the most part these are the old codes (e.g. US26) associated with the original ICPWaters US project (rather than the more recent codes, such as X15
Step12: This approach manages to match most (but not necessarily all) of the US sites using the X15
<ASSISTANT_TASK:> Python Code: # Import custom functions and connect to db resa2_basic_path = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Upload_Template' r'\useful_resa2_code.py') resa2_basic = imp.load_source('useful_resa2_code', resa2_basic_path) engine, conn = resa2_basic.connect_to_resa2() # Check numbers of sites proj_codes = [1679, # ICPWaters US 3870, # ICPW_TOCTRENDS_2015_US_LTM 4150] # ICPW_TOCTRENDS_2015_US_LTM_EXCLUDED for code in proj_codes: # Read stations list sql = ('SELECT * ' 'FROM resa2.projects_stations ' 'WHERE project_id = %s' % code) df = pd.read_sql_query(sql, engine) print code, len(df) # Read John's spreadsheet in_xls = (r'C:\Data\James_Work\Staff\Heleen_d_W\ICP_Waters\Call_for_Data_2016' '\Replies\usa\U.S.Site.Reconciliation.August.2016.xlsx') j_df = pd.read_excel(in_xls, sheetname='Sheet1') print 'John, core ICPW sites: ', len(j_df[j_df['INCLUDE IN ICP WATERS DATABASE']=='YES']) print 'John, DOC trends sites:', len(j_df[j_df['INCLUDE IN ICP DOC ANALYSIS']=='YES']) # Get the 95 'ICPWaters US' sites from RESA2 sql = ('SELECT * FROM resa2.stations ' 'WHERE station_id IN (SELECT station_id ' 'FROM resa2.projects_stations ' 'WHERE project_id = 1679)') core_df = pd.read_sql_query(sql, engine) # Outer join based on RESA2 station code df = core_df.merge(j_df[j_df['INCLUDE IN ICP WATERS DATABASE']=='YES'], how='outer', left_on='station_code', right_on='Station Code') # Cols of interest cols = ['station_id', 'station_code', 'station_name', 'latitude', 'longitude', 'Station Code', 'NFC_SITEID', 'NFC_SITENAME', 'Latitude', 'Longitude'] df = df[cols] # Get entries where no match was found df[pd.isnull(df['station_code']) | pd.isnull(df['Station Code'])] # Check numbers of sites proj_codes = [1679, # ICPWaters US 3870] # ICPW_TOCTRENDS_2015_US_LTM for code in proj_codes: # Read stations list sql = ('SELECT * ' 'FROM resa2.projects_stations ' 'WHERE project_id = %s' % code) df = pd.read_sql_query(sql, engine) print code, len(df) # Find projects using the different versions of Mud Pond for site_id in [23709, 37063]: sql = ('SELECT project_id, project_name ' 'FROM resa2.projects ' 'WHERE project_id IN (SELECT project_id ' 'FROM resa2.projects_stations ' 'WHERE station_id = %s)' % site_id) df = pd.read_sql_query(sql, engine) print 'Projects using station_id %s:\n' % site_id print df print '\n' # Read sample info for incorrect site sql = ('SELECT water_sample_id, station_id, sample_date ' 'FROM resa2.water_samples ' 'WHERE station_id = 23709') df_23709 = pd.read_sql_query(sql, engine) # Read sample info for correct site sql = ('SELECT water_sample_id, station_id, sample_date ' 'FROM resa2.water_samples ' 'WHERE station_id = 37063') df_37063 = pd.read_sql_query(sql, engine) # Outer join df = df_23709.merge(df_37063, how='outer', on='sample_date', suffixes=['_23709', '_37063']) print 'Number of samples associated with site 23709:', len(df_23709) print 'Number of samples associated with site 37063:', len(df_37063) print 'Number of samples with common dates: ', len(df.dropna(how='any')) # Site 23709 sql = ('SELECT sample_id, value ' 'FROM resa2.water_chemistry_values2 ' 'WHERE method_id = 10253 ' 'AND sample_id IN (SELECT water_sample_id ' 'FROM resa2.water_samples ' 'WHERE station_id = 23709)') df_23709 = pd.read_sql_query(sql, engine) # Join dates df_23709 = df_23709.merge(df, how='left', left_on='sample_id', right_on='water_sample_id_23709') # Tidy df df_23709.sort_values(by='sample_date', inplace=True, ascending='True') df_23709.index = df_23709['sample_date'] df_23709 = 
df_23709[['value']] df_23709.columns = ['site_23709'] # Site 37063 sql = ('SELECT sample_id, value ' 'FROM resa2.water_chemistry_values2 ' 'WHERE method_id = 10253 ' 'AND sample_id IN (SELECT water_sample_id ' 'FROM resa2.water_samples ' 'WHERE station_id = 37063)') df_37063 = pd.read_sql_query(sql, engine) # Join dates df_37063 = df_37063.merge(df, how='left', left_on='sample_id', right_on='water_sample_id_37063') # Tidy df df_37063.sort_values(by='sample_date', inplace=True, ascending='True') df_37063.index = df_37063['sample_date'] df_37063 = df_37063[['value']] df_37063.columns = ['site_37063'] # Merge merged_df = df_23709.merge(df_37063, how='outer', left_index=True, right_index=True) print 'Total number of samples in merged series:', len(merged_df) # Plot merged_df.plot(figsize=(12, 8)) plt.title('Chloride concentration in mg/l at two Mud Ponds', fontsize=20) plt.tight_layout() mpld3.display() # Identify samples associated ONLY with site 23709 trans_df = df[pd.isnull(df['station_id_37063'])] # These need transferring to site 37063 sql = ('UPDATE resa2.water_samples ' 'SET station_id = 37063 ' 'WHERE water_sample_id IN %s' % str(tuple(trans_df['water_sample_id_23709'].map(int)))) result = conn.execute(sql) # Site 23709 # Get dates sql = ('SELECT water_sample_id, sample_date ' 'FROM resa2.water_samples ' 'WHERE station_id = 23709') dates_23709 = pd.read_sql_query(sql, engine) sql = ('SELECT sample_id, value ' 'FROM resa2.water_chemistry_values2 ' 'WHERE method_id = 10253 ' 'AND sample_id IN (SELECT water_sample_id ' 'FROM resa2.water_samples ' 'WHERE station_id = 23709)') df_23709 = pd.read_sql_query(sql, engine) # Join dates df_23709 = df_23709.merge(dates_23709, how='left', left_on='sample_id', right_on='water_sample_id') # Tidy df df_23709.sort_values(by='sample_date', inplace=True, ascending='True') df_23709.index = df_23709['sample_date'] df_23709 = df_23709[['value']] df_23709.columns = ['site_23709'] # Site 37063 # Get dates sql = ('SELECT water_sample_id, sample_date ' 'FROM resa2.water_samples ' 'WHERE station_id = 37063') dates_37063 = pd.read_sql_query(sql, engine) sql = ('SELECT sample_id, value ' 'FROM resa2.water_chemistry_values2 ' 'WHERE method_id = 10253 ' 'AND sample_id IN (SELECT water_sample_id ' 'FROM resa2.water_samples ' 'WHERE station_id = 37063)') df_37063 = pd.read_sql_query(sql, engine) # Join dates df_37063 = df_37063.merge(dates_37063, how='left', left_on='sample_id', right_on='water_sample_id') # Tidy df df_37063.sort_values(by='sample_date', inplace=True, ascending='True') df_37063.index = df_37063['sample_date'] df_37063 = df_37063[['value']] df_37063.columns = ['site_37063'] # Merge merged_df = df_23709.merge(df_37063, how='outer', left_index=True, right_index=True) # Plot merged_df.plot(subplots=True, sharey=True, figsize=(12, 8)) plt.tight_layout() mpld3.display() # Read stations table sql = ('SELECT station_id, station_code, station_name, latitude, longitude ' 'FROM resa2.stations') stn_df = pd.read_sql_query(sql, engine) stn_df.head() # Get a subset of columns from John's spreadsheet us_df = j_df[['Station Code', 'Station name', 'Suggested Name', 'Active', 'INCLUDE IN ICP WATERS DATABASE', 'INCLUDE IN ICP DOC ANALYSIS', 'NFC_SITEID', 'NFC_SITENAME', 'Latitude', 'Longitude']] # Rename columns us_df.columns = ['station_code', 'station_name', 'suggested_name', 'active', 'core', 'trend', 'nfc_code', 'nfc_name', 'latitude', 'longitude'] us_df.head() # Dict to store results res_dict = {'stn_code':[], 'code_present':[], 'name_ok':[], 'lat_ok':[], 
'lon_ok':[]} # Iterate over spreadsheet for idx in range(len(us_df)): # Get stn id stn_cd = us_df.ix[idx, 'station_code'] name = us_df.ix[idx, 'suggested_name'] # These are John's ideal names lat = us_df.ix[idx, 'latitude'] lon = us_df.ix[idx, 'longitude'] # Check stations table q_res = stn_df.query('station_code == @stn_cd') if len(q_res) == 0: # The site isn't present with this code res_dict['stn_code'].append(stn_cd) res_dict['code_present'].append(0) res_dict['name_ok'].append(np.nan) res_dict['lat_ok'].append(np.nan) res_dict['lon_ok'].append(np.nan) elif len(q_res) == 1: # Check the site properties res_dict['stn_code'].append(stn_cd) res_dict['code_present'].append(1) res_dict['name_ok'].append(name == q_res.station_name.values[0]) res_dict['lat_ok'].append(lat == q_res.latitude.values[0]) res_dict['lon_ok'].append(lon == q_res.longitude.values[0]) else: # This should be impossible raise ValueError('Site code %s appears to be duplicated.' % stn_cd) # Build df res_df = pd.DataFrame(res_dict) res_df = res_df[['stn_code', 'code_present', 'name_ok', 'lat_ok', 'lon_ok']] res_df.head() # How many matches? print 'Total number of records in spreadsheet: ', len(res_df) print 'Of these...' print 'Number matched by code: ', len(res_df.query('code_present==1')) print 'Number matched by code with correct lat and lon: ', len(res_df.query('lat_ok==1 & lon_ok==1')) print 'Number matched by code with correct name, lat and lon:', len(res_df.query('name_ok==1 & lat_ok==1 & lon_ok==1')) # Dict to store results res_dict = {'nfc_code':[], 'code_present':[], 'name_ok':[], 'lat_ok':[], 'lon_ok':[]} # Iterate over spreadsheet for idx in range(len(us_df)): # Get stn id nfc_cd = us_df.ix[idx, 'nfc_code'] name = us_df.ix[idx, 'nfc_name'] # These are John's ideal names lat = us_df.ix[idx, 'latitude'] lon = us_df.ix[idx, 'longitude'] # Check stations table. Need to add 'X15:' and allow for variants q_res = stn_df[(stn_df['station_code']=='X15:%s' % nfc_cd) | (stn_df['station_code']=='X15:%s' % nfc_cd[1:]) | (stn_df['station_code']=='X15:%s' % nfc_cd[:-1])] if len(q_res) == 0: # The site isn't present with this code res_dict['nfc_code'].append(nfc_cd) res_dict['code_present'].append(0) res_dict['name_ok'].append(np.nan) res_dict['lat_ok'].append(np.nan) res_dict['lon_ok'].append(np.nan) elif len(q_res) == 1: # Check the site properties res_dict['nfc_code'].append(nfc_cd) res_dict['code_present'].append(1) res_dict['name_ok'].append(name == q_res.station_name.values[0]) res_dict['lat_ok'].append(lat == q_res.latitude.values[0]) res_dict['lon_ok'].append(lon == q_res.longitude.values[0]) else: # This should be impossible raise ValueError('Site code %s appears to be duplicated.' % stn_cd) # Build df res_df = pd.DataFrame(res_dict) res_df = res_df[['nfc_code', 'code_present', 'name_ok', 'lat_ok', 'lon_ok']] res_df.head() # How many matches? print 'Total number of records in spreadsheet: ', len(res_df) print 'Of these...' print 'Number matched by code: ', len(res_df.query('code_present==1')) print 'Number matched by code with correct lat and lon: ', len(res_df.query('lat_ok==1 & lon_ok==1')) print 'Number matched by code with correct name, lat and lon:', len(res_df.query('name_ok==1 & lat_ok==1 & lon_ok==1')) # Filter dfs match_df = res_df.query('code_present==1') trend_df = us_df.query('trend == "YES"') # Join trend_df = trend_df.merge(match_df, how='left', on='nfc_code') # Identify records with no match trend_df[pd.isnull(trend_df['code_present'])] <END_TASK>
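One pandas idiom carries most of the Mud Pond cleanup above: an outer join on sample_date that reveals which samples exist under only one of the two station IDs. Below is a minimal, self-contained sketch of that idiom; the IDs and dates are invented for illustration, and note that the original cell also assumes imp and pandas were imported by an earlier notebook cell.

import pandas as pd

# Hypothetical samples recorded under two IDs for the same site
df_a = pd.DataFrame({'water_sample_id': [1, 2, 3],
                     'sample_date': ['1990-01-01', '1990-02-01', '1990-03-01']})
df_b = pd.DataFrame({'water_sample_id': [7, 8],
                     'sample_date': ['1990-02-01', '1990-03-01']})

# Outer join on date keeps every sample from both IDs
merged = df_a.merge(df_b, how='outer', on='sample_date',
                    suffixes=['_a', '_b'])

# Rows where the '_b' ID is missing exist only under the first station
only_a = merged[pd.isnull(merged['water_sample_id_b'])]
print(only_a)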
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Softmax regression Step2: Training Step3: Validation
<ASSISTANT_TASK:> Python Code: from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("data/MNIST",one_hot=True) import tensorflow as tf # Setup our Input placeholder x = tf.placeholder(tf.float32, [None, 784]) # Our Weights and Biases W = tf.Variable(tf.zeros([784, 10])) b = tf.Variable(tf.zeros([10])) y = tf.matmul(x,W) + b # Define loss and optimizer y_ = tf.placeholder(tf.float32,[None,10]) # The following is considered numerically unstable, using the line above instead # cross_entropy = tf.reduce_mean(-(tf.reduce_sum(y_ * tf.log(y),reduction_indices=[1]))) cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=y)) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) sess = tf.InteractiveSession() tf.global_variables_initializer().run() # We train our model with batches of random data for _ in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step,feed_dict={x:batch_xs,y_:batch_ys}) # We validate our model by casting our boolean (right/wrong) into integers and taking the mean correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_:mnist.test.labels})) <END_TASK>
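The commented-out cross_entropy line is flagged as numerically unstable because computing log(softmax(y)) directly overflows exp() for large logits, which is exactly what softmax_cross_entropy_with_logits guards against. A small NumPy sketch of the problem and the standard max-shift fix; the logit values are chosen purely to trigger the overflow:

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])  # large enough to overflow exp()

# Naive softmax: exp(1000) overflows to inf, giving nan probabilities
naive = np.exp(logits) / np.sum(np.exp(logits))
print(naive)  # [nan nan nan], with an overflow warning

# Shifting by the max leaves softmax unchanged but keeps exp() finite
shifted = logits - np.max(logits)
stable = np.exp(shifted) / np.sum(np.exp(shifted))
print(stable)  # [0.09003057 0.24472847 0.66524096]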
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Reading CSV File into Pandas DataFrame Step2: I have a few things to do; first, I want to swap the positions of the columns with the row index variables. Step3: There are issues with the way the data is stored in the DataFrame object: since the data includes characters such as commas and currency signs, it is stored as strings. Step4: The line above returns the desired result, removing the stray characters from the cells and returning their values as integers that can be computed on and analyzed further. Step5: The line above repeats the same process for the 'Life Expectancy' column and returns its values as integers. The only difference is that no characters need replacing, since the cells contain nothing but numerical characters. Step6: What is returned after the cells are cleaned is a clean DataFrame of computable data. Step7: <img src="incomearea1.png" alt="Income Area Graph"> Step8: <img src="incomebar1.png" alt="Income Bar Graph"> Step9: <img src="income_scatter.png" alt="Income Scatter Graph"> Step10: <img src="bar2income.png" alt="Income Bar Graph">
<ASSISTANT_TASK:> Python Code: import pandas as pd import csv import matplotlib.pyplot as plt income_df = pd.read_csv('LifeExpectancyIncome.csv') income_df income_df = income_df.set_index('Country').transpose() income_df income_df['Income / Person'] = income_df['Income / Person'].str.replace(r'[^-+\d.]', '').astype(int) income_df['Life Expectancy'] = income_df['Life Expectancy'].astype(int) income_df income_df.plot(kind='area') plt.show() income_df.plot(kind='bar', title='Income vs Life Expectancy') plt.show() #bar graph income_df.plot(x = 'Income / Person',y='Life Expectancy', kind='scatter') plt.show() #scatter chart plt.figure(figsize=(9, 6)) income_df = income_df.drop('Ethiopia') income_df.plot(x='Income / Person', y='Life Expectancy', kind='bar', stacked=True) plt.show() protein_df = pd.read_csv('HighProteinFoods.csv') protein_df.stack() <END_TASK>
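The single line doing the real cleaning work is the str.replace with the pattern r'[^-+\d.]', which strips every character that is not a digit, sign, or decimal point before casting to int. A stand-alone sketch of the same idea on made-up currency strings (written against a recent pandas, hence the explicit regex=True):

import pandas as pd

s = pd.Series(['$1,200', '£56,700', '980'])  # hypothetical raw values

# Drop everything except digits, signs and dots, then cast to int
cleaned = s.str.replace(r'[^-+\d.]', '', regex=True).astype(int)
print(cleaned.tolist())  # [1200, 56700, 980]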
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: As usual, we import everything we need. Step2: First, we load and preprocess the data. We use runs 6, 10, and 14 from the EEGBCI motor-imagery dataset. Step3: Now we can create 5s epochs around events of interest. Step4: Here we set suitable values for computing ERDS maps. Step5: Finally, we perform time/frequency decomposition over all epochs. Step6: Similar to ~mne.Epochs objects, we can also export data from the resulting time-frequency object to a Pandas DataFrame. Step7: This allows us to use additional plotting functions like seaborn's FacetGrid. Step8: Having the data as a DataFrame also facilitates subsetting, grouping, and aggregating.
<ASSISTANT_TASK:> Python Code: # Authors: Clemens Brunner <clemens.brunner@gmail.com> # Felix Klotzsche <klotzsche@cbs.mpg.de> # # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import TwoSlopeNorm import pandas as pd import seaborn as sns import mne from mne.datasets import eegbci from mne.io import concatenate_raws, read_raw_edf from mne.time_frequency import tfr_multitaper from mne.stats import permutation_cluster_1samp_test as pcluster_test fnames = eegbci.load_data(subject=1, runs=(6, 10, 14)) raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames]) raw.rename_channels(lambda x: x.strip('.')) # remove dots from channel names events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3)) tmin, tmax = -1, 4 event_ids = dict(hands=2, feet=3) # map event IDs to tasks epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5, picks=('C3', 'Cz', 'C4'), baseline=None, preload=True) freqs = np.arange(2, 36) # frequencies from 2-35Hz vmin, vmax = -1, 1.5 # set min and max ERDS values in plot baseline = [-1, 0] # baseline interval (in s) cnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1.5) # min, center, and max ERDS kwargs = dict(n_permutations=100, step_down_p=0.05, seed=1, buffer_size=None, out_type='mask') # for cluster test tfr = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs, use_fft=True, return_itc=False, average=False, decim=2) tfr.crop(tmin, tmax).apply_baseline(baseline, mode="percent") for event in event_ids: # select desired epochs for visualization tfr_ev = tfr[event] fig, axes = plt.subplots(1, 4, figsize=(12, 4), gridspec_kw={"width_ratios": [10, 10, 10, 1]}) for ch, ax in enumerate(axes[:-1]): # for each channel # positive clusters _, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch], tail=1, **kwargs) # negative clusters _, c2, p2, _ = pcluster_test(tfr_ev.data[:, ch], tail=-1, **kwargs) # note that we keep clusters with p <= 0.05 from the combined clusters # of two independent tests; in this example, we do not correct for # these two comparisons c = np.stack(c1 + c2, axis=2) # combined clusters p = np.concatenate((p1, p2)) # combined p-values mask = c[..., p <= 0.05].any(axis=-1) # plot TFR (ERDS map with masking) tfr_ev.average().plot([ch], cmap="RdBu", cnorm=cnorm, axes=ax, colorbar=False, show=False, mask=mask, mask_style="mask") ax.set_title(epochs.ch_names[ch], fontsize=10) ax.axvline(0, linewidth=1, color="black", linestyle=":") # event if ch != 0: ax.set_ylabel("") ax.set_yticklabels("") fig.colorbar(axes[0].images[-1], cax=axes[-1]) fig.suptitle(f"ERDS ({event})") plt.show() df = tfr.to_data_frame(time_format=None) df.head() df = tfr.to_data_frame(time_format=None, long_format=True) # Map to frequency bands: freq_bounds = {'_': 0, 'delta': 3, 'theta': 7, 'alpha': 13, 'beta': 35, 'gamma': 140} df['band'] = pd.cut(df['freq'], list(freq_bounds.values()), labels=list(freq_bounds)[1:]) # Filter to retain only relevant frequency bands: freq_bands_of_interest = ['delta', 'theta', 'alpha', 'beta'] df = df[df.band.isin(freq_bands_of_interest)] df['band'] = df['band'].cat.remove_unused_categories() # Order channels for plotting: df['channel'] = df['channel'].cat.reorder_categories(('C3', 'Cz', 'C4'), ordered=True) g = sns.FacetGrid(df, row='band', col='channel', margin_titles=True) g.map(sns.lineplot, 'time', 'value', 'condition', n_boot=10) axline_kw = dict(color='black', linestyle='dashed', linewidth=0.5, alpha=0.5) g.map(plt.axhline, y=0, **axline_kw) g.map(plt.axvline, x=0, **axline_kw) 
g.set(ylim=(None, 1.5)) g.set_axis_labels("Time (s)", "ERDS (%)") g.set_titles(col_template="{col_name}", row_template="{row_name}") g.add_legend(ncol=2, loc='lower center') g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.08) df_mean = (df.query('time > 1') .groupby(['condition', 'epoch', 'band', 'channel'])[['value']] .mean() .reset_index()) g = sns.FacetGrid(df_mean, col='condition', col_order=['hands', 'feet'], margin_titles=True) g = (g.map(sns.violinplot, 'channel', 'value', 'band', n_boot=10, palette='deep', order=['C3', 'Cz', 'C4'], hue_order=freq_bands_of_interest, linewidth=0.5) .add_legend(ncol=4, loc='lower center')) g.map(plt.axhline, **axline_kw) g.set_axis_labels("", "ERDS (%)") g.set_titles(col_template="{col_name}", row_template="{row_name}") g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.3) <END_TASK>
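The ERDS values above come from apply_baseline(baseline, mode="percent"), which rescales power as (power minus baseline mean) divided by baseline mean for each frequency bin, so negative values mark desynchronization and positive values mark synchronization. A toy NumPy version of that transform, with invented power values:

import numpy as np

times = np.linspace(-1, 4, 6)
power = np.array([2.0, 2.0, 1.0, 0.5, 3.0, 4.0])  # fake single-bin power

baseline_mask = (times >= -1) & (times <= 0)
base = power[baseline_mask].mean()  # mean power over the baseline interval

erds = (power - base) / base  # negative = ERD, positive = ERS
print(erds)  # e.g. -0.5 means power dropped 50% below baseline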
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: TensorFlow Addons Optimizers: ConditionalGradient Step2: Build the model Step3: Prepare the data Step5: Define a custom callback function Step6: Train and evaluate (with the ConditionalGradient optimizer) Step7: Train and evaluate (with the SGD optimizer, for comparison) Step8: Frobenius norm of the weights Step9: Training and validation accuracy: comparing CG and SGD
<ASSISTANT_TASK:> Python Code: #@title Licensed under the Apache License, Version 2.0 # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. !pip install -U tensorflow-addons import tensorflow as tf import tensorflow_addons as tfa from matplotlib import pyplot as plt # Hyperparameters batch_size=64 epochs=10 model_1 = tf.keras.Sequential([ tf.keras.layers.Dense(64, input_shape=(784,), activation='relu', name='dense_1'), tf.keras.layers.Dense(64, activation='relu', name='dense_2'), tf.keras.layers.Dense(10, activation='softmax', name='predictions'), ]) # Load MNIST dataset as NumPy arrays dataset = {} num_validation = 10000 (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() # Preprocess the data x_train = x_train.reshape(-1, 784).astype('float32') / 255 x_test = x_test.reshape(-1, 784).astype('float32') / 255 def frobenius_norm(m): This function is to calculate the frobenius norm of the matrix of all layer's weight. Args: m: is a list of weights param for each layers. total_reduce_sum = 0 for i in range(len(m)): total_reduce_sum = total_reduce_sum + tf.math.reduce_sum(m[i]**2) norm = total_reduce_sum**0.5 return norm CG_frobenius_norm_of_weight = [] CG_get_weight_norm = tf.keras.callbacks.LambdaCallback( on_epoch_end=lambda batch, logs: CG_frobenius_norm_of_weight.append( frobenius_norm(model_1.trainable_weights).numpy())) # Compile the model model_1.compile( optimizer=tfa.optimizers.ConditionalGradient( learning_rate=0.99949, lambda_=203), # Utilize TFA optimizer loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) history_cg = model_1.fit( x_train, y_train, batch_size=batch_size, validation_data=(x_test, y_test), epochs=epochs, callbacks=[CG_get_weight_norm]) model_2 = tf.keras.Sequential([ tf.keras.layers.Dense(64, input_shape=(784,), activation='relu', name='dense_1'), tf.keras.layers.Dense(64, activation='relu', name='dense_2'), tf.keras.layers.Dense(10, activation='softmax', name='predictions'), ]) SGD_frobenius_norm_of_weight = [] SGD_get_weight_norm = tf.keras.callbacks.LambdaCallback( on_epoch_end=lambda batch, logs: SGD_frobenius_norm_of_weight.append( frobenius_norm(model_2.trainable_weights).numpy())) # Compile the model model_2.compile( optimizer=tf.keras.optimizers.SGD(0.01), # Utilize SGD optimizer loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=['accuracy']) history_sgd = model_2.fit( x_train, y_train, batch_size=batch_size, validation_data=(x_test, y_test), epochs=epochs, callbacks=[SGD_get_weight_norm]) plt.plot( CG_frobenius_norm_of_weight, color='r', label='CG_frobenius_norm_of_weights') plt.plot( SGD_frobenius_norm_of_weight, color='b', label='SGD_frobenius_norm_of_weights') plt.xlabel('Epoch') plt.ylabel('Frobenius norm of weights') plt.legend(loc=1) plt.plot(history_cg.history['accuracy'], color='r', label='CG_train') plt.plot(history_cg.history['val_accuracy'], color='g', label='CG_test') plt.plot(history_sgd.history['accuracy'], color='pink', label='SGD_train') plt.plot(history_sgd.history['val_accuracy'], color='b', label='SGD_test') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(loc=4) 
<END_TASK>
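The frobenius_norm helper in the cell above treats the whole list of layer weights as one flattened vector: the square root of the summed squares across every tensor. A plain NumPy equivalent for sanity-checking outside TensorFlow, using two arbitrary small arrays:

import numpy as np

weights = [np.array([[3.0, 0.0], [0.0, 4.0]]), np.array([1.0, 2.0])]

# Sum of squares over every element of every tensor, then a square root
total = sum(np.sum(w ** 2) for w in weights)
frob = total ** 0.5
print(frob)  # sqrt(9 + 16 + 1 + 4) = sqrt(30)

# Cross-check against the per-matrix norms combined
check = np.sqrt(sum(np.linalg.norm(w) ** 2 for w in weights))
print(np.isclose(frob, check))  # True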
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 1. Target data format Step2: 2. Target data encoding Step3: 3. Performance measure Step4: 4. Model definition Step5: Now that we have replaced the relevant accuracy measures and loss function, our training process is exactly the same, meaning we can run the same training process and plotting code to visualize the results. The only difference is that with classification we are using an accuracy rather than an error measure, so the better our model is performing, the higher the graph should be (higher accuracy is better, while lower error is better).
<ASSISTANT_TASK:> Python Code: %matplotlib inline import math import random import seaborn as sns import matplotlib.pyplot as plt import pandas as pd from sklearn.datasets import load_boston '''Since this is a classification problem, we will need to represent our targets as one-hot encoding vectors (see previous lab). To do this we will use scikit-learn's OneHotEncoder module which we import here''' from sklearn.preprocessing import OneHotEncoder import numpy as np import tensorflow as tf sns.set(style="ticks", color_codes=True) dataset = load_boston() houses = pd.DataFrame(dataset.data, columns=dataset.feature_names) houses['target'] = dataset.target # WRITE CODE TO CONVERT 'TARGET' COLUMN FROM CONTINUOUS TO CATEGORICAL mean = np.mean(houses['target']) houses['target'] = (houses['target'] > mean).astype(int) '''check your work''' print np.max(houses['target']), "<-- should be 1" print np.min(houses['target']), "<-- should be 0" houses_array = houses.as_matrix().astype(float) np.random.shuffle(houses_array) X = houses_array[:, :-1] y = houses_array[:, -1] # USE SCIKIT-LEARN'S ONE-HOT ENCODING MODULE TO # CONVERT THE y ARRAY OF TARGETS TO ONE-HOT ENCODING. # convert the list of targets to a vertical matrix with the dimensions [1 x number of samples] # this is necessary for later computation y = y.reshape(-1,1) # create an instance of the one-hot encoding function from the sci-kit learn library enc = OneHotEncoder() # use the function to figure out how many categories exist in the data enc.fit(y) # convert the target data to one-hot encoding y = enc.transform(y).toarray() X = X / X.max(axis=0) trainingSplit = int(.7 * houses_array.shape[0]) X_train = X[:trainingSplit] y_train = y[:trainingSplit] X_test = X[trainingSplit:] y_test = y[trainingSplit:] print('Training set', X_train.shape, y_train.shape) print('Test set', X_test.shape, y_test.shape) '''check your work''' print y_train.shape[1], "<-- should be 2" print y_test.shape[1], "<-- should be 2" print y_train[0], "<-- should be either [0. 1.] or [1. 
0.]" # helper variables num_samples = X_train.shape[0] num_features = X_train.shape[1] num_outputs = y_train.shape[1] # Hyper-parameters batch_size = 30 num_hidden_1 = 30 num_hidden_2 = 28 learning_rate = 0.08 training_epochs = 400 dropout_keep_prob = 0.75 # 0.5 # set to no dropout by default # variable to control the resolution at which the training results are stored display_step = 1 def accuracy(predictions, targets): # IMPLEMENT THE NEW ACCURACY MEASURE HERE accuracy = float(np.sum(np.argmax(predictions, 1) == np.argmax(targets, 1))) / float(len(predictions)) * 100 return accuracy def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) graph = tf.Graph() with graph.as_default(): x = tf.placeholder(tf.float32, shape=(None, num_features)) _y = tf.placeholder(tf.float32, shape=(None)) keep_prob = tf.placeholder(tf.float32) tf_X_test = tf.constant(X_test, dtype=tf.float32) tf_X_train = tf.constant(X_train, dtype=tf.float32) W_fc1 = weight_variable([num_features, num_hidden_1]) b_fc1 = bias_variable([num_hidden_1]) W_fc2 = weight_variable([num_hidden_1, num_hidden_2]) b_fc2 = bias_variable([num_hidden_2]) W_fc3 = weight_variable([num_hidden_2, num_outputs]) b_fc3 = bias_variable([num_outputs]) def model(data, keep): fc1 = tf.nn.relu(tf.matmul(data, W_fc1) + b_fc1) fc1_drop = tf.nn.dropout(fc1, keep) fc2 = tf.nn.relu(tf.matmul(fc1_drop, W_fc2) + b_fc2) fc2_drop = tf.nn.dropout(fc2, keep) fc3 = tf.matmul(fc2_drop, W_fc3) + b_fc3 return fc3 '''for our loss function we still want to get the raw outputs of the model, but since it no longer represents the actual prediction we rename the variable to โ€˜outputโ€™''' output = model(x, keep_prob) # WHEN WE CALCULATE THE PREDICTIONS, WE NEED TO WRAP EACH OUTPUT IN A # tf.nn.softmax() FUNCTION. THE FIRST ONE HAS BEEN DONE FOR YOU: prediction = tf.nn.softmax(output) test_prediction = model(tf_X_test, 1.0) train_prediction = model(tf_X_train, 1.0) '''finally, we replace our previous MSE cost function with the cross-entropy function included in Tensorflow. 
This function takes in the raw output of the network and calculates the average loss with the target''' loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(output, _y)) optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss) saver = tf.train.Saver() results = [] with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print('Initialized') for epoch in range(training_epochs): indexes = range(num_samples) random.shuffle(indexes) for step in range(int(math.floor(num_samples/float(batch_size)))): offset = step * batch_size batch_data = X_train[indexes[offset:(offset + batch_size)]] batch_labels = y_train[indexes[offset:(offset + batch_size)]] feed_dict = {x : batch_data, _y : batch_labels, keep_prob: dropout_keep_prob} _, l, p = session.run([optimizer, loss, prediction], feed_dict=feed_dict) if (epoch % display_step == 0): batch_acc = accuracy(p, batch_labels) train_acc = accuracy(train_prediction.eval(session=session), y_train) test_acc = accuracy(test_prediction.eval(session=session), y_test) results.append([epoch, batch_acc, train_acc, test_acc]) save_path = saver.save(session, "model_houses_classification.ckpt") print("Model saved in file: %s" % save_path) df = pd.DataFrame(data=results, columns = ["epoch", "batch_acc", "train_acc", "test_acc"]) df.set_index("epoch", drop=True, inplace=True) fig, ax = plt.subplots(1, 1, figsize=(10, 4)) ax.plot(df) ax.set(xlabel='Epoch', ylabel='Error', title='Training result') ax.legend(df.columns, loc=1) print "Maximum test accuracy: %.2f%%" % np.max(df["test_acc"]) <END_TASK>
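The two pieces that change when this network moves from regression to classification are the one-hot targets and the argmax-based accuracy. Both can be sketched in a few lines of plain NumPy, with arbitrary labels and fake network outputs:

import numpy as np

labels = np.array([0, 1, 1, 0])
one_hot = np.eye(2)[labels]  # rows of [1,0] or [0,1]
print(one_hot)

# Fake network outputs (probabilities per class), one row per sample
predictions = np.array([[0.9, 0.1], [0.4, 0.6], [0.7, 0.3], [0.8, 0.2]])

# Accuracy: fraction of rows where the argmaxes agree
acc = np.mean(np.argmax(predictions, 1) == np.argmax(one_hot, 1)) * 100
print(acc)  # 75.0, since the third sample is misclassified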
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Table 3 - New Members of Taurus Step2: Save the data tables locally.
<ASSISTANT_TASK:> Python Code: import warnings warnings.filterwarnings("ignore") import pandas as pd names = ["Name_2MASS", "RA", "Dec", "Spectral Type", "Membership", "Teff", "AJ", "Lbol", "I", "I-zp","J-H","H-Ks", "Ks", "inIMF", "Night"] tbl3 = pd.read_csv("http://iopscience.iop.org/0004-637X/617/2/1216/fulltext/60509.tb3.txt", sep='\t', names=names) tbl3 tbl3.to_csv("../data/Luhman2004c/tbl3.csv", sep="\t") <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: B. Convert an urn (a list of ball colors) into a dictionary of color counts Step2: C. Compute the chance of drawing a given color from those counts Step3: D. Compute the chance of drawing any color other than the given one Step4: E. Compute the chance a given color appears among several draws without replacement, and the chance it is the first ball drawn
<ASSISTANT_TASK:> Python Code: def urn_to_dict(urn_list): urn_dict = {} ### BEGIN SOLUTION ### END SOLUTION return urn_dict u1 = ["green", "green", "blue", "green"] a1 = set({("green", 3), ("blue", 1)}) assert a1 == set(urn_to_dict(u1).items()) u2 = ["red", "blue", "blue", "green", "yellow", "black", "black", "green", "blue", "yellow", "red", "green", "blue", "black", "yellow", "yellow", "yellow", "green", "blue", "red", "red", "blue", "red", "blue", "yellow", "yellow", "yellow"] a2 = set({('black', 3), ('blue', 7), ('green', 4), ('red', 5), ('yellow', 8)}) assert a2 == set(urn_to_dict(u2).items()) def chances_of_color(counts, query): prob = 0.0 ### BEGIN SOLUTION ### END SOLUTION return prob import numpy.testing as t c1 = {"blue": 3, "red": 1} t.assert_allclose(chances_of_color(c1, "blue"), 0.75) import numpy.testing as t c2 = {"red": 934, "blue": 493859, "yellow": 31, "green": 3892, "black": 487} t.assert_allclose(chances_of_color(c2, "green"), 0.007796427505443677) import numpy.testing as t c3 = {"red": 5, "blue": 5, "yellow": 5, "green": 5, "black": 5} t.assert_allclose(chances_of_color(c2, "orange"), 0.0) def chances_of_not_color(counts, query): prob = 0.0 ### BEGIN SOLUTION ### END SOLUTION return prob import numpy.testing as t c1 = {"blue": 3, "red": 1} t.assert_allclose(chances_of_not_color(c1, "blue"), 0.25) import numpy.testing as t c2 = {"red": 934, "blue": 493859, "yellow": 31, "green": 3892, "black": 487} t.assert_allclose(chances_of_not_color(c2, "blue"), 0.010705063871811693) import numpy.testing as t c3 = {"red": 5, "blue": 5, "yellow": 5, "green": 5, "black": 5} t.assert_allclose(chances_of_not_color(c2, "orange"), 1.0) import itertools def select_chances(urn_list, number, color): prob = 0.0 ### BEGIN SOLUTION ### END SOLUTION return prob import numpy.testing as t q1 = ["blue", "green", "red"] t.assert_allclose(select_chances(q1, 2, "red"), 2/3) q2 = ["red", "blue", "blue", "green", "yellow", "black", "black", "green", "blue", "yellow", "red", "green", "blue", "black", "yellow", "yellow", "yellow", "green", "blue", "red", "red", "blue", "red", "blue", "yellow", "yellow", "yellow"] t.assert_allclose(select_chances(q2, 3, "red"), 0.4735042735042735) import itertools def select_chances_first(urn_list, number, color): prob = 0.0 ### BEGIN SOLUTION ### END SOLUTION return prob import numpy.testing as t q1 = ["blue", "green", "red"] t.assert_allclose(select_chances_first(q1, 2, "red"), 2/6) q2 = ["red", "blue", "blue", "green", "yellow", "black", "black", "green", "blue", "yellow", "red", "green", "blue", "black", "yellow", "yellow", "yellow", "green", "blue", "red", "red", "blue", "red", "blue", "yellow", "yellow", "yellow"] t.assert_allclose(select_chances_first(q2, 3, "red"), 0.18518518518518517) <END_TASK>
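The ### BEGIN SOLUTION / ### END SOLUTION markers are nbgrader-style placeholders for the student's code. Possible reference implementations that satisfy every assert in the cell are sketched below, kept separate so the exercise stubs stay intact:

from collections import Counter
import itertools

def urn_to_dict(urn_list):
    return dict(Counter(urn_list))

def chances_of_color(counts, query):
    return counts.get(query, 0) / float(sum(counts.values()))

def chances_of_not_color(counts, query):
    return 1.0 - chances_of_color(counts, query)

def select_chances(urn_list, number, color):
    # chance the color shows up at least once among `number` draws
    combos = list(itertools.combinations(urn_list, number))
    return sum(color in c for c in combos) / float(len(combos))

def select_chances_first(urn_list, number, color):
    # chance the very first ball drawn has the color
    perms = list(itertools.permutations(urn_list, number))
    return sum(p[0] == color for p in perms) / float(len(perms))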
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: La primera columna es un entero con formato "AAAAMMDD" que vamos a ignorar. Las temperaturas estรกn medidas en dรฉcimas de grado Celsius, asรญ que hay que pasarlas a grados Celsius. Vamos a calcular tambiรฉn la temperatura media. Step2: Como vamos a ignorar la columna de las fechas tenemos que crear un dominio para el eje x. Simplemente construiremos un array de enteros desde 0 hasta 365. Step3: Supongamos que ahora queremos guardar nuestra tabla de datos en un archivo txt, para poder cargarlo ya modificado mรกs adelante. Una manera fรกcil de hacerlo serรญa con otra funciรณn de NumPy Step4: Ejercicio Step5: Ejemplo datos calidad del aire Step6: Cargando los datos Step7: Valores inexistentes Step8: Pintando los datos Step9: CO Step10: O3 Step11: ยฟCรณmo leerรญamos un archivo sin usar NumPy?
<ASSISTANT_TASK:> Python Code: !head ../data/temperaturas.csv # Esta lรญnea no funciona en Windows import numpy as np %matplotlib inline import matplotlib.pyplot as plt datos = np.loadtxt("../data/temperaturas.csv", skiprows=1, # Saltamos una lรญnea usecols=(1, 2, 3), # Solo columnas 2, 3 y 4 delimiter=',') # Separados por comas datos[:9] Tmax = datos[:, 1] / 10 Tmin = datos[:, 2] / 10 Tavg = (Tmax + Tmin) / 2 x = np.arange(366) matriz_datos = np.zeros([366, 4]) matriz_datos[:, 0] = x matriz_datos[:, 1] = Tmax matriz_datos[:, 2] = Tmin matriz_datos[:, 3] = Tavg print(matriz_datos[:10]) # np.savetxt('archivo_datos.txt', matriz_datos, fmt='%.5f', newline = '\r\n') import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.plot(x, Tavg, 'k') ax.set_xlim(0, 366) ax.fill_between(x, Tmin, Tmax, facecolor='#4f88b1', edgecolor='none') ax.set_title("Resรบmenes diarios") ax.set_xlabel("Dรญas") ax.set_ylabel("Temperatura (C)") from IPython.display import HTML HTML('<iframe src="http://www.mambiente.munimadrid.es/sica/scripts/index.php" \ width="700" height="400"></iframe>') # Linux command !head ../data/barrio_del_pilar-20160322.csv # Windows #!gc log.txt | select -first 10 # head # loading the data # ./data/barrio_del_pilar-20160322.csv data1 = np.genfromtxt('../data/barrio_del_pilar-20160322.csv', skip_header=3, delimiter=';', usecols=(2,3,4)) data1 np.mean(data1, axis=0) np.nanmean(data1, axis=0) # masking invalid data data1 = np.ma.masked_invalid(data1) np.mean(data1, axis=0) data2 = np.genfromtxt('../data/barrio_del_pilar-20151222.csv', skip_header=3, delimiter=';', usecols=(2,3,4)) data2 = np.ma.masked_invalid(data2) plt.plot(data1[:, 1], label='2016') plt.plot(data2[:, 1], label='2015') plt.legend() plt.hlines(200, 0, 200, linestyles='--') plt.ylim(0, 220) from IPython.display import HTML HTML('<iframe src="http://ccaa.elpais.com/ccaa/2015/12/24/madrid/1450960217_181674.html" width="700" height="400"></iframe>') # http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.convolve.html def moving_average(x, N=8): return np.convolve(x, np.ones(N)/N, mode='same') plt.plot(moving_average(data1[:, 0]), label='2016') plt.plot(moving_average(data2[:, 0]), label='2015') plt.hlines(10, 0, 250, linestyles='--') plt.ylim(0, 11) plt.legend() plt.plot(moving_average(data1[:, 2]), label='2016') #plt.plot(data1[:, 2]) plt.plot(moving_average(data2[:, 2]), label='2015') #plt.plot(data2[:, 2]) plt.hlines(180, 0, 250, linestyles='--') plt.ylim(0, 190) plt.legend() data_file = '../data/barrio_del_pilar-20151222.csv' data = [] with open(data_file) as f: # Saltamos las tres primeras lรญneas for ii in range(3): f.readline() for line in f: line_string = line line_list = line.split(';') date = line_list[0] hour = line_list[1] components_data = [] for c in line_list[2:]: if '-' not in c: components_data.append(float(c)) else: components_data.append(np.nan) data.append(components_data) print(np.array(data)) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Objective Step2: Building the lists of successors Step3: Building the graphs Step4: Learning Step5: Graph weights assigned at random Step6: Learning function Step7: Inference Step8: Mean average precision
<ASSISTANT_TASK:> Python Code: #IMPORT import numpy as np import copy import operator from tqdm import tqdm file_train = open("cascades_train.txt") file_test = open("cascades_test.txt") data_train=[] for i in file_train.readlines(): tab = [[float(j.split(":")[0]),float(j.split(":")[1])] for j in i.split(";")[:-1]] tab.sort(key=lambda x: x[1]) data_train.append(tab) data_test=[] for i in file_test.readlines(): tab = [[float(j.split(":")[0]),float(j.split(":")[1])] for j in i.split(";")[:-1]] tab.sort(key=lambda x: x[1]) data_test.append(tab) def getSuccsOfTab(tab): succ=[] for i in tab: succ_i=[] for j in tab: if(i[1]<j[1]): succ_i.append(j[0]) succ.append([i[0],succ_i]) return succ def unique(liste): seen=set() seen_add =seen.add return [x for x in liste if not(x in seen or seen_add(x))] succs_train = [] for line in data_train: succs_train.append(getSuccsOfTab(line)) succs_test = [] for line in data_test: succs_test.append(getSuccsOfTab(line)) #optimisation au niveau du temps possible def getGraph(succs): graph={} for h in succs: for i in h: try: for j in i[1]: graph[i[0]].append(j) graph[i[0]]=unique(graph[i[0]]) graph[i[0]].sort() except KeyError: graph[i[0]]=i[1] return graph graph_train = getGraph(succs_train) graph_test = getGraph(succs_test) #liste correspond a un D def getListPrec(liste): prec=[] for i in liste: if (i[1]>1): p=[] for j in liste: if (i[1]>j[1]): p.append(j[0]) prec.append((i[0],p)) return prec #listePrec obtenue avec getListPrec dun D def getProbaOfList(listePrec,graph_weight_d): a={} for i in listePrec: prod=1 for pre in i[1]: prod= prod *(1 - graph_weight_d[pre][i[0]]) a[i[0]]=1-prod return a #dicoPrec obtenue avec constructAllDicoPrec def getProbaOfDicoPrec(dicoPrec,graph_weight_d): a={} for i in dicoPrec: prod=1 for pre in dicoPrec[int(i)]: prod= prod *(1 - graph_weight_d[int(pre)][int(i)]) a[i]=1-prod return a #fonction permettant de savoir si il existe une infection de l'element u sur v, lors d'un episode D #si oui retourne les indices def existLinkUV(d,u,v): listeSucc = getSuccsOfTab(d) listeU = [i[0] for i in listeSucc] if u in listeU: indiceU = np.where(np.array(listeU)==u)[0][0] listeV = listeSucc[indiceU][1] if v in listeV: return True return False def constructDicoDsucc(d): dico1={} dico={} for i in d: dico1[i[0]]=i[1] dico[i[0]]={} for i in dico: for j in d: if(dico1[i]<j[1]): dico[i][j[0]]=j[1] return dico def constructDicoDprec(d): dico1={} dico={} for i in d: dico1[i[0]]=i[1] dico[i[0]]={} for i in dico: for j in d: if(dico1[i]>j[1]): dico[i][j[0]]=j[1] return dico def constructAlldicosSucc(data): listeAllDico=[] for i in data: listeAllDico.append(constructDicoDsucc(i)) return listeAllDico def constructAlldicosPrec(data): listeAllDico=[] for i in data: listeAllDico.append(constructDicoDprec(i)) return listeAllDico #creation des poids du graph dico def getGraphWeightRandom(graph): graph_weight_d={} for i in graph: dico={} for e in graph[i]: rando=np.random.rand() dico[e]=rando graph_weight_d[i]=dico return graph_weight_d gInit_train = getGraphWeightRandom(graph_train) #ne fonctionne pas def fitModele(data_train,nbIt=1,eps=1e-1): succs_train = [] for line in data_train: succs_train.append(getSuccsOfTab(line)) graph_train = getGraph(succs_train) #init graph_cur = getGraphWeightRandom(graph_train) #it=0 #ep=100000 #while((it<nbIt) & (ep>eps)): #probas_chapiteau=[getProbaOfList(getListPrec(d),graph_cur) for d in data_train] #loop for : utilisateur u vers v for m,u in enumerate(graph_cur): if((m % 10)==0): print(u) for v in graph_cur[u]: s=0 nbUV=0 
nbnotUV=0 for idx,d in enumerate(data_train[:3]): probas_chapiteau = getProbaOfList(getListPrec(d),graph_cur) if(existLinkUV(d,u,v)): nbUV+=1 s+=graph_cur[u][v]*1.0/probas_chapiteau[v] else: nbnotUV+=1 graph_cur[u][v]= s*1.0/(nbUV+nbnotUV) # met a jour les poids return graph_cur #pc = fitModele(data_train) #devrait fonctionner def fitModele2(data_train,nbIt=2,eps=1e-1): succs_train = [] for line in data_train: succs_train.append(getSuccsOfTab(line)) graph_train = getGraph(succs_train) #init theta_cur = getGraphWeightRandom(graph_train) for it in range(nbIt): dicoAS = constructAlldicosSucc(data_train) dicoAP = constructAlldicosPrec(data_train) sommeDico={} compteurDuvplus={} compteurDuvmoins={} for u in theta_cur: sommeDico[u]={} compteurDuvplus[u]={} compteurDuvmoins[u]={} for v in theta_cur[u]: sommeDico[u][v]=0.0 compteurDuvplus[u][v]=0.0 compteurDuvmoins[u][v]=0.0 probas=[] for i in dicoAP: probas.append(getProbaOfDicoPrec(i,theta_cur)) #A OPTIMISER for p in probas: for u in theta_cur: for v in theta_cur[u]: try: p[v] try: sommeDico[u][v]=sommeDico[u][v]+(theta_cur[u][v]*1.0/p[v]) compteurDuvplus[u][v]=compteurDuvplus[u][v]+1 except ZeroDivisionError: pass except KeyError: compteurDuvmoins[u][v]=compteurDuvmoins[u][v]+1 print(compteurDuvplus[0][1]+compteurDuvmoins[0][1]) #maj des poids du graph theta_tmp = copy.deepcopy(theta_cur) for u in theta_tmp: for v in theta_tmp[u]: try: theta_tmp[u][v]= sommeDico[u][v]*1.0/(compteurDuvplus[u][v]+compteurDuvmoins[u][v]) except ZeroDivisionError: pass #calcul de la vraissemblance probas_chap=[] for i in dicoAP: probas_chap.append(getProbaOfDicoPrec(i,theta_tmp)) vraissemblance=0 for idx,p in enumerate(probas_chap): for u in theta_tmp: for v in theta_tmp[u]: try: p[v] try: a=(theta_tmp[u][v]/p[v])*np.log(theta_cur[u][v]) b=(1-theta_tmp[u][v]/p[v])*np.log(1-theta_cur[u][v]) vraissemblance+= a+b + np.log(1-theta_cur[u][v]) except ZeroDivisionError: pass except KeyError: pass print("vraissemblance",vraissemblance) theta_cur=copy.deepcopy(theta_tmp) return theta_cur pc = fitModele2(data_train,nbIt=10) #ne fonctionne pas def inference(listeT1, graph): listeUP=[] listeU=[] listeTmp=[] for u in listeT1: for v in graph[u]: if (np.random.rand()<graph[u][v]): if(v not in listeU): listeU.append(v) listeUP.append((v,graph[u][v])) listeTmp.append(v) t=1 while((len(listeTmp)>0) &(t<15)): listeTmp2=[] for u in listeTmp: for v in graph[u]: if (np.random.rand()<graph[u][v]): if(v not in listeU): listeU.append(v) listeUP.append((v,graph[u][v])) listeTmp2.append(v) listeTmp=copy.deepcopy(listeTmp2) t+=1 listeUP.sort(key=lambda elem: elem[1]) return [i[0] for i in listeUP] t1=[68.0] print(inference(t1,pc)) #devrait fonctionner def inference2(listeT1, graph,iteration): dicoProbaInfection={} for u in graph: dicoProbaInfection[u]=0 for it in range(iteration): listeFinalInfected={} listeTM1={} #on affecte les personnes au temps T2 for u in listeT1.keys(): for v in graph[u]: if(np.random.rand()<graph[u][v]): try: listeT1[v] except KeyError: try: listeFinalInfected[v] #listeTM1[v] except KeyError: listeFinalInfected[v]=1 listeTM1[v]=1 #on affecte les gens aux temps t3 et suivants t=1 while(len(listeTM1.keys())>0 & t<20): listeTM1tmp={} for u in listeTM1.keys(): for v in graph[u]: try: listeT1[v] except KeyError: try: listeTM1[v] except KeyError: try: listeFinalInfected[v] #listeTM1tmp[v] except KeyError: try: if (np.random.rand()<graph[u][v]): listeTM1tmp[v]=1 listeFinalInfected[v]=1 except KeyError: print("le lien entre u ",u,"et v ",v," n'existe pas dans le graphe") 
listeTM1=copy.deepcopy(listeTM1tmp) t+=1 for u in listeFinalInfected: dicoProbaInfection[u]+=1 somme = 0 for u in dicoProbaInfection: somme += dicoProbaInfection[u] for u in dicoProbaInfection: try: dicoProbaInfection[u] = dicoProbaInfection[u]*1.0/somme except ZeroDivisionError: pass return dicoProbaInfection def MAP(graph,data_test): s=0 for d in data_test: listeD=[i[0] for i in d if (i[1]>1)] listeT1=[i[0] for i in d if (i[1]==1)] listeU=inference(listeT1,graph) s2=0 for i,u in enumerate(listeU): intersection = [val for val in listeD if val in listeU[:i]] s2+=len(intersection)*1.0/(i+1) try: s+=s2/len(listeU) except ZeroDivisionError: pass return s/len(data_test) MAP(pc,data_test) MAP2(pc,data_test) #devrait fonctionner def MAP2(graph,data_test,it=2): s=0 for d in tqdm(data_test):#boucle dataset listeD=[i[0] for i in d if (i[1]>1)] listeT1={} for i in d: if (i[1]==1): listeT1[i[0]]=1 dicoU=inference2(listeT1,graph,it) listeU = [i[0] for i in sorted(dicoU.items(), key=operator.itemgetter(1))[::-1]] s2=0 for i,u in enumerate(listeU): intersection = [val for val in listeD if val in listeU[:i]] s2+=len(intersection)*1.0/(i+1) try: s+=s2/len(listeU) except ZeroDivisionError: pass return s/len(data_test) MAP2(pc,data_test) <END_TASK>
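Stripped of the EM-style learning loop, the diffusion model being fitted above is the classic independent cascade: each newly infected node u gets exactly one chance to infect each successor v, with probability theta[u][v]. A compact reference simulation, with an invented four-node graph:

import random

random.seed(0)

# Hypothetical weighted graph: theta[u][v] = P(u infects v)
theta = {0: {1: 0.9, 2: 0.4}, 1: {3: 0.8}, 2: {3: 0.3}, 3: {}}

def independent_cascade(theta, seeds):
    infected = set(seeds)
    frontier = list(seeds)
    while frontier:
        fresh = []
        for u in frontier:
            for v, p in theta[u].items():
                if v not in infected and random.random() < p:
                    infected.add(v)
                    fresh.append(v)
        frontier = fresh  # only newly infected nodes spread on the next step
    return infected

print(independent_cascade(theta, seeds=[0]))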
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Preprocessing Step2: Let's look at what the images look like Step3: Feature extraction Step4: Visualization of HOG histograms Step5: Short look at the training data Step6: PCA 2D decomposition Step7: Classification Step8: Training Step9: Testing and score Step10: Short look at the prediction Step11: Our results for different parameters Step12: SVC(kernel=linear) and SVC()
<ASSISTANT_TASK:> Python Code: import myutils raw_data_training, raw_data_testing = myutils.load_CIFAR_dataset(shuffle=False) # raw_data_training = raw_data_training[:5000] class_names = myutils.load_CIFAR_classnames() n_training = len( raw_data_training ) n_testing = len( raw_data_testing ) print('Loaded CIFAR10 database with {} training and {} testing samples'.format(n_training, n_testing)) # Converting to greyscale def rgb2gray(image): import cv2 return cv2.cvtColor(image,cv2.COLOR_BGR2GRAY) Xdata_training = [ rgb2gray(raw_data_training[i][0]) for i in range(n_training)] Xdata_testing = [ rgb2gray(raw_data_testing[i][0]) for i in range(n_testing)] import random import matplotlib.pyplot as plt %matplotlib inline # lets choose some random sample of 10 training images examples_id = random.sample(range(n_training), 10) fig, axarr = plt.subplots(2,len(examples_id), figsize=(15,3)) for i in range(len(examples_id)): id = examples_id[i] axarr[0,i].imshow(raw_data_training[id][0][:,:]) axarr[0,i].axis('off') axarr[1,i].imshow(Xdata_training[id],cmap='gray') axarr[1,i].axis('off') print('Few examples after preprocessing') plt.show() # Configuring HOG descriptor # see http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.hog # Configuration of HOG descriptor normalize = True # True ==> yields a little bit better score # block_norm = 'L2-Hys' # or 'L1' orientations = 9 # pixels_per_cell = [8, 8] # see section 'Additional remarks' for some explanation cells_per_block = [2, 2] # def extractFeature(img, vis=False): from skimage.feature import hog return hog(img, orientations, pixels_per_cell, cells_per_block, block_norm, visualise=vis, transform_sqrt=normalize) # extracting one sample data nfeatures = extractFeature(Xdata_training[0], vis=False).size print('Number of features = {}'.format(nfeatures)) fig, axarr = plt.subplots(3,len(examples_id), figsize=(16,5)) for i in range(len(examples_id)): id = examples_id[i] axarr[0,i].imshow(raw_data_training[id][0][:,:]) axarr[0,i].axis('off') axarr[1,i].imshow(Xdata_training[id],cmap='gray') axarr[1,i].axis('off') _, hog_vis = extractFeature(Xdata_training[id], vis=True) axarr[2,i].imshow(hog_vis,cmap='gray') axarr[2,i].axis('off') plt.show() # feature extraction import numpy as np X_training = np.array( [ extractFeature(Xdata_training[i], vis=False) for i in range(n_training) ] ) y_training = np.array( [ raw_data_training[i][1] for i in range(n_training) ] ) X_testing = np.array( [ extractFeature(Xdata_testing[i], vis=False) for i in range(n_testing) ] ) y_testing = np.array( [ raw_data_testing[i][1] for i in range(n_testing) ] ) print( 'X_training shape is {}'.format( X_training.shape ) ) print( 'y_training shape is {}'.format( y_training.shape ) ) print( 'X_testing shape is {}'.format( X_testing.shape ) ) print( 'y_testing shape is {}'.format( y_testing.shape ) ) import pandas as pd print( 'X_training data description') pd.DataFrame( X_training ).describe() print( 'y_training data description') pd.DataFrame( y_training ).describe() from sklearn import decomposition pca = decomposition.PCA(n_components=2) pca.fit(X_training) X = pca.transform(X_training) print(pca.explained_variance_ratio_) plt.figure( figsize=(15,15) ) plt.scatter( X[:, 0], X[:, 1], c=y_training, cmap='tab10' ) # plt.colorbar() plt.show() # TODO: remove outliers from sklearn.svm import LinearSVC # parameter C chosen experimentally (see explanation below) C = 1.0 clf = LinearSVC(C=C) # this may take some time clf.fit(X_training, y_training) clf.score( X_testing, 
y_testing ) y_predict = clf.predict( X_testing ) import numpy as np np.unique( y_predict ) for C in [ 0.001, 0.01, 0.1, 1.0, 1.2, 1.5, 2.0, 10.0 ]: clf = LinearSVC(C=C) clf.fit(X_training, y_training) print( 'normalize={norm}, C={C}, score={score}'.format(norm=normalize, C=C, score=clf.score( X_testing, y_testing )) ) from sklearn.svm import SVC svc_lin_clf = SVC(kernel='linear', C=1) svc_lin_clf.fit(X_training, y_training) svc_lin_clf.score(X_testing, y_testing) from sklearn.svm import SVC svc_clf = SVC(C=1) svc_clf.fit(X_training, y_training) svc_clf.score(X_testing, y_testing) <END_TASK>
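The feature count printed by the extraction cell is fully determined by the HOG configuration: skimage's block layout gives (cells per row - block width + 1) * (cells per column - block height + 1) block positions, each contributing block_w * block_h * orientations values. A worked calculation for the settings used here on a 32x32 CIFAR image:

# Feature count implied by the HOG settings above
img_h = img_w = 32          # CIFAR-10 image size
cell = 8                    # pixels_per_cell
block = 2                   # cells_per_block
orientations = 9

cells_h, cells_w = img_h // cell, img_w // cell                # 4 x 4 cells
blocks_h, blocks_w = cells_h - block + 1, cells_w - block + 1  # 3 x 3 positions
n_features = blocks_h * blocks_w * block * block * orientations
print(n_features)  # 324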
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Load a tabulated rate Step2: A human readable string describing the rate, and the nuclei involved Step3: Evaluate the electron capture rate [s$^{-1}$] at a given temperature (T [K]) and $Y_e$-weighted density ($\rho Y_e$ [g/cm$^3$]) Step4: Plot the rate depending on the temperature and the density on a heat map. Step5: Another example Step6: Working with a group of rates
<ASSISTANT_TASK:> Python Code: import pynucastro as pyrl al_mg = pyrl.Rate("al28--mg28-toki") print(al_mg) al_mg.eval(T=1.e8,rhoY=1.e9) al_mg.plot() ne_f = pyrl.Rate("ne23--f23-toki") print(ne_f) ne_f.plot() files = ["c13-pg-n14-nacr", "n13--c13-wc12", "c12-c12n-mg23-cf88", "o14-ap-f17-Ha96c", "mg23--na23-toki", "na23--ne23-toki", "n13-pg-o14-lg06", "c12-c12p-na23-cf88"] rc = pyrl.RateCollection(files) rc.plot() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: <b>1. </b>Load image from a file and display the image. Determine the size of the image. Finally, save a new copy of the image in a new file.<br /> <br /> Step2: <b>2. </b>Write a function <code>flip_image</code>, which flips an image either vertically or horizontally. The function should take two input parameters Step3: <b>3. </b> Write a function to generate the negative of an image. This means that a new image is created in which the pixel values are all equal to 1.0 minus the pixel value in the original image. <br /> <br /> Step4: <b>4. </b>Write a function <code>average_intensity</code>, which calculates the average intensity level of an image. Use this function on example images and discuss your results. You can use images from section 2 and 3 <br /> <br /> Step5: <b>5. </b>Write a function <code>threshold_image</code> which thresholds an image based on a threshold level given as a parameter to the function. The function should take two parameters Step6: <b>6. </b>Write a function avg_intensity_threshold_image which takes an image as its only parameter and thresholds this image based on the images average intensity value. <b>Hint Step7: <b>7. </b>Write a function which subsamples a grayscale image of size 512x512 by factors of 2, i.e., 256, 128, 64 and display your results. There are multiple ways to do this as discussed in the textbook. You may simply sub-sample, average, etc. Describe which you used and why. Step8: <b>8. </b>Keeping your original image resolution reduce the number of gray levels in your image from 256 to 2 in powers of 2. Display your results.
<ASSISTANT_TASK:> Python Code: '''This is a definition script, so we do not have to rewrite code''' import numpy as np import cv2 import matplotlib.pyplot as mplt # set matplotlib to print inline (Jupyter) %matplotlib inline # path prefix pth = '../data/' # files to be used as samples # list *files* holds the names of the test images files = ['cameraman.png', 'moon.jpg', 'rose.bmp', 'skull.bmp', 'Woman.bmp','hut.jpg'] # Usefull function def rg(img_path): return cv2.imread(pth+img_path, cv2.IMREAD_GRAYSCALE) img1 = rg(files[0]) hg, wd = img1.shape # function that returns a tuple with 2 parameters print 'height: ' + str(hg) print 'width: '+ str(wd) mplt.figure() mplt.imshow(img1, cmap='gray') mplt.title(files[0]) img2 = img1.copy() cv2.imwrite(pth + 'new_cameraman.png', img2) # create a new file called 'cameraman_new.png' if files[-1] != 'new_cameraman.png': files.append('new_cameraman.png') # add the new element to the list print files # checking img = rg(files[4]) def flip_image(im_data, flag): im_flip = cv2.flip(im_data, flag) # flag=1 flip arround y-axis; flag=0 flip arround x-axis return im_flip vert = flip_image(img, 1) hor = flip_image(img, 0) ls = [img, vert, hor] for i in ls: mplt.figure() mplt.imshow(i, cmap='gray') mplt.title(files[4]) img = rg(files[1]) def neg_im(): im = 255 - img return im image = neg_im() mplt.figure() mplt.imshow(img, cmap='gray') mplt.title(files[1]) mplt.figure() mplt.imshow(image, cmap='gray') mplt.title(files[1]+'_negative') def average_intensity(img): return img.mean() for avg, name in zip([average_intensity(rg(i)) for i in files], files): print 'Average intensity of "', name, '":', avg def threshold_image(img, th): # ignore retVal parameter, no Otsu binarization will be applied _, t = cv2.threshold(img, th, 255, cv2.THRESH_BINARY_INV) return t img = threshold_image(rg(files[3]), 150) mplt.figure() mplt.imshow(img, cmap='gray') mplt.title('Threshold of ' + files[3]) def avg_intensity_threshold_image(img): return threshold_image(img, average_intensity(img)) ths = [avg_intensity_threshold_image(rg(i)) for i in files] for t, name in zip(ths, files): mplt.figure() mplt.imshow(t, cmap='gray') mplt.title('Threshold of ' + name) def downSampling(src,N): '''Downsample an image N times. src: Image data N: Number of subsamples(it must be lower than 9) [!] if N is too large it will display the max amount of samples ''' if src.shape[:2] != (512,512) : # image(512x512) verification print("[!] Image is not 512x512") else: # Plotting the base image mplt.figure() mplt.imshow(src, cmap='gray') mplt.title("Image size: "+str(src.shape[:2])) n = 1 # Loop for plotting the N subsamples while(n <= N): src = cv2.pyrDown(src) # subsample to 1/2 mplt.figure() mplt.imshow(src, cmap='gray') mplt.title("Image size: "+str(src.shape[:2])) n += 1 if n >= 9: # Condition in the case of 'N' been too large return pic = rg(files[5]) downSampling(pic, 3) img = rg(files[3]) # 1 num_levels = 4 # 2 step = 255.0/(num_levels-1) list_levels = step*np.arange(0, num_levels) # 3 orig_levels = np.arange(0,256) # 4 distance = np.zeros((num_levels,256)) # 5 for level_id in range(0,num_levels): distance[level_id, :] = abs(list_levels[level_id]-orig_levels) # 6 LUT_quant = list_levels[np.argmin(distance, 0)] # 7 LUT_quant = np.uint8(LUT_quant) # 8 img_th = cv2.LUT(img, LUT_quant) # 9 mplt.figure() mplt.imshow(img_th, cmap='gray') mplt.title('Threshold of ') <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 2. Subspacing - sampling in the domain of features - evolving and mutating columns
<ASSISTANT_TASK:> Python Code: from evoml.subsampling import BasicSegmenter_FEMPO, BasicSegmenter_FEGT, BasicSegmenter_FEMPT df = pd.read_csv('datasets/ozone.csv') df.head(2) X, y = df.iloc[:,:-1], df['output'] print(BasicSegmenter_FEGT.__doc__) from sklearn.tree import DecisionTreeRegressor clf_dt = DecisionTreeRegressor(max_depth=3) clf = BasicSegmenter_FEGT(base_estimator=clf_dt, statistics=True) clf.fit(X, y) clf.score(X, y) EGs = clf.segments_ len(EGs) sampled_datasets = [eg.get_data() for eg in EGs] [sd.shape for sd in sampled_datasets] from evoml.subspacing import FeatureStackerFEGT, FeatureStackerFEMPO print(FeatureStackerFEGT.__doc__) clf = FeatureStackerFEGT(ngen=30) clf.fit(X, y) clf.score(X, y) ## Get the Hall of Fame individual hof = clf.segment[0] sampled_datasets = [eg.get_data() for eg in hof] [data.columns.tolist() for data in sampled_datasets] ## Original X columns X.columns <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Part 2 Step3: Assignment wrapup
<ASSISTANT_TASK:> Python Code: # Put your code here! import random as rand import math def f(x): return 2.0*(x**2) + 3.0 # x min, max: -2, 4 (delta_x = 6) # y min, max: 0, 35 Area = (35-0)*(4+2) real_area = 66.0 samples = [] errors = [] for i in range(1,7): N_samples = 10**i N_below = 0 for j in range(N_samples): x = rand.uniform(-2,4) y = rand.uniform(0,35) if y < f(x): N_below += 1 est_area = Area * N_below/N_samples error = math.fabs( (est_area - real_area)/real_area) samples.append(N_samples) errors.append(error) print("estimated area, real area, error:", est_area, real_area, error) %matplotlib inline import matplotlib.pyplot as plt plt.plot(samples,errors,'b-',[1,1.0e+6],[1,1.0e-3],'r-') plt.xscale('log') plt.yscale('log') # Put your code here! # z bounds: +/- 8 cm # x,y bounds: +/- 20 cm # bounding volume: 40*40*16 cm^3 = 25,600 cm^3 R = 12 r = 8 rho = 0.8 def f(x,y,z,R,r): if (R - (x**2 + y**2)**0.5)**2 + z**2 < r**2: return 1 else: return 0 # x min, max: -20, 20 # y min, max: -20, 20 # z min, max: -8, 8 sample_volume = 40*40*16 real_volume = (math.pi*(8**2))*(2.0*math.pi*12) samples = [] errors = [] for i in range(1,7): N_samples = 10**i N_inside = 0 for j in range(N_samples): x = rand.uniform(-20,20) y = rand.uniform(-20,20) z = rand.uniform(-8,8) N_inside += f(x,y,z,R,r) est_volume = sample_volume * N_inside/N_samples error = math.fabs( (est_volume - real_volume)/real_volume) samples.append(N_samples) errors.append(error) print("estimated mass: {:.3f}, real mass: {:.3f}, error {:.3e}".format(est_volume*rho, real_volume*rho, error)) %matplotlib inline import matplotlib.pyplot as plt plt.plot(samples,errors,'b-',[1,1.0e+6],[1,1.0e-3],'r-') plt.xscale('log') plt.yscale('log') from IPython.display import HTML HTML( <iframe src="https://goo.gl/forms/NOKKHPQ0oKn1B7e23?embedded=true" width="80%" height="1200px" frameborder="0" marginheight="0" marginwidth="0"> Loading... </iframe> ) <END_TASK>
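The red reference line drawn in both plots has slope -1/2 on log-log axes because Monte Carlo error shrinks like 1/sqrt(N). The toy estimate below shows that scaling directly on a known integral, the integral of x^2 over [0, 1], which equals 1/3; the last printed column, error times sqrt(N), should stay roughly flat:

import random
import math

random.seed(42)
for n in [100, 10000, 1000000]:
    est = sum(random.uniform(0, 1) ** 2 for _ in range(n)) / n
    err = abs(est - 1.0 / 3.0)
    # err * sqrt(n) hovers near a constant, illustrating 1/sqrt(N) convergence
    print(n, round(err, 6), round(err * math.sqrt(n), 3))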
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Let's load in a meme. I'm partial to 'Deal with it'. Step2: To convert this to a 1 bit image, I convert everything darker than some threshold to black (1), and everything else to white (-1). Experimenting a bit with the particular image of the 'deal with it meme' that I have, a threshold of 80 seemed to work reasonably. The resulting image is still a bit rough around the edges, but it's recognizable. Step4: Now train the weights. Whereas before we used Hebb's rule, now let's use the Storkey Learning Rule. This rule has a few nice advantages over Hebb's rule Step5: This next cell can take a little while if the image is large. For an image of size 128x128, it takes a minute or two. Step6: Now start with a noisy version of the image. We'll just flip a certain number of random pixels on each row of the image. Step7: Now we can start with that, and use the weights to update it. We'll update the units asynchronously (one at a time), and keep track of the energy of the network every so often. Step8: Voila. Step9: Cool. So now we make some weights for this image. The takes a little bit longer than the Hebbian learning rule when it is dealing with previous, nonzero weights. Step10: Now let's doublecheck that the average weights also still work for the 'deal with it' image. Step11: Sweet. So now we can try something like feeding it a pattern that is halfway between the two patterns -- it should eventually settle into one of them! Who has greater meme strength!??! Step12: I could force those 0 values to -1 or 1, but that biases the pattern towards deal and neil, respectively (at least, testing suggested this -- I think because Aang has more black pixels and Deal has more white pixels). So, I'll leave them in. I could probably solve this by randomly setting 0's to 1 or -1, but naw. Step13: Assuming the cells/pixels of 0 were unaltered, if you run that a few times, you'll notice that sometimes it settles on Neil, and sometimes it settles on Deal!!! Step14: Okay, now let's make a spurious pattern. Any linear combination will do. Step15: Pretty noisy. Only Aang, and kiiiiinda the Deal with It, are visible. Now make a noisy version of that combination. Step16: Beautifully noisy. Can barely see anything in it. But now if we start with that, and apply the weights, it should recover the spurious pattern!
<ASSISTANT_TASK:> Python Code: import os import numpy as np import imageio import matplotlib from matplotlib import pyplot as plt import pandas as pd # from skimage import img_as_ubyte %matplotlib inline matplotlib.rcParams['figure.figsize'] = (20.0, 10.0) np.random.seed(1) #deal = 2 * np.random.binomial(1,.5,size=(5,5)) - 1 #deal = imread('obama.png', mode="L") deal = imread('small-deal-with-it-with-text.jpg', mode="L") print(deal.shape) deal = deal.astype(int) np.unique(deal) bvw_threshold = 80 deal[deal <= bvw_threshold] = -1 deal[deal > bvw_threshold] = 1 deal = -deal deal np.unique(deal) plt.imshow(deal, cmap='Greys', interpolation='nearest'); def storkey_rule(pattern, old_weights=None): pattern: 2-dimensional array old_weights: square array of length pattern.shape[0]*pattern.shape[1] mem = pattern.flatten() n = len(mem) if old_weights is None: old_weights = np.zeros(shape=(n,n)) hebbian_term = np.outer(mem,mem) net_inputs = old_weights.dot(mem) net_inputs = np.tile(net_inputs, (n, 1)) # repeat the net_input vector n times along the rows # so we now have a matrix # h_i and h_j should exclude input from i and j from h_ij h_i = np.diagonal(old_weights) * mem # this obtains the input each neuron receives from itself h_i = h_i[:, np.newaxis] # turn h_i into a column vector so we can subtract from hij appropriately h_j = old_weights * mem # element-wise multiply each row of old-weights by mem np.fill_diagonal(h_j,0) # now replace the diagonal of h_j with 0's; the diagonal of h_j is the # self-inputs, which are redundant with h_i; np.fill_diagonal modifies inplace hij = net_inputs - h_i - h_j post_synaptic = hij * mem #pre_synaptic = post_synaptic.T pre_synaptic = hij.T * mem[:, np.newaxis] new_weights = old_weights + (1./n)*(hebbian_term - pre_synaptic - post_synaptic) return new_weights deal_weights = storkey_rule(deal, old_weights=None) deal_weights def noisify(pattern, numb_flipped=30): noisy_pattern = pattern.copy() for idx, row in enumerate(noisy_pattern): choices = np.random.choice(range(len(row)), numb_flipped) noisy_pattern[idx,choices] = -noisy_pattern[idx,choices] return noisy_pattern noisy_deal = noisify(pattern=deal) plt.imshow(noisy_deal, cmap='Greys', interpolation='nearest'); def flow(pattern, weights, theta=0, steps = 50000): pattern_flat = pattern.flatten() if isinstance(theta, numbers.Number): thetas = np.zeros(len(pattern_flat)) + theta for step in range(steps): unit = np.random.randint(low=0, high=(len(pattern_flat)-1)) unit_weights = weights[unit,:] net_input = np.dot(unit_weights,pattern_flat) pattern_flat[unit] = 1 if (net_input > thetas[unit]) else -1 #pattern_flat[unit] = np.sign(net_input) if (step % 10000) == 0: energy = -0.5*np.dot(np.dot(pattern_flat.T,weights),pattern_flat) + np.dot(thetas,pattern_flat) print("Energy at step {:05d} is now {}".format(step,energy)) evolved_pattern = np.reshape(a=pattern_flat, newshape=(pattern.shape[0],pattern.shape[1])) return evolved_pattern steps = 50000 theta = 0 noisy_deal_evolved = flow(noisy_deal, deal_weights, theta = theta, steps = steps) plt.imshow(noisy_deal_evolved, cmap='Greys', interpolation='nearest'); # woah = imread('woah.png', mode="L") woah = imageio.imread('aang.jpg')[:,:,0] woah = woah.astype(int) woah[woah >= 1] = 1 woah[woah < 1] = -1 woah = -woah np.unique(woah) plt.imshow(woah, cmap='Greys', interpolation='nearest'); average_weights = storkey_rule(woah, old_weights=deal_weights) noisy_woah = noisify(pattern=woah, numb_flipped=15) plt.imshow(noisy_woah, cmap='Greys', interpolation='nearest'); 
recovered_woah = flow(noisy_woah, average_weights, theta = theta, steps = steps)
plt.imshow(recovered_woah, cmap='Greys', interpolation='nearest');
deal_recovered = flow(noisy_deal, average_weights, theta = theta, steps = steps)
plt.imshow(deal_recovered, cmap='Greys', interpolation='nearest');
deal_with_neil = (woah + deal) / 2
print(np.unique(deal_with_neil))
#deal_with_neil[deal_with_neil == 0] = -1
#np.unique(deal_with_neil)
plt.imshow(deal_with_neil, cmap='Greys', interpolation='nearest');
recovered_deal_with_neil = flow(deal_with_neil, average_weights, theta = theta, steps = steps)
plt.imshow(recovered_deal_with_neil, cmap='Greys', interpolation='nearest');
shrek = imageio.imread('shrek.jpg')
if shrek.ndim == 3:  # keep a single channel so the flattened length matches the other memes
    shrek = shrek[:, :, 0]
shrek = shrek.astype(int)
shrek_threshold = 200
shrek[shrek < shrek_threshold] = -1
shrek[shrek >= shrek_threshold] = 1
shrek[120:,:] = 1
shrek = -shrek
plt.imshow(shrek, cmap='Greys', interpolation='nearest')
plt.show()
# Fold Shrek into the existing Storkey weights so the network has actually
# stored all three memes before we ask it to recall them. (A plain Hebbian
# outer product, np.outer(shrek.flatten(), shrek.flatten()), would work too,
# but mixing the two learning rules muddies the comparison.)
average_weights = storkey_rule(shrek, old_weights=average_weights)
noisy_shrek = noisify(pattern=shrek)
plt.imshow(noisy_shrek, cmap='Greys', interpolation='nearest')
plt.show()
recovered_shrek = flow(noisy_shrek, average_weights, theta=theta, steps=steps)
plt.imshow(recovered_shrek, cmap='Greys', interpolation='nearest')
plt.show()
recovered_woah = flow(noisy_woah, average_weights, theta=theta, steps=steps)
plt.imshow(recovered_woah, cmap='Greys', interpolation='nearest')
plt.show()
spurious_meme = shrek + deal + woah
np.unique(spurious_meme)
spurious_meme[spurious_meme > 0] = 1
spurious_meme[spurious_meme < 0] = -1
plt.imshow(spurious_meme, cmap='Greys', interpolation='nearest')
plt.show()
noisy_spurious_meme = noisify(pattern=spurious_meme)
plt.imshow(noisy_spurious_meme, cmap='Greys', interpolation='nearest')
plt.show()
steps = 100000
recovered_spurious_meme = flow(noisy_spurious_meme, average_weights, theta=theta, steps=steps)
plt.imshow(recovered_spurious_meme, cmap='Greys', interpolation='nearest')
plt.show()
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
Step1: Let us start by implementing a very simple network. Our network will only have two input units plus a bias unit, as in the figure.
Step2: Let's plot the input points. Red points belong to the class to be learned, while blue ones do not belong to it.
Step3: Spreading of the network during training
Step4: Plotting the results of training
Step5: Testing
Step6: Classifying the input patterns
Step7: Plotting the results of test
Step8: Notebook styling
<ASSISTANT_TASK:> Python Code:
%matplotlib inline
from pylab import *
from utils import *
#-------------------------------------------------
# Training
# Constants
# Number of input elements
n = 2
# Learning rate
eta = 0.0001
# number of training patterns
n_patterns = 2000
# Number of repetitions of
# the pattern series
epochs = 4
# Number of timesteps
stime = n_patterns*epochs
# Variables
# generate training data (function build_dataset in utils.py)
data = build_dataset(n_patterns)
# Each row of P is an input pattern
P = data[:,:2]
# Each element of o is the desired output
# relative to an input pattern
o = data[:,2]
# Initialize weights
w = zeros(n+1)
# Initialize the weight history storage
dw = zeros([n+1,stime])
# Initialize the error history storage
squared_errors = zeros(epochs)
# limits
upper_bound = P.max(0) + 0.2*(P.max(0)-P.min(0))
lower_bound = P.min(0) - 0.2*(P.max(0)-P.min(0))
# Create the figure
fig = figure(figsize=(4,4))
scatter(*P[(n_patterns//2):,:].T, s = 50, c = '#ff8888' )
scatter(*P[:(n_patterns//2),:].T, s = 50, c = '#8888ff' )
xlim( [lower_bound[0], upper_bound[0]] )
ylim( [lower_bound[1], upper_bound[1]] )
show()
# Create a list of pattern indices.
# We will reshuffle it at each
# repetition of the series
pattern_indices = arange(n_patterns)
# counter of repetitions
# of the series of patterns
epoch = -1
for t in range(stime) :
    # Reiterate the input pattern
    # sequence through timesteps
    # Reshuffle at the end
    # of the series
    if t%n_patterns == 0:
        shuffle(pattern_indices)
        epoch += 1
    # Current pattern
    k = pattern_indices[t%n_patterns]
    # MAIN STEP CALCULATIONS
    # Bias-plus-input vector
    x = hstack([1, P[k]])
    # Weighted sum - !!dot product!!
    net = dot(w, x)
    # Activation
    y = step(net)
    # Learning
    w += eta*(o[k] - y)*x
    # Store current weights
    dw[:,t] = w
    # Current error
    squared_errors[epoch] += 0.5*(o[k] - y)**2
# Create the figure
fig = figure(figsize=(10,4))
ax = fig.add_subplot(121)
ax.set_title('Decision boundary')
# Choose the x-axis coords of the
# two points to plot the decision
# boundary line
x1 = array([lower_bound[0],upper_bound[0]])
# Calculate the y-axis coords of the
# two points to plot the decision
# boundary line as it changes
for t in range(stime) :
    # Show every 10th timestep
    if t%10 == 0:
        if dw[2,t] != 0 :
            # Evaluate x2 based on current weights
            x2 = -(dw[1,t]*x1 + dw[0,t])/dw[2,t]
            # Plot the changes in the boundary line during learning
            ax.plot(x1,x2, c='#cccccc', linewidth = 1, zorder = 1)
# Evaluate x2 based on final weights
x2 = -(w[1]*x1 + w[0])/w[2]
# Plot the learned boundary line
plot(x1,x2, c= '#000000', linewidth = 2, zorder = 1)
# Plot in red points belonging to the class
scatter(*P[(n_patterns//2):,:].T, s = 50, c = '#ff8888', zorder = 2 )
# Plot in blue points not belonging to the class
scatter(*P[:(n_patterns//2),:].T, s = 50, c = '#8888ff', zorder = 2 )
# Limits and labels of the plot
xlim( [lower_bound[0], upper_bound[0]] )
ylim( [lower_bound[1], upper_bound[1]] )
xlabel("$p_1$", size = 'xx-large')
ylabel("$p_2$", size = 'xx-large')
# Plot squared errors
ax = fig.add_subplot(122)
ax.set_title('Error')
ax.plot(squared_errors)
# Labels and ticks of the plot
xlabel("epochs", size = 'xx-large')
ylabel("SSE", size = 'xx-large')
xticks(range(epochs))
show()
#-------------------------------------------------
# Test
# Number of test patterns
n_patterns = 50000
# Generating test data - we use a single repeated centroid
# so we have a single population of points expanding across
# the decision boundary line
test_centroid = lower_bound + (upper_bound - lower_bound) / 2.0
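# --- Added sketch (not part of the original notebook): a quick vectorized
# check of how well the learned weights separate the *training* set before
# moving on to the test data below. It assumes utils.step() accepts NumPy
# arrays elementwise; if it only handles scalars, wrap it in np.vectorize.
X_aug = hstack([ones((len(P), 1)), P])   # prepend the bias input to each pattern
train_out = step(X_aug.dot(w))           # one forward pass over all patterns
print("Training accuracy: {:.3f}".format(mean(train_out == o)))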
# Generating test data - build_dataset function from utils.py.
# We change the standard deviation
data = build_dataset(n_patterns,
                     centroids1 = [ test_centroid ],  # we use the same centroids for class
                     centroids2 = [ test_centroid ],  # and non-class
                     std_deviation = 2.6 )            # we want the test samples
                                                      # to be sparsely distributed
# Each row of P is a test pattern
P = data[:,:2]
# Create the figure
fig = figure(figsize=(5,4))
title('Tests - average error = {}'.format(mean(squared_errors).round(4)))
# Show points
ax = scatter(*P.T, s = 2, edgecolors='none', zorder = 2 )
xlim( [lower_bound[0], upper_bound[0]] )
ylim( [lower_bound[1], upper_bound[1]] )
show()
y = zeros(n_patterns)
# iterate tests
for t in range(n_patterns) :
    # Bias-plus-input vector
    x = hstack([1, P[t]])
    # Weighted sum - !!dot product!!
    net = dot(w, x)
    # Activation
    y[t] = step(net)
# Create the figure
fig = figure(figsize=(5,4))
title('Tests - average error = {}'.format(mean(squared_errors).round(4)))
# Show points
ax = scatter(*P.T, s = 2, c = y, edgecolors='none', zorder = 2, cmap = cm.coolwarm )
#limits
xlim( [lower_bound[0], upper_bound[0]] )
ylim( [lower_bound[1], upper_bound[1]] )
xlabel("$p_1$", size = 'xx-large')
ylabel("$p_2$", size = 'xx-large')
show()
from IPython.core.display import HTML
def css_styling():
    styles = open("../style/ipybn.css", "r").read()
    return HTML(styles)
css_styling()
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
Step1: Implementing a Neural Network
Step2: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
Step3: Forward pass: compute scores
Step4: Forward pass: compute loss
Step5: Backward pass
Step6: Train the network
Step8: Load the data
Step9: Train a network
Step10: Debug the training
Step11: Tune your hyperparameters
Step12: Run on the test set
<ASSISTANT_TASK:> Python Code:
from __future__ import print_function
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """ returns relative error """
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
def init_toy_data():
    np.random.seed(1)
    X = 10 * np.random.randn(num_inputs, input_size)
    y = np.array([0, 1, 2, 2, 1])
    return X, y
net = init_toy_model()
X, y = init_toy_data()
scores = net.loss(X)
print('Your scores:')
print(scores)
print()
print('correct scores:')
correct_scores = np.asarray([
  [-0.81233741, -1.27654624, -0.70335995],
  [-0.17129677, -1.18803311, -0.47310444],
  [-0.51590475, -1.01354314, -0.8504215 ],
  [-0.15419291, -0.48629638, -0.52901952],
  [-0.00618733, -0.12435261, -0.15226949]])
print(correct_scores)
print()
# The difference should be very small. We get < 1e-7
print('Difference between your scores and correct scores:')
print(np.sum(np.abs(scores - correct_scores)))
loss, _ = net.loss(X, y, reg=0.05)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print('Difference between your loss and correct loss:')
print(np.sum(np.abs(loss - correct_loss)))
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.05)
# these should all be less than 1e-8 or so
for param_name in grads:
    f = lambda W: net.loss(X, y, reg=0.05)[0]
    param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
    print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
net = init_toy_model()
stats = net.train(X, y, X, y,
                  learning_rate=1e-1, reg=5e-6,
                  num_iters=100, verbose=False)
print('Final training loss: ', stats['loss_history'][-1])
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Subsample the data
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
                  num_iters=1000, batch_size=100,
                  learning_rate=1e-4, learning_rate_decay=0.95,
                  reg=0.25, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.tight_layout()
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.tight_layout()
plt.title('Classification accuracy history')
plt.legend()
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')
plt.show()
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
    W1 = net.params['W1']
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
show_net_weights(net)
best_net = None # store the best model into this
best_acc = -1
best_stats = None
from cs231n.pca import PCA
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net.                                                            #
#                                                                               #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative   #
# differences from the ones we saw above for the poorly tuned network.         #
#                                                                               #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters         #
# automatically like we did on the previous exercises.                         #
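# Added note (not part of the original assignment): a simple alternative to
# the hand-picked grid below is log-uniform random search, e.g.
#     lr = 10 ** np.random.uniform(-4, -2)
#     reg = 10 ** np.random.uniform(-2, 1)
# sampled a few dozen times; it usually covers the useful range faster than
# a coarse grid sweep.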
#                                                                               #
#################################################################################
hidden_sizes = [150]
learning_rates = [1e-3]
batch_sizes = [200]#[150]
regularizations = [0.25]#[0.5]
input_sizes = [32 * 32 * 3]
for in_size in input_sizes:
    X_train_pca = PCA(X_train, in_size)
    X_val_pca = PCA(X_val, in_size)
    for h_size in hidden_sizes:
        for lr in learning_rates:
            for batch in batch_sizes:
                for reg in regularizations:
                    print('>>>>> input_size=%d, hidden_size=%d, lr=%.5f, batch_size=%3d, reg=%.2f'
                          % (in_size, h_size, lr, batch, reg))
                    net = TwoLayerNet(in_size, h_size, num_classes)
                    # NB: dropout is a custom extension in this solution's
                    # modified neural_net.py, not part of the stock API
                    stats = net.train(X_train_pca, y_train, X_val_pca, y_val,
                                      num_iters=5000, batch_size=batch,
                                      learning_rate=lr, learning_rate_decay=0.95,
                                      reg=reg, verbose=False, dropout=True)
                    # Predict on the validation set
                    val_acc = (net.predict(X_val_pca) == y_val).mean()
                    print('Validation accuracy: ', val_acc)
                    if val_acc > best_acc:
                        best_acc = val_acc
                        best_net = net
                        best_stats = stats
plt.plot(best_stats['loss_history'])
plt.tight_layout()
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')
#################################################################################
#                              END OF YOUR CODE                                 #
#################################################################################
# visualize the weights of the best network
show_net_weights(best_net)
test_acc = (best_net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
Step1: 1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes!
Step2: 2. I don't think anyone's car was built in 0 AD. Discard the '0's as NaN.
Step3: 3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.
Step4: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN.
Step5: 5. "Violation time" is... not a time. Make it a time.
Step6: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK" the same, "WT" and "WHITE" the same, and any other combinations that you notice.
Step7: 7. Join the data with the Parking Violations Code dataset from the NYC Open Data site.
Step8: 8. How much money did NYC make off of parking violations?
Step9: 9. What's the most lucrative kind of parking violation? The most frequent?
Step10: 10. New Jersey has bad drivers, but does it have bad parkers, too? How much money does NYC make off of all non-New York vehicles?
Step11: 11. Make a chart of the top few.
Step12: 12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm, 6pm-12am.
Step13: 14. Make a graph of the number of tickets per day.
Step14: 15. Make a graph of the amount of revenue collected per day.
Step15: 16. Manually construct a dataframe out of https
Step16: 17. What's the parking-ticket-$-per-licensed-driver in each borough of NYC? Do this with pandas and the dataframe you just made, not with your head!
<ASSISTANT_TASK:> Python Code:
import pandas as pd
# As the datatypes in all of the columns vary, I decided to make all the values, except for the ones I specify,
# into str. This also takes care of question 1. -> dtype=str
# The syntax for this is really very nice and clear, an example: na_values= {'Vehicle Year' : ['0']}
# A lot more here: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html
# parse_dates=True, keep_date_col=True
import datetime
import datetime as dt
dt.datetime.strptime('08/04/2013', '%m/%d/%Y')
# -> datetime.datetime(2013, 8, 4, 0, 0)
parser = lambda date: dt.datetime.strptime(date, '%m/%d/%Y')
# taken from here: http://stackoverflow.com/questions/23797491/parse-dates-in-pandas
# adding this (column count starts at 0, without index): parse_dates=[4], date_parser=parser
# nrows=100000,
df = pd.read_csv("violations.csv", na_values= {'Vehicle Year' : ['0']}, parse_dates=[4], date_parser=parser, dtype=str)
df.head()
#df.info()
#df['Date First Observed'].value_counts()
import dateutil.parser
def date_transfer(x):
    # turn strings like "20140324" into "24/03/2014"; unparseable values become 'NaN'
    x = str(x)
    if x == '0':
        return 'NaN'
    try:
        date = dateutil.parser.parse(x)
        return str(date.strftime('%d/%m/%Y'))
    except:
        return 'NaN'
print(date_transfer(20151198))
df['Date First Observed'].apply(date_transfer).head()
# adding na_values = {'Violation Time' : ['0']}
#df = pd.read_csv("violations.csv", na_values= {'Vehicle Year' : ['0'], 'Violation Time' : ['0']}, parse_dates=[4], date_parser=parser, dtype=str)
df['Violation Time'].head(5)
#df['Violation Time'].value_counts()
import dateutil
def pmam(x):
    # turn strings like "0752A" into "07:52 AM"
    x = str(x)
    #x = (':'.join(a+b for a,b in zip(x[::2], x[1::2])))
    try:
        x = str(x[:2] + ':' + x[2:])
        date = dateutil.parser.parse(x)
        return str(date.strftime('%H:%M %p'))
    except:
        return 'NaN'
print(pmam('NaN'))
print(pmam("0752A"))
print(pmam("1240P"))
print(pmam("0232P"))
df['Violation Time pmam'] = df['Violation Time'].apply(pmam)
df['Vehicle Color'].head(10)
def black_and_white(car_color):
    car_color = str(car_color)
    if car_color == 'NaN':
        return 'NaN'
    elif car_color == 'WHITE':
        return car_color
    elif car_color == 'BLACK':
        return car_color
    else:
        return car_color.replace("BK","BLACK").replace("WH", "WHITE")
def rainbow(car_color):
    car_color = str(car_color)
    if car_color == 'NaN':
        return car_color
    elif car_color == 'BROWN':
        return car_color
    elif car_color == 'BLUE':
        return car_color
    elif car_color == 'RED':
        return car_color
    elif car_color == 'GREY':
        return car_color
    elif car_color == 'GREEN':
        return car_color
    else:
        return car_color.replace("BR","BROWN").replace("RD", "RED").replace("GY", "GREY").replace("TN", "TAN").replace("BLU", "BLUE").replace("GN", "GREEN")
df['Vehicle Color'].apply(black_and_white).apply(rainbow).head(5)
DOF_Parking_Violation_Codes_df = pd.read_csv("DOF_Parking_Violation_Codes.csv")
DOF_Parking_Violation_Codes_df.head()
df_merged = df.merge(DOF_Parking_Violation_Codes_df, left_on='Violation Code', right_on='CODE')
def money_to_int(money_str):
    # strip dollar signs/commas and handle the two special tow entries
    if money_str == '200 (Heavy Tow plus violation fine)':
        money_str_Heavy_Tow = money_str[:3]
        return int(money_str_Heavy_Tow)
    if money_str == '100\n(Regular Tow, plus violation fine)':
        money_str_Heavy_Tow = money_str[:3]
        return int(money_str_Heavy_Tow)
    try:
        return int(money_str.replace("$","").replace(",",""))
    except:
        return None
df_merged['All Other Areas int'] = df_merged['All Other Areas'].apply(money_to_int)
df_merged['Manhattan\xa0 96th St. & below int'] = df_merged['Manhattan\xa0 96th St. & below'].apply(money_to_int)
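# --- Added sketch (not in the original notebook): a quick sanity check on
# the two fine columns we just parsed. For most violation codes the
# Manhattan-96th-St-and-below fine is at least the all-other-areas fine,
# so a very low share here would suggest money_to_int mangled something.
_mnh = pd.to_numeric(df_merged['Manhattan\xa0 96th St. & below int'], errors='coerce')
_oth = pd.to_numeric(df_merged['All Other Areas int'], errors='coerce')
print("Share of rows where the Manhattan fine >= other areas:", (_mnh >= _oth).mean())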
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: 1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes! Step2: 2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN. Step3: 3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates. Step4: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN. Step5: 5. "Violation time" is... not a time. Make it a time. Step6: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK", "WT" and "WHITE", and any other combinations that you notice. Step7: 7.Join the data with the Parking Violations Code dataset from the NYC Open Data site. Step8: 8. How much money did NYC make off of parking violations? Step9: 9. What's the most lucrative kind of parking violation? The most frequent? Step10: 10. New Jersey has bad drivers, but does it have bad parkers, too? How much money does NYC make off of all non-New York vehicles? Step11: 11. Make a chart of the top few. Step12: 12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm,6pm-12am. Step13: 14. Make a graph of the number of tickets per day. Step14: 15. Make a graph of the amount of revenue collected per day. Step15: 16. Manually construct a dataframe out of https Step16: 17. What's the parking-ticket-$-per-licensed-driver in each borough of NYC? Do this with pandas and the dataframe you just made, not with your head!
<ASSISTANT_TASK:> Python Code: import pandas as pd #As the datatypes in all of the columns vary, I decided to to make all the values, except for the ones I specify #into str. This also takes care of questions 1. -> dtype=str # The syntax for this is really very nice and clear, an example na_values= {'Vehicle Year' : ['0']} # A lot more here: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html #parse_dates=True,keep_date_col = True import datetime import datetime as dt dt.datetime.strptime('08/04/2013', '%m/%d/%Y') datetime.datetime(2013, 8, 4, 0, 0) parser = lambda date: pd.datetime.strptime(date, '%m/%d/%Y') #taken from here: http://stackoverflow.com/questions/23797491/parse-dates-in-pandas #adding this, column counts starts at 0, without index, parse_dates=[4], date_parser=parser #nrows=100000, df = pd.read_csv("violations.csv", na_values= {'Vehicle Year' : ['0']}, parse_dates=[4], date_parser=parser, dtype=str) df.head() #df.info() #df['Date First Observed'].value_counts() import dateutil.parser def date_transfer(x): x = str(x) if x == '0': return 'NaN' try: date = dateutil.parser.parse(x) return str(date.strftime('%d/%m/%Y')) except: return 'NaN' # dt.datetime.strptime('20130719', '%Y%m%d') # datetime.datetime(2013, 7, 19, 0, 0) # year_month_tag = dateutil.parser.parse(x) return x print(date_transfer(20151198)) df['Date First Observed'].apply(date_transfer).head() #adding n_values = {'Violation Time' : ['0']} #df = pd.read_csv("violations.csv", na_values= {'Vehicle Year' : ['0'], 'Violation Time' : ['0']}, parse_dates=[4], date_parser=parser, dtype=str) df['Violation Time'].head(5) #df['Violation Time'].value_counts() import dateutil def pmam(x): x = str(x) #x = (':'.join(a+b for a,b in zip(x[::2], x[1::2]))) if x == 'NaN': pass try: x = str(x[:2] + ':' + x[2:]) date = dateutil.parser.parse(x) return str(date.strftime('%H:%M %p')) except: return 'NaN' print(pmam('NaN')) print(pmam("0752A")) print(pmam("1240P")) print(pmam("0232P")) df['Violation Time pmam'] = df['Violation Time'].apply(pmam) df['Vehicle Color'].head(10) def black_and_white(car_color): car_color = str(car_color) if car_color == 'NaN': return 'NaN' elif car_color == 'WHITE': return car_color elif car_color == 'BLACK': return car_color else: return car_color.replace("BK","BLACK").replace("WH", "WHITE") def rainbow(car_color): car_color = str(car_color) if car_color == 'NaN': return car_color elif car_color == 'BROWN': return car_color elif car_color == 'BLUE': return car_color elif car_color == 'RED': return car_color elif car_color == 'GREY': return car_color elif car_color == 'GREEN': return car_color else: return car_color.replace("BR","BROWN").replace("RD", "RED").replace("GY", "GRAY").replace("TN", "TAN").replace("BLU", "BLUE").replace("GN", "GREEN") df['Vehicle Color'].apply(black_and_white).apply(rainbow).head(5) DOF_Parking_Violation_Codes_df = pd.read_csv("DOF_Parking_Violation_Codes.csv") DOF_Parking_Violation_Codes_df.head() df_merged = df.merge(DOF_Parking_Violation_Codes_df, left_on='Violation Code', right_on='CODE') def money_to_int(money_str): if money_str == '200 (Heavy Tow plus violation fine)': money_str_Heavy_Tow = money_str[:3] return int(money_str_Heavy_Tow) if money_str == '100\n(Regular Tow, plus violation fine)': money_str_Heavy_Tow = money_str[:3] return int(money_str_Heavy_Tow) try: return int(money_str.replace("$","").replace(",","")) except: return None df_merged['All Other Areas int'] = df_merged['All Other Areas'].apply(money_to_int) df_merged['Manhattan\xa0 96th St. 
& below int'] = df_merged['Manhattan\xa0 96th St. & below'].apply(money_to_int) Mininum_Amount = df_merged['All Other Areas int'].sum() print("NYC made at least: ${:,.2f} on parking fines.".format(Mininum_Amount)) def def_strip(definition): return definition.strip() df_merged['DEFINITION_stripped'] = df_merged['DEFINITION'].apply(def_strip) #Most frequent kind of parking violation df_merged['DEFINITION'].value_counts().head(1) count_by_violation_code = df_merged['CODE'].value_counts() count_by_violation_code.head() df_merged.groupby('CODE')['All Other Areas int'].sum().sort_values(ascending=False).head() Most_lucrative_amount = df_merged.groupby('CODE')['All Other Areas int'].sum().sort_values(ascending=False)[1] Most_lucrative_fine_in_words = df_merged[df_merged['CODE'] == '14'] Most_lucrative_fine_in_words = Most_lucrative_fine_in_words['DEFINITION'].head(1) print("The most lucrative fine regarded ${:,.2f} on parking fines.".format(Most_lucrative_amount)) print("It regarded " + Most_lucrative_fine_in_words) df_merged['Registration State'].value_counts().head(10) Fines_for_New_York_Cars = df_merged.groupby('Registration State')['All Other Areas int'].sum().sort_values(ascending=False).head(1) Fines_for_New_York_Cars Mininum_Amount #Total fines print("New York City makes ${:,.2f} off non-New-York cars.".format(Mininum_Amount - 220943620.0)) #Double Check: df_merged[df_merged['Registration State'] != 'NY'].sum() import matplotlib.pyplot as plt %matplotlib inline plt.style.use("ggplot") df_merged['Registration State'].value_counts().sort_values(ascending=True).tail(10).plot(kind='barh') import dateutil def Time_of_day(x): x = str(x) #x = (':'.join(a+b for a,b in zip(x[::2], x[1::2]))) if x == 'NaN': pass try: x = str(x[:2] + ':' + x[2:]) date = dateutil.parser.parse(x) time = int(str(date.strftime('%H'))) if time <= 6: return '12am - 6am' elif time <= 12: return '6am - 12pm' elif time <= 18: return '12pm - 6pm' elif time <= 24: return '6pm - 12am' except: return 'NaN' df_merged['Time of Day'] = df['Violation Time'].apply(Time_of_day) df_merged['Time of Day'].value_counts() df_merged['Issue Date'].head() #Filtering out all the dates that were in the future and before 2013, obviously wrong dates. df_merged_only_2013_dates = df_merged[(df_merged['Issue Date'] > '2013-01-01') & (df_merged['Issue Date'] < '2013-12-31')] #More on histograms #http://stackoverflow.com/questions/27365467/python-pandas-plot-histogram-of-dates #http://stackoverflow.com/questions/29672375/histogram-in-matplotlib-time-on-x-axis df_merged_only_2013_dates.groupby('Issue Date')['Issue Date'].count().plot(kind="bar", figsize=(15, 6)) df_merged['Issue Date'].head() df_merged_only_2013_dates.groupby('Issue Date')['All Other Areas int'].sum().plot(kind="bar", figsize=(15, 6)) df_boroughs = pd.read_csv("NYC_Boroughs.csv") df_boroughs.head() df_merged['Violation County'].value_counts() df_merged.groupby('Violation County')['All Other Areas int'].sum() def boroughs_trans(borough): borough = str(borough) if borough == 'NaN': pass elif borough == 'RICH': return 'R' elif borough == 'BRONX': return 'BX' else: return None <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: The source dataset Step3: Let's create a BigQuery client that we can use throughout the notebook. Step4: Let's now examine the result of a BiqQuery call in a Pandas DataFrame using our newly created client. Step6: First, let's get the set of all valid column names in the natality dataset. We can do this by accessing the INFORMATION_SCHEMA for the table from the dataset. Step7: We can print our valid columns set to see all of the possible columns we have available in the dataset. Of course, you could also find this information by going to the Schema tab when selecting the table in the BigQuery UI. Step11: Let's write a query to find the unique values for each of the columns and the count of those values. Step12: Make a bar plot to see is_male with avg_wt linearly scaled and num_babies logarithmically scaled. Step13: Make a bar plot to see mother_age with avg_wt linearly scaled and num_babies linearly scaled. Step14: Make a bar plot to see plurality with avg_wt linearly scaled and num_babies logarithmically scaled. Step15: Make a bar plot to see gestation_weeks with avg_wt linearly scaled and num_babies logarithmically scaled.
<ASSISTANT_TASK:> Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def random_line(m, b, sigma, size=10):
    """Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0]
    Parameters
    ----------
    m : float
        The slope of the line.
    b : float
        The y-intercept of the line.
    sigma : float
        The standard deviation of the y direction normal distribution noise.
    size : int
        The number of points to create for the line.
    Returns
    -------
    x : array of floats
        The array of x values for the line with `size` points.
    y : array of floats
        The array of y values for the lines with `size` points.
    """
    x = np.linspace(-1.0,1.0,size)
    if sigma == 0.0:
        y = np.array([i*m+b for i in x])
    else:
        # np.random.normal takes the standard deviation (sigma), not the
        # variance, as its scale argument
        y = np.array([i*m+b+np.random.normal(0,sigma) for i in x])
    return x,y
m = 0.0; b = 1.0; sigma=0.0; size=3
x, y = random_line(m, b, sigma, size)
assert len(x)==len(y)==size
assert list(x)==[-1.0,0.0,1.0]
assert list(y)==[1.0,1.0,1.0]
sigma = 1.0
m = 0.0; b = 0.0
size = 500
x, y = random_line(m, b, sigma, size)
assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1)
assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1)
def ticks_out(ax):
    """Move the ticks to the outside of the box."""
    ax.get_xaxis().set_tick_params(direction='out', width=1, which='both')
    ax.get_yaxis().set_tick_params(direction='out', width=1, which='both')
def plot_random_line(m, b, sigma, size=10, color='red'):
    """Plot a random line with slope m, intercept b and size points."""
    x,y = random_line(m, b, sigma, size=size)  # pass size through instead of hardcoding 10
    plt.plot(x,y,color)
    ax = plt.gca()
    plt.title('Random Line with Gaussian Noise')
    plt.xlabel('X')
    plt.ylabel('Y')
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.axes.get_yaxis().tick_left()
    ticks_out(ax)
    plt.xlim(-1.1,1.1)
    plt.ylim(-10.0,10.0)
plot_random_line(5.0, -1.0, 2.0, 50)
assert True # use this cell to grade the plot_random_line function
interact(plot_random_line, m = (-10.0,10.0,0.1), b = (-5.0,5.0,0.1), sigma = (0.0,5.0,0.1), size = (10,100,10), color = {'red': 'r','green':'g','blue':'b'})
assert True # use this cell to grade the plot_random_line interact
<END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description:
Step1: Define the model to train
Step2: Set up the remote executor
Step3: Run the training
<ASSISTANT_TASK:> Python Code: from __future__ import division, unicode_literals import pandas as pd import numpy as np import matplotlib %matplotlib inline matplotlib.style.use('ggplot') df = pd.read_excel('./input/complete_data.xls') df.head() import datetime import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import matplotlib.cbook as cbook years = mdates.YearLocator() # every year months = mdates.MonthLocator() # every month yearsFmt = mdates.DateFormatter('%Y') # load up some sample data to plot datafile = cbook.get_sample_data('goog.npy') r = np.load(datafile, encoding='bytes').view(np.recarray) fig, ax = plt.subplots() ax.plot(r.date, r.adj_close) # format the ticks ax.xaxis.set_major_locator(years) ax.xaxis.set_major_formatter(yearsFmt) ax.xaxis.set_minor_locator(months) datemin = datetime.date(r.date.min().year, 1, 1) datemax = datetime.date(r.date.max().year + 1, 1, 1) ax.set_xlim(datemin, datemax) # format the coords message box def price(x): return '$%1.2f' % x ax.format_xdata = mdates.DateFormatter('%Y-%m-%d') ax.format_ydata = price ax.grid(True) # rotates and right aligns the x labels, and moves the bottom of the # axes up to make room for them fig.autofmt_xdate() plt.show() sales_by_month = df[['date', 'quantity']].set_index('date') sales_by_month.resample('M', how=sum).plot(title="Total Sales by Month") summary = df[['ext price', 'name']].groupby('name').sum() summary.plot(kind='bar', title="Total Sales by Account") purchase_patterns = df[['ext price','date']] purchase_patterns.head() purchase_plot = purchase_patterns['ext price'].hist(bins=40) purchase_plot.set_title("Purchase Patterns") purchase_plot.set_xlabel("Order Amount($)") purchase_plot.set_ylabel("Number of orders") sales_by_sku=df[['name', 'sku', 'ext price']].groupby(['name', 'sku']).sum() sales_by_sku.head() sales_by_sku.unstack().head() my_plot = sales_by_sku.unstack().plot(kind='bar',stacked=True, title="Total Sales by Customer and SKU", legend=None) my_plot.set_xlabel("Customers") my_plot.set_ylabel("Sales") from bokeh.charts import Bar, Histogram, show, output_notebook output_notebook() b = Bar(summary, label='name', values='ext price', title="Total Sales by Account") show(b) hist = Histogram(df, values='ext price', bins=40, legend=True) show(hist) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Now press 'a' or 'b' to create new cells. You can also use the toolbar to create new cells. You can also use the arrow keys to move up and down. Step2: After exectuing this cell, your kernel will have access to everything inside the os library which is a common library for interacting with the operating system. We'll need to use the import statement for all of the libraries that we include. Step3: Using the os package, we can list an entire directory. The documentation or docstring, says that listdir takes one parameter, path Step4: This is the location of the directory we need to list. Let's try this with the directory of images we just downloaded Step5: We can also specify to include only certain files like so Step6: or even Step7: We could also combine file types if we happened to have multiple types Step8: Let's set this list to a variable, so we can perform further actions on it Step9: And now we can index that list using the square brackets Step10: We can even go in the reverse direction, which wraps around to the end of the list Step11: <a name="loading-an-image"></a> Step12: Now we can refer to the entire module by just using plt instead of matplotlib.pyplot every time. This is pretty common practice. Step13: This isn't python, so won't work inside of any python script files. This only works inside notebook. What this is saying is that whenever we plot something using matplotlib, put the plots directly into the notebook, instead of using a window popup, which is the default behavior. This is something that makes notebook really useful for teaching purposes, as it allows us to keep all of our images/code in one document. Step14: plt contains a very useful function for loading images Step15: Here we see that it actually returns a variable which requires us to use another library, NumPy. NumPy makes working with numerical data a lot easier. Let's import it as well Step16: Let's try loading the first image in our dataset Step17: plt.imread will not know where that file is. We can tell it where to find the file by using os.path.join Step18: Now we get a bunch of numbers! I'd rather not have to keep prepending the path to my files, so I can create the list of files like so Step19: Let's set this to a variable, img, and inspect a bit further what's going on Step20: <a name="rgb-image-representation"></a> Step21: Let's break this data down a bit more. We can see the dimensions of the data using the shape accessor Step22: This means that the image has 218 rows, 178 columns, and 3 color channels corresponding to the Red, Green, and Blue channels of the image, or RGB. Let's try looking at just one of the color channels. We can use the square brackets just like when we tried to access elements of our list Step23: We use the special colon operator to say take every value in this dimension. This is saying, give me every row, every column, and the 0th dimension of the color channels. Step24: The numbers are all between 0 to 255. What a strange number you might be thinking. Unless you are one of 10 types of people in this world, those that understand binary and those that don't. Don't worry if you're not. You are likely better off. 
Step25: numpy arrays have a field which will tell us how many bits they are using Step26: uint8 Step27: This is saying, let me see this data as a floating point number, meaning with decimal places, and with 32 bits of precision, rather than the previous data types 8 bits. This will become important when we start to work with neural networks, as we'll need all of those extra possible values! Step28: to pick a random image from our list of files, we can use the numpy random module Step29: This function will produce random integers between a range of values that we specify. We say, give us random integers from 0 to the length of files. Step30: This might be something useful that we'd like to do often. So we can use a function to help us in the future Step31: This function takes one parameter, a variable named filename, which we will have to specify whenever we call it. That variable is fed into the plt.imread function, and used to load an image. It is then drawn with plt.imshow. Let's see how we can use this function definition Step32: or simply Step34: We use functions to help us reduce the main flow of our code. It helps to make things clearer, using function names that help describe what is going on. Step35: There are a few things going on here. First, we are defining a function which takes as input a single variable. This variable gets named img inside the function, and we enter a set of if/else-if conditionals. The first branch says, if the rows of img are greater than the columns, then set the variable extra to their difference and divide by 2. The // notation means to perform an integer division, instead of a floating point division. So 3 // 2 = 1, not 1.5. We need integers for the next line of code which says to set the variable crop to img starting from extra rows, and ending at negative extra rows down. We can't be on row 1.5, only row 1 or 2. So that's why we need the integer divide there. Let's say our image was 128 x 96 x 3. We would have extra = (128 - 96) // 2, or 16. Then we'd start from the 16th row, and end at the -16th row, or the 112th row. That adds up to 96 rows, exactly the same number of columns as we have. Step36: <a name="resizing-images"></a> Step37: Notice that you can hit tab after each step to see what is available. That is really helpful as I never remember what the exact names are. Step38: The imresize function takes a input image as its first parameter, and a tuple defining the new image shape as rows and then columns. Step39: Great! To really see what's going on, let's turn off the interpolation like so Step40: Each one of these squares is called a pixel. Since this is a color image, each pixel is actually a mixture of 3 values, Red, Green, and Blue. When we mix those proportions of Red Green and Blue, we get the color shown here. Step41: This is an incredibly useful function which we'll revisit later when we try to visualize the mean image of our entire dataset. Step42: We now have a list containing our images. Each index of the imgs list is another image which we can access using the square brackets Step43: Since all of the images are the same size, we can make use of numpy's array instead of a list. Step44: <a name="the-batch-dimension"></a> Step45: We could also use the numpy.concatenate function, but we have to create a new dimension for each image. Numpy let's us do this by using a special variable np.newaxis
<ASSISTANT_TASK:> Python Code: 4*2 import numpy as np print(np.sin(.5)) print(np.random.random(3)) import os # Load the os library import os # Load the request module import urllib.request # Create a directory os.mkdir('img_align_celeba') # Now perform the following 10 times: for img_i in range(1, 11): # create a string using the current loop counter f = '000%03d.jpg' % img_i # and get the url with that string appended the end url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f # We'll print this out to the console so we can see how far we've gone print(url, end='\r') # And now download the url to a location inside our new directory urllib.request.urlretrieve(url, os.path.join('img_align_celeba', f)) help(os.listdir) files = os.listdir('img_align_celeba') [file_i for file_i in os.listdir('img_align_celeba') if '.jpg' in file_i] [file_i for file_i in os.listdir('img_align_celeba') if '.jpg' in file_i and '00000' in file_i] [file_i for file_i in os.listdir('img_align_celeba') if '.jpg' in file_i or '.png' in file_i or '.jpeg' in file_i] files = [file_i for file_i in os.listdir('img_align_celeba') if file_i.endswith('.jpg')] print(files[0]) print(files[1]) print(files[-1]) print(files[-2]) import matplotlib.pyplot as plt %matplotlib inline # help(plt) # plt.<tab> plt.imread? import numpy as np # help(np) # np.<tab> # img = plt.imread(files[0]) # outputs: FileNotFoundError print(os.path.join('img_align_celeba/', files[0])) plt.imread(os.path.join('img_align_celeba/', files[0])) files = [os.path.join('img_align_celeba', file_i) for file_i in os.listdir('img_align_celeba') if '.jpg' in file_i] files img = plt.imread(files[0]) # img.<tab> img img = plt.imread(files[5]) plt.imshow(img) img.shape # outputs: (218, 178, 3) #dja from matplotlib.colors import LinearSegmentedColormap cdict = { 'red': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)) } blackred = LinearSegmentedColormap('blackred', cdict) plt.figure() plt.imshow(img[:, :, 0], cmap=blackred) plt.figure() plt.imshow(img[:, :, 1], cmap="gray") plt.figure() plt.imshow(img[:, :, 2]) np.min(img), np.max(img) 2**32 img.dtype img.astype(np.float32) plt.imread(files[0]) print(np.random.randint(0, len(files))) print(np.random.randint(0, len(files))) print(np.random.randint(0, len(files))) filename = files[np.random.randint(0, len(files))] img = plt.imread(filename) plt.imshow(img) def plot_image(filename): img = plt.imread(filename) plt.imshow(img) f = files[np.random.randint(0, len(files))] plot_image(f) plot_image(files[np.random.randint(0, len(files))]) def imcrop_tosquare(img): Make any image a square image. Parameters ---------- img : np.ndarray Input image to crop, assumed at least 2d. Returns ------- crop : np.ndarray Cropped image. if img.shape[0] > img.shape[1]: extra = (img.shape[0] - img.shape[1]) if extra % 2 == 0: crop = img[extra // 2:-extra // 2, :] else: crop = img[max(0, extra // 2 - 1):min(-1, -extra // 2), :] elif img.shape[1] > img.shape[0]: extra = (img.shape[1] - img.shape[0]) if extra % 2 == 0: crop = img[:, extra // 2:-extra // 2] else: crop = img[:, max(0, extra // 2 - 1):min(-1, -extra // 2)] else: crop = img return crop def imcrop(img, amt): if amt <= 0: return img row_i = int(img.shape[0] * amt) // 2 col_i = int(img.shape[1] * amt) // 2 return img[row_i:-row_i, col_i:-col_i] #from scipy.<tab>misc import <tab>imresize from scipy.misc import imresize imresize? 
square = imcrop_tosquare(img) crop = imcrop(square, 0.2) rsz = imresize(crop, (64, 64)) plt.imshow(rsz) plt.imshow(rsz, interpolation='nearest') mean_img = np.mean(rsz, axis=2) print(mean_img.shape) plt.imshow(mean_img, cmap='gray') imgs = [] for file_i in files: img = plt.imread(file_i) square = imcrop_tosquare(img) crop = imcrop(square, 0.2) rsz = imresize(crop, (64, 64)) imgs.append(rsz) print(len(imgs)) plt.imshow(imgs[0]) imgs[0].shape data = np.array(imgs) data.shape data = np.concatenate([img_i[np.newaxis] for img_i in imgs], axis=0) data.shape <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step2: Line with Gaussian noise Step5: Write a function named plot_random_line that takes the same arguments as random_line and creates a random line using random_line and then plots the x and y points using Matplotlib's scatter function Step6: Use interact to explore the plot_random_line function using
<ASSISTANT_TASK:> Python Code: %matplotlib inline import matplotlib.pyplot as plt import numpy as np from IPython.html.widgets import interact, interactive, fixed from IPython.display import display def random_line(m, b, sigma, size=10): Create a line y = m*x + b + N(0,sigma**2) between x=[-1.0,1.0] Parameters ---------- m : float The slope of the line. b : float The y-intercept of the line. sigma : float The standard deviation of the y direction normal distribution noise. size : int The number of points to create for the line. Returns ------- x : array of floats The array of x values for the line with `size` points. y : array of floats The array of y values for the lines with `size` points. x = np.linspace(-1.0,1.0,size) if sigma == 0.0: y = np.array([i*m+b for i in x]) else: y = np.array([i*m+b+np.random.normal(0,sigma**2) for i in x]) return x,y m = 0.0; b = 1.0; sigma=0.0; size=3 x, y = random_line(m, b, sigma, size) assert len(x)==len(y)==size assert list(x)==[-1.0,0.0,1.0] assert list(y)==[1.0,1.0,1.0] sigma = 1.0 m = 0.0; b = 0.0 size = 500 x, y = random_line(m, b, sigma, size) assert np.allclose(np.mean(y-m*x-b), 0.0, rtol=0.1, atol=0.1) assert np.allclose(np.std(y-m*x-b), sigma, rtol=0.1, atol=0.1) def ticks_out(ax): Move the ticks to the outside of the box. ax.get_xaxis().set_tick_params(direction='out', width=1, which='both') ax.get_yaxis().set_tick_params(direction='out', width=1, which='both') def plot_random_line(m, b, sigma, size=10, color='red'): Plot a random line with slope m, intercept b and size points. x,y = random_line(m,b,sigma,size = 10) plt.plot(x,y,color) ax = plt.gca() plt.title('Soliton Wave') plt.xlabel('X') plt.ylabel('Y') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) ax.get_xaxis().tick_bottom() ax.axes.get_yaxis().tick_left() ticks_out(ax) plt.xlim(-1.1,1.1) plt.ylim(-10.0,10.0) plot_random_line(5.0, -1.0, 2.0, 50) assert True # use this cell to grade the plot_random_line function interact(plot_random_line, m = (-10.0,10.0,0.1), b = (-5.0,5.0,0.1), sigma = (0.0,5.0,0.1),size = (10,100,10),color = {'red': 'r','green':'g','blue':'b'}) #### assert True # use this cell to grade the plot_random_line interact <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: ํ›ˆ๋ จํ•  ๋ชจ๋ธ ์ •์˜ํ•˜๊ธฐ Step2: ์›๊ฒฉ ์‹คํ–‰๊ธฐ ์„ค์ •ํ•˜๊ธฐ Step3: ํ›ˆ๋ จ ์‹คํ–‰ํ•˜๊ธฐ
<ASSISTANT_TASK:> Python Code: #@test {"skip": true} !pip install --quiet --upgrade tensorflow-federated !pip install --quiet --upgrade nest-asyncio import nest_asyncio nest_asyncio.apply() import collections import time import tensorflow as tf import tensorflow_federated as tff source, _ = tff.simulation.datasets.emnist.load_data() def map_fn(example): return collections.OrderedDict( x=tf.reshape(example['pixels'], [-1, 784]), y=example['label']) def client_data(n): ds = source.create_tf_dataset_for_client(source.client_ids[n]) return ds.repeat(10).batch(20).map(map_fn) train_data = [client_data(n) for n in range(10)] input_spec = train_data[0].element_spec def model_fn(): model = tf.keras.models.Sequential([ tf.keras.layers.InputLayer(input_shape=(784,)), tf.keras.layers.Dense(units=10, kernel_initializer='zeros'), tf.keras.layers.Softmax(), ]) return tff.learning.from_keras_model( model, input_spec=input_spec, loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) trainer = tff.learning.build_federated_averaging_process( model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02)) def evaluate(num_rounds=10): state = trainer.initialize() for round in range(num_rounds): t1 = time.time() state, metrics = trainer.next(state, train_data) t2 = time.time() print('Round {}: loss {}, round time {}'.format(round, metrics.loss, t2 - t1)) import grpc ip_address = '0.0.0.0' #@param {type:"string"} port = 80 #@param {type:"integer"} channels = [grpc.insecure_channel(f'{ip_address}:{port}') for _ in range(10)] tff.backends.native.set_remote_execution_context(channels) evaluate() <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Loading Model and Data Step2: Explain one image Step3: Explain multiple images
<ASSISTANT_TASK:> Python Code: import json import numpy as np import torchvision import torch import torch.nn as nn import shap from PIL import Image device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model = torchvision.models.mobilenet_v2(pretrained=True, progress=False) model.to(device) model.eval() X, y = shap.datasets.imagenet50() # Getting ImageNet 1000 class names url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json" with open(shap.datasets.cache(url)) as file: class_names = [v[1] for v in json.load(file).values()] print("Number of ImageNet classes:", len(class_names)) #print("Class names:", class_names) # Prepare data transformation pipeline mean = [0.485, 0.456, 0.406] std = [0.229, 0.224, 0.225] def nhwc_to_nchw(x: torch.Tensor) -> torch.Tensor: if x.dim() == 4: x = x if x.shape[1] == 3 else x.permute(0, 3, 1, 2) elif x.dim() == 3: x = x if x.shape[0] == 3 else x.permute(2, 0, 1) return x def nchw_to_nhwc(x: torch.Tensor) -> torch.Tensor: if x.dim() == 4: x = x if x.shape[3] == 3 else x.permute(0, 2, 3, 1) elif x.dim() == 3: x = x if x.shape[2] == 3 else x.permute(1, 2, 0) return x transform= [ torchvision.transforms.Lambda(nhwc_to_nchw), torchvision.transforms.Lambda(lambda x: x*(1/255)), torchvision.transforms.Normalize(mean=mean, std=std), torchvision.transforms.Lambda(nchw_to_nhwc), ] inv_transform= [ torchvision.transforms.Lambda(nhwc_to_nchw), torchvision.transforms.Normalize( mean = (-1 * np.array(mean) / np.array(std)).tolist(), std = (1 / np.array(std)).tolist() ), torchvision.transforms.Lambda(nchw_to_nhwc), ] transform = torchvision.transforms.Compose(transform) inv_transform = torchvision.transforms.Compose(inv_transform) def predict(img: np.ndarray) -> torch.Tensor: img = nhwc_to_nchw(torch.Tensor(img)) img = img.to(device) output = model(img) return output # Check that transformations work correctly Xtr = transform(torch.Tensor(X)) out = predict(Xtr[1:3]) classes = torch.argmax(out, axis=1).cpu().numpy() print(f'Classes: {classes}: {np.array(class_names)[classes]}') topk = 4 batch_size = 50 n_evals = 10000 # define a masker that is used to mask out partitions of the input image. masker_blur = shap.maskers.Image("blur(128,128)", Xtr[0].shape) # create an explainer with model and image masker explainer = shap.Explainer(predict, masker_blur, output_names=class_names) # feed only one image # here we explain two images using 100 evaluations of the underlying model to estimate the SHAP values shap_values = explainer(Xtr[1:2], max_evals=n_evals, batch_size=batch_size, outputs=shap.Explanation.argsort.flip[:topk]) (shap_values.data.shape, shap_values.values.shape) shap_values.data = inv_transform(shap_values.data).cpu().numpy()[0] shap_values.values = [val for val in np.moveaxis(shap_values.values[0],-1, 0)] shap.image_plot(shap_values=shap_values.values, pixel_values=shap_values.data, labels=shap_values.output_names, true_labels=[class_names[132]]) # define a masker that is used to mask out partitions of the input image. 
masker_blur = shap.maskers.Image("blur(128,128)", Xtr[0].shape) # create an explainer with model and image masker explainer = shap.Explainer(predict, masker_blur, output_names=class_names) # feed only one image # here we explain two images using 100 evaluations of the underlying model to estimate the SHAP values shap_values = explainer(Xtr[1:4], max_evals=n_evals, batch_size=batch_size, outputs=shap.Explanation.argsort.flip[:topk]) (shap_values.data.shape, shap_values.values.shape) shap_values.data = inv_transform(shap_values.data).cpu().numpy() shap_values.values = [val for val in np.moveaxis(shap_values.values,-1, 0)] (shap_values.data.shape, shap_values.values[0].shape) shap.image_plot(shap_values=shap_values.values, pixel_values=shap_values.data, labels=shap_values.output_names) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: ยท Read the data and create a survival object using .survt_df() method Step2: ยท Specify the model and create the model fit object using .distfit_df() method Step3: ยท We can get the variance of the parameter estimates Step4: ยท Get the estimated log time-to-reach-%-of-death using .logq() method Step5: ยท Get the estimated log mean lifespan using .logm() method Step6: ยท We can also calculate the variance of time-to-percentage-death estimiates Step7: ยท Plot for parameter estimates Step8: ยท Plot harzard plot, survival plot and other plots using .plot() method
<ASSISTANT_TASK:> Python Code: import os #os.chdir('/Users/q6600sl/IPython_NB') from lifemodels import s_models %matplotlib inline #Read the data original_df = pd.read_csv('/Users/q6600sl/Downloads/SP_12-22-15.txt', sep=' ') #1st step: create a survival object surv_df = s_models.survt_df(original_df) original_df.head() gp3_fit = s_models.distfit_df(surv_df, 'gp3') gp3_fit.mdl_all_free gp3_fit.var (gp3_fit.logq('Mut-1'), gp3_fit.logq_nc('Mut-1')) medium_df = pd.DataFrame({'With_C' : map(gp3_fit.logq, gp3_fit.mdl_all_free.index), 'Without_C': map(gp3_fit.logq_nc, gp3_fit.mdl_all_free.index)}, index=gp3_fit.mdl_all_free.index) np.exp(medium_df) ax = medium_df.plot(kind='bar', alpha=0.5) ax.set_ylim(3, 4.5) right_y = ax.twinx() right_y.set_ylim(3, 4.5) _ = right_y.set_yticklabels(map('{:.2f}'.format, np.exp(right_y.get_yticks()))) ax.set_ylabel('Medium survival time in log(days)') right_y.set_ylabel('Medium survival time in days') plt.title('Medium lifespan estimated based on Gompertz Makeham Model', y=1.05) (gp3_fit.logm('Mut-1'), gp3_fit.logm_nc('Mut-1')) gp3_fit.logt_var('Mut-1', 3.9) f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(16, 5)) plot_par = {'logy' : True, 'alpha': 0.5, 'color': list('BBBBBR')} np.exp(gp3_fit.mdl_all_free).ix[:, 'alpha'].plot(kind='bar', ax=ax1, **plot_par) np.exp(gp3_fit.mdl_all_free).ix[:, 'beta'].plot(kind='bar', ax=ax2, **plot_par) np.exp(gp3_fit.mdl_all_free).ix[:, 'c'].plot(kind='bar', ax=ax3, **plot_par) ax1.set_title(r'Gompertz $\alpha$') ax2.set_title(r'Gompertz $\beta$') ax3.set_title(r'Gompertz $C$') plt.suptitle('Gompertz Makeham Model parameter estimates', fontsize=16, y=0.99) f, axes = plt.subplots(2, 3, figsize=(14, 8), sharex=True, sharey=True) axes = axes.ravel() for ax, key in zip(axes, gp3_fit.mdl_all_free.index.tolist()): gp3_fit.plot(key, np.linspace(0,80,100),ax,'hzd') ax.set_ylim((-8,1)) ax.set_title(key) plt.suptitle('log-harzard plot', fontsize=20) f, axes = plt.subplots(2, 3, figsize=(14, 8), sharex=True, sharey=True) axes = axes.ravel() for ax, key in zip(axes, gp3_fit.mdl_all_free.index.tolist()): gp3_fit.plot(key, np.linspace(0,80,100),ax,'sf') ax.set_ylim((-0.01,1.01)) ax.set_title(key) plt.suptitle('survival plot', fontsize=20) <END_TASK>
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Hello, World! Step2: Creating Objects from CSV Step3: GroupBy Step4: Map vs FlatMap Step5: Creating Rows Step7: Creating DataFrames from RDDs Step8: Loading and Inspecting Parquet Files Step9: Calculating Histograms Step11: Visualizing Histograms Step12: Counting Airplanes in the US Fleet Step14: Counting the Total Flights Per Month Step15: Using RDDs and Map/Reduce to Prepare a Complex Record Step16: Counting Late Flights Step17: Counting Flights with Hero Captains Step18: Printing Our Results Step20: Computing the Average Lateness Per Flight Step22: Inspecting Late Flights Step24: Determining Why Flights Are Late Step25: Computing a Histogram of Weather-Delayed Flights Step26: Preparing a Histogram for Visualization by d3.js Step27: Building a Classifier Model to Predict Flight Delays Step28: Check Data for Nulls Step29: Add a Route Column Step30: Bucketizing ArrDelay into ArrDelayBucket Step31: Indexing Our String Fields into Numeric Fields Step32: Combining Numeric Fields into a Single Vector Step33: Training Our Model in an Experimental Setup
<ASSISTANT_TASK:> Python Code: # Initialize PySpark
APP_NAME = "Debugging Prediction Problems"
# If there is no SparkSession, create the environment
try:
    sc and spark
except NameError as e:
    import findspark
    findspark.init()
    import pyspark
    import pyspark.sql
    sc = pyspark.SparkContext()
    spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()
print("PySpark initiated...")
# Load the text file using the SparkContext
csv_lines = sc.textFile("../data/example.csv")
# Map the data to split the lines into a list
data = csv_lines.map(lambda line: line.split(","))
# Collect the dataset into local RAM
data.collect()
# Turn the CSV lines into objects
def csv_to_record(line):
    parts = line.split(",")
    record = {
        "name": parts[0],
        "company": parts[1],
        "title": parts[2]
    }
    return record
# Apply the function to every record
records = csv_lines.map(csv_to_record)
# Inspect the first item in the dataset
records.first()
# Group the records by the name of the person
grouped_records = records.groupBy(lambda x: x["name"])
# Show the first group
grouped_records.first()
# Count the groups
job_counts = grouped_records.map(
    lambda x: {
        "name": x[0],
        "job_count": len(x[1])
    }
)
job_counts.first()
job_counts.collect()
# Compute a relation of words by line
words_by_line = csv_lines\
    .map(lambda line: line.split(","))
print(words_by_line.collect())
# Compute a relation of words
flattened_words = csv_lines\
    .map(lambda line: line.split(","))\
    .flatMap(lambda x: x)
flattened_words.collect()
from pyspark.sql import Row
# Convert the CSV into a pyspark.sql.Row
def csv_to_row(line):
    parts = line.split(",")
    row = Row(
        name=parts[0],
        company=parts[1],
        title=parts[2]
    )
    return row
# Apply the function to get rows in an RDD
rows = csv_lines.map(csv_to_row)
# Convert to a pyspark.sql.DataFrame
rows_df = rows.toDF()
# Register the DataFrame for Spark SQL
rows_df.registerTempTable("executives")
# Generate a new DataFrame with SQL using the SparkSession
job_counts = spark.sql("""
SELECT name, COUNT(*) AS total
FROM executives
GROUP BY name
""")
job_counts.show()
# Go back to an RDD
job_counts.rdd.collect()
# Load the parquet file containing flight delay records
on_time_dataframe = spark.read.parquet('../data/on_time_performance.parquet')
# Register the data for Spark SQL
on_time_dataframe.registerTempTable("on_time_performance")
# Check out the columns
on_time_dataframe.columns
# Check out some data
on_time_dataframe\
    .select("FlightDate", "TailNum", "Origin", "Dest", "Carrier", "DepDelay", "ArrDelay")\
    .show()
# Trim the fields and keep the result
trimmed_on_time = on_time_dataframe\
    .select(
        "FlightDate",
        "TailNum",
        "Origin",
        "Dest",
        "Carrier",
        "DepDelay",
        "ArrDelay"
    )
# Sample 0.01% of the data and show
trimmed_on_time.sample(False, 0.0001).show()
# Compute a histogram of departure delays
on_time_dataframe\
    .select("DepDelay")\
    .rdd\
    .flatMap(lambda x: x)\
    .histogram(10)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Function to plot a histogram using pyplot
def create_hist(rdd_histogram_data):
    """Given an RDD.histogram, plot a pyplot histogram"""
    heights = np.array(rdd_histogram_data[1])
    full_bins = rdd_histogram_data[0]
    mid_point_bins = full_bins[:-1]
    widths = [abs(i - j) for i, j in zip(full_bins[:-1], full_bins[1:])]
    bar = plt.bar(mid_point_bins, heights, width=widths, color='b')
    return bar
# Compute a histogram of departure delays
departure_delay_histogram = on_time_dataframe\
    .select("DepDelay")\
    .rdd\
    .flatMap(lambda x: x)\
    .histogram([-60,-30,-15,-10,-5,0,5,10,15,30,60,90,120,180])
create_hist(departure_delay_histogram)
# Dump the unneeded fields
tail_numbers = on_time_dataframe.rdd.map(lambda x: x.TailNum)
tail_numbers = tail_numbers.filter(lambda x: x != '')
# distinct() gets us unique tail numbers
unique_tail_numbers = tail_numbers.distinct()
# now we need a count() of unique tail numbers
airplane_count = unique_tail_numbers.count()
print("Total airplanes: {}".format(airplane_count))
# Use SQL to look at the total flights by month across 2015
on_time_dataframe.registerTempTable("on_time_dataframe")
total_flights_by_month = spark.sql("""
SELECT Month, Year, COUNT(*) AS total_flights
FROM on_time_dataframe
GROUP BY Year, Month
ORDER BY Year, Month
""")
# This map/asDict trick makes the rows print a little prettier. It is optional.
flights_chart_data = total_flights_by_month.rdd.map(lambda row: row.asDict())
flights_chart_data.collect()
# Filter down to the fields we need to identify and link to a flight
flights = on_time_dataframe.rdd.map(lambda x:
    (x.Carrier, x.FlightDate, x.FlightNum, x.Origin, x.Dest, x.TailNum)
)
# Group flights by tail number, sorted by date, then flight number, then origin/dest
flights_per_airplane = flights\
    .map(lambda nameTuple: (nameTuple[5], [nameTuple[0:5]]))\
    .reduceByKey(lambda a, b: a + b)\
    .map(lambda tuple: {
        'TailNum': tuple[0],
        'Flights': sorted(tuple[1], key=lambda x: (x[1], x[2], x[3], x[4]))
    })
flights_per_airplane.first()
total_flights = on_time_dataframe.count()
# Flights that were late leaving...
late_departures = on_time_dataframe.filter(
    on_time_dataframe.DepDelayMinutes > 0
)
total_late_departures = late_departures.count()
print(total_late_departures)
# Flights that were late arriving...
late_arrivals = on_time_dataframe.filter(
    on_time_dataframe.ArrDelayMinutes > 0
)
total_late_arrivals = late_arrivals.count()
print(total_late_arrivals)
# Get the percentage of flights that are late, rounded to 1 decimal place
pct_late = round((total_late_arrivals / (total_flights * 1.0)) * 100, 1)
# Flights that left late but made up time to arrive on time...
on_time_heros = on_time_dataframe.filter(
    (on_time_dataframe.DepDelayMinutes > 0)
    &
    (on_time_dataframe.ArrDelayMinutes <= 0)
)
total_on_time_heros = on_time_heros.count()
print(total_on_time_heros)
print("Total flights: {:,}".format(total_flights))
print("Late departures: {:,}".format(total_late_departures))
print("Late arrivals: {:,}".format(total_late_arrivals))
print("Recoveries: {:,}".format(total_on_time_heros))
print("Percentage Late: {}%".format(pct_late))
# Get the average minutes late departing and arriving
spark.sql("""
SELECT
  ROUND(AVG(DepDelay),1) AS AvgDepDelay,
  ROUND(AVG(ArrDelay),1) AS AvgArrDelay
FROM on_time_performance
""").show()
# Why are flights late?
# Let's look at some delayed flights and the delay causes
late_flights = spark.sql("""
SELECT
  ArrDelayMinutes, WeatherDelay, CarrierDelay,
  NASDelay, SecurityDelay, LateAircraftDelay
FROM on_time_performance
WHERE WeatherDelay IS NOT NULL
   OR CarrierDelay IS NOT NULL
   OR NASDelay IS NOT NULL
   OR SecurityDelay IS NOT NULL
   OR LateAircraftDelay IS NOT NULL
ORDER BY FlightDate
""")
late_flights.sample(False, 0.01).show()
# Calculate the percentage contribution to delay for each source
total_delays = spark.sql("""
SELECT
  ROUND(SUM(WeatherDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_weather_delay,
  ROUND(SUM(CarrierDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_carrier_delay,
  ROUND(SUM(NASDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_nas_delay,
  ROUND(SUM(SecurityDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_security_delay,
  ROUND(SUM(LateAircraftDelay)/SUM(ArrDelayMinutes) * 100, 1) AS pct_late_aircraft_delay
FROM on_time_performance
""")
total_delays.show()
# Eyeball the first to define our buckets
weather_delay_histogram = on_time_dataframe\
    .select("WeatherDelay")\
    .rdd\
    .flatMap(lambda x: x)\
    .histogram([1, 5, 10, 15, 30, 60, 120, 240, 480, 720, 24*60.0])
print(weather_delay_histogram)
create_hist(weather_delay_histogram)
# Transform the data into something easily consumed by d3
def histogram_to_publishable(histogram):
    record = {'key': 1, 'data': []}
    for label, value in zip(histogram[0], histogram[1]):
        record['data'].append(
            {
                'label': label,
                'value': value
            }
        )
    return record
# Recompute the weather histogram with a filter for on-time flights
weather_delay_histogram = on_time_dataframe\
    .filter(
        on_time_dataframe.WeatherDelay.isNotNull()
        &
        (on_time_dataframe.WeatherDelay > 0)
    )\
    .select("WeatherDelay")\
    .rdd\
    .flatMap(lambda x: x)\
    .histogram([0, 15, 30, 60, 120, 240, 480, 720, 24*60.0])
print(weather_delay_histogram)
record = histogram_to_publishable(weather_delay_histogram)
record
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf
schema = StructType([
    StructField("ArrDelay", DoubleType(), True),       # "ArrDelay":5.0
    StructField("CRSArrTime", TimestampType(), True),  # "CRSArrTime":"2015-12-31T03:20:00.000-08:00"
    StructField("CRSDepTime", TimestampType(), True),  # "CRSDepTime":"2015-12-31T03:05:00.000-08:00"
    StructField("Carrier", StringType(), True),        # "Carrier":"WN"
    StructField("DayOfMonth", IntegerType(), True),    # "DayOfMonth":31
    StructField("DayOfWeek", IntegerType(), True),     # "DayOfWeek":4
    StructField("DayOfYear", IntegerType(), True),     # "DayOfYear":365
    StructField("DepDelay", DoubleType(), True),       # "DepDelay":14.0
    StructField("Dest", StringType(), True),           # "Dest":"SAN"
    StructField("Distance", DoubleType(), True),       # "Distance":368.0
    StructField("FlightDate", DateType(), True),       # "FlightDate":"2015-12-30T16:00:00.000-08:00"
    StructField("FlightNum", StringType(), True),      # "FlightNum":"6109"
    StructField("Origin", StringType(), True),         # "Origin":"TUS"
])
features = spark.read.json(
    "../data/simple_flight_delay_features.jsonl.bz2",
    schema=schema
)
features.first()
#
# Check for nulls in features before using Spark ML
#
null_counts = [(column, features.where(features[column].isNull()).count()) for column in features.columns]
cols_with_nulls = filter(lambda x: x[1] > 0, null_counts)
print(list(cols_with_nulls))
#
# Add a Route variable to replace FlightNum
#
from pyspark.sql.functions import lit, concat
features_with_route = features.withColumn(
    'Route',
    concat(
        features.Origin,
        lit('-'),
        features.Dest
    )
)
features_with_route.select("Origin", "Dest", "Route").show(5)
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay
#
from pyspark.ml.feature import Bucketizer
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
bucketizer = Bucketizer(
    splits=splits,
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket"
)
ml_bucketized_features = bucketizer.transform(features_with_route)
# Check the buckets out
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show()
#
# Feature extraction tools in pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into categoric feature vectors, then drop intermediate fields
for column in ["Carrier", "DayOfMonth", "DayOfWeek", "DayOfYear",
               "Origin", "Dest", "Route"]:
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    ml_bucketized_features = string_indexer.fit(ml_bucketized_features)\
        .transform(ml_bucketized_features)
# Check out the indexes
ml_bucketized_features.show(6)
# Handle continuous, numeric fields by combining them into one feature vector
numeric_columns = ["DepDelay", "Distance"]
# note: "Origin_index" was listed twice in the source; the duplicate is removed
index_columns = ["Carrier_index", "DayOfMonth_index",
                 "DayOfWeek_index", "DayOfYear_index",
                 "Origin_index", "Dest_index", "Route_index"]
vector_assembler = VectorAssembler(
    inputCols=numeric_columns + index_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Drop the index columns
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)
# Check out the features
final_vectorized_features.show()
#
# Cross validate, train and evaluate classifier
#
# Test/train split
training_data, test_data = final_vectorized_features.randomSplit([0.7, 0.3])
# Instantiate and fit random forest classifier
from pyspark.ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
    featuresCol="Features_vec",
    labelCol="ArrDelayBucket",
    maxBins=4657,
    maxMemoryInMB=1024
)
model = rfc.fit(training_data)
# Evaluate model using test data
predictions = model.transform(test_data)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator(labelCol="ArrDelayBucket", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Accuracy = {}".format(accuracy))
# Check a sample
predictions.sample(False, 0.001, 18).orderBy("CRSDepTime").show(6)
<END_TASK>
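As an aside, not part of the original notebook: the row above fits each StringIndexer one at a time and assembles features by hand. The same preprocessing can be packaged as a single pyspark.ml Pipeline, which guarantees the train and test sets pass through identical transformations. A minimal sketch, assuming the same features_with_route DataFrame:

from pyspark.ml import Pipeline
from pyspark.ml.feature import Bucketizer, StringIndexer, VectorAssembler
from pyspark.ml.classification import RandomForestClassifier

categorical = ["Carrier", "DayOfMonth", "DayOfWeek", "DayOfYear", "Origin", "Dest", "Route"]
indexers = [StringIndexer(inputCol=c, outputCol=c + "_index") for c in categorical]
stages = [
    Bucketizer(splits=[-float("inf"), -15.0, 0, 30.0, float("inf")],
               inputCol="ArrDelay", outputCol="ArrDelayBucket"),
    *indexers,
    VectorAssembler(inputCols=["DepDelay", "Distance"] + [c + "_index" for c in categorical],
                    outputCol="Features_vec"),
    RandomForestClassifier(featuresCol="Features_vec", labelCol="ArrDelayBucket", maxBins=4657),
]

# split the raw data, fit the whole pipeline once, reuse it on the held-out set
train_raw, test_raw = features_with_route.randomSplit([0.7, 0.3])
pipeline_model = Pipeline(stages=stages).fit(train_raw)
predictions = pipeline_model.transform(test_raw)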
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: We can then import the data by looping through all the files and reading in each file's content. On the fly, we extract the information items we need from the path as well as from the content of log.txt. The result is stored in a Pandas DataFrame for further analysis. Step2: For each file, we now have a row with the complete commit information available for both repositories. Step3: Basic statistics Step4: These are the numbers of entries for each repository. Step5: These are the numbers of commits for each repository. Step6: Data preparation
<ASSISTANT_TASK:> Python Code: import glob
file_list = glob.glob(r'C:/dev/forensic/data/**/*.txt', recursive=True)
file_list = [x.replace("\\", "/") for x in file_list]
file_list[:5]
import pandas as pd
dfs = []
for files_file in file_list:
    try:
        files_df = pd.read_csv(files_file, names=['sha', 'timestamp'])
        files_df['project'] = files_file.split("/")[-2]
        # the first row of each log holds the commit's sha and timestamp;
        # the remaining rows hold the touched file paths
        files_df['file'] = files_df.sha
        files_df['sha'] = files_df.sha[0]
        files_df['timestamp'] = pd.to_datetime(files_df.timestamp[0])
        files_df = files_df[1:]
        dfs.append(files_df)
    except OSError as e:
        print((e, files_file))
file_log = pd.concat(dfs, ignore_index=True)
file_log.head()
file_log.file = pd.Categorical(file_log.file)
file_log.info()
# NOTE: the source notebook referenced an undefined `dir_log` here;
# it is reconstructed from `file_log` (an assumption about the author's intent)
dir_log = file_log[
    (file_log.project=='lerna') & (file_log.file.str.endswith(".js"))
    |
    (file_log.project=='web-build-tools') & (file_log.file.str.endswith(".ts"))
]
dir_log.project.value_counts()
dir_log = dir_log[dir_log.file.str.contains("/")].copy()
dir_log['last_dir'] = dir_log.file.str.split("/").str[-2]
dir_log['last_dir_id'] = pd.factorize(dir_log.last_dir)[0]
dir_log.head()
dir_log['date'] = dir_log.timestamp.dt.date
dir_log.head()
# use key= (not level=) because 'date' is a column, not an index level
grouped = dir_log.groupby(['project', pd.Grouper(key='date', freq="D"), 'last_dir_id'])[['sha']].last()
grouped.head()
grouped['existent'] = 1
grouped.head()
test = grouped.pivot_table('existent', ['project', 'date'], 'last_dir_id').fillna(0)
test.head()
lerna = test.loc['lerna'][0]
lerna
%matplotlib inline
test.plot()
timed_log = dir_log.set_index(['timestamp', 'project'])
timed_log.head()
# resample on the 'timestamp' level of the MultiIndex
timed_log.resample("W", level='timestamp').first()
%matplotlib inline
# the source used an undefined `timed`; `timed_log` is assumed to be meant
timed_log.\
    pivot_table('last_dir_id', timed_log.index, 'project')\
    .fillna(method='ffill').dropna().plot()
file_log[file_log.project == "lerna"].iloc[0]
file_log[file_log.project == "web-build-tools"].iloc[0]
file_log.info()
file_log.project.value_counts()
file_log.groupby('project').sha.nunique()
file_log[file_log.project=="web-build-tools"].iloc[0]
file_log[file_log.project=="web-build-tools"].file.iloc[-10:]
lerna = file_log[file_log.project == "lerna"]
lerna.info()
rush = file_log[file_log.project == "web-build-tools"]
rush.info()
from scipy.spatial.distance import hamming  # imported in the source but unused; shared names are counted manually below
def calculate_hamming(row):
    lerna = row.file_list_lerna.split("\n")
    lerna = [x.rsplit(".", maxsplit=1)[0] for x in lerna]
    rush = row.file_list_rush.split("\n")
    rush = [x.rsplit(".", maxsplit=1)[0] for x in rush]
    count = 0
    for i in lerna:
        if i in rush:
            count = count + 1
    return count
# NOTE: the source notebook never shows how `comp` is built; a plausible
# reconstruction (assumption): one newline-joined file list per project per day
file_lists = file_log.groupby(['project', file_log.timestamp.dt.date]).file\
    .apply(lambda s: "\n".join(s.astype(str)))
comp = file_lists.unstack('project')\
    .rename(columns={'lerna': 'file_list_lerna', 'web-build-tools': 'file_list_rush'})\
    .dropna()
comp.index = pd.to_datetime(comp.index)
comp["amount"] = comp.apply(calculate_hamming, axis=1)
comp.head()
%matplotlib inline
comp.amount.plot()
comp.resample("W").amount.mean().plot()
comp[comp.amount == comp.amount.max()]
<END_TASK>
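As a side note, not from the source notebook: the per-project weekly series that the broken resample cell seems to be aiming at can be written directly with groupby plus pd.Grouper, avoiding the MultiIndex gymnastics. A minimal sketch over the file_log frame built above (the choice of nunique as the weekly statistic is an assumption):

import pandas as pd

# weekly count of distinct touched files per project
weekly_counts = (
    file_log
    .groupby(['project', pd.Grouper(key='timestamp', freq='W')])
    .file.nunique()
    .unstack('project')
    .fillna(0)
)
weekly_counts.plot()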
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: My First Model Step2: Validation and Early Stopping Step3: Classification Step4: Convolutional Networks (Working with Images) Step5: Transfer Learning (using Pre-Trained Networks) Step6: Load a very small set of cats and dogs data (from https Step7: Transform or "featurize" data with the pretrained model Step8: Classification model based on features of the pre-trained model
<ASSISTANT_TASK:> Python Code: import pandas as pd
data_df = pd.read_csv('data/hourly_wages.csv')
data_df.head()
data_df.describe()
target = data_df.wage_per_hour.as_matrix()
predictors = data_df.drop(['wage_per_hour'], axis=1).as_matrix()
n_cols = predictors.shape[1]
from keras.models import Sequential
from keras.layers import Dense
my_model = Sequential()
my_model.add(Dense(100, activation='relu', input_shape=(n_cols,)))
my_model.add(Dense(100, activation='relu'))
my_model.add(Dense(1))
my_model.summary()
my_model.compile(optimizer='adam', loss='mean_squared_error')
my_model.fit(predictors, target)
def get_new_model():
    my_model = Sequential()
    my_model.add(Dense(100, activation='relu', input_shape=(n_cols,)))
    my_model.add(Dense(100, activation='relu'))
    my_model.add(Dense(1))
    my_model.compile(optimizer='adam', loss='mean_squared_error')
    return(my_model)
my_model = get_new_model()
my_model.fit(predictors, target, validation_split=0.3)
from keras.callbacks import EarlyStopping
early_stopping_monitor = EarlyStopping(patience=2)
my_model = get_new_model()
my_model.fit(predictors, target, validation_split=0.3, nb_epoch=20, callbacks=[early_stopping_monitor])
titanic_data = pd.read_csv('data/titanic_all_numeric.csv')
titanic_data.head()
from keras.utils.np_utils import to_categorical
target = to_categorical(titanic_data.survived)
predictors = titanic_data.drop(['survived'], axis=1).as_matrix()
n_cols = predictors.shape[1]
def get_classification_model(n_cols):
    model = Sequential()
    model.add(Dense(100, activation='relu', input_shape=(n_cols,)))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return(model)
my_model = get_classification_model(n_cols)
my_model.fit(predictors, target, validation_split=0.3, nb_epoch=20, callbacks=[early_stopping_monitor])
from keras.datasets import mnist
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# input image dimensions
img_rows, img_cols = 28, 28
# Using tf dim ordering
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = to_categorical(y_train, nb_classes)
Y_test = to_categorical(y_test, nb_classes)
from keras.layers import Dense, Flatten, Convolution2D, MaxPooling2D
# number of convolutional filters to use
nb_filters = 8
# size of pooling area for max pooling
pool_size = (2, 2)
# convolution kernel size
kernel_size = (2, 2)
model = Sequential()
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                        border_mode='valid',
                        input_shape=input_shape, activation='relu'))
model.add(Convolution2D(nb_filters, kernel_size[0], kernel_size[1], activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dense(nb_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train, Y_train, nb_epoch=5, validation_data=(X_test, Y_test))
from vgg16 import VGG16
from keras.preprocessing import image
model = VGG16(weights='imagenet', include_top=False)
model.summary()
from keras.preprocessing.image import ImageDataGenerator
train_data_dir = 'data/dogs_and_cats/train'
val_data_dir = 'data/dogs_and_cats/val'
n_cats_training = 40
n_dogs_training = 40
training_size = n_cats_training + n_dogs_training
n_cats_val = 10
n_dogs_val = 10
val_size = n_cats_val + n_dogs_val
datagen = ImageDataGenerator(rescale=1./255)
train_generator = datagen.flow_from_directory(
    train_data_dir,
    target_size=(150, 150),
    class_mode=None,
    shuffle=False)  # keep data in order, since this is only a transform
val_generator = datagen.flow_from_directory(
    val_data_dir,
    target_size=(150, 150),
    class_mode=None,
    shuffle=False)
train_data = model.predict_generator(train_generator, training_size)
val_data = model.predict_generator(val_generator, val_size)
import numpy as np
train_labels = np.array([0] * n_cats_training + [1] * n_dogs_training)
val_labels = np.array([0] * n_cats_val + [1] * n_dogs_val)
# a small dense classifier on top of the VGG16 bottleneck features
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels, nb_epoch=20, validation_data=(val_data, val_labels))
<END_TASK>
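A common refinement of this transfer-learning recipe, again a sketch and not part of the original notebook, is to cache the VGG16 bottleneck features on disk so that the expensive forward pass over the pretrained network runs only once; the file names below are placeholders:

import numpy as np

# cache the featurized images after the first run
np.save('bottleneck_train.npy', train_data)
np.save('bottleneck_val.npy', val_data)

# later runs can skip model.predict_generator entirely
train_data = np.load('bottleneck_train.npy')
val_data = np.load('bottleneck_val.npy')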
<SYSTEM_TASK:> Given the following text description, write Python code to implement the functionality described below step by step <END_TASK> <USER_TASK:> Description: Step1: Distributing on CPU Threads Step2: Running on the GPU Step3: Memory Allocation Step4: Feeding the Beast Step5: Distributing 1960 lines over blocks of 128 threads results in 16 busy blocks, which uses only a fraction of most GPUs and does not hide latency. Step6: Profiling the application Step7: Static Sort Step8: Local load and store transactions should now have been reduced to zero. Step9: Cache Coherence Step10: The number of gld_transactions and l2_read_transactions should drop significantly. Step11: It is also possible to query the utilization of the different pipelines. In this last version, we should see high utilization of single_precision_fu_utilization and low utilization of the other units. We are compute bound!
<ASSISTANT_TASK:> Python Code: import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-01-naive if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-01-naive !hybridizer-cuda ./01-naive/01-naive-csharp.cs graybitmap.cs -o ./01-naive/01-naive-csharp.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-01-naive/denoised.bmp') img.save('./output-01-naive/denoised.png', 'png') from IPython.display import Image Image(filename="./output-01-naive/denoised.png", width=384, height=384) import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-02-parfor if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-02-parfor !hybridizer-cuda ./02-parallel-for/01-parfor-csharp.cs graybitmap.cs -o ./02-parallel-for/01-parfor-csharp.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-02-parfor/denoised.bmp') img.save('./output-02-parfor/denoised.png', 'png') from IPython.display import Image Image(filename="./output-02-parfor/denoised.png", width=384, height=384) import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-03-naive-gpu if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-03-naive-gpu !hybridizer-cuda ./03-naive-gpu/01-naive-gpu.cs graybitmap.cs -intrinsics bitonicsort.cuh=./ -o ./03-naive-gpu/01-naive-gpu.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-03-naive-gpu/denoised.bmp') img.save('./output-03-naive-gpu/denoised.png', 'png') from IPython.display import Image Image(filename="./output-03-naive-gpu/denoised.png", width=384, height=384) import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-04-stack-gpu if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-04-stack-gpu !hybridizer-cuda ./04-stack-gpu/01-stack-gpu.cs graybitmap.cs -intrinsics bitonicsort.cuh=./ -o ./04-stack-gpu/01-stack-gpu.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-04-stack-gpu/denoised.bmp') img.save('./output-04-stack-gpu/denoised.png', 'png') from IPython.display import Image Image(filename="./output-04-stack-gpu/denoised.png", width=384, height=384) !hybridizer-cuda ./05-dice-gpu/01-query-config.cs -o ./05-dice-gpu/01-query-config.exe -run import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-05-dice-gpu if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-05-dice-gpu !hybridizer-cuda ./05-dice-gpu/02-dice-gpu.cs graybitmap.cs -intrinsics bitonicsort.cuh=./ -o ./05-dice-gpu/02-dice-gpu.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-05-dice-gpu/denoised.bmp') img.save('./output-05-dice-gpu/denoised.png', 'png') from IPython.display import Image Image(filename="./output-05-dice-gpu/denoised.png", width=384, height=384) !cd 05-dice-gpu/hybrid ; nvprof --profile-child-processes --metrics local_load_transactions,local_store_transactions,gld_transactions,gst_transactions,l2_read_transactions,l2_write_transactions ./run.sh import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-06-regsort-gpu if platform.system() == "Linux" : # create directory on Linux !mkdir -p 
./output-06-regsort-gpu !hybridizer-cuda ./06-regsort-gpu/01-regsort-gpu.cs graybitmap.cs -o ./06-regsort-gpu/01-regsort-gpu.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-06-regsort-gpu/denoised.bmp') img.save('./output-06-regsort-gpu/denoised.png', 'png') from IPython.display import Image Image(filename="./output-06-regsort-gpu/denoised.png", width=384, height=384) !cd 06-regsort-gpu/hybrid ; nvprof --profile-child-processes --metrics local_load_transactions,local_store_transactions,gld_transactions,gst_transactions,l2_read_transactions,l2_write_transactions ./run.sh !cd 06-regsort-gpu/hybrid ; nvprof -s --profile-child-processes ./run.sh import platform if platform.system() == "Windows" : # create directory on Windows !mkdir output-07-cache-aware-gpu if platform.system() == "Linux" : # create directory on Linux !mkdir -p ./output-07-cache-aware-gpu !hybridizer-cuda ./07-cache-aware-gpu/01-cache-aware-gpu.cs -intrinsics intrinsics.cuh=./ graybitmap.cs -o ./07-cache-aware-gpu/01-cache-aware-gpu.exe -run # convert bmp to png to have interactive display from PIL import Image img = Image.open('./output-07-cache-aware-gpu/denoised.bmp') img.save('./output-07-cache-aware-gpu/denoised.png', 'png') from IPython.display import Image Image(filename="./output-07-cache-aware-gpu/denoised.png", width=384, height=384) !cd 07-cache-aware-gpu/hybrid ; nvprof --profile-child-processes --metrics local_load_transactions,local_store_transactions,gld_transactions,gst_transactions,l2_read_transactions,l2_write_transactions ./run.sh !cd 07-cache-aware-gpu/hybrid ; nvprof -s --profile-child-processes ./run.sh !cd 07-cache-aware-gpu/hybrid ; nvprof --profile-child-processes --metrics ldst_fu_utilization,single_precision_fu_utilization ./run.sh <END_TASK>
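Step 5's block-count remark above is easy to verify with a few lines of arithmetic. A minimal sketch in Python (the SM count is an assumption for illustration; the row's code does not name a specific GPU):

import math

lines, threads_per_block = 1960, 128
blocks = math.ceil(lines / threads_per_block)   # ceil(1960/128) = 16 blocks
sms = 80  # assumed SM count, e.g. a V100; GPUs want several resident blocks per SM
print("{} blocks on {} SMs -> {:.2f} blocks per SM (severely underutilized)".format(
    blocks, sms, blocks / sms))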