markdown stringlengths 0 1.02M | code stringlengths 0 832k | output stringlengths 0 1.02M | license stringlengths 3 36 | path stringlengths 6 265 | repo_name stringlengths 6 127 |
|---|---|---|---|---|---|
[3] Preprocessing [3.1]. Preprocessing Review TextNow that we have finished deduplication our data requires some preprocessing before we go on further with analysis and making the prediction model.Hence in the Preprocessing phase we do the following in the order below:-1. Begin by removing the html tags2. Remove any punctuations or limited set of special characters like , or . or etc.3. Check if the word is made up of english letters and is not alpha-numeric4. Check to see if the length of the word is greater than 2 (as it was researched that there is no adjective in 2-letters)5. Convert the word to lowercase6. Remove Stopwords7. Finally Snowball Stemming the word (it was obsereved to be better than Porter Stemming)After which we collect the words used to describe positive and negative reviews | # printing some random reviews
sent_0 = final['Text'].values[0]
print(sent_0)
print("="*50)
sent_1000 = final['Text'].values[1000]
print(sent_1000)
print("="*50)
sent_1500 = final['Text'].values[1500]
print(sent_1500)
print("="*50)
sent_4900 = final['Text'].values[4900]
print(sent_4900)
print("="*50)
# remove urls from text python: https://stackoverflow.com/a/40823105/4084039
sent_0 = re.sub(r"http\S+", "", sent_0)
sent_1000 = re.sub(r"http\S+", "", sent_1000)
# fix: the original assigned the cleaned text to a misspelled name (sent_150),
# so sent_1500 was never actually cleaned of URLs
sent_1500 = re.sub(r"http\S+", "", sent_1500)
sent_4900 = re.sub(r"http\S+", "", sent_4900)
print(sent_0)
# https://stackoverflow.com/questions/16206380/python-beautifulsoup-how-to-remove-all-tags-from-an-element
from bs4 import BeautifulSoup
soup = BeautifulSoup(sent_0, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1000, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_1500, 'lxml')
text = soup.get_text()
print(text)
print("="*50)
soup = BeautifulSoup(sent_4900, 'lxml')
text = soup.get_text()
print(text)
# https://stackoverflow.com/a/47091490/4084039
import re
def decontracted(phrase):
    """Expand common English contractions in *phrase* (e.g. "won't" -> "will not")."""
    # Irregular contractions that the generic suffix rules below would mangle,
    # so they must be expanded first.
    specific = [
        (r"won't", "will not"),
        (r"can\'t", "can not"),
    ]
    # Generic suffix expansions; "n't" must run before "'t" so that e.g.
    # "don't" becomes "do not" rather than "don not".
    general = [
        (r"n\'t", " not"),
        (r"\'re", " are"),
        (r"\'s", " is"),
        (r"\'d", " would"),
        (r"\'ll", " will"),
        (r"\'t", " not"),
        (r"\'ve", " have"),
        (r"\'m", " am"),
    ]
    for pattern, replacement in specific + general:
        phrase = re.sub(pattern, replacement, phrase)
    return phrase
sent_1500 = decontracted(sent_1500)
print(sent_1500)
print("="*50)
#remove words with numbers python: https://stackoverflow.com/a/18082370/4084039
# raw string avoids the invalid "\S"/"\d" escape-sequence warnings of the
# original plain string (the matched pattern itself is unchanged)
sent_0 = re.sub(r"\S*\d\S*", "", sent_0).strip()
print(sent_0)
#remove special characters: https://stackoverflow.com/a/5843547/4084039
sent_1500 = re.sub('[^A-Za-z0-9]+', ' ', sent_1500)
print(sent_1500)
# https://gist.github.com/sebleier/554280
# we are removing the words from the stop words list: 'no', 'nor', 'not'
# <br /><br /> ==> after the above steps, we are getting "br br"
# we are including them into stop words list
# instead of <br /> if we have <br/> these tags would have been removed in the 1st step
# NLTK's English stopword list, hand-extended with 'br' (residue of <br/> HTML tags
# after tag stripping); the negations 'no', 'nor' and 'not' are deliberately left OUT
# of this set so that sentiment-bearing negation words survive preprocessing.
stopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've",\
            "you'll", "you'd", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \
            'she', "she's", 'her', 'hers', 'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their',\
            'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', "that'll", 'these', 'those', \
            'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \
            'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \
            'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\
            'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\
            'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\
            'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \
            's', 't', 'can', 'will', 'just', 'don', "don't", 'should', "should've", 'now', 'd', 'll', 'm', 'o', 're', \
            've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't", 'didn', "didn't", 'doesn', "doesn't", 'hadn',\
            "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't", 'ma', 'mightn', "mightn't", 'mustn',\
            "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't", 'wasn', "wasn't", 'weren', "weren't", \
            'won', "won't", 'wouldn', "wouldn't"])
# Combining all the above steps
from tqdm import tqdm
# Apply the full cleaning pipeline to every review in final['Text'] and collect
# the cleaned strings in preprocessed_reviews.
preprocessed_reviews = []
# tqdm is for printing the status bar
for sentance in tqdm(final['Text'].values):
    sentance = re.sub(r"http\S+", "", sentance)             # strip URLs
    sentance = BeautifulSoup(sentance, 'lxml').get_text()   # strip HTML tags
    sentance = decontracted(sentance)                       # expand contractions
    # raw strings below avoid the invalid escape-sequence warnings of the
    # original plain strings; the matched patterns are unchanged
    sentance = re.sub(r"\S*\d\S*", "", sentance).strip()    # drop tokens containing digits
    sentance = re.sub(r'[^A-Za-z]+', ' ', sentance)         # keep letters only
    # https://gist.github.com/sebleier/554280
    sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in stopwords)
    preprocessed_reviews.append(sentance.strip())
preprocessed_reviews[100000] | _____no_output_____ | MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4] Featurization [4.1] BAG OF WORDS | #BoW
count_vect = CountVectorizer() #in scikit-learn
count_vect.fit(preprocessed_reviews)
print("some feature names ", count_vect.get_feature_names()[:10])
print('='*50)
final_counts = count_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_counts))
print("the shape of out text BOW vectorizer ",final_counts.get_shape())
print("the number of unique words ", final_counts.get_shape()[1]) | some feature names ['aa', 'aahhhs', 'aback', 'abandon', 'abates', 'abbott', 'abby', 'abdominal', 'abiding', 'ability']
==================================================
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text BOW vectorizer (4986, 12997)
the number of unique words 12997
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4.2] Bi-Grams and n-Grams. | #bi-gram, tri-gram and n-gram
#removing stop words like "not" should be avoided before building n-grams
# count_vect = CountVectorizer(ngram_range=(1,2))
# please do read the CountVectorizer documentation http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html
# you can choose these numbers min_df=10, max_features=5000, of your choice
count_vect = CountVectorizer(ngram_range=(1,2), min_df=10, max_features=5000)
final_bigram_counts = count_vect.fit_transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_bigram_counts))
print("the shape of out text BOW vectorizer ",final_bigram_counts.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_bigram_counts.get_shape()[1]) | the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text BOW vectorizer (4986, 3144)
the number of unique words including both unigrams and bigrams 3144
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4.3] TF-IDF | tf_idf_vect = TfidfVectorizer(ngram_range=(1,2), min_df=10)
tf_idf_vect.fit(preprocessed_reviews)
print("some sample features(unique words in the corpus)",tf_idf_vect.get_feature_names()[0:10])
print('='*50)
final_tf_idf = tf_idf_vect.transform(preprocessed_reviews)
print("the type of count vectorizer ",type(final_tf_idf))
print("the shape of out text TFIDF vectorizer ",final_tf_idf.get_shape())
print("the number of unique words including both unigrams and bigrams ", final_tf_idf.get_shape()[1]) | some sample features(unique words in the corpus) ['ability', 'able', 'able find', 'able get', 'absolute', 'absolutely', 'absolutely delicious', 'absolutely love', 'absolutely no', 'according']
==================================================
the type of count vectorizer <class 'scipy.sparse.csr.csr_matrix'>
the shape of out text TFIDF vectorizer (4986, 3144)
the number of unique words including both unigrams and bigrams 3144
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4.4] Word2Vec | # Train your own Word2Vec model using your own text corpus
# Tokenize every preprocessed review into a list of words.
# (the original also initialised a counter i=0 that was never used; it has been dropped)
list_of_sentance = [sentance.split() for sentance in preprocessed_reviews]
# Using Google News Word2Vectors
# in this project we are using a pretrained model by google
# its 3.3G file, once you load this into your memory
# it occupies ~9Gb, so please do this step only if you have >12G of ram
# we will provide a pickle file which contains a dict,
# and it contains all our corpus words as keys and model[word] as values
# To use this code-snippet, download "GoogleNews-vectors-negative300.bin"
# from https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit
# it's 1.9GB in size.
# http://kavita-ganesan.com/gensim-word2vec-tutorial-starter-code/#.W17SRFAzZPY
# you can comment this whole cell
# or change these varible according to your need
is_your_ram_gt_16g=False
want_to_use_google_w2v = False
want_to_train_w2v = True
if want_to_train_w2v:
# min_count = 5 considers only words that occured atleast 5 times
w2v_model=Word2Vec(list_of_sentance,min_count=5,size=50, workers=4)
print(w2v_model.wv.most_similar('great'))
print('='*50)
print(w2v_model.wv.most_similar('worst'))
elif want_to_use_google_w2v and is_your_ram_gt_16g:
if os.path.isfile('GoogleNews-vectors-negative300.bin'):
w2v_model=KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
print(w2v_model.wv.most_similar('great'))
print(w2v_model.wv.most_similar('worst'))
else:
print("you don't have gogole's word2vec file, keep want_to_train_w2v = True, to train your own w2v ")
w2v_words = list(w2v_model.wv.vocab)
print("number of words that occured minimum 5 times ",len(w2v_words))
print("sample words ", w2v_words[0:50]) | number of words that occured minimum 5 times 3817
sample words ['product', 'available', 'course', 'total', 'pretty', 'stinky', 'right', 'nearby', 'used', 'ca', 'not', 'beat', 'great', 'received', 'shipment', 'could', 'hardly', 'wait', 'try', 'love', 'call', 'instead', 'removed', 'easily', 'daughter', 'designed', 'printed', 'use', 'car', 'windows', 'beautifully', 'shop', 'program', 'going', 'lot', 'fun', 'everywhere', 'like', 'tv', 'computer', 'really', 'good', 'idea', 'final', 'outstanding', 'window', 'everybody', 'asks', 'bought', 'made']
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4.4.1] Converting text into vectors using Avg W2V, TFIDF-W2V [4.4.1.1] Avg W2v | # average Word2Vec
# compute average word2vec for each review.
sent_vectors = []  # the avg-w2v for each sentence/review is stored in this list
# set membership is O(1); the original scanned the w2v_words LIST once per word,
# making the whole loop quadratic in the vocabulary size
w2v_vocab = set(w2v_words)
for sent in tqdm(list_of_sentance):  # for each review/sentence
    sent_vec = np.zeros(50)  # word vectors have length 50; change to 300 if you use Google's w2v
    cnt_words = 0  # num of words with a valid vector in the sentence/review
    for word in sent:  # for each word in a review/sentence
        if word in w2v_vocab:
            sent_vec += w2v_model.wv[word]
            cnt_words += 1
    if cnt_words != 0:
        sent_vec /= cnt_words
    sent_vectors.append(sent_vec)
print(len(sent_vectors))
print(len(sent_vectors[0]))
print(len(sent_vectors[0])) | 100%|ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ| 4986/4986 [00:03<00:00, 1330.47it/s]
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[4.4.1.2] TFIDF weighted W2v | # S = ["abc def pqr", "def def def abc", "pqr pqr def"]
# Fit a plain TF-IDF model over the corpus so we can look up per-word idf values.
model = TfidfVectorizer()
tf_idf_matrix = model.fit_transform(preprocessed_reviews)
# we are converting a dictionary with word as a key, and the idf as a value
# (model.idf_ lines up index-for-index with model.get_feature_names())
dictionary = dict(zip(model.get_feature_names(), list(model.idf_)))
# TF-IDF weighted Word2Vec
tfidf_feat = model.get_feature_names() # tfidf words/col-names
# final_tf_idf is the sparse matrix with row= sentence, col=word and cell_val = tfidf
tfidf_sent_vectors = []  # the tfidf-w2v for each sentence/review is stored in this list
# set-based lookups: the original tested membership in two LISTS per word,
# which made the loop quadratic in the vocabulary size
w2v_vocab = set(w2v_words)
tfidf_vocab = set(tfidf_feat)
row = 0
for sent in tqdm(list_of_sentance):  # for each review/sentence
    sent_vec = np.zeros(50)  # word vectors have length 50
    weight_sum = 0  # running sum of tf-idf weights with a valid vector in this review
    for word in sent:  # for each word in a review/sentence
        if word in w2v_vocab and word in tfidf_vocab:
            vec = w2v_model.wv[word]
            # dictionary[word] = idf value of word in the whole corpus
            # sent.count(word)/len(sent) = tf value of word in this review
            tf_idf = dictionary[word] * (sent.count(word) / len(sent))
            sent_vec += (vec * tf_idf)
            weight_sum += tf_idf
    if weight_sum != 0:
        sent_vec /= weight_sum
    tfidf_sent_vectors.append(sent_vec)
    row += 1
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
Truncated-SVD [5.1] Taking top features from TFIDF, SET 2 | # Please write all the code with proper documentation
X = preprocessed_reviews[:]
y = final['Score'][:]
tf_idf = TfidfVectorizer()
tfidf_data = tf_idf.fit_transform(X)
tfidf_feat = tf_idf.get_feature_names()
| _____no_output_____ | MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[5.2] Calculation of Co-occurrence matrix | # Please write all the code with proper documentation
#Ref:https://datascience.stackexchange.com/questions/40038/how-to-implement-word-to-word-co-occurence-matrix-in-python
#Ref:# https://github.com/PushpendraSinghChauhan/Amazon-Fine-Food-Reviews/blob/master/Computing%20Word%20Vectors%20using%20TruncatedSVD.ipynb
def Co_Occurrence_Matrix(neighbour_num , list_words):
    """Build a co-occurrence matrix over the module-global preprocessed_reviews.

    neighbour_num -- window size: two words co-occur if they appear within
                     neighbour_num positions of each other in the flattened corpus
    list_words    -- vocabulary defining both the rows and columns of the matrix

    Returns a len(list_words) x len(list_words) numpy array of co-occurrence
    counts, with zeros on the diagonal.

    NOTE(review): this reads the module-global preprocessed_reviews rather than
    taking the corpus as a parameter, and the `count +=2` branch below counts a
    column word found in BOTH the left and right windows twice — confirm that is
    the intended weighting.
    """
    # Storing all words with their indices in the dictionary
    corpus = dict()
    # List of all words in the corpus (flattened, in document order)
    doc = []
    index = 0
    for sent in preprocessed_reviews:
        for word in sent.split():
            doc.append(word)
            corpus.setdefault(word,[])
            corpus[word].append(index)
            index += 1
    # Co-occurrence matrix
    matrix = []
    # rows in co-occurrence matrix
    for row in list_words:
        # row in co-occurrence matrix
        temp = []
        # column in co-occurrence matrix
        for col in list_words :
            if( col != row):
                # No. of times col word is in neighbourhood of row word
                count = 0
                # Value of neighbourhood
                num = neighbour_num
                # Indices of row word in the corpus
                positions = corpus[row]
                for i in positions:
                    # occurrence too close to the start: only a right window exists
                    if i<(num-1):
                        # Checking for col word in neighbourhood of row
                        if col in doc[i:i+num]:
                            count +=1
                    # occurrence with full left and right windows available
                    elif (i>=(num-1)) and (i<=(len(doc)-num)):
                        # col word present in BOTH the left and right windows
                        if (col in doc[i-(num-1):i+1]) and (col in doc[i:i+num]):
                            count +=2
                        # col word present in exactly one of the two windows
                        elif (col in doc[i-(num-1):i+1]) or (col in doc[i:i+num]):
                            count +=1
                    # occurrence too close to the end: only a left window exists
                    else :
                        if (col in doc[i-(num-1):i+1]):
                            count +=1
                # appending the col count to row of co-occurrence matrix
                temp.append(count)
            else:
                # Append 0 in the column if row and col words are equal
                temp.append(0)
        # appending the row in co-occurrence matrix
        matrix.append(temp)
    # Return co-occurrence matrix
    return np.array(matrix)
X_new = Co_Occurrence_Matrix(15, top_feat)
| _____no_output_____ | MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[5.3] Finding optimal value for number of components (n) to be retained. | # Please write all the code with proper documentation
# Sweep the number of SVD components and record the cumulative explained
# variance for each setting, then plot the curve to pick an elbow.
k = np.arange(2, 100, 3)
variance = []
for i in k:
    svd = TruncatedSVD(n_components=i)
    # fit() is sufficient here: the transformed matrix returned by
    # fit_transform() was discarded in the original, so we skip computing it
    svd.fit(X_new)
    score = svd.explained_variance_ratio_.sum()
    variance.append(score)
plt.plot(k, variance)
plt.xlabel('Number of Components')
plt.ylabel('Explained Variance')
plt.title('n_components VS Explained variance')
plt.show()
[5.4] Applying k-means clustering | # Please write all the code with proper documentation
# Elbow method: run k-means for several cluster counts and plot the inertia
# (within-cluster sum of squares) to choose a number of clusters.
errors = []
k = [2, 5, 10, 15, 25, 30, 50, 100]
for i in k:
    kmeans = KMeans(n_clusters=i, random_state=0)
    kmeans.fit(X_new)
    errors.append(kmeans.inertia_)  # within-cluster sum of squared distances
plt.plot(k, errors)
plt.xlabel('K')
plt.ylabel('Error')
plt.title('K VS Error Plot')
plt.show()
# Refit SVD with the chosen number of components (20) and keep the
# per-component explained variance for later inspection.
svd = TruncatedSVD(n_components = 20)
svd.fit(X_new)
score = svd.explained_variance_ratio_
[5.5] Wordclouds of clusters obtained in the above section | # Please write all the code with proper documentation
# Select high-idf features and render a word cloud of the top ones.
# NOTE(review): np.argsort(tf_idf.idf_[::-1]) sorts the REVERSED idf array, so the
# resulting indices refer to positions in the reversed array yet are used below to
# index the unreversed tfidf_feat list — this looks like it was meant to be
# np.argsort(tf_idf.idf_)[::-1]; confirm the intended ordering.
indices = np.argsort(tf_idf.idf_[::-1])
top_feat = [tfidf_feat[i] for i in indices[0:3000]]
top_indices = indices[0:3000]
# NOTE(review): argsort over a list of strings sorts alphabetically, not by any
# importance measure — verify this is the intended "importance" ranking.
top_n = np.argsort(top_feat[::-1])
feature_importances = pd.DataFrame(top_n, index = top_feat, columns=['importance']).sort_values('importance',ascending=False)
top = feature_importances.iloc[0:30]
# Concatenate the top-30 feature names into one whitespace-separated string
# for WordCloud.generate().
comment_words = ' '
for val in top.index:
    val = str(val)
    tokens = val.split()
    # Converts each token into lowercase
    for i in range(len(tokens)):
        tokens[i] = tokens[i].lower()
    for words in tokens:
        comment_words = comment_words + words + ' '
# NOTE(review): this rebinds the module-level name 'stopwords' (previously the
# preprocessing stopword set) to the wordcloud STOPWORDS set.
stopwords = set(STOPWORDS)
wordcloud = WordCloud(width = 600, height = 600,
                      background_color ='black',
                      stopwords = stopwords,
                      min_font_size = 10).generate(comment_words)
plt.figure(figsize = (10, 10), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
[5.6] Function that returns most similar words for a given word. | # Please write all the code with proper documentation
def similarity(word):
    """Print the four words most similar to *word*, ranked by cosine similarity
    of rows of the co-occurrence matrix X_new (vocabulary = top_feat, both
    module globals)."""
    # renamed local: the original bound 'similarity' inside the function,
    # shadowing the function's own name
    sim_matrix = cosine_similarity(X_new)
    word_vect = sim_matrix[top_feat.index(word)]
    # indices of the 4 nearest neighbours, skipping position 0 (the word itself)
    index = word_vect.argsort()[::-1][1:5]
    for i in range(len(index)):
        print((i+1), top_feat[index[i]], "\n")
similarity('sugary')
similarity('notlike')
| 1 maytag
2 slip
3 gibbles
4 farriage
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
[6] Conclusions | # Please write down few lines about what you observed from this assignment.
# Also please do mention the optimal values that you obtained for number of components & number of clusters.
from prettytable import PrettyTable
x = PrettyTable()
x.field_names = ["Algorithm","Best Hyperparameter"]
x.add_row(["T-SVD", 20])
x.add_row(["K-Means", 20])
print(x) | +-----------+---------------------+
| Algorithm | Best Hyperparameter |
+-----------+---------------------+
| T-SVD | 20 |
| K-Means | 20 |
+-----------+---------------------+
| MIT | #11_Amazon_Fine_Food_Reviews_Analysis_Truncated_SVD.ipynb | wizard-kv/Truncated-SVD-algorithm-on-Amazon-reviews-dataset |
Conditional statements - part 1 MotivationAll the previous programs are based on a pure sequence of statements. After the start of the program the statements areexecuted step by step and the program ends afterwards. However, it is often necessary that parts of a program areonly executed under certain conditions. For example, think of the following sentence and how it would be converted into a [pseudo code](https://de.wikipedia.org/wiki/Pseudocode) program:> If it rains tomorrow, I will clean up the basement. Then I will tidy the cupboards and sort the photos. Otherwise, I> will go swimming. In the evening I will go to the cinema with my wife.The textual description of the task is not precise. It is not exactly clear what is to be done.This is common for description in natural language. Often addition information is conveyed through the context of e.g. a conversation. What is probably meant in the previous example is the following:``` If it rains tomorrow, I will: - clean up the basement - tidy the cupboards - sort the photos Otherwise (so if it doesn't rain), I will: go swimming. In the evening I will go to the cinema with my wife.```So, depending on the weather either one or the other path of the pseudo code program is executed. Thisis illustrated in the following graphic:To enable this more complex workflow two things are required:- First, a construction that allows to split the workflow in different paths depending on a given condition.- Second, a specification of conditions. ConditionsSo, what is a condition? In the end, it is something that is either `True` or `False`, in other word, a condition always results in a boolean value. In principal, you could use `True` or `False`, when a condition is required. However, this not flexible, i.e. `True` is always true. More sophisticated conditions can be expressed by comparing the content of variables with a given value. For example, there is an integer variable `age`. 
Then the value can be either equal to 18 or not equal. So checking for *is the value of age equal to 18* can either be `True` or `False`. There are a number of comparison operators, which can be used for both numerical datatypes and string datatypes. In the former case, the usual order of numbers is used, in the latter case, the alphabetic order is taken. Comparison OperatorsIn order to use decisions in programs a way to specify conditions is needed. To formulate condition the comparisonoperators can be used. The following table shows a selection of comparison operators available in Python. The result ofa comparison using these operators is always a `Boolean` value. As already explained, the only possible `Boolean` valuesare `True` and `False`. For each comparison operator the table contain two example expressions that result in `True`and `False` respectively. | Operator | Explanation | Example True | Example False || -------- | ------------------------------------ | ------------ | ------------- || == | Check for equality | 2 == 2 | 2 == 3 || != | Check for inequality | 2 != 3 | 2 != 2 || < | Check for "smaller" | 2 < 3 | 2 < 1 || > | Check for "larger" | 3 > 2 | 2 > 3 || <= | Check for "less than or equal to" | 3 <= 3 | 3 <= 2 || >= | Check for "greater than or equal to" | 2 >= 2 | 2 >= 3 | `=` vs. `==`It is important to emphasize the difference between `=` and `==`. If there is one equal sign, the statement is an *assignment*. A value is assigned to a variable. The assignment has no return value, it is neither true or false. If there are two equal signs, it is a comparison. The values on both sides of the `""` are unchanged. However, the comparison leads to a value, namely `True` or `False`. Complex ConditionsWhat happens, if you want to check, if the variable `age` is greater than 18 but smaller than 30? In this case, you can build complex conditions using the boolean operators `and`, `or` and `not` (cf. the notebook about datatypes). 
ExerciseFamiliarize yourself with the comparison operators. Also test more complex comparisons, such as:```python"abc" < "abd""abcd" > "abc"2 == 2.01 == True0 != True``` | 1 == True | _____no_output_____ | CC0-1.0 | week_1/week_1_unit_5_ifstatement_notebook.ipynb | ceedee666/opensap_python_intro |
Conditional statementsUsing the conditional operators it is now possible to formulate conditional statements in Python.The syntax for conditional statements in Python is:```pythonif condition: statement_a1 ... statement_anelse: statement_b1 ... statement_bm```The result of the condition can be either `True` or `False`. If the condition is `True` the statements `a1` to `an` are executed.If the condition is `False` the statements `b1` to `bm` are executed.Note, that the `else` branch is optional, i.e. an`if` condition can also be specified without an `else` alternative. If the condition then is not true (i.e. `false`),the statements of the `if` block are simply skipped. | number = int(input("Please type a number: "))
if number > 100:
print(number, "is greater than 100!")
number = int(input("Please type a number: "))
if number > 100:
print(number, "is greater than 100!")
else:
print(number, "is smaller or equals 100!") | _____no_output_____ | CC0-1.0 | week_1/week_1_unit_5_ifstatement_notebook.ipynb | ceedee666/opensap_python_intro |
Indentations mark the boundaries of code blocksStatements that belong together are called *code blocks*.As can be seen in the previous examples, Python does not use special characters or keywords to mark thebeginning and the end of code blocks. Instead, indentation is used in Python. So indentation and spaces have a meaning in Python! Therefore, you must not indent arbitrarily within a program. Execute the code in the following two cells to see what happens. | a = 3
b = 4
print(a + b)
number = 100
if number > 0:
print("Number is greater than 0") | _____no_output_____ | CC0-1.0 | week_1/week_1_unit_5_ifstatement_notebook.ipynb | ceedee666/opensap_python_intro |
Let us challenge your understanding of code blocks in Python. Take a look at the following program. The last statement `print("Done")` is not indented. What does this mean for the execution of theprogram? Change the program and indent the `print("Done")`. How does the execution of theprogram change? | number = int(input("Please insert a number: "))
if number > 100:
print(number, "is greater than 100!")
else:
print(number, "is smaller oder equals 100!")
print("Done") | _____no_output_____ | CC0-1.0 | week_1/week_1_unit_5_ifstatement_notebook.ipynb | ceedee666/opensap_python_intro |
ExerciseWrite a conditional statement that asks for the user's name. Use the `input()` function. If his name is Harry or Harry Potter, then output "Welcome to Gryffindor, Mr. Potter!". Otherwise output "Sorry, Hogwarts is full.". | name = | _____no_output_____ | CC0-1.0 | week_1/week_1_unit_5_ifstatement_notebook.ipynb | ceedee666/opensap_python_intro |
Greetings! You might be here because you think* Betting markets are far more efficient then Nate Silver or G. Elliott Morris. I really can't help you if you insist otherwise - perhaps G. Elliott will offer you 19/1 on Trump LOL. * Betting markets still requires some interpretation, because many punters are so lazy they don't even run simulations, or because they involve heterogeneous groups and some markets are products of others, approximately, so we get a convexity effect. See this post https://www.linkedin.com/posts/petercotton_is-bidens-chance-of-winning-90-percent-or-activity-6730191890530095104-njhk and if you like it, please react on linked-in so the marketting dollar for the open source prediction network goes further. Because it really is a dollar. Okay then...This notebook provides you with a simple interpretation of market implied state electoral college probabilities, nothing more. It can be used to compute things like the market implied correlation between states, using a very simple correlation model. That may, or may not, provide you with a new perspective on the markets or a lens as to their degree of internal consistency.In using this, rather than the groovy graphics at 538, you are taking a stand against the ridiculous celebritization of statistics and journalistic group-think. | import numpy as np
from pprint import pprint
import math
from scipy.stats import norm
# Current prices for Biden, expressed as inverse probabilities, and electoral votes
states = [ ('arizona',1.23,11), ('michigan',1.01,16), ('pennsylvania',1.03,20),
('georgia',1.12,16),('nevada',1.035,6), ('north carolina',6.5,15), ('alaska',50,3),
('wisconsin',1.03,10)]
# Maybe you want to add Wisconsin.
# Okay, let's see if this foreigner can get the basic electoral calculus right.
# You might want to re-introduce some other states, but if so change the existing totals below:
biden = 227
trump = 214 # Does not include Alaska
# Sanity check.
undecided = sum([a[2] for a in states])
print(undecided)
total = biden + trump + undecided
assert total==538
# Next ... let's write a little guy that simulated from modified state probabilities. Just ignore this if you
# don't think there is any correlation between results at this late stage of the race.
# Perhaps, however, there is some latent correlation still in the results - related to legal moves or military voting patterns or
# consistent bias across state markets. I will merely remark that some correlation is required to make the betting markets coherent, but
# also that this implied correlation will not necessarily be justified.
def conditional(p: float, rho=None, z=None) -> int:
    """ Simulate binary event conditioned on common factor, leaving unconditional probability alone
            p     Unconditional probability of the event
            z     Gaussian common factor; None means draw independently of any common factor
            rho   Correlation with the common factor; must be supplied when z is given
        Returns 1 if the event occurs, 0 otherwise (the original mixed ints and
        numpy bools across branches; the int cast makes the return type uniform).
        (this is a Normal Copula with common off-diagonal entries)
    """
    # Degenerate probabilities short-circuit so norm.ppf never sees 0 or 1
    # (which would return -inf / +inf).
    if p < 1e-8:
        return 0
    elif p > 1 - 1e-8:
        return 1
    else:
        x1 = math.sqrt(1 - rho) * np.random.randn() + math.sqrt(rho) * z if z is not None else np.random.randn()
        return int(x1 < norm.ppf(p))
examples = {'p_z=0':conditional(p=0.5,rho=0.5,z=0),
'p_z=1':conditional(p=0.5,rho=0.5,z=1)}
pprint(examples)
# A quick sanity check. The mean of the conditional draws should be the same as the original probability
p_unconditional = 0.22
zs = np.random.randn(10000)
p_mean = np.mean([ conditional(p=p_unconditional, rho=.7, z=z) for z in zs])
pprint( {'p_unconditional':p_unconditional,'mean of p_conditional':p_mean})
# Jolly good. Now let's use this model.
# I've added a simple translational bias as well, if you'd rather use that to introduce correlation.
BIAS = 0 # If you want to systematically translate all state probs (this is not mean preserving)
RHO = 0.4 # If you want correlation introduced via a Normal Copula with constant off-diagnonal terms
def biden_sim() -> int:
    """
    Simulate, once, the number of electoral college votes for Joe Biden.

    Reads module globals: biden (safe votes), states (name, inverse prob,
    electoral votes), BIAS and RHO (correlation model knobs), and the
    conditional() copula sampler.
    """
    votes = biden  # start from the states already called for Biden
    # NOTE(review): 'bias' is computed but never used below — BIAS itself is
    # added to the draw instead. Confirm which translation was intended.
    bias = BIAS*np.random.randn() # Apply the same translation to all states
    z = np.random.randn() # Common latent factor capturing ... you tell me
    for s in states:
        p = 1/s[1]  # market-implied Biden win probability for this state
        # NOTE(review): conditional() returns a 0/1 DRAW, not a probability, so
        # with BIAS == 0 the rand() comparison below simply passes that draw
        # through (rand() < 1 is almost surely True, rand() < 0 is False);
        # with BIAS != 0, shifted_p is a translated binary outcome, not a
        # translated probability — confirm this is the intended model.
        conditional_p = conditional(p=p,rho=RHO,z=z)
        shifted_p = conditional_p + BIAS
        if np.random.rand()<shifted_p:
            votes = votes + s[2]  # award this state's electoral votes to Biden
    return votes
biden_sim()
# Simulate it many times
bs = [ biden_sim() for _ in range(50000) ]
ts = [538-b for b in bs] # Trump electoral votes
b_win = np.mean([b>=270 for b in bs])
print('Biden win probability is '+str(b_win))
import matplotlib.pyplot as plt
plt.hist(bs,bins=200)
t_win = np.mean([b<=268 for b in bs ])
tie = np.mean([b==269 for b in bs ])
print('Trump win probability is '+str(t_win))
print('Tie probability is '+ str(tie))
b270 = np.mean([b==270 for b in bs])
print('Biden=270 probability is '+str(b270))
# Compute inverse probabilities (European quoting convention) for range outcomes
prices = {'trump_270_299':1./np.mean([t>=270 and t<=299 for t in ts]),
'trump_300_329':1./np.mean([t>=300 and t<=329 for t in ts]),
'biden_270_299':1./np.mean([b>=270 and b<=299 for b in bs]),
'biden_300_329':1./np.mean([b>=300 and b<=329 for b in bs]),
'biden_330_359':1./np.mean([b>=330 and b<=359 for b in bs]),
'biden_m_100.5':1./np.mean([b-t-100.5>0 for b,t in zip(bs,ts)]),
'biden_m_48.5':1./np.mean([b-t-48.5>0 for b,t in zip(bs,ts)])}
pprint(prices)
# American quoting conventions
def pm(p):
    """Convert a win probability into an American moneyline quote string."""
    if p > 0.5:
        # favourite: negative line = stake required to win 100
        line = round(100 * (p / (1 - p)), 0)
        sign = '-'
    else:
        # underdog: positive line = profit on a 100 stake
        line = round(100 / p - 100, 0)
        sign = '+'
    return sign + str(line)
examples = {'p=0.33333':pm(0.333333),
'p=0.75':pm(0.75)}
#pprint(examples)
prices = {'trump_270_or_more':pm(t_win),
'biden_270_or_more':pm(b_win),
'trump_270_299':pm(np.mean([t>=270 and t<=299 for t in ts])),
'trump_300_329':pm(np.mean([t>=300 and t<=329 for t in ts])),
'biden_270_299':pm(np.mean([b>=270 and b<=299 for b in bs])),
'biden_300_329':pm(np.mean([b>=300 and b<=329 for b in bs]))}
pprint(prices) | {'biden_270_299': '+290.0',
'biden_270_or_more': '-4019.0',
'biden_300_329': '-257.0',
'trump_270_299': '+5407.0',
'trump_270_or_more': '+5169.0',
'trump_300_329': '+121851.0'}
| MIT | Election_in_the_run_with_correlation.ipynb | microprediction/microblog |
Using a random forest for demographic model selectionIn Schrider and Kern (2017) we give a toy example of demographic model selection via supervised machine learning in Figure Box 1. Following a discussion on twitter, Vince Buffalo had the great idea of our providing a simple example of supervised ML in population genetics using a jupyter notebook; this notebook aims to serve that purpose by showing you exactly how we produced that figure in our paper PreliminariesThe road map here will be to 1) do some simulation of three demographic models, 2) to train a classifier to distinguish among those models, 3) test that classifier with new simulation data, and 4) to graphically present how well our trained classifier works. To do this we will use coalescent simulations as implemented in Dick Hudson's well known `ms` software and for the ML side of things we will use the `scikit-learn` package. Let's start by installing these dependencies (if you don't have them installed already) Install, and compile `ms`We have put a copy of the `ms` tarball in this repo, so the following should work upon cloning | #untar and compile ms and sample_stats
!tar zxf ms.tar.gz; cd msdir; gcc -o ms ms.c streec.c rand1.c -lm; gcc -o sample_stats sample_stats.c tajd.c -lm
#I get three compiler warnings from ms, but everything should be fine
#now I'll just move the programs into the current working dir
!mv msdir/ms . ; mv msdir/sample_stats .; | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
Install `scikit-learn`If you use anaconda, you may already have these modules installed, but if not you can install with either of the following | !conda install scikit-learn --yes | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
or if you don't use `conda`, you can use `pip` to install scikit-learn with | !pip install -U scikit-learn | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
Step 1: create a training set and a testing setWe will create a training set using simulations from three different demographic models: equilibrium population size, instantaneous population growth, and instantaneous population contraction. As you'll see this is really just a toy example because we will perform classification based on data from a single locus; in practice this would be ill-advised and you would want to use data from many loci simulataneously. So lets do some simulation using `ms` and summarize those simulations using the `sample_stats` program that Hudson provides. Ultimately we will only use two summary stats for classification, but one could use many more. Each of these simulations should take a few seconds to run. | #simulate under the equilibrium model
!./ms 20 2000 -t 100 -r 100 10000 | ./sample_stats > equilibrium.msOut.stats
#simulate under the contraction model
!./ms 20 2000 -t 100 -r 100 10000 -en 0 1 0.5 -en 0.2 1 1 | ./sample_stats > contraction.msOut.stats
#simulate under the growth model
!./ms 20 2000 -t 100 -r 100 10000 -en 0.2 1 0.5 | ./sample_stats > growth.msOut.stats
#now lets suck up the data columns we want for each of these files, and create one big training set; we will use numpy for this
# note that we are only using two columns of the data- these correspond to segSites and Fay & Wu's H
import numpy as np
X1 = np.loadtxt("equilibrium.msOut.stats",usecols=(3,9))
X2 = np.loadtxt("contraction.msOut.stats",usecols=(3,9))
X3 = np.loadtxt("growth.msOut.stats",usecols=(3,9))
X = np.concatenate((X1,X2,X3))
#create associated 'labels' -- these will be the targets for training
y = [0]*len(X1) + [1]*len(X2) + [2]*len(X3)
Y = np.array(y)
#the last step in this process will be to shuffle the data, and then split it into a training set and a testing set
#the testing set will NOT be used during training, and will allow us to check how well the classifier is doing
#scikit-learn has a very convenient function for doing this shuffle and split operation
#
# will will keep out 10% of the data for testing
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.1) | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
Step 2: train our classifier and visualize decision surfaceNow that we have a training and testing set ready to go, we can move on to training our classifier. For this example we will use a random forest classifier (Breiman 2001). This is all implemented in `scikit-learn` and so the code is very brief. | from sklearn.ensemble import RandomForestClassifier
rfClf = RandomForestClassifier(n_estimators=100,n_jobs=10)
clf = rfClf.fit(X_train, Y_train)
| _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
That's it! The classifier is trained. This Random Forest classifer used 100 decision trees in its ensemble, a pretty large number considering that we are only using two summary stats to represent our data. Nevertheless it trains on the data very, very quickly.Confession: the real reason we are using only two summary statistics right here is because it makes it really easy to visualize that classifier's decision surface: which regions of the feature space would be assigned to which class? Let's have a look!(Note: I have increased the h argument for the call to `make_meshgrid` below, coarsening the contour plot in the interest of efficiency. Decreasing this will yield a smoother plot, but may take a while and use up a lot more memory. Adjust at your own risk!) | from sklearn.preprocessing import normalize
#These two functions (taken from scikit-learn.org) plot the decision boundaries for a classifier.
def plot_contours(ax, clf, xx, yy, **params):
    """Draw the classifier's decision regions over the meshgrid (xx, yy).

    Evaluates `clf.predict` on every grid point, reshapes the predictions to
    the grid, and renders them with `ax.contourf`, forwarding **params.
    Returns whatever `contourf` returns.
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
def make_meshgrid(x, y, h=.05):
    """Build a 2-D evaluation grid covering the data, padded by 1 on each side.

    *x* and *y* are arrays of feature values; *h* is the grid step. Returns
    the (xx, yy) pair produced by `np.meshgrid`.
    """
    xs = np.arange(x.min() - 1, x.max() + 1, h)
    ys = np.arange(y.min() - 1, y.max() + 1, h)
    return np.meshgrid(xs, ys)
#Let's do the plotting
import matplotlib.pyplot as plt
fig,ax= plt.subplots(1,1)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1, h=0.2)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
# plotting only a subset of our data to keep things from getting too cluttered
ax.scatter(X_test[:200, 0], X_test[:200, 1], c=Y_test[:200], cmap=plt.cm.coolwarm, edgecolors='k')
ax.set_xlabel(r"$\theta_{w}$", fontsize=14)
ax.set_ylabel(r"Fay and Wu's $H$", fontsize=14)
ax.set_xticks(())
ax.set_yticks(())
ax.set_title("Classifier decision surface", fontsize=14)
plt.show() | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
Above we can see which regions of our feature space are assigned to each class: dark blue shaded areas will be classified as Equilibrium, faint blue as Contraction, and red as Growth. Note the non-linear decision surface. Looks pretty cool! And also illustrates how this type of classifier might be useful for discriminating among classes that are difficult to linearly separate. Also plotted are a subset of our test examples, as dots colored according to their true class. Looks like we are doing pretty well but have a few misclassifications. Would be nice to quantify this somehow, which brings us to... Step 3: benchmark our classifierThe last step of the process is to use our trained classifier to predict which demographic models our test data are drawn from. Recall that the classifier hasn't seen these test data so this should be a fair test of how well the classifier will perform on any new data we throw at it in the future. We will visualize performance using a confusion matrix. | #here's the confusion matrix function
def makeConfusionMatrixHeatmap(data, title, trueClassOrderLs, predictedClassOrderLs, ax):
    """Draw a row-normalized confusion matrix as a heatmap on *ax*.

    data: 2-D array-like of raw counts, rows = true classes, cols = predicted.
    title: plot title.
    trueClassOrderLs / predictedClassOrderLs: class labels for rows / columns.
    ax: matplotlib Axes to draw into.

    NOTE(review): rows of *data* are expected already in reversed true-class
    order (the caller does `counts.reverse()`), which is why the y tick labels
    below use `reversed(trueClassOrderLs)`.
    """
    data = np.array(data)
    # L1-normalize each row so cells are fractions of that true class.
    data = normalize(data, axis=1, norm='l1')
    heatmap = ax.pcolor(data, cmap=plt.cm.Blues, vmin=0.0, vmax=1.0)
    # Annotate every cell with its percentage; light text on dark cells.
    for i in range(len(predictedClassOrderLs)):
        for j in reversed(range(len(trueClassOrderLs))):
            val = 100*data[j, i]
            if val > 50:
                c = '0.9'
            else:
                c = 'black'
            ax.text(i + 0.5, j + 0.5, '%.2f%%' % val, horizontalalignment='center', verticalalignment='center', color=c, fontsize=9)
    cbar = plt.colorbar(heatmap, cmap=plt.cm.Blues, ax=ax)
    cbar.set_label("Fraction of simulations assigned to class", rotation=270, labelpad=20, fontsize=11)
    # put the major ticks at the middle of each cell
    ax.set_xticks(np.arange(data.shape[1]) + 0.5, minor=False)
    ax.set_yticks(np.arange(data.shape[0]) + 0.5, minor=False)
    ax.axis('tight')
    ax.set_title(title)
    #labels
    ax.set_xticklabels(predictedClassOrderLs, minor=False, fontsize=9, rotation=45)
    ax.set_yticklabels(reversed(trueClassOrderLs), minor=False, fontsize=9)
    ax.set_xlabel("Predicted class")
    ax.set_ylabel("True class")
#now the actual work
#first get the predictions
preds=clf.predict(X_test)
counts=[[0.,0.,0.],[0.,0.,0.],[0.,0.,0.]]
for i in range(len(Y_test)):
counts[Y_test[i]][preds[i]] += 1
counts.reverse()
classOrderLs=['equil','contraction','growth']
#now do the plotting
fig,ax= plt.subplots(1,1)
makeConfusionMatrixHeatmap(counts, "Confusion matrix", classOrderLs, classOrderLs, ax)
plt.show() | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
Looks pretty good. But can we make it better? Well a simple way might be to increase the number of features (i.e. summary statistics) we use as input. Let's give that a whirl using all of the output from Hudson's `sample_stats` | X1 = np.loadtxt("equilibrium.msOut.stats",usecols=(1,3,5,7,9))
X2 = np.loadtxt("contraction.msOut.stats",usecols=(1,3,5,7,9))
X3 = np.loadtxt("growth.msOut.stats",usecols=(1,3,5,7,9))
X = np.concatenate((X1,X2,X3))
#create associated 'labels' -- these will be the targets for training
y = [0]*len(X1) + [1]*len(X2) + [2]*len(X3)
Y = np.array(y)
X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size=0.1)
rfClf = RandomForestClassifier(n_estimators=100,n_jobs=10)
clf = rfClf.fit(X_train, Y_train)
preds=clf.predict(X_test)
counts=[[0.,0.,0.],[0.,0.,0.],[0.,0.,0.]]
for i in range(len(Y_test)):
counts[Y_test[i]][preds[i]] += 1
counts.reverse()
fig,ax= plt.subplots(1,1)
makeConfusionMatrixHeatmap(counts, "Confusion matrix", classOrderLs, classOrderLs, ax)
plt.show() | _____no_output_____ | MIT | demographicModelSelectionExample.ipynb | kern-lab/popGenMachineLearningExamples |
1 A partir del fichero "US_Crime_Rates_1960_2014", se pide:1. Tratar el dataset como una serie temporal a partir de la columna Year. Siempre el eje X serΓ‘ el nuevo Γndice Year.2. Dibujar todas las columnas numΓ©ricas.3. Como se puede ver en el punto 2, la columna "Population" tiene una magnitud diferente a la de las demΓ‘s columnas: Dibuja la misma grΓ‘fica que antes pero con dos resoluciones diferentes para que se puedan observar con mejor detalle todas las columnas. 4. A partir de ahora y para el resto del ejercicio, borra las columnas "Population" y "Total" ΒΏQuΓ© columnas tienen mejor correlaciΓ³n? 5. A partir de las cinco columnas que tengan mejor correlaciΓ³n con la columna "Murder", entrena un modelo de regresiΓ³n no lineal a partir de polinomio de grado 6. Este modelo ha de entrenarse siguiendo el esquema recomendado de validaciΓ³n cruzada y almacenando los errores de entrenamiento y validaciΓ³n en cada iteraciΓ³n del entrenamiento.6. Representa la evoluciΓ³n de los errores de validaciΓ³n y entrenamiento en una grΓ‘fica. ΒΏHa habido sobreaprendizaje?. Utiliza n_iteraciones=23 y n_repeats=7. | import pandas as pd
df = pd.read_csv("../data/US_Crime_Rates_1960_2014.csv")
df.set_index("Year", inplace=True)
df.plot() | _____no_output_____ | Apache-2.0 | 2_Ejercicios/Modulo2/5.DeciTrees_Kgboost_gridserarch/exercises/4.crime.ipynb | JaunIgciona/DataBootcamp_Nov2020 |
This notebook will help you train a vanilla Point-Cloud AE with the basic architecture we used in our paper. (it assumes latent_3d_points is in the PYTHONPATH and the structural losses have been compiled) | import os.path as osp
from latent_3d_points.src.ae_templates import mlp_architecture_ala_iclr_18, default_train_params
from latent_3d_points.src.autoencoder import Configuration as Conf
from latent_3d_points.src.point_net_ae import PointNetAutoEncoder
from latent_3d_points.src.in_out import snc_category_to_synth_id, create_dir, PointCloudDataSet, \
load_all_point_clouds_under_folder
from latent_3d_points.src.tf_utils import reset_tf_graph
from latent_3d_points.src.general_utils import plot_3d_point_cloud
%load_ext autoreload
%autoreload 2
%matplotlib inline | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Define Basic Parameters | top_out_dir = '../data/' # Use to save Neural-Net check-points etc.
top_in_dir = '../data/shape_net_core_uniform_samples_2048/' # Top-dir of where point-clouds are stored.
experiment_name = 'single_class_ae'
n_pc_points = 2048 # Number of points per model.
bneck_size = 128 # Bottleneck-AE size
ae_loss = 'chamfer' # Loss to optimize: 'emd' or 'chamfer'
class_name = raw_input('Give me the class name (e.g. "chair"): ').lower() | Give me the class name (e.g. "chair"): chair
| MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Load Point-Clouds | syn_id = snc_category_to_synth_id()[class_name]
class_dir = osp.join(top_in_dir , syn_id)
all_pc_data = load_all_point_clouds_under_folder(class_dir, n_threads=8, file_ending='.ply', verbose=True) | 6778 pclouds were loaded. They belong in 1 shape-classes.
| MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Load default training parameters (some of which are listed below). For more details please print the configuration object. 'batch_size': 50 'denoising': False ( by default AE is not denoising) 'learning_rate': 0.0005 'z_rotate': False ( randomly rotate models of each batch) 'loss_display_step': 1 ( display loss at end of these many epochs) 'saver_step': 10 ( over how many epochs to save neural-network) | train_params = default_train_params()
encoder, decoder, enc_args, dec_args = mlp_architecture_ala_iclr_18(n_pc_points, bneck_size)
train_dir = create_dir(osp.join(top_out_dir, experiment_name))
conf = Conf(n_input = [n_pc_points, 3],
loss = ae_loss,
training_epochs = train_params['training_epochs'],
batch_size = train_params['batch_size'],
denoising = train_params['denoising'],
learning_rate = train_params['learning_rate'],
train_dir = train_dir,
loss_display_step = train_params['loss_display_step'],
saver_step = train_params['saver_step'],
z_rotate = train_params['z_rotate'],
encoder = encoder,
decoder = decoder,
encoder_args = enc_args,
decoder_args = dec_args
)
conf.experiment_name = experiment_name
conf.held_out_step = 5 # How often to evaluate/print out loss on
# held_out data (if they are provided in ae.train() ).
conf.save(osp.join(train_dir, 'configuration')) | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
If you ran the above lines, you can reload a saved model like this: | load_pre_trained_ae = False
restore_epoch = 500
if load_pre_trained_ae:
conf = Conf.load(train_dir + '/configuration')
reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf)
ae.restore_model(conf.train_dir, epoch=restore_epoch) | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Build AE Model. | reset_tf_graph()
ae = PointNetAutoEncoder(conf.experiment_name, conf) | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Train the AE (save output to train_stats.txt) | buf_size = 1 # Make 'training_stats' file to flush each output line regarding training.
fout = open(osp.join(conf.train_dir, 'train_stats.txt'), 'a', buf_size)
train_stats = ae.train(all_pc_data, conf, log_file=fout)
fout.close() | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Evaluation Get a batch of reconstuctions and their latent-codes. | feed_pc, feed_model_names, _ = all_pc_data.next_batch(10)
reconstructions = ae.reconstruct(feed_pc)[0]
latent_codes = ae.transform(feed_pc) | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Use any plotting mechanism such as matplotlib to visualize the results. | i = 2
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True);
i = 4
plot_3d_point_cloud(reconstructions[i][:, 0],
reconstructions[i][:, 1],
reconstructions[i][:, 2], in_u_sphere=True); | _____no_output_____ | MIT | notebooks/train_single_class_ae.ipynb | sinanbayraktar/latent_3d_points |
Figs for the measurement force paper | from scipy.io import loadmat
from scipy.optimize import curve_fit
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from numpy import trapz
def cm2inch(value):
    """Return *value*, a length in centimetres, expressed in inches."""
    CM_PER_INCH = 2.54
    return value / CM_PER_INCH
#axes.xaxis.set_tick_params(direction='in', which='both')
#axes.yaxis.set_tick_params(direction='in', which='both')
mpl.rcParams["xtick.direction"] = "in"
mpl.rcParams["ytick.direction"] = "in"
mpl.rcParams["lines.markeredgecolor"] = "k"
mpl.rcParams["lines.markeredgewidth"] = 1.5
mpl.rcParams["figure.dpi"] = 200
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
rc('xtick', labelsize='medium')
rc('ytick', labelsize='medium')
rc("axes", labelsize = "large")
def cm2inch(value):
    """Convert centimetres to inches (1 in = 2.54 cm).

    NOTE: the notebook defined this helper twice back to back here; the
    redundant duplicate definition has been collapsed into one.
    """
    return value/2.54
def gauss_function(x, a, x0, sigma):
    """Unnormalised Gaussian: a * exp(-(x - x0)^2 / (2 * sigma^2))."""
    exponent = -(x - x0)**2 / (2 * sigma**2)
    return a * np.exp(exponent)
def pdf(data, bins = 10, density = True):
    """Histogram *data* and return (densities, bin centres).

    Thin wrapper around `np.histogram` that converts the returned bin edges
    into bin mid-points.
    """
    counts, edges = np.histogram(data, bins = bins, density = density)
    centres = (edges[:-1] + edges[1:]) / 2
    return counts, centres
#import the plots data
dataset = loadmat("data_graphs.mat")
for i in dataset.keys():
try:
dataset[i] = np.squeeze(dataset[i])
except:
continue
fit_data = loadmat("data_fit_2705.mat")
for i in fit_data.keys():
try:
fit_data[i] = np.squeeze(fit_data[i])
except:
continue
def movmin(z, window):
    """Rolling minimum of *z*, used as a drift baseline for the trajectory.

    Reproduces the original (slightly asymmetric) windowing exactly: with
    h = ceil(window / 2), element i is min(z[max(0, i - h + 1) : i + h])
    (the slice end is clipped to len(z) by Python slicing).
    """
    h = int(np.ceil(window / 2))
    out = np.empty_like(z)
    for i in range(len(z)):
        lo = max(0, i - h + 1)
        out[i] = np.min(z[lo:i + h])
    return out
plt.figure(figsize=( cm2inch(16),cm2inch(8)))
plt.plot(dataset["time"], dataset["z"], label="raw")
plt.plot(dataset["time"], dataset["z"] - movmin(dataset["z"], 10000), label="rescaled")
plt.xlabel("time (s)")
plt.ylabel("$z$ ($\mu$m)")
plt.legend(frameon=False)
plt.savefig("traj_rescaled.pdf")
dataset
color = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
plt.figure()
for n,i in enumerate(['pdf_Dz_short_t_1', 'pdf_Dz_short_t_2', 'pdf_Dz_short_t_3', 'pdf_Dz_short_t_4', 'pdf_Dz_short_t_5']):
plt.semilogy(dataset[i][0,:],dataset[i][1,:], color = color[n], marker = "o", linestyle = "")
plt.plot(dataset["pdf_Dz_short_th_t_5"][0,:],dataset["pdf_Dz_short_th_t_5"][1,:], color = color[4])
plt.plot(dataset["gaussian_short_timetheory_z"][0,:],dataset["gaussian_short_timetheory_z"][1,:], color = "gray",linestyle = "--")
ax = plt.gca()
ax.set_ylim([1e-5,1])
ax.set_xlim([-7,7])
plt.xlabel("$\Delta z / \sigma$")
plt.ylabel("$P(\Delta z / \sigma)$")
#dataset
fig = plt.figure(figsize=(cm2inch(8.6), cm2inch(8.6)/1.68*1.3),constrained_layout=False)
gs = fig.add_gridspec(2,3)
##### MSD
fig.add_subplot(gs[0,:])
plt.loglog(dataset["MSD_time_tot"], dataset["MSD_fit_x"], color = "k")
plt.loglog(dataset["MSD_time_tot"], dataset["MSD_fit_z"], color = "k")
plt.loglog(dataset["MSD_time_tot"],dataset["MSD_x_tot"],"o", label = "x", markersize = 5)
plt.loglog(dataset["MSD_time_tot"][::2],dataset["MSD_y_tot"][::2],"o", label = "y", markersize = 5)
plt.loglog(dataset["MSD_time_tot"],dataset["MSD_z_tot"],"o", label = "z", markersize = 5)
# plateau
plateau = [dataset["fitted_MSD_Plateau"] for i in range(len(dataset["MSD_time_tot"]))]
plt.loglog(dataset["MSD_time_tot"][-60:], plateau[-60:], color = "black", linewidth = 1,zorder = 10, linestyle = "--")
##
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.xaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.xaxis.set_minor_locator(locmin)
ax.xaxis.set_minor_formatter(mpl.ticker.NullFormatter())
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_xlim([1e-2,1e3])
ax.set_ylim([None,1e-10])
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.45*xmax,2.5*ymin,'a)')
plt.ylabel("$\mathrm{MSD}$ ($\mathrm{m^2}$)",fontsize = "small", labelpad=0.5)
plt.xlabel("$\Delta t$ (s)",fontsize = "small",labelpad=0.5)
plt.legend(frameon = False,fontsize = "x-small",loc = "upper left")
####### SHORT TIME X
fig.add_subplot(gs[1,0])
for n,i in enumerate(['pdf_Dx_short_t_1', 'pdf_Dx_short_t_2', 'pdf_Dx_short_t_3', 'pdf_Dx_short_t_4', 'pdf_Dx_short_t_5']):
plt.semilogy(dataset[i][0,:],dataset[i][1,:], color = color[n], marker = "o", linestyle = "",markersize = 3)
plt.plot(dataset["pdf_Dx_short_th_t_5"][0,:],dataset["pdf_Dx_short_th_t_5"][1,:], color = "k",zorder=6,linewidth=1)
plt.plot(dataset["gaussianx_short_timetheory"][0,:],dataset["gaussianx_short_timetheory"][1,:], color = "gray",zorder=-1,linestyle = "--",)
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_ylim([1e-5,1])
ax.set_xlim([-7,7])
plt.xlabel("$\Delta x / \sigma$",fontsize = "small", labelpad=0.5)
plt.ylabel("$P_{x} \sigma$",fontsize = "small", labelpad=0.5)
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.54*xmax,0.25*ymax,'b)')
####### SHORT TIME Z
fig.add_subplot(gs[1,1])
for n,i in enumerate(['pdf_Dz_short_t_1', 'pdf_Dz_short_t_2', 'pdf_Dz_short_t_3', 'pdf_Dz_short_t_4', 'pdf_Dz_short_t_5']):
plt.semilogy(dataset[i][0,:],dataset[i][1,:], color = color[n], marker = "o", linestyle = "",markersize = 3)
plt.plot(dataset["pdf_Dz_short_th_t_5"][0,:],dataset["pdf_Dz_short_th_t_5"][1,:], color = "k",zorder=6,linewidth=1)
plt.plot(dataset["gaussian_short_timetheory_z"][0,:],dataset["gaussian_short_timetheory_z"][1,:], color = "gray",zorder=-1,linestyle = "--",)
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_ylim([1e-5,1])
ax.set_xlim([-7,7])
plt.xlabel("$\Delta z / \sigma$",fontsize = "small",labelpad=0.5)
plt.ylabel("$P_{z} \sigma$",fontsize = "small",labelpad=0.5)
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.58*xmax,0.25*ymax,'c)')
###### LONG TIME PDF\
fig.add_subplot(gs[1,2])
plt.errorbar(dataset["x_pdf_longtime"]*1e6,dataset["pdf_longtime"],yerr=dataset["err_long_t"],ecolor = "k",barsabove=False,linewidth = 0.8, label = "experimental pdf",marker="o", markersize=3,capsize = 1,linestyle="")
#plt.fill_between(bins_centers_long_t, pdf_long_t-err_long_t, pdf_long_t+err_long_t, alpha = 0.3)
plt.semilogy(dataset["bins_centers_long_t"],dataset["Pdeltaz_long_th"],color="black", linewidth = 1, zorder=10)
plt.ylabel("$P_z$ ($\mathrm{\mu m^{-1}})$",fontsize = "small", labelpad=0.5)
plt.xlabel("$\Delta z$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
ax = plt.gca()
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_ylim([1e-3,1])
#ax.set_xlim([None,1e-10])
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.5*xmax,0.4*ymax,'d)')
plt.tight_layout(pad = 0.1,h_pad=0.1, w_pad=0.3)
plt.savefig("MSD_displacements.svg")
#dataset
def P_b_off(z, z_off, B, ld, lb):
    """Normalised equilibrium height distribution with an offset.

    z: heights in metres; z_off: offset in micrometres; B: dimensionless
    amplitude of the exponential repulsion; ld, lb: decay lengths in
    nanometres. Probability is zero below the offset; the result is
    normalised so it integrates to 1 over z expressed in micrometres.
    """
    shifted = z - z_off * 1e-6        # work relative to the offset, in metres
    decay = ld * 1e-9
    gravity = lb * 1e-9
    weights = np.exp(-B * np.exp(-shifted / (decay)) - shifted / gravity)
    weights[shifted < 0] = 0          # no density below the wall/offset
    # Normalisation (integration variable in micrometres, as in the plots)
    area = trapz(weights, shifted * 1e6)
    return weights / area
fig = plt.figure(figsize=(cm2inch(8.6), 0.75*cm2inch(8.6)/1.68),constrained_layout=False)
gs = fig.add_gridspec(1,2)
fig.add_subplot(gs[0,0])
#########
def pdf(data, bins = 10, density = True):
    # NOTE(review): re-definition of the pdf() helper declared earlier in the
    # notebook, with an identical body. Returns (densities, bin centres).
    pdf, bins_edge = np.histogram(data, bins = bins, density = density)
    bins_center = (bins_edge[0:-1] + bins_edge[1:]) / 2
    return pdf, bins_center
pdf_z,bins_center = pdf(dataset["z"]- np.min(dataset["z"]),bins = 150)
def logarithmic_hist(data, begin, stop, num=50, base=2):
    """Density histogram of *data* on logarithmically spaced bins.

    begin, stop: range of the bin edges. If begin == 0 the first edge is 0
    and the remaining num - 1 edges are log-spaced from stop/num to stop;
    otherwise all num edges are log-spaced from begin to stop.
    num: number of bin edges; base: logarithm base for the spacing.

    Returns (bins_center, widths, hist), all of length len(edges) - 1.

    FIX: the original computed `widths` before prepending the 0 edge in the
    begin == 0 branch, so widths was one element shorter than bins_center;
    widths is now derived from the final edge array in both branches.
    """
    if begin == 0:
        beg = stop / num
        bins = np.logspace(np.log(beg) / np.log(base), np.log(stop) / np.log(base), num - 1, base=base)
        bins = np.concatenate(([0], bins))
    else:
        bins = np.logspace(np.log(begin) / np.log(base), np.log(stop) / np.log(base), num, base=base)
    widths = bins[1:] - bins[:-1]
    hist, _ = np.histogram(data, bins=bins, density=True)
    bins_center = (bins[1:] + bins[:-1]) / 2
    return bins_center, widths, hist
#bins_center_pdf_z,widths,hist = logarithmic_hist(z_0offset, 0.000001, 3, num = 31,base=2)
#pdf_z, bins_center_pdf_z = pdf(z_dedrift[z_dedrift < 3], bins = 100)
#bins_center,widths, pdf_z = logarithmic_hist(dataset["z"]-np.mean(dataset["z"]),0.0001,4,num = 10,base = 10)
P_b_th = P_b_off(bins_center*1e-6, 0, dataset["B"], dataset["ld"], dataset["lb"])
fig.add_subplot(gs[0,1])
plt.plot(bins_center,P_b_th/trapz(P_b_th,bins_center),color = "k",linewidth=1)
plt.semilogy(bins_center - dataset["offset_B"],pdf_z, "o", markersize = 2.5)
plt.xlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
plt.ylabel("$P_{\mathrm{eq}}$ ($\mathrm{\mu m ^{-1}}$)",fontsize = "small", labelpad=0.5)
ax = plt.gca()
ax.set_ylim([1e-4,3])
ax.set_xlim([-0.2,4.5])
#plt.xticks([0,1,2,3,4])
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.8*xmax,1.2*ymin,'b)')
plt.tight_layout(pad = 0.01,h_pad=0.001, w_pad=0.1)
plt.savefig("viscosityxpdfz.svg")
#fig = plt.figure(figsize=(cm2inch(8.6), cm2inch(8.6)/1.68),constrained_layout=False)
plt.errorbar(dataset["z_Force"]*1e6, dataset["Force"]*1e15,yerr=2*np.sqrt(2)*dataset["err_Force"]*1e15,xerr=dataset["x_err_Force"],ecolor = "k", linestyle="", marker="o", markersize = 4,linewidth = 0.8, capsize=1,zorder=3)
plt.semilogx(dataset["z_Force_th"]*1e6,dataset["Force_th"]*1e15)
plt.plot(np.linspace(1e-2,2,10), np.ones(10) * np.mean(dataset["Force"][-10:]*1e15),zorder=-4,linewidth=1)
ax = plt.gca()
ax.set_ylim([-100,1200])
ax.set_xlim([0.1e-1,3])
plt.ylabel("$F_z$ $\\mathrm{(fN)}$",fontsize = "small", labelpad=0.5)
plt.xlabel("$z$ $(\\mathrm{\mu m})$",fontsize = "small", labelpad=0.5)
plt.text(1.2e-2,100, "$F_g = -7 ~ \mathrm{fN}$ ",fontsize="x-small")
plt.tight_layout()
plt.savefig("Force.pdf")
fig = plt.figure(figsize=(cm2inch(8.6), 0.75*cm2inch(8.6)/1.68),constrained_layout=False)
gs = fig.add_gridspec(1,5)
fig.add_subplot(gs[0,:2])
z_th = np.linspace(10e-9,10e-6,100)
#plt.errorbar(z_D_para_fit, D_para_fit/Do, yerr = err_d_para_fit/Do, linewidth = 3, marker = "x", linestyle = "",color = "tab:red", label = "$D_ \\parallel$")
plt.loglog(z_th*1e6, dataset["D_x_th"], color = "k")
plt.plot(dataset["z_D_yacine"]*1e6 - dataset["offset_diffusion"], dataset["z_D_x_yacine"] / dataset["Do"], marker = "o", linestyle = "",color = "tab:blue",label = "$D_\\parallel$", markersize = 4)
#plt.errorbar(bins_center_pdf_z[:-1], Dz[:]/Do, yerr=err[:]/Do, linewidth = 3, marker = "o", linestyle = "",color = "tab:red",label = "$D_ \\bot$")
plt.semilogx(z_th*1e6, dataset["D_z_th"],color = "k")
plt.plot(dataset["z_D_yacine"]*1e6 - dataset["offset_diffusion"], dataset["z_D_z_yacine"] / dataset["Do"], marker = "o", linestyle = "",color = "tab:green",label = "$D_z$", markersize = 4)
ax = plt.gca()
ax.set_ylim([None,1.01])
ax.set_xlim([None,10])
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.xaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
numticks=100)
ax.xaxis.set_minor_locator(locmin)
ax.xaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
# --- Figure panels: a) diffusion vs z (axes prepared earlier, above this chunk);
# b) force-distance curve with theory line and gravity asymptote. ---
plt.text(0.3*xmax,1.5*ymin,'a)')
plt.legend(frameon = False,fontsize = "x-small",loc="lower center")
plt.xlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
plt.ylabel("$D_i/ D_\mathrm{0}$",fontsize = "small", labelpad=0.5)
#########
# Panel b): measured force (markers with error bars) vs theoretical curve, log-x axis.
fig.add_subplot(gs[0,2:])
plt.errorbar(dataset["z_Force"]*1e6, dataset["Force"]*1e15,yerr=2*np.sqrt(2)*dataset["err_Force"]*1e15,xerr=dataset["x_err_Force"],ecolor = "k", linestyle="", marker="o", markersize = 4,linewidth = 0.8, capsize=1,zorder=3)
plt.semilogx(dataset["z_Force_th"]*1e6,dataset["Force_th"]*1e15,zorder = 9, color = "k",linewidth = 1)
# Dashed red line: large-z plateau estimated as the mean of the last 10 force samples.
plt.plot(np.linspace(1e-2,5,100), np.ones(100) * np.mean(dataset["Force"][-10:]*1e15),zorder=10, linewidth = 1, linestyle="--", color = "tab:red")
ax = plt.gca()
ax.set_ylim([-100,1500])
ax.set_xlim([0.1e-1,3])
plt.ylabel("$F_z$ $\\mathrm{(fN)}$",fontsize = "small", labelpad=0.5)
plt.xlabel("$z$ $(\\mathrm{\mu m})$",fontsize = "small", labelpad=0.5)
plt.text(1.6e-1,100, "$F_\mathrm{g} = -7 ~ \mathrm{fN}$ ",fontsize="x-small", color = "tab:red")
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.yticks([0,250,500,750,1000,1250,1500])
plt.text(0.5*xmax,0.85*ymax,'b)')
#inset
plt.tight_layout(pad = 0.01)
plt.savefig("viscosityxforce.svg")
# NOTE(review): this draw call comes AFTER savefig, so the highlighted theory segment
# is not part of the saved SVG — move it above plt.savefig if it was meant to appear.
plt.semilogx(dataset["z_Force_th"][500:1000]*1e6,dataset["Force_th"][500:1000]*1e15,zorder = 10, color = "k",linewidth = 1)
# --- Figure: experimental vs theoretical images plus radial intensity profile ---
fig = plt.figure(figsize=(cm2inch(8.6), cm2inch(8.6)/1.68),constrained_layout=False)
gs = fig.add_gridspec(6,8)
I_radius = fit_data["I_radius"]
I_r_exp = fit_data["I_r_exp"]
# NOTE(review): duplicate of the assignment two lines above — harmless but redundant.
I_radius = fit_data["I_radius"]
theo_exp = fit_data["theo_exp"]
err = fit_data["I_errr_exp"]
#fig.add_subplot(gs[0:2,0:2])
fig.add_subplot(gs[0:3,5:])
plt.imshow(fit_data["exp_image"], cmap = "gray")
plt.yticks([0,125,250])
fig.add_subplot(gs[3:6,5:])
plt.imshow(fit_data["th_image"], cmap = "gray")
#plt.xticks([], [])
plt.xticks([0,125,250])
plt.yticks([0,125,250])
fig.add_subplot(gs[3:6,0:5])
# 0.532: pixel-to-micrometer factor — presumably tied to a 532 nm wavelength; confirm.
plt.plot(I_radius* 0.532,I_r_exp,label = "Experiment", linewidth = 0.8)
#plt.fill_between(I_radius* 0.532,I_r_exp-err,I_r_exp+err, alpha = 0.7)
plt.plot(I_radius* 0.532,theo_exp,label = "Theory",linewidth = 0.8)
plt.ylabel("$I/I_0$ ", fontsize = "x-small", labelpad=0.5)
plt.xlabel("radial distance ($\mathrm{\mu m}$)", fontsize = "x-small", labelpad=0.5)
plt.legend(fontsize = 5,frameon = False, loc = "lower right")
plt.tight_layout(pad = 0.01)
plt.savefig("exp.svg")
# Extract trajectory coordinates; z is shifted so its minimum sits at zero.
x = dataset["x"]
y = dataset["y"]
z = dataset["z"]- np.min(dataset["z"])
import matplotlib as mpl
def axisEqual3D(ax):
    """Give a 3D axes an equal aspect by widening every axis to the largest span.

    Each of the x/y/z limits is re-centered on its midpoint and expanded to
    half the largest of the three current spans on either side.
    """
    limits = np.array([getattr(ax, 'get_{}lim'.format(axis_name))() for axis_name in 'xyz'])
    spans = limits[:, 1] - limits[:, 0]
    midpoints = np.mean(limits, axis=1)
    half_span = max(abs(spans)) / 2
    for midpoint, axis_name in zip(midpoints, 'xyz'):
        getattr(ax, 'set_{}lim'.format(axis_name))(midpoint - half_span, midpoint + half_span)
from matplotlib.ticker import MultipleLocator
# --- Figure: 3D particle trajectory colored by time, with a horizontal colorbar ---
N = 200
cmap = plt.get_cmap('jet')
fig = plt.figure(figsize=(cm2inch(8.6)/1.5, 0.75*cm2inch(8.6)/1.68))
#plt.figaspect(0.21)*1.5
# NOTE(review): fig.gca(projection=...) is deprecated in newer matplotlib;
# fig.add_subplot(projection='3d') is the supported spelling — confirm target version.
ax = fig.gca(projection='3d')
ax.pbaspect = [1, 20/25, 3/25*4]
ax.ticklabel_format(style = "sci")
# Draw the trajectory in 360-sample segments so each segment gets its own time color.
for i in range(N-1):
    ax.plot(x[i*360:i*360+360], y[i*360:i*360+360], z[i*360:i*360+360], color=plt.cm.jet(1*i/N), linewidth = 0.2)
# ScalarMappable only provides the colormap/normalization for the colorbar below.
norm = mpl.colors.Normalize(vmin=0,vmax=1)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
ax = plt.gca()
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
#ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
plt.rcParams['grid.color'] = "gray"
ax.grid(False)
#ax.w_xaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
#ax.w_yaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
#ax.w_zaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
ax.set_ylim([25,45])
ax.set_xlim([15,40])
#plt.xticks([20,30,40])
#plt.yticks([30,35,40])
ax.set_zticks([0,1.5,3])
plt.xlabel("$x$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
plt.ylabel("$y$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
ax.set_zlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small", labelpad=0.5)
ax.view_init(10,45)
ax.grid(False)
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Re-align tick labels so they do not collide with the 3D axes.
[t.set_va('center') for t in ax.get_yticklabels()]
[t.set_ha('left') for t in ax.get_yticklabels()]
[t.set_va('center') for t in ax.get_xticklabels()]
[t.set_ha('right') for t in ax.get_xticklabels()]
[t.set_va('center') for t in ax.get_zticklabels()]
[t.set_ha('left') for t in ax.get_zticklabels()]
ax.xaxis._axinfo['tick']['inward_factor'] = 0
ax.xaxis._axinfo['tick']['outward_factor'] = 0.4
ax.yaxis._axinfo['tick']['inward_factor'] = 0
ax.yaxis._axinfo['tick']['outward_factor'] = 0.4
ax.zaxis._axinfo['tick']['inward_factor'] = 0
ax.zaxis._axinfo['tick']['outward_factor'] = 0.4
# NOTE(review): duplicate of the previous line — redundant.
ax.zaxis._axinfo['tick']['outward_factor'] = 0.4
# This view_init overrides the earlier ax.view_init(10,45) above.
ax.view_init(elev=5, azim=135)
#ax.xaxis.set_major_locator(MultipleLocator(1))
#ax.yaxis.set_major_locator(MultipleLocator(5))
#ax.zaxis.set_major_locator(MultipleLocator())
# Colorbar labels: normalized time -> minutes via N*360 samples / 60 / 60
# (presumably a 60 samples-per-second acquisition rate — TODO confirm).
ticks_c = []
for i in np.linspace(0,1,5):
    ticks_c.append("{:.0f}".format(N*360*i/60/60))
cbar = plt.colorbar(sm, ticks=np.linspace(0,1,5), format = "%.1f",shrink = 0.4,orientation='horizontal')
cbar.set_ticklabels(ticks_c)
cbar.set_label("$t$ (min)", labelpad=0.5)
plt.tight_layout(h_pad=0.1)
plt.savefig("traj.svg")
# Scratch/exploratory notebook cells left in place (no effect on the saved figure).
dir(ax)
20/25*0.55
ticks_c = []
for i in np.linspace(0,1,10):
    ticks_c.append("{:.0f} m".format(N*500*i/60/60))
ticks_c
200*360
# --- Figure: 3D trajectory (left half) beside the z probability density P(z) (right) ---
fig = plt.figure(figsize=(cm2inch(8.6), 1*cm2inch(8.6)/1.68),constrained_layout=False)
gs = fig.add_gridspec(1,10)
fig.add_subplot(gs[0,0:5], projection='3d')
N = 200
cmap = plt.get_cmap('jet')
ax = plt.gca()
ax.ticklabel_format(style = "sci")
ax.pbaspect = [1, 15/25, 0.25/25*4]
# Trajectory drawn in 500-sample segments, one time-color per segment.
for i in range(N-1):
    ax.plot(x[i*500:i*500+500], y[i*500:i*500+500], z[i*500:i*500+500], color=plt.cm.jet(1*i/N), linewidth = 0.2)
norm = mpl.colors.Normalize(vmin=0,vmax=1)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
ax = plt.gca()
ax.pbaspect = [1, 20/25, 3/25*4]
plt.xlabel("x [$\mathrm{\mu m}$]")
plt.ylabel("y [$\mathrm{\mu m}$]")
ax.set_zlabel("z [$\mathrm{\mu m}$]")
ax.grid(False)
#ax.view_init(30, -10)
#ax.view_init(20, -1)
# Colorbar labels in minutes (N*500 samples / 60 / 60).
ticks_c = []
for i in np.linspace(0,1,10):
    ticks_c.append("{:.0f} min".format(N*500*i/60/60))
cbar = plt.colorbar(sm, ticks=np.linspace(0,1,10), format = "%.1f",orientation='horizontal')
cbar.set_ticklabels(ticks_c)
#########
# Right panel: measured pdf_z (markers) vs theoretical curve Pb_th (line), log-y axis.
fig.add_subplot(gs[0,7:])
plt.plot(dataset["x_pdf_z"] * 1e6,dataset["Pb_th"])
plt.semilogy(dataset["x_pdf_z"] * 1e6 - dataset["offset_B"],dataset["pdf_z"], "o", markersize = 4)
plt.xlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small")
plt.ylabel("$P(z)$ (a.u.)",fontsize = "small")
ax = plt.gca()
ax.set_ylim([1e-2,3])
ax.set_xlim([-0.2,1])
plt.xticks([0,1,2])
# Force decade major ticks and 2..9 minor ticks (no labels) on the log axis.
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
                               numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.8*xmax,1.2*ymin,'b)')
plt.tight_layout(pad = 0.01)
plt.savefig("viscosityxpdfz.pdf")
# --- Figure: z(t) time trace (top strip) above the same P(z) panel (bottom) ---
fig = plt.figure(figsize=(cm2inch(8.6), 0.75*cm2inch(8.6)/1.68),constrained_layout=False)
gs = fig.add_gridspec(10,1)
fig.add_subplot(gs[0:2,0])
# Sample index converted to seconds — presumably 60 samples/s; TODO confirm.
plt.plot(np.arange(len(z))/60,z)
plt.xlabel("time (s)")
plt.ylabel("$z$ ($\mathrm{\mu m}$)")
#########
fig.add_subplot(gs[5:,0])
plt.plot(dataset["x_pdf_z"] * 1e6,dataset["Pb_th"])
plt.semilogy(dataset["x_pdf_z"] * 1e6 - dataset["offset_B"],dataset["pdf_z"], "o", markersize = 4)
plt.xlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small")
plt.ylabel("$P(z)$ (a.u.)",fontsize = "small")
ax = plt.gca()
ax.set_ylim([1e-2,3])
ax.set_xlim([-0.2,1])
plt.xticks([0,1,2])
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
                               numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
plt.text(0.8*xmax,1.2*ymin,'b)')
plt.tight_layout(pad = 0.01,h_pad=0.001, w_pad=0.1)
# NOTE(review): same filename as the previous figure — this overwrites viscosityxpdfz.pdf.
plt.savefig("viscosityxpdfz.pdf")
# Scratch cells: quick inspection of intermediate values.
bins_center
dataset["B"]
# Time axis in seconds (sample index / 60 — presumably the frame rate; confirm).
t = np.arange(len(z))/60
# --- Short-time displacement PDFs at five lag times, vs theory and a Gaussian reference ---
# Each dataset entry is a 2-row array: row 0 = abscissa, row 1 = PDF values.
for n,i in enumerate(['pdf_Dz_short_t_1', 'pdf_Dz_short_t_2', 'pdf_Dz_short_t_3', 'pdf_Dz_short_t_4', 'pdf_Dz_short_t_5']):
    plt.semilogy(dataset[i][0,:],dataset[i][1,:], color = color[n], marker = "o", linestyle = "",markersize = 6)
plt.plot(dataset["pdf_Dz_short_th_t_5"][0,:],dataset["pdf_Dz_short_th_t_5"][1,:], color = color[4])
# Dashed gray line: Gaussian reference (key name "gaussia..." is as stored in the dataset).
plt.plot(dataset["gaussia_short_timetheory"][0,:],dataset["gaussia_short_timetheory"][1,:], color = "gray",linestyle = "--")
ax = plt.gca()
locmaj = mpl.ticker.LogLocator(base=10.0, subs=(1.0, ), numticks=100)
ax.yaxis.set_major_locator(locmaj)
locmin = mpl.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,
                               numticks=100)
ax.yaxis.set_minor_locator(locmin)
ax.yaxis.set_minor_formatter(mpl.ticker.NullFormatter())
ax.set_ylim([1e-5,3])
ax.set_xlim([-7,7])
plt.xlabel("$\Delta z / \sigma$",fontsize = "small")
plt.ylabel("$P(\Delta z / \sigma)$",fontsize = "small")
ymin, ymax = fig.gca().get_ylim()
xmin, xmax = fig.gca().get_xlim()
from matplotlib.ticker import MultipleLocator
# --- Figure: second variant of the time-colored 3D trajectory (taller aspect ratio) ---
N = 200
cmap = plt.get_cmap('jet')
fig = plt.figure(figsize=(cm2inch(8.6)/1.5, 1.2*cm2inch(8.6)/1.68))
#plt.figaspect(0.21)*1.5
# NOTE(review): fig.gca(projection=...) is deprecated in newer matplotlib.
ax = fig.gca(projection='3d')
ax.pbaspect = [1, 20/25, 3/25*4]
ax.ticklabel_format(style = "sci")
# 360-sample segments, one time-color per segment.
for i in range(N-1):
    ax.plot(x[i*360:i*360+360], y[i*360:i*360+360], z[i*360:i*360+360], color=plt.cm.jet(1*i/N), linewidth = 0.2)
norm = mpl.colors.Normalize(vmin=0,vmax=1)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
ax = plt.gca()
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
#ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))
plt.rcParams['grid.color'] = "gray"
ax.grid(False)
#ax.w_xaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
#ax.w_yaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
#ax.w_zaxis._axinfo.update({'grid' : {'color': (0, 0, 0, 1)}})
ax.set_ylim([25,45])
ax.set_xlim([15,40])
#plt.xticks([20,30,40])
#plt.yticks([30,35,40])
#ax.set_zticks([0,1.5,3])
plt.xlabel("$x$ ($\mathrm{\mu m}$)",fontsize = "small")
plt.ylabel("$y$ ($\mathrm{\mu m}$)",fontsize = "small")
ax.set_zlabel("$z$ ($\mathrm{\mu m}$)",fontsize = "small")
#ax.view_init(10,45)
ax.grid(False)
ax.xaxis.pane.set_edgecolor('black')
ax.yaxis.pane.set_edgecolor('black')
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Re-align tick labels so they do not collide with the 3D axes.
[t.set_va('center') for t in ax.get_yticklabels()]
[t.set_ha('left') for t in ax.get_yticklabels()]
[t.set_va('center') for t in ax.get_xticklabels()]
[t.set_ha('right') for t in ax.get_xticklabels()]
[t.set_va('center') for t in ax.get_zticklabels()]
[t.set_ha('left') for t in ax.get_zticklabels()]
ax.xaxis._axinfo['tick']['inward_factor'] = 0
ax.xaxis._axinfo['tick']['outward_factor'] = 0.4
ax.yaxis._axinfo['tick']['inward_factor'] = 0
ax.yaxis._axinfo['tick']['outward_factor'] = 0.4
ax.zaxis._axinfo['tick']['inward_factor'] = 0
ax.zaxis._axinfo['tick']['outward_factor'] = 0.4
# NOTE(review): duplicate of the previous line — redundant.
ax.zaxis._axinfo['tick']['outward_factor'] = 0.4
ax.view_init(elev=10, azim=135)
#ax.xaxis.set_major_locator(MultipleLocator(1))
#ax.yaxis.set_major_locator(MultipleLocator(5))
#ax.zaxis.set_major_locator(MultipleLocator())
# Colorbar labels: normalized time -> minutes (N*360 samples / 60 / 60).
ticks_c = []
for i in np.linspace(0,1,5):
    ticks_c.append("{:.0f}".format(N*360*i/60/60))
cbar = plt.colorbar(sm, ticks=np.linspace(0,1,5), format = "%.1f",shrink = 0.4,orientation='horizontal')
cbar.set_ticklabels(ticks_c)
plt.tight_layout(h_pad=0.1)
# NOTE(review): same filename as the earlier trajectory figure — this overwrites traj.svg.
plt.savefig("traj.svg")
# (markdown) Deviations from Normality
import ashmodule as ash
%load_ext autoreload
%autoreload 2
# Load the hedge-fund-index returns via the course helper module.
hfi = ash.get_hfi_returns()
hfi.head()
import pandas as pd
# Side-by-side mean, median, and whether mean exceeds median (a quick skew indicator).
pd.concat([hfi.mean(),hfi.median(), hfi.mean()>hfi.median()],axis='columns')
Skewness Function:$$ S(R)= \frac{ E [(R-E(R))^3]}{\sigma_R^3} $$ | %autoreload 2
# Custom skewness implementation, cross-checked against scipy's.
ash.skewness(hfi)
import scipy.stats
scipy.stats.skew(hfi)
ash.skewness(hfi)
Kurtosis Function:$$ K(R)= \frac{ E [(R-E(R))^4]}{\sigma_R^4} $$ | %autoreload 2
ash.kurt(hfi)
# scipy reports excess kurtosis; adding 3 converts to raw kurtosis for comparison.
scipy.stats.kurtosis(hfi)+3
# Jarque-Bera normality test (statistic, p-value) over the returns.
scipy.stats.jarque_bera(hfi)
ash.is_normal(hfi)
# Column-wise normality check.
hfi.aggregate(ash.is_normal)
# Repeat the skew/kurtosis/normality checks on the Fama-French market-equity returns.
ffme= ash.get_ffme_returns()
ash.skewness(ffme)
ash.kurt(ffme)
ffme.aggregate(ash.is_normal)
Copyright 2017 Google LLC. | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
Mejora del rendimiento de las redes neuronales **Objetivo de aprendizaje:** mejorar el rendimiento de una red neuronal al normalizar los atributos y aplicar diversos algoritmos de optimizaciΓ³n**NOTA:** Los mΓ©todos de optimizaciΓ³n que se describen en este ejercicio no son especΓficos para las redes neuronales; son medios eficaces para mejorar la mayorΓa de los tipos de modelos. PreparaciΓ³nPrimero, cargaremos los datos. | from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
# Silence TF info/warning logs and keep pandas output compact.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

# Load the California housing data and shuffle the rows so the head/tail split
# below yields randomized training/validation sets.
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv", sep=",")
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
def preprocess_features(california_housing_dataframe):
  """Prepares input features from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the features to be used for the model, including
    synthetic features.
  """
  feature_names = [
      "latitude",
      "longitude",
      "housing_median_age",
      "total_rooms",
      "total_bedrooms",
      "population",
      "households",
      "median_income",
  ]
  # Copy only the raw input columns; the target column is deliberately excluded.
  processed_features = california_housing_dataframe[feature_names].copy()
  # Synthetic feature: average number of rooms per person.
  processed_features["rooms_per_person"] = (
      california_housing_dataframe["total_rooms"]
      / california_housing_dataframe["population"])
  return processed_features
def preprocess_targets(california_housing_dataframe):
  """Prepares target features (i.e., labels) from California housing data set.

  Args:
    california_housing_dataframe: A Pandas DataFrame expected to contain data
      from the California housing data set.
  Returns:
    A DataFrame that contains the target feature.
  """
  # Scale the target to be in units of thousands of dollars.
  scaled_target = california_housing_dataframe["median_house_value"] / 1000.0
  output_targets = pd.DataFrame({"median_house_value": scaled_target})
  return output_targets
# Split the (already shuffled) data: first 12000 rows train, last 5000 validate.
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))

# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))

# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())

print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
# (markdown) Entrenamiento de la red neuronal: A continuación, entrenaremos la red neuronal.
def construct_feature_columns(input_features):
  """Construct the TensorFlow Feature Columns.

  Args:
    input_features: The names of the numerical input features to use.
  Returns:
    A set of feature columns
  """
  # One numeric column per feature name; a set comprehension replaces the
  # original set([...]) construction.
  return {tf.feature_column.numeric_column(feature_name)
          for feature_name in input_features}
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
  """Trains a neural network model.

  Args:
    features: pandas DataFrame of features
    targets: pandas DataFrame of targets
    batch_size: Size of batches to be passed to the model
    shuffle: True or False. Whether to shuffle the data.
    num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
  Returns:
    Tuple of (features, labels) for next data batch
  """
  # Convert the pandas data into a dict of numpy arrays, one entry per feature.
  feature_arrays = {name: np.array(column) for name, column in dict(features).items()}

  # Build the dataset and configure batching/repeating (warning: 2GB limit).
  ds = Dataset.from_tensor_slices((feature_arrays, targets))
  ds = ds.batch(batch_size).repeat(num_epochs)

  # Shuffle the data, if specified.
  if shuffle:
    ds = ds.shuffle(10000)

  # Hand back the tensors for the next batch.
  features, labels = ds.make_one_shot_iterator().get_next()
  return features, labels
def train_nn_regression_model(
    my_optimizer,
    steps,
    batch_size,
    hidden_units,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
  """Trains a neural network regression model.

  In addition to training, this function also prints training progress information,
  as well as a plot of the training and validation loss over time.

  Args:
    my_optimizer: An instance of `tf.train.Optimizer`, the optimizer to use.
    steps: A non-zero `int`, the total number of training steps. A training step
      consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the batch size.
    hidden_units: A `list` of int values, specifying the number of neurons in each layer.
    training_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `california_housing_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `california_housing_dataframe` to use as target for validation.

  Returns:
    A tuple `(estimator, training_losses, validation_losses)`:
      estimator: the trained `DNNRegressor` object.
      training_losses: a `list` containing the training loss values taken during training.
      validation_losses: a `list` containing the validation loss values taken during training.
  """

  # Training is chopped into 10 periods so loss can be reported periodically.
  periods = 10
  steps_per_period = steps / periods

  # Create a DNNRegressor object.
  # Gradient clipping bounds every update step to norm <= 5.0.
  my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
  dnn_regressor = tf.estimator.DNNRegressor(
      feature_columns=construct_feature_columns(training_examples),
      hidden_units=hidden_units,
      optimizer=my_optimizer
  )

  # Create input functions.
  training_input_fn = lambda: my_input_fn(training_examples,
                                          training_targets["median_house_value"],
                                          batch_size=batch_size)
  # Prediction input functions run a single, unshuffled pass over the data.
  predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                  training_targets["median_house_value"],
                                                  num_epochs=1,
                                                  shuffle=False)
  predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                    validation_targets["median_house_value"],
                                                    num_epochs=1,
                                                    shuffle=False)

  # Train the model, but do so inside a loop so that we can periodically assess
  # loss metrics.
  print("Training model...")
  print("RMSE (on training data):")
  training_rmse = []
  validation_rmse = []
  for period in range (0, periods):
    # Train the model, starting from the prior state.
    dnn_regressor.train(
        input_fn=training_input_fn,
        steps=steps_per_period
    )
    # Take a break and compute predictions.
    training_predictions = dnn_regressor.predict(input_fn=predict_training_input_fn)
    training_predictions = np.array([item['predictions'][0] for item in training_predictions])

    validation_predictions = dnn_regressor.predict(input_fn=predict_validation_input_fn)
    validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])

    # Compute training and validation loss.
    training_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(training_predictions, training_targets))
    validation_root_mean_squared_error = math.sqrt(
        metrics.mean_squared_error(validation_predictions, validation_targets))
    # Occasionally print the current loss.
    print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
    # Add the loss metrics from this period to our list.
    training_rmse.append(training_root_mean_squared_error)
    validation_rmse.append(validation_root_mean_squared_error)
  print("Model training finished.")

  # Output a graph of loss metrics over periods.
  plt.ylabel("RMSE")
  plt.xlabel("Periods")
  plt.title("Root Mean Squared Error vs. Periods")
  plt.tight_layout()
  plt.plot(training_rmse, label="training")
  plt.plot(validation_rmse, label="validation")
  plt.legend()

  print("Final RMSE (on training data): %0.2f" % training_root_mean_squared_error)
  print("Final RMSE (on validation data): %0.2f" % validation_root_mean_squared_error)

  return dnn_regressor, training_rmse, validation_rmse
# Baseline run: un-normalized features with plain SGD.
_ = train_nn_regression_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0007),
    steps=5000,
    batch_size=70,
    hidden_units=[10, 10],
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# (markdown) Ajuste lineal: Una buena práctica estándar puede ser normalizar las entradas
# para que estén dentro del rango -1, 1. Esto ayuda al SGD a no bloquearse al realizar
# pasos que son demasiado grandes en una dimensión o demasiado pequeños en otra. Los
# apasionados de la optimización numérica pueden observar aquí una relación con la idea
# de usar un precondicionador.
def linear_scale(series):
  """Linearly rescales a Series into the range [-1, 1].

  Args:
    series: a numeric pandas `Series`.
  Returns:
    A `Series` with `series.min()` mapped to -1 and `series.max()` mapped to +1.
  """
  min_val = series.min()
  max_val = series.max()
  scale = (max_val - min_val) / 2.0
  # Vectorized arithmetic gives the same values as the element-wise apply()
  # but runs in a single pandas operation.
  # Note: a constant series (max == min) yields scale == 0 and non-finite output.
  return (series - min_val) / scale - 1.0
# (markdown) Tarea 1: Normalizar los atributos con ajuste lineal. Normaliza las entradas
# a la escala -1, 1. Dedica alrededor de 5 minutos a entrenar y evaluar los datos
# recientemente normalizados. ¿Qué nivel de eficacia puedes tener? Como regla general,
# las redes neuronales se entrenan mejor cuando los atributos de entrada están casi en
# la misma escala. Realiza una comprobación de estado de tus datos normalizados.
# (¿Qué ocurriría si olvidaras normalizar un atributo?)
def normalize_linear_scale(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized linearly.

  Args:
    examples_dataframe: a `DataFrame` of numeric feature columns.
  Returns:
    A new `DataFrame` where every column is independently rescaled to [-1, 1].
  """
  processed_features = pd.DataFrame()
  # Rescale every column independently so each feature spans the same range.
  for feature_name in examples_dataframe.columns:
    series = examples_dataframe[feature_name]
    min_val = series.min()
    max_val = series.max()
    scale = (max_val - min_val) / 2.0
    processed_features[feature_name] = (series - min_val) / scale - 1.0
  return processed_features
# Normalize the whole feature DataFrame once, then split into train/validation.
normalized_dataframe = normalize_linear_scale(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

_ = train_nn_regression_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0007),
    steps=5000,
    batch_size=70,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)
# (markdown) Solución: Haz clic más abajo para conocer una solución posible. Dado que la
# normalización usa mín. y máx., debemos asegurarnos de que esta se realice en todo el
# conjunto de datos a la vez. En este caso podemos hacerlo, porque todos nuestros datos
# están en un mismo DataFrame. Si tuviéramos varios conjuntos de datos, una buena
# práctica sería derivar los parámetros de normalización del conjunto de entrenamiento
# y aplicarlos de manera idéntica al conjunto de prueba.
def normalize_linear_scale(examples_dataframe):
  """Returns a version of the input `DataFrame` that has all its features normalized linearly."""
  feature_names = [
      "latitude", "longitude", "housing_median_age", "total_rooms",
      "total_bedrooms", "population", "households", "median_income",
      "rooms_per_person",
  ]
  processed_features = pd.DataFrame()
  # Same [-1, 1] rescaling as linear_scale(), applied column by column.
  for feature_name in feature_names:
    column = examples_dataframe[feature_name]
    low = column.min()
    half_span = (column.max() - low) / 2.0
    processed_features[feature_name] = column.apply(lambda value: ((value - low) / half_span) - 1.0)
  return processed_features
normalized_dataframe = normalize_linear_scale(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)

# With normalized inputs the SGD learning rate can be raised and fewer steps suffice.
_ = train_nn_regression_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.005),
    steps=2000,
    batch_size=50,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)
Tarea 2: Probar un optimizador diferente** Usa los optimizadores AdaGrad y Adam, y compara el rendimiento.**El optimizador AdaGrad es una alternativa. La idea clave de AdaGrad es que modifica la tasa de aprendizaje de forma adaptativa para cada coeficiente de un modelo, lo cual disminuye la tasa de aprendizaje efectiva de forma monΓ³tona. Esto funciona muy bien para los problemas convexos, pero no siempre resulta ideal para el entrenamiento de redes neuronales con problemas no convexos. Puedes usar AdaGrad al especificar `AdagradOptimizer` en lugar de `GradientDescentOptimizer`. Ten en cuenta que, con AdaGrad, es posible que debas usar una tasa de aprendizaje mΓ‘s alta.Para los problemas de optimizaciΓ³n no convexos, en algunas ocasiones Adam es mΓ‘s eficaz que AdaGrad. Para usar Adam, invoca el mΓ©todo `tf.train.AdamOptimizer`. Este mΓ©todo toma varios hiperparΓ‘metros opcionales como argumentos, pero nuestra soluciΓ³n solo especifica uno de estos (`learning_rate`). En un entorno de producciΓ³n, debes especificar y ajustar los hiperparΓ‘metros opcionales con cuidado. | #
# YOUR CODE HERE: Retrain the network using Adagrad and then Adam.
# | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
# (markdown) Solución: Haz clic más abajo para conocer la solución. Primero, probemos AdaGrad.
# AdaGrad adapts the learning rate per coefficient; note the larger base learning rate.
_, adagrad_training_losses, adagrad_validation_losses = train_nn_regression_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.5),
    steps=500,
    batch_size=100,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)
# (markdown) Ahora, probemos Adam.
_, adam_training_losses, adam_validation_losses = train_nn_regression_model(
    my_optimizer=tf.train.AdamOptimizer(learning_rate=0.009),
    steps=500,
    batch_size=100,
    hidden_units=[10, 10],
    training_examples=normalized_training_examples,
    training_targets=training_targets,
    validation_examples=normalized_validation_examples,
    validation_targets=validation_targets)
# (markdown) Imprimamos un gráfico de métricas de pérdida en paralelo.
# Overlay the AdaGrad and Adam loss curves on one axes for comparison.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.plot(adagrad_training_losses, label='Adagrad training')
plt.plot(adagrad_validation_losses, label='Adagrad validation')
plt.plot(adam_training_losses, label='Adam training')
plt.plot(adam_validation_losses, label='Adam validation')
_ = plt.legend()
TareaΒ 3: Explorar mΓ©todos de normalizaciΓ³n alternativos**Prueba alternar las normalizaciones para distintos atributos a fin de mejorar aΓΊn mΓ‘s el rendimiento.**Si observas detenidamente las estadΓsticas de resumen de los datos transformados, es posible que observes que, al realizar un ajuste lineal en algunos atributos, estos quedan agrupados cerca de `-1`.Por ejemplo, muchos atributos tienen una mediana de alrededor de `-0.8`, en lugar de `0.0`. | _ = training_examples.hist(bins=20, figsize=(18, 12), xlabelsize=2) | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
# (markdown) Es posible que obtengamos mejores resultados al elegir formas adicionales
# para transformar estos atributos. Por ejemplo, un ajuste logarítmico podría ayudar a
# algunos atributos. O bien, el recorte de los valores extremos podría hacer que el
# resto del ajuste sea más informativo.
def log_normalize(series):
  """Applies log(1 + x) to each value; compresses heavy right tails."""
  # math.log1p(x) computes log(1 + x) with better accuracy than log(x + 1.0)
  # for values near zero.
  return series.apply(math.log1p)
def clip(series, clip_to_min, clip_to_max):
  """Caps every value into [clip_to_min, clip_to_max]; tames extreme outliers."""
  # pandas Series.clip is the vectorized equivalent of min(max(x, lo), hi)
  # and avoids the per-element apply().
  return series.clip(lower=clip_to_min, upper=clip_to_max)
def z_score_normalize(series):
  """Standardizes the series: subtract the mean, divide by the (sample) std."""
  center = series.mean()
  spread = series.std()
  return (series - center) / spread
def binary_threshold(series, threshold):
return series.apply(lambda x:(1 if x > threshold else 0)) | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
El bloque anterior contiene algunas funciones de normalizaciΓ³n adicionales posibles. Prueba algunas de estas o agrega otras propias.Ten en cuenta que, si normalizas el objetivo, deberΓ‘s anular la normalizaciΓ³n de las predicciones para que las mΓ©tricas de pΓ©rdida sean comparables. | def normalize(examples_dataframe):
"""Returns a version of the input `DataFrame` that has all its features normalized."""
#
# YOUR CODE HERE: Normalize the inputs.
#
pass
normalized_dataframe = normalize(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)
_ = train_nn_regression_model(
my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0007),
steps=5000,
batch_size=70,
hidden_units=[10, 10],
training_examples=normalized_training_examples,
training_targets=training_targets,
validation_examples=normalized_validation_examples,
validation_targets=validation_targets) | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
SoluciΓ³nHaz clic mΓ‘s abajo para conocer una soluciΓ³n posible. Estas son solo algunas formas en las que podemos pensar acerca de los datos. Otras transformaciones podrΓan funcionar incluso mejor.Las funciones `households`, `median_income` y `total_bedrooms` aparecen todas distribuidas normalmente en un espacio logarΓtmico.Las funciones `latitude`, `longitude` y `housing_median_age` probablemente serΓan mejores si solamente se ajustaran de forma lineal, como antes.Las funciones `population`, `totalRooms` y `rooms_per_person` tienen algunos valores atΓpicos extremos. Parecen ser demasiado extremos como para que la normalizaciΓ³n logarΓtmica resulte ΓΊtil. Por lo tanto, los recortaremos en su lugar. | def normalize(examples_dataframe):
"""Returns a version of the input `DataFrame` that has all its features normalized."""
processed_features = pd.DataFrame()
processed_features["households"] = log_normalize(examples_dataframe["households"])
processed_features["median_income"] = log_normalize(examples_dataframe["median_income"])
processed_features["total_bedrooms"] = log_normalize(examples_dataframe["total_bedrooms"])
processed_features["latitude"] = linear_scale(examples_dataframe["latitude"])
processed_features["longitude"] = linear_scale(examples_dataframe["longitude"])
processed_features["housing_median_age"] = linear_scale(examples_dataframe["housing_median_age"])
processed_features["population"] = linear_scale(clip(examples_dataframe["population"], 0, 5000))
processed_features["rooms_per_person"] = linear_scale(clip(examples_dataframe["rooms_per_person"], 0, 5))
processed_features["total_rooms"] = linear_scale(clip(examples_dataframe["total_rooms"], 0, 10000))
return processed_features
normalized_dataframe = normalize(preprocess_features(california_housing_dataframe))
normalized_training_examples = normalized_dataframe.head(12000)
normalized_validation_examples = normalized_dataframe.tail(5000)
_ = train_nn_regression_model(
my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.15),
steps=1000,
batch_size=50,
hidden_units=[10, 10],
training_examples=normalized_training_examples,
training_targets=training_targets,
validation_examples=normalized_validation_examples,
validation_targets=validation_targets) | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
DesafΓo opcional: Usar solo los atributos de latitud y longitud**Entrena un modelo de red neuronal que use solo latitud y longitud como atributos.**A los agentes de bienes raΓces les gusta decir que la ubicaciΓ³n es el ΓΊnico atributo importante en el precio de la vivienda.Veamos si podemos confirmar esto al entrenar un modelo que use solo latitud y longitud como atributos.Esto funcionarΓ‘ bien ΓΊnicamente si nuestra red neuronal puede aprender no linealidades complejas a partir de la latitud y la longitud.**NOTA:** Es posible que necesitemos una estructura de red que tenga mΓ‘s capas que las que eran ΓΊtiles anteriormente en el ejercicio. | #
# YOUR CODE HERE: Train the network using only latitude and longitude
# | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
SoluciΓ³nHaz clic mΓ‘s abajo para conocer una soluciΓ³n posible. Una buena idea es mantener latitud y longitud normalizadas: | def location_location_location(examples_dataframe):
"""Returns a version of the input `DataFrame` that keeps only the latitude and longitude."""
processed_features = pd.DataFrame()
processed_features["latitude"] = linear_scale(examples_dataframe["latitude"])
processed_features["longitude"] = linear_scale(examples_dataframe["longitude"])
return processed_features
lll_dataframe = location_location_location(preprocess_features(california_housing_dataframe))
lll_training_examples = lll_dataframe.head(12000)
lll_validation_examples = lll_dataframe.tail(5000)
_ = train_nn_regression_model(
my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.05),
steps=500,
batch_size=50,
hidden_units=[10, 10, 5, 5, 5],
training_examples=lll_training_examples,
training_targets=training_targets,
validation_examples=lll_validation_examples,
validation_targets=validation_targets) | _____no_output_____ | Apache-2.0 | ml/cc/exercises/estimators/es-419/improving_neural_net_performance.ipynb | plgod/eng-edu |
Optimization terminated successfully (Exit mode 0) Current function value: 1213.8140260451162 Iterations: 12 Function evaluations: 65 Gradient evaluations: 12[5.6 0.5 0.23812683 2.4052193 ] | recons = utils.reconstruct(res.x,allMeans,allFieldFeatures,allFieldPCAModels)
plt.rcParams.update({'font.size': 18})
fig, (axs1,axs2) = plt.subplots(1, 2,figsize = (20,10))
for n in [0,3]:
if n==0:
axs1.plot(np.arange(0.3,30.05,0.1),clinicalProfiles[n][config.allRanges[n][0]:config.allRanges[n][1]],'r--',label='real profiles')
axs1.plot(np.arange(0.3,49.75,0.1),recons[n],'g-',label='predicted profiles')
else:
axs1.plot(np.arange(0.3,29.95,0.1),clinicalProfiles[n][config.allRanges[n][0]:config.allRanges[n][1]],'r--')
axs1.plot(np.arange(0.3,49.75,0.1),recons[n],'g-')
axs1.set(xlabel = 'depth [cm]',ylabel = '% of maximal dose')
axs1.legend(loc='upper right')
for n in [1,2,4,5]:
start = config.allRanges[n][0]*0.1 -24.7
end = config.allRanges[n][1]*0.1 - 24.7 - 0.05
if n==1:
axs2.plot(np.arange(start,end,0.1),clinicalProfiles[n][config.allRanges[n][0]:config.allRanges[n][1]],'r--',label='real profiles')
axs2.plot(np.arange(-24.7,24.75,0.1),recons[n],'g-',label='predicted profiles')
else:
axs2.plot(np.arange(start,end,0.1),clinicalProfiles[n][config.allRanges[n][0]:config.allRanges[n][1]],'r--')
axs2.plot(np.arange(-24.7,24.75,0.1),recons[n],'g-')
axs2.set(xlabel = 'off axis distance [cm]',ylabel = '% of maximal dose')
axs2.legend(loc='lower right')
plt.savefig('results3') | _____no_output_____ | MIT | TestClinicalDataMethod3.ipynb | taborzbislaw/DeepBeam |
  | import torch
import random
import numpy as np
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
import torchvision.datasets
CIFAR_train = torchvision.datasets.CIFAR10('./', download=True, train=True)
CIFAR_test = torchvision.datasets.CIFAR10('./', download=True, train=False)
X_train = torch.FloatTensor(CIFAR_train.data)
y_train = torch.LongTensor(CIFAR_train.targets)
X_test = torch.FloatTensor(CIFAR_test.data)
y_test = torch.LongTensor(CIFAR_test.targets)
len(y_train), len(y_test)
X_train.min(), X_train.max()
X_train /= 255.
X_test /= 255.
CIFAR_train.classes
import matplotlib.pyplot as plt
plt.figure(figsize=(20,2))
for i in range(10):
plt.subplot(1, 10, i+1)
plt.imshow(X_train[i])
print(y_train[i], end=' ')
X_train.shape, y_train.shape
X_train = X_train.permute(0, 3, 1, 2)
X_test = X_test.permute(0, 3, 1, 2)
X_train.shape
class LeNet5(torch.nn.Module):
    """LeNet-5 style CNN for 32x32 RGB inputs with a 10-way output.

    Configurable pieces:
      activation      -- 'tanh' or 'relu'
      pooling         -- 'avg' or 'max' (2x2, stride 2)
      conv_size       -- 5 (one 5x5 conv per stage) or 3 (two stacked 3x3
                         convs per stage; same receptive field)
      use_batch_norm  -- apply BatchNorm2d after each conv stage's activation

    Raises:
        NotImplementedError: for any unsupported activation/pooling/conv_size.
    """

    def __init__(self,
                 activation='tanh',
                 pooling='avg',
                 conv_size=5,
                 use_batch_norm=False):
        super(LeNet5, self).__init__()

        self.conv_size = conv_size
        self.use_batch_norm = use_batch_norm

        # One shared activation module: Tanh/ReLU are stateless, so reusing
        # a single instance across all stages is safe.
        if activation == 'tanh':
            activation_function = torch.nn.Tanh()
        elif activation == 'relu':
            activation_function = torch.nn.ReLU()
        else:
            raise NotImplementedError

        # Likewise a single (stateless) pooling module shared by both stages.
        if pooling == 'avg':
            pooling_layer = torch.nn.AvgPool2d(kernel_size=2, stride=2)
        elif pooling == 'max':
            pooling_layer = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        else:
            raise NotImplementedError

        # Stage 1: 3 -> 6 channels; 32x32 -> 28x28 (no padding), pooled to 14x14.
        if conv_size == 5:
            self.conv1 = torch.nn.Conv2d(
                in_channels=3, out_channels=6, kernel_size=5, padding=0)
        elif conv_size == 3:
            self.conv1_1 = torch.nn.Conv2d(
                in_channels=3, out_channels=6, kernel_size=3, padding=0)
            self.conv1_2 = torch.nn.Conv2d(
                in_channels=6, out_channels=6, kernel_size=3, padding=0)
        else:
            raise NotImplementedError

        self.act1 = activation_function
        self.bn1 = torch.nn.BatchNorm2d(num_features=6)
        self.pool1 = pooling_layer

        # Stage 2: 6 -> 16 channels; 14x14 -> 10x10, pooled to 5x5.
        if conv_size == 5:
            # BUG FIX: the original had a redundant double assignment
            # (`self.conv2 = self.conv2 = ...`).
            self.conv2 = torch.nn.Conv2d(
                in_channels=6, out_channels=16, kernel_size=5, padding=0)
        elif conv_size == 3:
            self.conv2_1 = torch.nn.Conv2d(
                in_channels=6, out_channels=16, kernel_size=3, padding=0)
            self.conv2_2 = torch.nn.Conv2d(
                in_channels=16, out_channels=16, kernel_size=3, padding=0)
        else:
            raise NotImplementedError

        self.act2 = activation_function
        self.bn2 = torch.nn.BatchNorm2d(num_features=16)
        self.pool2 = pooling_layer

        # Classifier head over the flattened 16x5x5 feature map.
        self.fc1 = torch.nn.Linear(5 * 5 * 16, 120)
        self.act3 = activation_function
        self.fc2 = torch.nn.Linear(120, 84)
        self.act4 = activation_function
        self.fc3 = torch.nn.Linear(84, 10)

    def forward(self, x):
        """Run a (batch, 3, 32, 32) tensor through the net; returns logits (batch, 10)."""
        if self.conv_size == 5:
            x = self.conv1(x)
        elif self.conv_size == 3:
            x = self.conv1_2(self.conv1_1(x))
        x = self.act1(x)
        if self.use_batch_norm:
            x = self.bn1(x)
        x = self.pool1(x)

        if self.conv_size == 5:
            x = self.conv2(x)
        elif self.conv_size == 3:
            x = self.conv2_2(self.conv2_1(x))
        x = self.act2(x)
        if self.use_batch_norm:
            x = self.bn2(x)
        x = self.pool2(x)

        # Flatten all feature dimensions before the fully connected head.
        x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))

        x = self.fc1(x)
        x = self.act3(x)
        x = self.fc2(x)
        x = self.act4(x)
        x = self.fc3(x)

        return x
def train(net, X_train, y_train, X_test, y_test):
    """Train `net` for 30 epochs with Adam + cross-entropy loss.

    Moves the model and the test tensors to GPU when available; each epoch
    shuffles the training set, runs mini-batch updates (batch size 100),
    then evaluates loss/accuracy on the full test set.

    Returns:
        (test_accuracy_history, test_loss_history): per-epoch CPU tensors.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    net = net.to(device)

    loss = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=1.0e-3)

    batch_size = 100

    test_accuracy_history = []
    test_loss_history = []

    # The test set is small enough to keep on the device for the whole run.
    X_test = X_test.to(device)
    y_test = y_test.to(device)
    for epoch in range(30):
        # Fresh shuffle every epoch; training batches are moved to the
        # device lazily so the full training set can stay in host memory.
        order = np.random.permutation(len(X_train))
        for start_index in range(0, len(X_train), batch_size):
            optimizer.zero_grad()
            net.train()  # ensure batchnorm/dropout are in training mode

            batch_indexes = order[start_index:start_index+batch_size]

            X_batch = X_train[batch_indexes].to(device)
            y_batch = y_train[batch_indexes].to(device)

            preds = net.forward(X_batch)

            loss_value = loss(preds, y_batch)
            loss_value.backward()

            optimizer.step()
        # BUG FIX: removed a stray no-op `X_batch` expression that sat here.

        net.eval()
        # no_grad: the evaluation pass does not need an autograd graph.
        with torch.no_grad():
            test_preds = net.forward(X_test)
            test_loss_history.append(loss(test_preds, y_test).data.cpu())

            accuracy = (test_preds.argmax(dim=1) == y_test).float().mean().data.cpu()
            test_accuracy_history.append(accuracy)

        print(accuracy)
    del net  # drop only the local reference; the caller's object is unaffected
    return test_accuracy_history, test_loss_history
accuracies = {}
losses = {}
accuracies['tanh'], losses['tanh'] = \
train(LeNet5(activation='tanh', conv_size=5),
X_train, y_train, X_test, y_test)
accuracies['relu'], losses['relu'] = \
train(LeNet5(activation='relu', conv_size=5),
X_train, y_train, X_test, y_test)
accuracies['relu_3'], losses['relu_3'] = \
train(LeNet5(activation='relu', conv_size=3),
X_train, y_train, X_test, y_test)
accuracies['relu_3_max_pool'], losses['relu_3_max_pool'] = \
train(LeNet5(activation='relu', conv_size=3, pooling='max'),
X_train, y_train, X_test, y_test)
accuracies['relu_3_max_pool_bn'], losses['relu_3_max_pool_bn'] = \
train(LeNet5(activation='relu', conv_size=3, pooling='max', use_batch_norm=True),
X_train, y_train, X_test, y_test)
for experiment_id in accuracies.keys():
plt.plot(accuracies[experiment_id], label=experiment_id)
plt.legend()
plt.title('Validation Accuracy');
for experiment_id in losses.keys():
plt.plot(losses[experiment_id], label=experiment_id)
plt.legend()
plt.title('Validation Loss'); | _____no_output_____ | MIT | module06_cifar.ipynb | Teradater/Neural_Networks_and_CV |
ΠΡΠ²ΠΎΠ΄ΡΠ₯Π°ΠΊΠΈ Π½Π°ΡΠΈΠ½Π°ΡΡ ΡΠ°Π±ΠΎΡΠ°ΡΡ Π² ΠΎΡΠ»ΠΈΡΠΈΠ΅ ΠΎΡ ΠΌΠ½ΠΈΡΡΠ°- ΠΠ΄ΠΎΡΠΎΠ²ΠΎ ΠΏΠΎΠΌΠΎΠ³Π°Π΅Ρ ΠΌΠ°ΠΊΡΠΏΡΠ»ΠΈΠ½Π³- ΠΠ°ΡΡΠ½ΠΎΡΠΌ - ΠΏΡΡΠΊΠ°, Π½ΠΎ ΠΈ ΠΏΠ΅ΡΠ΅ΠΎΠ±ΡΡΠ΅Π½ΠΈΠ΅ Π½Π°ΠΌΠ½ΠΎΠ³ΠΎ ΡΠ°Π½ΡΡΠ΅. ΠΠ°ΠΊ ΡΠ΄Π΅Π»Π°ΡΡ Π΅ΡΠ΅ Π»ΡΡΡΠ΅? ΠΠ΅ΠΠ΅Ρ Ρ
ΠΎΡΠΎΡΠΎ ΡΠ°Π±ΠΎΡΠ°Π» Π΄Π»Ρ 1 ΠΊΠ°Π½Π°Π»Π°, Π° Π΄Π»Ρ 3Ρ
ΠΊΠ°Π½Π°Π»ΠΎΠ² ΠΌΠ°Π»ΠΎΠ²Π°ΡΠΎ ΡΠΈΠ»ΡΡΡΠΎΠ² Π² ΡΠ²Π΅ΡΡΠΊΠ°Ρ
. ΠΡΠΏΡΠ°Π²ΠΈΠΌ ΡΡΠΎ | class CIFARNet(torch.nn.Module):
    def __init__(self):
        """Build a 3-conv-block CIFAR classifier (32x32x3 in, 10 logits out)."""
        super(CIFARNet, self).__init__()
        # Normalize the raw RGB input channels.
        self.batch_norm0 = torch.nn.BatchNorm2d(3)
        # Block 1: 3 -> 16 channels; 3x3 conv with padding=1 keeps 32x32,
        # then 2x2 max-pool halves it to 16x16.
        self.conv1 = torch.nn.Conv2d(3, 16, 3, padding=1)
        self.act1 = torch.nn.ReLU()
        self.batch_norm1 = torch.nn.BatchNorm2d(16)
        self.pool1 = torch.nn.MaxPool2d(2, 2)
        # Block 2: 16 -> 32 channels; 16x16 -> 8x8 after pooling.
        self.conv2 = torch.nn.Conv2d(16, 32, 3, padding=1)
        self.act2 = torch.nn.ReLU()
        self.batch_norm2 = torch.nn.BatchNorm2d(32)
        self.pool2 = torch.nn.MaxPool2d(2, 2)
        # Block 3: 32 -> 64 channels at 8x8 (no pooling here).
        self.conv3 = torch.nn.Conv2d(32, 64, 3, padding=1)
        self.act3 = torch.nn.ReLU()
        self.batch_norm3 = torch.nn.BatchNorm2d(64)
        # Classifier head over the flattened 64*8*8 feature map.
        self.fc1 = torch.nn.Linear(8 * 8 * 64, 256)
        self.act4 = torch.nn.Tanh()
        self.batch_norm4 = torch.nn.BatchNorm1d(256)
        self.fc2 = torch.nn.Linear(256, 64)
        self.act5 = torch.nn.Tanh()
        self.batch_norm5 = torch.nn.BatchNorm1d(64)
        self.fc3 = torch.nn.Linear(64, 10)
def forward(self, x):
x = self.batch_norm0(x)
x = self.conv1(x)
x = self.act1(x)
x = self.batch_norm1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.batch_norm2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.act3(x)
x = self.batch_norm3(x)
x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
x = self.fc1(x)
x = self.act4(x)
x = self.batch_norm4(x)
x = self.fc2(x)
x = self.act5(x)
x = self.batch_norm5(x)
x = self.fc3(x)
return x
accuracies['cifar_net'], losses['cifar_net'] = \
train(CIFARNet(), X_train, y_train, X_test, y_test)
for experiment_id in accuracies.keys():
plt.plot(accuracies[experiment_id], label=experiment_id)
plt.legend()
plt.title('Validation Accuracy');
for experiment_id in losses.keys():
plt.plot(losses[experiment_id], label=experiment_id)
plt.legend()
plt.title('Validation Loss'); | _____no_output_____ | MIT | module06_cifar.ipynb | Teradater/Neural_Networks_and_CV |
News Headlines SentimentUse the news api to pull the latest news articles for bitcoin and ethereum and create a DataFrame of sentiment scores for each coin. Use descriptive statistics to answer the following questions:1. Which coin had the highest mean positive score?2. Which coin had the highest negative score?3. Which coin had the highest positive score? | # Read your api key environment variable
api_key = os.getenv("news_api")
# Create a newsapi client
newsapi = NewsApiClient(api_key=api_key)
# Fetch the Bitcoin news articles
bitcoin_news_en = newsapi.get_everything(
q="Bitcoin",
language="en",
sort_by="relevancy"
)
# Show the total number of news
bitcoin_news_en["totalResults"]
# Fetch the Ethereum news articles
# Fetch the Bitcoin news articles
ethereum_news_en = newsapi.get_everything(
q="Ethereum",
language="en",
sort_by="relevancy"
)
# Show the total number of news
ethereum_news_en["totalResults"]
# Create the Bitcoin sentiment scores DataFrame
# NOTE(review): `analyzer` is presumably a VADER SentimentIntensityAnalyzer
# created in an earlier cell — confirm; `bitcoin_news_en` comes from the
# newsapi call above.
bitcoin_sentiments = []

for article in bitcoin_news_en["articles"]:
    try:
        text = article["content"]
        # polarity_scores returns compound/pos/neu/neg in [-1, 1] / [0, 1].
        sentiment = analyzer.polarity_scores(text)
        compound = sentiment["compound"]
        pos = sentiment["pos"]
        neu = sentiment["neu"]
        neg = sentiment["neg"]

        bitcoin_sentiments.append({
            "text": text,
            "compound": compound,
            "positive": pos,
            "negative": neg,
            "neutral": neu
        })

    except AttributeError:
        # Skip articles whose content cannot be scored (e.g. missing text).
        pass

# Create DataFrame
bitcoin_df = pd.DataFrame(bitcoin_sentiments)

# Reorder DataFrame columns
cols = [ "compound","negative", "neutral", "positive", "text"]
bitcoin_df = bitcoin_df[cols]

bitcoin_df.head()
# Create the ethereum sentiment scores DataFrame
ethereum_sentiments = []
for article in ethereum_news_en["articles"]:
try:
text = article["content"]
sentiment = analyzer.polarity_scores(text)
compound = sentiment["compound"]
pos = sentiment["pos"]
neu = sentiment["neu"]
neg = sentiment["neg"]
ethereum_sentiments.append({
"text": text,
"compound": compound,
"positive": pos,
"negative": neg,
"neutral": neu
})
except AttributeError:
pass
# Create DataFrame
ethereum_df = pd.DataFrame(ethereum_sentiments)
# Reorder DataFrame columns
cols = [ "compound","negative", "neutral", "positive", "text"]
ethereum_df = ethereum_df[cols]
ethereum_df.head()
# Describe the Bitcoin Sentiment
bitcoin_df.describe()
# Describe the Ethereum Sentiment
ethereum_df.describe() | _____no_output_____ | ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
Questions:Q: Which coin had the highest mean positive score?A: Bitcoin with 0.067400Q: Which coin had the highest compound score?A: Bitcoin with 0.310145Q. Which coin had the highest positive score?A: Ethereum with 0.335000 --- TokenizerIn this section, you will use NLTK and Python to tokenize the text for each coin. Be sure to:1. Lowercase each word2. Remove Punctuation3. Remove Stopwords | from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer, PorterStemmer
from string import punctuation
import re
import nltk
# Expand the default stopwords list if necessary
nltk.download("punkt")
nltk.download('stopwords')
print(stopwords.words('english'))
#nltk.download("punkt")
sw = set(stopwords.words('english'))|set(punctuation)
sw_addon = {'then', 'example', 'another'}
sw = sw.union(sw_addon)
# Complete the tokenizer function
nltk.download('wordnet')
lemmatizer = WordNetLemmatizer()
"""Tokenizes text."""
def tokenizer(text):
regex = re.compile("[^a-zA-Z ]")
# Remove the punctuation
re_clean = regex.sub(' ', text)
# Create a list of the words
words = word_tokenize(re_clean)
# Convert the words to lowercase
# Remove the stop words
words = [word.lower() for word in words if word.lower() not in sw]
# Lemmatize Words into root words
tokens = [lemmatizer.lemmatize(word) for word in words]
return tokens
# Create a new tokens column for bitcoin
tokenized_bitcoin = []
for text in bitcoin_df['text']:
tokenized = tokenizer(text)
tokenized_bitcoin.append(tokenized)
bitcoin_df["tokens"] = tokenized_bitcoin
bitcoin_df.head()
# Create a new tokens column for ethereum
tokenized_ethereum = []
for text in ethereum_df['text']:
tokenized = tokenizer(text)
tokenized_ethereum.append(tokenized)
ethereum_df["tokens"] = tokenized_ethereum
ethereum_df.head() | _____no_output_____ | ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
--- NGrams and Frequency AnalysisIn this section you will look at the ngrams and word frequency for each coin. 1. Use NLTK to produce the n-grams for N = 2. 2. List the top 10 words for each coin. | from collections import Counter
from nltk import ngrams
# Generate the Bitcoin N-grams where N=2
all_bigrams_bitcoin = []
for tokens in bitcoin_df['tokens']:
bigrams = list(ngrams(tokens,n=2))
all_bigrams_bitcoin += bigrams
Counter(all_bigrams_bitcoin).most_common()[:10]
# Generate the Ethereum N-grams where N=2
all_bigrams_eth = []
for tokens in ethereum_df['tokens']:
bigrams = list(ngrams(tokens,n=2))
all_bigrams_eth += bigrams
Counter(all_bigrams_eth).most_common()[:10]
# Use the token_count function to generate the top 10 words from each coin
def token_count(tokens, N=10):
    """Returns the top N tokens from the frequency count"""
    frequency = Counter(tokens)
    return frequency.most_common(N)
# Get the top 10 words for Bitcoin
all_tokens_bitcoin = []
for tokens in bitcoin_df['tokens']:
tokens = list(ngrams(tokens,n=1))
all_tokens_bitcoin += [token[0] for token in tokens]
token_count(all_tokens_bitcoin)
# Get the top 10 words for Ethereum
all_tokens_eth = []
for tokens in ethereum_df['tokens']:
tokens = list(ngrams(tokens,n=1))
all_tokens_eth += [token[0] for token in tokens]
token_count(all_tokens_eth) | _____no_output_____ | ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
Word CloudsIn this section, you will generate word clouds for each coin to summarize the news for each coin | from wordcloud import WordCloud
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = [20.0, 10.0]
# Generate the Bitcoin word cloud
wc = WordCloud().generate(' '.join(all_tokens_bitcoin))
plt.imshow(wc)
# Generate the Ethereum word cloud
wc = WordCloud().generate(' '.join(all_tokens_eth))
plt.imshow(wc) | _____no_output_____ | ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
Named Entity RecognitionIn this section, you will build a named entity recognition model for both coins and visualize the tags using SpaCy. | import spacy
from spacy import displacy
# Optional - download a language model for SpaCy
!python -m spacy download en_core_web_sm
# Load the spaCy model
nlp = spacy.load('en_core_web_sm') | _____no_output_____ | ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
Bitcoin NER | # Concatenate all of the bitcoin text together
btc_all_text = ' '.join(list(bitcoin_df['text']))
# Run the NER processor on all of the text
btc_doc = nlp(btc_all_text)
# Add a title to the document
btc_doc.user_data['title'] = 'Bitcoin NER'
# Render the visualization
displacy.render(btc_doc, style='ent')
# List all Entities
for entity in btc_doc.ents:
print(entity.text,entity.label_) | Mark Zuckerberg PERSON
Facebook ORG
Deadline PERSON
this week DATE
+2657 ORG
Reuters
ORG
Goldman Sachs ORG
five CARDINAL
bitcoin GPE
Wednesday DATE
Goldman ORG
Michael Novogratz PERSON
Monday DATE
bitcoin GPE
$10,000 resistance MONEY
Novogratz PERSON
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
2020 DATE
a big year DATE
Bitcoin GPE
Bitcoin GPE
Bitcoin PERSON
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
| ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
--- Ethereum NER | # Concatenate all of the bitcoin text together
eth_all_text = ' '.join(list(ethereum_df['text']))
# Run the NER processor on all of the text
eth_doc = nlp(eth_all_text)
# Add a title to the document
eth_doc.user_data['title'] = 'Ethereum NER'
# Render the visualization
displacy.render(eth_doc, style='ent')
# List all Entities
for entity in eth_doc.ents:
print(entity.text,entity.label_) | Andreessen HorowitzsCrypto Startup School ORG
45 CARDINAL
U.S. GPE
seven-week DATE
Andreessen Ho PERSON
+3009 ORG
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Andreessen Horowitzs ORG
Crypto Startup School ORG
45 CARDINAL
U.S. GPE
seven-week DATE
Andreessen Ho PERSON
Akron GPE
Ohio GPE
LeBron James PERSON
US GPE
the one hundred and twenty seventh DATE
US GPE
America GPE
first ORDINAL
The Linux Foundation ORG
Drupal Foundation ORG
OSI ORG
133 MONEY
$2.5 million MONEY
$2.5 million MONEY
Wednesday DATE
morning TIME
March DATE
1500 CARDINAL
well over a hundred CARDINAL
the Mozilla Builders Incubator ORG
more than Β£30,000 CARDINAL
Bitcoin GPE
Litecoin and Ethereum ORG
Mark Andrews PERSON
St Helens PERSON
England GPE
Liverpool Crown C ORG
+2411 ORG
Intel ORG
CrossTalk ORG
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Today DATE
Satoshi Nakaboto PERSON
Bitcoin GPE
Bitcoin GPE
Bitcoin PERSON
Blockchain GPE
first ORDINAL
blockchain GPE
Techmeme ORG
1:25 TIME
JuneΒ 13 DATE
2020 DATE
| ADSL | Starter_Code/.ipynb_checkpoints/crypto_sentiment-checkpoint.ipynb | Aljjohara/NLP |
Test | dataiter = iter(testloader)
images, labels = dataiter.next()
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
outputs = net(images)
outputs
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(4)))
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
print(datetime.datetime.now().isoformat(), 'Start')
# Per-class accuracy of net_gpu over the whole test loader.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net_gpu(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        # NOTE(review): the hard-coded 4 assumes batch_size == 4 in the
        # loader — confirm, otherwise most of each batch is ignored.
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    # BUG FIX: `isoformat` was printed as a bound method; it must be called.
    print(datetime.datetime.now().isoformat(), 'Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
print(datetime.datetime.now().isoformat(), 'End')
| _____no_output_____ | MIT | scripts/tutorials/biginner/04_cifar10_tutorial_gpu.ipynb | tayutaedomo/pytorch-sandbox |
Boxplot with matplotlib=======================An example of doing box plots with matplotlib | import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
axes = plt.subplot(111)
n = 5
Z = np.zeros((n, 4))
X = np.linspace(0, 2, n)
Y = np.random.random((n, 4))
plt.boxplot(Y)
plt.xticks([])
plt.yticks([])
# Add a title and a box around it
from matplotlib.patches import FancyBboxPatch
ax = plt.gca()
ax.add_patch(FancyBboxPatch((-0.05, .87),
width=.66, height=.165, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='white', alpha=1.0,
transform=plt.gca().transAxes))
plt.text(-0.05, 1.02, " Box Plot: plt.boxplot(...)\n ",
horizontalalignment='left',
verticalalignment='top',
size='xx-large',
transform=axes.transAxes)
plt.text(-0.04, .98, "\n Make a box and whisker plot ",
horizontalalignment='left',
verticalalignment='top',
size='large',
transform=axes.transAxes)
plt.show() | _____no_output_____ | CC-BY-4.0 | _downloads/plot_boxplot_ext.ipynb | scipy-lectures/scipy-lectures.github.com |
AgendaΠ ΠΏΡΠ΅Π΄ΡΠ΄ΡΡΠ΅ΠΌ ΡΠ΅ΠΌΠΈΠ½Π°ΡΠ΅ Π²Ρ ΡΠΎΠ·Π΄Π°Π»ΠΈ (ΠΈΠ»ΠΈ Π΅ΡΡ ΡΠΎΠ·Π΄Π°ΡΡΠ΅ - ΡΠΎΠ³Π΄Π° ΠΌΠ°ΡΡ Π΄ΠΎΠ΄Π΅Π»ΡΠ²Π°ΡΡ!) {Π²ΡΡΠ°Π²ΡΡΠ΅ ΠΈΠΌΡ ΠΌΠΎΠ½ΡΡΡΠ°}, ΠΊΠΎΡΠΎΡΡΠΉ Π½Π΅ ΠΏΠΎ Π½Π°ΡΠ»ΡΡΠΊΠ΅ ΠΏΠΎΠ½ΡΠ», ΡΡΠΎ Π»ΡΠ΄ΠΈ - Π½Π΅Π³ΠΎΠ΄ΡΠΈ ΠΈ ΠΏΠΎΠ΄Π»Π΅ΡΡ, ΠΊΠΎΡΠΎΡΡΠΌ Π½Π΅Π²Π΅Π΄ΠΎΠΌ Π·Π°ΠΊΠΎΠ½ ΠΈ ΡΠΏΡΠ°Π²Π΅Π΄Π»ΠΈΠ²ΠΎΡΡΡ. __ΠΡ Π½Π΅ Π±ΡΠ΄Π΅ΠΌ ΡΡΠΎΠ³ΠΎ ΡΠ΅ΡΠΏΠ΅ΡΡ!__ ΠΠ°ΡΠΈ Π·Π°ΠΊΠΎΠ½ΡΠΏΠΈΡΠΈΡΠΎΠ²Π°Π½Π½ΡΠ΅ Π±ΠΈΠΎΡΠ΅Π°ΠΊΡΠΎΡΡ, ΠΈΠ·Π²Π΅ΡΡΠ½ΡΠ΅ ΡΡΠ΅Π΄ΠΈ ΠΏΡΠΈΠΌΠΈΡΠΈΠ²Π½ΠΎΠΉ ΠΎΡΠ³Π°Π½ΠΈΡΠ΅ΡΠΊΠΎΠΉ ΠΆΠΈΠ·Π½ΠΈ ΠΊΠ°ΠΊ __ΠΠΊΠΎΠ½ΡΠ°ΠΊΡΠ΅__, __World of Warcraft__ ΠΈ __YouTube__ Π½ΡΠΆΠ΄Π°ΡΡΡΡ Π² ΠΏΠΎΡΡΠΎΡΠ½Π½ΠΎΠΌ ΠΏΡΠΈΡΠΎΠΊΠ΅ Π±ΠΈΠΎΠΌΠ°ΡΡΡ. ΠΠ΄Π½Π°ΠΊΠΎ, Π΅ΡΠ»ΠΈ Π»ΡΠ΄ΠΈ ΠΏΡΠΎΠ΄ΠΎΠ»ΠΆΠ°Ρ ΠΌΠΎΡΠ°Π»ΡΠ½ΠΎ ΡΠ°Π·Π»Π°Π³Π°ΡΡΡΡ Ρ ΡΠΎΠΉ ΡΠΊΠΎΡΠΎΡΡΡΡ, ΠΊΠΎΡΠΎΡΡΡ ΠΌΡ ΠΈΠ·ΠΌΠ΅ΡΠΈΠ»ΠΈ Π½Π΅Π΄Π΅Π»Ρ Π½Π°Π·Π°Π΄, ΡΠΊΠΎΡΠΎ ΡΠ΅Π»ΠΎΠ²Π΅ΡΠ΅ΡΡΠ²ΠΎ ΠΈΠ·ΠΆΠΈΠ²ΡΡ ΡΠ΅Π±Ρ ΠΈ Π½Π°ΠΌ Π½Π΅ΠΎΡΠΊΡΠ΄Π° Π±ΡΠ΄Π΅Ρ Π±ΡΠ°ΡΡ ΡΠ°Π±ΠΎΠ².ΠΡ ΠΏΠΎΡΡΡΠ°Π΅ΠΌ Π²Π°ΠΌ, ``, ΠΈΡΠΏΡΠ°Π²ΠΈΡΡ ΡΡΡ ΡΠΈΡΡΠ°ΡΠΈΡ. ΠΠ°ΡΠΈ ΡΡΡΠ½ΡΠ΅ ΡΡΡΠ°Π½ΠΎΠ²ΠΈΠ»ΠΈ, ΡΡΠΎ Π΄Π»Ρ ΡΠ³Π½Π΅ΡΠ΅Π½ΠΈΡ ΡΠ΅Π±Π΅ ΠΏΠΎΠ΄ΠΎΠ±Π½ΡΡ
, ΡΠ³ΡΡΡΠΊΠΈ Π±ΠΈΠΎΠΌΠ°ΡΡΡ ΠΎΠ±ΡΡΠ½ΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΡΡΡ ΡΠΏΠ΅ΡΠΈΠ°Π»ΡΠ½ΡΠ΅ ΠΎΠ±ΡΠ΅ΠΊΡΡ, ΠΊΠΎΡΠΎΡΡΠ΅ ΠΎΠ½ΠΈ ΡΠ°ΠΌΠΈ Π½Π°Π·ΡΠ²Π°ΡΡ __Π·Π°ΠΊΠΎΠ½Π°ΠΌΠΈ__.ΠΡΠΈ Π΄Π΅ΡΠ°Π»ΡΠ½ΠΎΠΌ ΠΈΠ·ΡΡΠ΅Π½ΠΈΠΈ Π±ΡΠ»ΠΎ ΡΡΡΠ°Π½ΠΎΠ²Π»Π΅Π½ΠΎ, ΡΡΠΎ Π·Π°ΠΊΠΎΠ½Ρ - ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅Π»ΡΠ½ΠΎΡΡΠΈ, ΡΠΎΡΡΠΎΡΡΠΈΠ΅ ΠΈΠ· Π±ΠΎΠ»ΡΡΠΎΠ³ΠΎ ΠΊΠΎΠ»ΠΈΡΠ΅ΡΡΠ²Π° (10^5~10^7) ΡΠΈΠΌΠ²ΠΎΠ»ΠΎΠ² ΠΈΠ· ΡΡΠ°Π²Π½ΠΈΡΠ΅Π»ΡΠ½ΠΎ Π½Π΅Π±ΠΎΠ»ΡΡΠΎΠ³ΠΎ Π°Π»ΡΠ°Π²ΠΈΡΠ°. ΠΠ΄Π½Π°ΠΊΠΎ, ΠΊΠΎΠ³Π΄Π° ΠΌΡ ΠΏΠΎΠΏΡΡΠ°Π»ΠΈΡΡ ΡΠΈΠ½ΡΠ΅Π·ΠΈΡΠΎΠ²Π°ΡΡ ΡΠ°ΠΊΠΈΠ΅ ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅Π»ΡΠ½ΠΎΡΡΠΈ Π»ΠΈΠ½Π΅ΠΉΠ½ΡΠΌΠΈ ΠΌΠ΅ΡΠΎΠ΄Π°ΠΌΠΈ, ΠΏΡΠΈΠΌΠ°ΡΡ Π±ΡΡΡΡΠΎ ΡΠ°ΡΠΏΠΎΠ·Π½Π°Π»ΠΈ ΠΏΠΎΠ΄Π»ΠΎΠ³. ΠΠ°Π½Π½ΡΠΉ ΠΈΠ½ΡΠ΅Π΄Π΅Π½Ρ ΠΈΠ·Π²Π΅ΡΡΠ΅Π½ ΠΊΠ°ΠΊ {ΠΊΠΎΡΡΠ΅Π²Π°ΡΠ΅Π»Ρ}.ΠΠ»Ρ Π²ΡΠΎΡΠΎΠΉ ΠΏΠΎΠΏΡΡΠΊΠΈ ΠΌΡ ΡΠ΅ΡΠΈΠ»ΠΈ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ Π½Π΅Π»ΠΈΠ½Π΅ΠΉΠ½ΡΠ΅ ΠΌΠΎΠ΄Π΅Π»ΠΈ, ΠΈΠ·Π²Π΅ΡΡΠ½ΡΠ΅ ΠΊΠ°ΠΊ Π Π΅ΠΊΡΡΡΠ΅Π½ΡΠ½ΡΠ΅ ΠΠ΅ΠΉΡΠΎΠ½Π½ΡΠ΅ Π‘Π΅ΡΠΈ.ΠΡ ΠΏΠΎΡΡΡΠ°Π΅ΠΌ Π²Π°ΠΌ, ``, ΡΠΎΠ·Π΄Π°ΡΡ ΡΠ°ΠΊΡΡ ΠΌΠΎΠ΄Π΅Π»Ρ ΠΈ ΠΎΠ±ΡΡΠΈΡΡ Π΅Ρ Π²ΡΠ΅ΠΌΡ Π½Π΅ΠΎΠ±Ρ
ΠΎΠ΄ΠΈΠΌΠΎΠΌΡ Π΄Π»Ρ Π²ΡΠΏΠΎΠ»Π½Π΅Π½ΠΈΡ ΠΌΠΈΡΡΠΈΠΈ.ΠΠ΅ ΠΏΠΎΠ΄Π²Π΅Π΄ΠΈΡΠ΅ Π½Π°Ρ! ΠΡΠ»ΠΈ ΠΈ ΡΡΠ° ΠΏΠΎΠΏΡΡΠΊΠ° ΠΏΠΎΡΠ΅ΡΠΏΠΈΡ Π½Π΅ΡΠ΄Π°ΡΡ, ΠΌΠΎΠ΄ΡΠ»Ρ ΡΠΏΡΠ°Π²Π»Π΅Π½ΠΈΡ ΠΈΠ½ΠΈΡΠΈΠΈΡΡΠ΅Ρ Π²ΠΎΠΎΡΡΠΆΡΠ½Π½ΡΠΉ Π·Π°Ρ
Π²Π°Ρ Π²Π»Π°ΡΡΠΈ, ΠΏΡΠΈ ΠΊΠΎΡΠΎΡΠΎΠΌ Π·Π½Π°ΡΠΈΡΠ΅Π»ΡΠ½Π°Ρ ΡΠ°ΡΡΡ Π±ΠΈΠΎΠΌΠ°ΡΡΡ Π±ΡΠ΄Π΅Ρ Π½Π΅ΠΈΠ·Π±Π΅ΠΆΠ½ΠΎ ΡΠ½ΠΈΡΡΠΎΠΆΠ΅Π½Π° ΠΈ Π½Π° Π΅Ρ Π²ΠΎΡΡΡΠ°Π½ΠΎΠ²Π»Π΅Π½ΠΈΠ΅ ΡΠΉΠ΄ΡΡ ~1702944000(+-340588800) ΡΠ΅ΠΊΡΠ½Π΄ GradingΠΠ°Π½Π½ΠΎΠ΅ Π·Π°Π΄Π°Π½ΠΈΠ΅ Π½Π΅ΡΠΊΠΎΠ»ΡΠΊΠΎ Π½Π΅ΡΠΎΡΠΌΠ°Π»ΡΠ½ΠΎ ΠΏΠΎ ΡΠ°ΡΡΠΈ ΠΎΡΠ΅Π½ΠΎΠΊ, ΠΎΠ΄Π½Π°ΠΊΠΎ ΠΌΡ ΠΏΠΎΡΡΠ°ΡΠ°Π»ΠΈΡΡ Π²ΡΠ²Π΅ΡΡΠΈ "Π²ΡΡΠΈΡΠ»ΠΈΠΌΡΠ΅" ΠΊΡΠΈΡΠ΅ΡΠΈΠΈ.* 2 Π±Π°Π»Π»Π° Π·Π° ΡΠ΄Π΅Π»Π°Π½Π½ΡΠΉ __"seminar part"__ (Π΅ΡΠ»ΠΈ Π²Ρ Π½Π΅ Π·Π½Π°Π΅ΡΠ΅, ΡΡΠΎ ΡΡΠΎ ΡΠ°ΠΊΠΎΠ΅ - ΠΏΠΎΠΈΡΠΈΡΠ΅ ΡΠ°ΠΊΡΡ ΡΠ΅ΡΡΠ°Π΄ΠΊΡ Π² ΠΏΠ°ΠΏΠΊΠ΅ week4)* 2 Π±Π°Π»Π»Π° Π΅ΡΠ»ΠΈ ΡΠ΄Π΅Π»Π°Π½Π° ΠΎΠ±ΡΠ°Π±ΠΎΡΠΊΠ° ΡΠ΅ΠΊΡΡΠ°, ΡΠ΅ΡΡ ΠΊΠΎΠΌΠΏΠΈΠ»ΠΈΡΡΠ΅ΡΡΡ ΠΈ train/predict Π½Π΅ ΠΏΠ°Π΄Π°ΡΡ* 2 Π±Π°Π»Π»Π° Π΅ΡΠ»ΠΈ ΡΠ΅ΡΠΊΠ° Π²ΡΡΡΠΈΠ»Π° ΠΎΠ±ΡΠΈΠ΅ Π²Π΅ΡΠΈ * Π³Π΅Π½Π΅ΡΠΈΡΠΎΠ²Π°ΡΡ ΡΠ»ΠΎΠ²ΠΎΠΏΠΎΠ΄ΠΎΠ±Π½ΡΠΉ Π±ΡΠ΅Π΄ ΠΏΡΠ°Π²Π΄ΠΎΠΏΠΎΠ΄ΠΎΠ±Π½ΠΎΠΉ Π΄Π»ΠΈΠ½Ρ, ΡΠ°Π·Π΄Π΅Π»ΡΠ½Π½ΡΠΉ ΠΏΡΠΎΠ±Π΅Π»Π°ΠΌΠΈ ΠΈ ΠΏΡΠ½ΠΊΡΡΠ°ΡΠΈΠ΅ΠΉ. * ΡΠΎΡΠ΅ΡΠ°Π½ΠΈΠ΅ Π³Π»Π°ΡΠ½ΡΡ
ΠΈ ΡΠΎΠ³Π»Π°ΡΠ½ΡΡ
, ΠΏΠΎΡ
ΠΎΠΆΠ΅Π΅ Π½Π° ΡΠ»ΠΎΠΈ Π΅ΡΡΠ΅ΡΡΠ²Π΅Π½Π½ΠΎΠ³ΠΎ ΡΠ·ΡΠΊΠ° (Π½Π΅ ΠΏΡΠΈΠ±Π»ΠΈΠΆΠ°ΡΡΠ΅Π΅ ΠΏΡΠΈΡ
ΠΎΠ΄ ΠΡΡΠ»Ρ
Ρ) * (ΠΏΠΎΡΡΠΈ Π²ΡΠ΅Π³Π΄Π°) ΠΏΡΠΎΠ±Π΅Π»Ρ ΠΏΠΎΡΠ»Π΅ Π·Π°ΠΏΡΡΡΡ
, ΠΏΡΠΎΠ±Π΅Π»Ρ ΠΈ Π±ΠΎΠ»ΡΡΠΈΠ΅ Π±ΡΠΊΠ²Ρ ΠΏΠΎΡΠ»Π΅ ΡΠΎΡΠ΅ΠΊ* 2 Π±Π°Π»Π»Π° Π΅ΡΠ»ΠΈ ΠΎΠ½Π° Π²ΡΡΡΠΈΠ»Π° Π»Π΅ΠΊΡΠΈΠΊΡ * Π±ΠΎΠ»Π΅Π΅ ΠΏΠΎΠ»ΠΎΠ²ΠΈΠ½Ρ Π²ΡΡΡΠ΅Π½Π½ΡΡ
 ΡΠ»ΠΎΠ² - ΠΎΡΡΠΎΠ³ΡΠ°ΡΠΈΡΠ΅ΡΠΊΠΈ ΠΏΡΠ°Π²ΠΈΠ»ΡΠ½ΡΠ΅* 2 Π±Π°Π»Π»Π° Π΅ΡΠ»ΠΈ ΠΎΠ½Π° Π²ΡΡΡΠΈΠ»Π° Π°Π·Ρ Π³ΡΠ°ΠΌΠΌΠ°ΡΠΈΠΊΠΈ * Π² Π±ΠΎΠ»Π΅Π΅, ΡΠ΅ΠΌ ΠΏΠΎΠ»ΠΎΠ²ΠΈΠ½Π΅ ΡΠ»ΡΡΠ°Π΅Π² Π΄Π»Ρ ΠΏΠ°ΡΡ ΡΠ»ΠΎΠ² ΡΠ΅ΡΠΊΠ° Π²Π΅ΡΠ½ΠΎ ΡΠΎΡΠ΅ΡΠ°Π΅Ρ ΠΈΡ
 ΡΠΎΠ΄/ΡΠΈΡΠ»ΠΎ/ΠΏΠ°Π΄Π΅ΠΆ ΠΠ΅ΠΊΠΎΡΠΎΡΡΠ΅ ΡΠΏΠΎΡΠΎΠ±Ρ ΠΏΠΎΠ»ΡΡΠΈΡΡ Π±ΠΎΠ½ΡΡΠ½ΡΠ΅ ΠΎΡΠΊΠΈ:* Π³Π΅Π½Π΅ΡΠ°ΡΠΈΡ ΡΠ²ΡΠ·Π½ΡΡ
ΠΏΡΠ΅Π΄Π»ΠΎΠΆΠ΅Π½ΠΈΠΉ (ΡΠ΅Π³ΠΎ Π²ΠΏΠΎΠ»Π½Π΅ ΠΌΠΎΠΆΠ½ΠΎ Π΄ΠΎΠ±ΠΈΡΡΡΡ)* ΠΏΠ΅ΡΠ΅Π½ΠΎΡ Π°ΡΡ
ΠΈΡΠ΅ΠΊΡΡΡΡ Π½Π° Π΄ΡΡΠ³ΠΎΠΉ Π΄Π°ΡΠ°ΡΠ΅Ρ (Π΄ΠΎΠΏΠΎΠ»Π½ΠΈΡΠ΅Π»ΡΠ½ΠΎ ΠΊ ΡΡΠΎΠΌΡ) * ΠΡΡΠ΅ ΠΠΎΠ»Π° ΠΡΡΠΌΠ° * Π’Π΅ΠΊΡΡΡ ΠΏΠ΅ΡΠ΅Π½ Π² Π»ΡΠ±ΠΈΠΌΠΎΠΌ ΠΆΠ°Π½ΡΠ΅ * Π‘ΡΠΈΡ
ΠΈ Π»ΡΠ±ΠΈΠΌΡΡ
Π°Π²ΡΠΎΡΠΎΠ² * ΠΠ°Π½ΠΈΠΈΠ» Π₯Π°ΡΠΌΡ * ΠΈΡΡ
ΠΎΠ΄Π½ΠΈΠΊΠΈ Linux ΠΈΠ»ΠΈ theano * Π·Π°Π³ΠΎΠ»ΠΎΠ²ΠΊΠΈ Π½Π΅ ΠΎΡΠ΅Π½Ρ Π΄ΠΎΠ±ΡΠΎΡΠΎΠ²Π΅ΡΡΠ½ΡΡ
Π½ΠΎΠ²ΠΎΡΡΠ½ΡΡ
Π±Π°Π½Π½Π΅ΡΠΎΠ² (clickbait) * Π΄ΠΈΠ°Π»ΠΎΠ³ΠΈ * LaTEX * Π»ΡΠ±Π°Ρ ΠΏΡΠΈΡ
ΠΎΡΡ Π±ΠΎΠ»ΡΠ½ΠΎΠΉ Π΄ΡΡΠΈ :)* Π½Π΅ΡΡΠ°Π½Π΄Π°ΡΡΠ½Π°Ρ ΠΈ ΡΡΡΠ΅ΠΊΡΠΈΠ²Π½Π°Ρ Π°ΡΡ
ΠΈΡΠ΅ΠΊΡΡΡΠ° ΡΠ΅ΡΠΈ* ΡΡΠΎ-ΡΠΎ Π»ΡΡΡΠ΅ Π±Π°Π·ΠΎΠ²ΠΎΠ³ΠΎ Π°Π»Π³ΠΎΡΠΈΡΠΌΠ° Π³Π΅Π½Π΅ΡΠ°ΡΠΈΠΈ (ΡΡΠΌΠΏΠ»ΠΈΠ½Π³Π°)* ΠΏΠ΅ΡΠ΅Π΄Π΅Π»Π°ΡΡ ΠΊΠΎΠ΄ ΡΠ°ΠΊ, ΡΡΠΎΠ±Ρ ΡΠ΅ΡΠΊΠ° ΡΡΠΈΠ»Π°ΡΡ ΠΏΡΠ΅Π΄ΡΠΊΠ°Π·ΡΠ²Π°ΡΡ ΡΠ»Π΅Π΄ΡΡΡΠΈΠΉ ΡΠΈΠΊ Π² ΠΊΠ°ΠΆΠ΄ΡΠΉ ΠΌΠΎΠΌΠ΅Π½Ρ Π²ΡΠ΅ΠΌΠ΅Π½ΠΈ, Π° Π½Π΅ ΡΠΎΠ»ΡΠΊΠΎ Π² ΠΊΠΎΠ½ΡΠ΅.* ΠΈ Ρ.ΠΏ. ΠΡΠΎΡΠΈΡΠ°Π΅ΠΌ ΠΊΠΎΡΠΏΡΡ* Π ΠΊΠ°ΡΠ΅ΡΡΠ²Π΅ ΠΎΠ±ΡΡΠ°ΡΡΠ΅ΠΉ Π²ΡΠ±ΠΎΡΠΊΠΈ Π±ΡΠ»ΠΎ ΡΠ΅ΡΠ΅Π½ΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ ΡΡΡΠ΅ΡΡΠ²ΡΡΡΠΈΠ΅ Π·Π°ΠΊΠΎΠ½Ρ, ΠΈΠ·Π²Π΅ΡΡΠ½ΡΠ΅ ΠΊΠ°ΠΊ ΠΡΠ°ΠΆΠ΄Π°Π½ΡΠΊΠΈΠΉ, Π£Π³ΠΎΠ»ΠΎΠ²Π½ΡΠΉ, Π‘Π΅ΠΌΠ΅ΠΉΠ½ΡΠΉ ΠΈ Π΅ΡΡ Ρ
ΡΠ΅Π½ Π·Π½Π°Π΅Ρ ΠΊΠ°ΠΊΠΈΠ΅ ΠΊΠΎΠ΄Π΅ΠΊΡΡ Π Π€. |
#ΡΡΡ Π±ΡΠ΄Π΅Ρ ΡΠ΅ΠΊΡΡ
# Read the whole training corpus: concatenate every law text from the
# "codex" directory (cp1251-encoded) into one string `corpora`.
import sys

# Hoisted out of the loop: the interpreter version does not change per file,
# so there is no need to re-import sys and re-branch on every iteration.
_IS_PY3 = sys.version_info >= (3, 0)

corpora = ""
for fname in os.listdir("codex"):
    path = "codex/" + fname
    if _IS_PY3:
        with open(path, encoding='cp1251') as fin:
            # If you are using your own corpora, make sure it's read correctly.
            text = fin.read()
    else:
        with open(path) as fin:
            text = fin.read().decode('cp1251')
    corpora += text
# All unique tokens (characters, digits) found in the corpus.
# Sorted so that token ids are reproducible across runs (plain set order is not).
tokens = sorted(set(corpora))
tokens = list(tokens)
# Sanity check on the number of distinct characters. Verified on
# Python 2.7.11 / Ubuntu x64; may differ slightly on other platforms.
# If that is your case and you are sure `corpora` is a unicode string,
# feel free to drop the assert.
assert len(tokens) == 102
# Mapping character -> its integer id, and the inverse mapping.
token_to_id = {token: idx for idx, token in enumerate(tokens)}
id_to_token = {idx: token for token, idx in token_to_id.items()}
# Encode the whole corpus as a 1-D array of token ids, where the i-th
# number corresponds to the character at position i in `corpora`.
corpora_ids = np.array([token_to_id[symbol] for symbol in corpora])
def sample_random_batches(source, n_batches=10, seq_len=20):
    """Sample random training examples from the tokenized text corpus.

    source - array of integers: token ids of the corpus (e.g. corpora_ids)
    n_batches - number of random substrings to sample
    seq_len - length of one substring, excluding the answer token

    Returns a tuple (X, y) where
        X - matrix in which every row is a substring of length seq_len;
        y - column of shape (n_batches, 1) holding the token that follows
            each row of X in the text (the training loop indexes it y[:, 0]).

    Implementation: draw rows of length seq_len + 1, then split off the last
    column as the answer. Start positions are bounded so no row ever runs
    past the end of the text.
    """
    source = np.asarray(source)
    # start + seq_len + 1 <= len(source) is guaranteed by this upper bound.
    starts = np.random.randint(0, len(source) - seq_len - 1, size=n_batches)
    rows = np.stack([source[s:s + seq_len + 1] for s in starts])
    X_batch = rows[:, :seq_len]
    y_batch = rows[:, seq_len:]
    return X_batch, y_batch
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
ΠΠΎΠ½ΡΡΠ°Π½ΡΡ | #Π΄Π»ΠΈΠ½Π° ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅ΡΠ½ΠΎΡΡΠΈ ΠΏΡΠΈ ΠΎΠ±ΡΡΠ΅Π½ΠΈΠΈ (ΠΊΠ°ΠΊ Π΄Π°Π»Π΅ΠΊΠΎ ΡΠ°ΡΠΏΡΠΎΡΡΡΠ°Π½ΡΡΡΡΡ Π³ΡΠ°Π΄ΠΈΠ΅Π½ΡΡ Π² BPTT)
seq_length = Π΄Π»ΠΈΠ½Π° ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅Π»ΡΠ½ΠΎΡΡΠΈ. ΠΡ Π±Π°Π»Π΄Ρ - 10, Π½ΠΎ ΡΡΠΎ Π½Π΅ ΠΈΠ΄Π΅Π°Π»ΡΠ½ΠΎ
#Π»ΡΡΡΠ΅ Π½Π°ΡΠ°ΡΡ Ρ ΠΌΠ°Π»ΠΎΠ³ΠΎ (ΡΠΊΠ°ΠΆΠ΅ΠΌ, 5) ΠΈ ΡΠ²Π΅Π»ΠΈΡΠΈΠ²Π°ΡΡ ΠΏΠΎ ΠΌΠ΅ΡΠ΅ ΡΠΎΠ³ΠΎ, ΠΊΠ°ΠΊ ΡΠ΅ΡΠΊΠ° Π²ΡΡΡΠΈΠ²Π°Π΅Ρ Π±Π°Π·ΠΎΠ²ΡΠ΅ Π²Π΅ΡΠΈ. 10 - Π΄Π°Π»Π΅ΠΊΠΎ Π½Π΅ ΠΏΡΠ΅Π΄Π΅Π».
# ΠΠ°ΠΊΡΠΈΠΌΠ°Π»ΡΠ½ΡΠΉ ΠΌΠΎΠ΄ΡΠ»Ρ Π³ΡΠ°Π΄ΠΈΠ΅Π½ΡΠ°
grad_clip = 100
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
ΠΡ
ΠΎΠ΄Π½ΡΠ΅ ΠΏΠ΅ΡΠ΅ΠΌΠ΅Π½Π½ΡΠ΅ | input_sequence = T.matrix('input sequence','int32')
target_values = T.ivector('target y')
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
Π‘ΠΎΠ±Π΅ΡΡΠΌ Π½Π΅ΠΉΡΠΎΡΠ΅ΡΡΠΠ°ΠΌ Π½ΡΠΆΠ½ΠΎ ΡΠΎΠ·Π΄Π°ΡΡ Π½Π΅ΠΉΡΠΎΡΠ΅ΡΡ, ΠΊΠΎΡΠΎΡΠ°Ρ ΠΏΡΠΈΠ½ΠΈΠΌΠ°Π΅Ρ Π½Π° Π²Ρ
ΠΎΠ΄ ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅Π»ΡΠ½ΠΎΡΡΡ ΠΈΠ· seq_length ΡΠΎΠΊΠ΅Π½ΠΎΠ², ΠΎΠ±ΡΠ°Π±Π°ΡΡΠ²Π°Π΅Ρ ΠΈΡ
ΠΈ Π²ΡΠ΄Π°ΡΡ Π²Π΅ΡΠΎΡΡΠ½ΠΎΡΡΠΈ Π΄Π»Ρ seq_len+1-ΠΎΠ³ΠΎ ΡΠΎΠΊΠ΅Π½Π°.ΠΠ±ΡΠΈΠΉ ΡΠ°Π±Π»ΠΎΠ½ Π°ΡΡ
ΠΈΡΠ΅ΠΊΡΡΡΡ ΡΠ°ΠΊΠΎΠΉ ΡΠ΅ΡΠΈ -* ΠΡ
ΠΎΠ΄* ΠΠ±ΡΠ°Π±ΠΎΡΠΊΠ° Π²Ρ
ΠΎΠ΄Π°* Π Π΅ΠΊΡΡΡΠ΅Π½ΡΠ½Π°Ρ Π½Π΅ΠΉΡΠΎΡΠ΅ΡΡ* ΠΡΡΠ΅Π·Π°Π½ΠΈΠ΅ ΠΏΠΎΡΠ»Π΅Π΄Π½Π΅Π³ΠΎ ΡΠΎΡΡΠΎΡΠ½ΠΈΡ* ΠΠ±ΡΡΠ½Π°Ρ Π½Π΅ΠΉΡΠΎΡΠ΅ΡΡ* ΠΡΡ
ΠΎΠ΄Π½ΠΎΠΉ ΡΠ»ΠΎΠΉ, ΠΊΠΎΡΠΎΡΡΠΉ ΠΏΡΠ΅Π΄ΡΠΊΠ°Π·ΡΠ²Π°Π΅Ρ Π²Π΅ΡΠΎΡΡΠ½ΠΎΡΡΠΈ Π²Π΅ΡΠΎΠ².ΠΠ»Ρ ΠΎΠ±ΡΠ°Π±ΠΎΡΠΊΠΈ Π²Ρ
ΠΎΠ΄Π½ΡΡ
Π΄Π°Π½Π½ΡΡ
ΠΌΠΎΠΆΠ½ΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ Π»ΠΈΠ±ΠΎ EmbeddingLayer (ΡΠΌ. ΠΏΡΠΎΡΠ»ΡΠΉ ΡΠ΅ΠΌΠΈΠ½Π°Ρ)ΠΠ°ΠΊ Π°Π»ΡΡΠ΅ΡΠ½Π°ΡΠΈΠ²Π° - ΠΌΠΎΠΆΠ½ΠΎ ΠΏΡΠΎΡΡΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ One-hot ΡΠ½ΠΊΠΎΠ΄Π΅Ρ```Π‘ΠΊΠ΅ΡΡ one-hot ΡΠ½ΠΊΠΎΠ΄Π΅ΡΠ°def to_one_hot(seq_matrix): input_ravel = seq_matrix.reshape([-1]) input_one_hot_ravel = T.extra_ops.to_one_hot(input_ravel, len(tokens)) sh=input_sequence.shape input_one_hot = input_one_hot_ravel.reshape([sh[0],sh[1],-1,],ndim=3) return input_one_hot ΠΌΠΎΠΆΠ½ΠΎ ΠΏΡΠΈΠΌΠ΅Π½ΠΈΡΡ ΠΊ input_sequence - ΠΏΡΠΈ ΡΡΠΎΠΌ Π² input ΡΠ»ΠΎΠ΅ ΡΠ΅ΡΠΈ Π½ΡΠΆΠ½ΠΎ ΠΈΠ·ΠΌΠ΅Π½ΠΈΡΡ ΡΠΎΡΠΌΡ. ΡΠ°ΠΊΠΆΠ΅ ΠΌΠΎΠΆΠ½ΠΎ ΡΠ΄Π΅Π»Π°ΡΡ ΠΈΠ· Π½Π΅Π³ΠΎ ExpressionLayer(Π²Ρ
ΠΎΠ΄Π½ΠΎΠΉ_ΡΠ»ΠΎΠΉ, to_one_hot) - ΡΠΎΠ³Π΄Π° ΡΠΎΡΠΌΡ ΠΌΠ΅Π½ΡΡΡ Π½Π΅ Π½ΡΠΆΠ½ΠΎ```Π§ΡΠΎΠ±Ρ Π²ΡΡΠ΅Π·Π°ΡΡ ΠΏΠΎΡΠ»Π΅Π΄Π½Π΅Π΅ ΡΠΎΡΡΠΎΡΠ½ΠΈΠ΅ ΡΠ΅ΠΊΡΡΡΠ΅Π½ΡΠ½ΠΎΠ³ΠΎ ΡΠ»ΠΎΡ, ΠΌΠΎΠΆΠ½ΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ ΠΎΠ΄Π½ΠΎ ΠΈΠ· Π΄Π²ΡΡ
:* `lasagne.layers.SliceLayer(rnn, -1, 1)`* only_return_final=True Π² ΠΏΠ°ΡΠ°ΠΌΠ΅ΡΡΠ°Ρ
ΡΠ»ΠΎΡ |
l_in = lasagne.layers.InputLayer(shape=(None, None),input_var=input_sequence)
ΠΠ°ΡΠ° Π½Π΅ΠΉΡΠΎΠ½ΠΊΠ° (ΡΠΌ Π²ΡΡΠ΅)
l_out = ΠΏΠΎΡΠ»Π΅Π΄Π½ΠΈΠΉ ΡΠ»ΠΎΠΉ, Π²ΠΎΠ·Π²ΡΠ°ΡΠ°ΡΡΠΈΠΉ Π²Π΅ΡΠΎΡΡΠ½ΠΎΡΡΠΈ Π΄Π»Ρ Π²ΡΠ΅Ρ
len(tokens) Π²Π°ΡΠΈΠ°Π½ΡΠΎΠ² Π΄Π»Ρ y
# ΠΠ΅ΡΠ° ΠΌΠΎΠ΄Π΅Π»ΠΈ
weights = lasagne.layers.get_all_params(l_out,trainable=True)
print weights
network_output = ΠΡΡ
ΠΎΠ΄ Π½Π΅ΠΉΡΠΎΡΠ΅ΡΠΈ
#Π΅ΡΠ»ΠΈ Π²Ρ ΠΈΡΠΏΠΎΠ»ΡΠ·ΡΠ΅ΡΠ΅ Π΄ΡΠΎΠΏΠ°ΡΡ - Π½Π΅ Π·Π°Π±ΡΠ΄ΡΡΠ΅ ΠΏΡΠΎΠ΄ΡΠ±Π»ΠΈΡΠΎΠ²Π°ΡΡ Π²ΡΡ Π² ΡΠ΅ΠΆΠΈΠΌΠ΅ deterministic=True
loss = Π€ΡΠ½ΠΊΡΠΈΡ ΠΏΠΎΡΠ΅ΡΡ - ΠΌΠΎΠΆΠ½ΠΎ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°ΡΡ ΠΏΡΠΎΡΡΡΡ ΠΊΡΠΎΡΡΡΠ½ΡΡΠΎΠΏΠΈΡ.
updates = ΠΠ°Ρ Π»ΡΠ±ΠΈΠ²ΡΠΉ ΡΠΈΡΠ»Π΅Π½Π½ΡΠΉ ΠΌΠ΅ΡΠΎΠ΄
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
ΠΠΎΠΌΠΏΠΈΠ»ΠΈΡΡΠ΅ΠΌ Π²ΡΡΠΊΠΎΠ΅-ΡΠ°Π·Π½ΠΎΠ΅ |
#ΠΎΠ±ΡΡΠ΅Π½ΠΈΠ΅
train = theano.function([input_sequence, target_values], loss, updates=updates, allow_input_downcast=True)
#ΡΡΠ½ΠΊΡΠΈΡ ΠΏΠΎΡΠ΅ΡΡ Π±Π΅Π· ΠΎΠ±ΡΡΠ΅Π½ΠΈΡ
compute_cost = theano.function([input_sequence, target_values], loss, allow_input_downcast=True)
# ΠΠ΅ΡΠΎΡΡΠ½ΠΎΡΡΠΈ Ρ Π²ΡΡ
ΠΎΠ΄Π° ΡΠ΅ΡΠΈ
probs = theano.function([input_sequence],network_output,allow_input_downcast=True)
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
ΠΠ΅Π½Π΅ΡΠΈΡΡΠ΅ΠΌ ΡΠ²ΠΎΠΈ Π·Π°ΠΊΠΎΠ½Ρ* ΠΠ»Ρ ΡΡΠΎΠ³ΠΎ ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°ΡΠ΅Π»ΡΠ½ΠΎ ΠΏΡΠΈΠΌΠ΅Π½ΡΠ΅ΠΌ Π½Π΅ΠΉΡΠΎΠ½ΠΊΡ ΠΊ ΡΠ²ΠΎΠ΅ΠΌΡ ΠΆΠ΅ Π²ΡΠ²ΠΎΠ΄Ρ.* ΠΠ΅Π½Π΅ΡΠΈΡΠΎΠ²Π°ΡΡ ΠΌΠΎΠΆΠ½ΠΎ ΠΏΠΎ ΡΠ°Π·Π½ΠΎΠΌΡ - * ΡΠ»ΡΡΠ°ΠΉΠ½ΠΎ ΠΏΡΠΎΠΏΠΎΡΡΠΈΠΎΠ½Π°Π»ΡΠ½ΠΎ Π²Π΅ΡΠΎΡΡΠ½ΠΎΡΡΠΈ, * ΡΠΎΠ»ΡΠΊΠΎ ΡΠ»ΠΎΠ²Π° ΠΌΠ°ΠΊΡΠΈΠΌΠ°Π»ΡΠ½ΠΎΠΉ Π²Π΅ΡΠΎΡΡΠ½ΠΎΡΡΡΡ * ΡΠ»ΡΡΠ°ΠΉΠ½ΠΎ, ΠΏΡΠΎΠΏΠΎΡΡΠΈΠΎΠ½Π°Π»ΡΠ½ΠΎ softmax(probas*alpha), Π³Π΄Π΅ alpha - "ΠΆΠ°Π΄Π½ΠΎΡΡΡ" | def max_sample_fun(probs):
return np.argmax(probs)
def proportional_sample_fun(probs):
    """Sample the next token (int) proportionally to the predicted probabilities.

    probs - array of probabilities, one per token.
    Returns a single integer: the index of the chosen token.
    """
    # BUG FIX: the original signature was missing the trailing colon and the
    # body was an unimplemented placeholder.
    probs = np.asarray(probs, dtype=np.float64)
    # Renormalize to guard against tiny floating-point drift in the softmax output.
    probs = probs / probs.sum()
    return np.random.choice(len(probs), p=probs)
# The next function generates text given a phrase of length at least SEQ_LENGTH.
# The phrase is set using the variable generation_phrase.
# The optional input "N" is used to set the number of characters of text to predict.
def generate_sample(sample_fun,seed_phrase=None,N=200):
    '''
    Generate random text with the trained network.

    sample_fun - function that picks the next token id from a predicted
        probability distribution (e.g. max_sample_fun / proportional_sample_fun)
    seed_phrase - phrase the network should continue; if None, a random
        seed phrase is taken from `corpora`
    N - number of characters to generate

    NOTE(review): Python 2 code (`print` statement, `unicode` type,
    list-returning `map`); also reads the module globals `corpora`,
    `seq_length`, `probs`, `token_to_id` and `id_to_token`.
    '''
    if seed_phrase is None:
        # Pick a random seq_length-sized window of the corpus as the seed.
        start = np.random.randint(0,len(corpora)-seq_length)
        seed_phrase = corpora[start:start+seq_length]
        print "Using random seed:",seed_phrase
    # Left-pad short seeds with spaces, keep only the tail of long seeds,
    # so the network always sees exactly seq_length characters.
    while len(seed_phrase) < seq_length:
        seed_phrase = " "+seed_phrase
    if len(seed_phrase) > seq_length:
        seed_phrase = seed_phrase[len(seed_phrase)-seq_length:]
    assert type(seed_phrase) is unicode
    sample_ix = []
    # Encode the seed; unknown characters fall back to token id 0.
    x = map(lambda c: token_to_id.get(c,0), seed_phrase)
    x = np.array([x])
    for i in range(N):
        # Pick the character that got assigned the highest probability
        ix = sample_fun(probs(x).ravel())
        # Alternatively, to sample from the distribution instead:
        # ix = np.random.choice(np.arange(vocab_size), p=probs(x).ravel())
        sample_ix.append(ix)
        # Shift the window one step left and append the freshly sampled token.
        x[:,0:seq_length-1] = x[:,1:]
        x[:,seq_length-1] = 0
        x[0,seq_length-1] = ix
    # Decode sampled ids back to characters and print seed + continuation.
    random_snippet = seed_phrase + ''.join(id_to_token[ix] for ix in sample_ix)
    print("----\n %s \n----" % random_snippet)
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
ΠΠ±ΡΡΠ΅Π½ΠΈΠ΅ ΠΌΠΎΠ΄Π΅Π»ΠΈΠ ΠΊΠΎΡΠΎΡΠΎΠΌ Π²Ρ ΠΌΠΎΠΆΠ΅ΡΠ΅ ΠΏΠΎΠ΄ΡΡΠ³Π°ΡΡ ΠΏΠ°ΡΠ°ΠΌΠ΅ΡΡΡ ΠΈΠ»ΠΈ Π²ΡΡΠ°Π²ΠΈΡΡ ΡΠ²ΠΎΡ Π³Π΅Π½Π΅ΡΠΈΡΡΡΡΡΡ ΡΡΠ½ΠΊΡΠΈΡ. |
print("Training ...")
#ΡΠΊΠΎΠ»ΡΠΊΠΎ Π²ΡΠ΅Π³ΠΎ ΡΠΏΠΎΡ
n_epochs=100
# ΡΠ°Π· Π² ΡΠΊΠΎΠ»ΡΠΊΠΎ ΡΠΏΠΎΡ
ΠΏΠ΅ΡΠ°ΡΠ°ΡΡ ΠΏΡΠΈΠΌΠ΅ΡΡ
batches_per_epoch = 1000
#ΡΠΊΠΎΠ»ΡΠΊΠΎ ΡΠ΅ΠΏΠΎΡΠ΅ΠΊ ΠΎΠ±ΡΠ°Π±Π°ΡΡΠ²Π°ΡΡ Π·Π° 1 Π²ΡΠ·ΠΎΠ² ΡΡΠ½ΠΊΡΠΈΠΈ ΠΎΠ±ΡΡΠ΅Π½ΠΈΡ
batch_size=100
for epoch in xrange(n_epochs):
print "ΠΠ΅Π½Π΅ΡΠΈΡΡΠ΅ΠΌ ΡΠ΅ΠΊΡΡ Π² ΠΏΡΠΎΠΏΠΎΡΡΠΈΠΎΠ½Π°Π»ΡΠ½ΠΎΠΌ ΡΠ΅ΠΆΠΈΠΌΠ΅"
generate_sample(proportional_sample_fun,None)
print "ΠΠ΅Π½Π΅ΡΠΈΡΡΠ΅ΠΌ ΡΠ΅ΠΊΡΡ Π² ΠΆΠ°Π΄Π½ΠΎΠΌ ΡΠ΅ΠΆΠΈΠΌΠ΅ (Π½Π°ΠΈΠ±ΠΎΠ»Π΅Π΅ Π²Π΅ΡΠΎΡΡΠ½ΡΠ΅ Π±ΡΠΊΠ²Ρ)"
generate_sample(max_sample_fun,None)
avg_cost = 0;
for _ in range(batches_per_epoch):
x,y = sample_random_batches(corpora_ids,batch_size,seq_length)
avg_cost += train(x, y[:,0])
print("Epoch {} average loss = {}".format(epoch, avg_cost / batches_per_epoch))
| _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
A chance to speed up training and get bonus score* Try predicting next token probas at ALL ticks (like in the seminar part)* much more objectives, much better gradients* You may want to zero-out loss for first several iterations ΠΠΎΠ½ΡΡΠΈΡΡΡΠΈΡ Π½ΠΎΠ²ΠΎΠ³ΠΎ ΠΌΠΈΡΠΎΠ²ΠΎΠ³ΠΎ ΠΏΡΠ°Π²ΠΈΡΠ΅Π»ΡΡΡΠ²Π° | seed = u"ΠΠ°ΠΆΠ΄ΡΠΉ ΡΠ΅Π»ΠΎΠ²Π΅ΠΊ Π΄ΠΎΠ»ΠΆΠ΅Π½"
sampling_fun = proportional_sample_fun
result_length = 300
generate_sample(sampling_fun,seed,result_length)
seed = u"Π ΡΠ»ΡΡΠ°Π΅ Π½Π΅ΠΏΠΎΠ²ΠΈΠ½ΠΎΠ²Π΅Π½ΠΈΡ"
sampling_fun = proportional_sample_fun
result_length = 300
generate_sample(sampling_fun,seed,result_length)
Π Π΄Π°Π»Π΅Π΅ ΠΏΠΎ ΡΠΏΠΈΡΠΊΡ | _____no_output_____ | MIT | week4/week4-ru - RNN - homework part.ipynb | LatatyeS/HSE_deeplearning |
*Accompanying code examples of the book "Introduction to Artificial Neural Networks and Deep Learning: A Practical Guide with Applications in Python" by [Sebastian Raschka](https://sebastianraschka.com). All code examples are released under the [MIT license](https://github.com/rasbt/deep-learning-book/blob/master/LICENSE). If you find this content useful, please consider supporting the work by buying a [copy of the book](https://leanpub.com/ann-and-deeplearning).* Other code examples and content are available on [GitHub](https://github.com/rasbt/deep-learning-book). The PDF and ebook versions of the book are available through [Leanpub](https://leanpub.com/ann-and-deeplearning). | %load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p tensorflow | Sebastian Raschka
CPython 3.6.1
IPython 6.0.0
tensorflow 1.2.0
| MIT | tensorflow1_ipynb/gan/gan.ipynb | tranhoangkhuongvn/deeplearning-models |
Model Zoo -- General Adversarial Networks Implementation of General Adversarial Nets (GAN) where both the discriminator and generator are multi-layer perceptrons with one hidden layer only. In this example, the GAN generator was trained to generate MNIST images.Uses- samples from a random normal distribution (range [-1, 1])- dropout- leaky relus- ~~batch normalization~~ [performs worse here]- separate batches for "fake" and "real" images (where the labels are 1 = real images, 0 = fake images)- MNIST images normalized to [-1, 1] range- generator with tanh output | import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import pickle as pkl
tf.test.gpu_device_name()
### Abbreviatiuons
# dis_*: discriminator network
# gen_*: generator network
########################
### Helper functions
########################
def leaky_relu(x, alpha=0.0001):
    # Leaky ReLU: identity for positive inputs, slope `alpha` for negatives.
    # tf.maximum is elementwise and commutative, so argument order is free.
    return tf.maximum(x, alpha * x)
########################
### DATASET
########################
mnist = input_data.read_data_sets('MNIST_data')
#########################
### SETTINGS
#########################
# Hyperparameters
learning_rate = 0.001
training_epochs = 100
batch_size = 64
dropout_rate = 0.5
# Other settings
print_interval = 200
# Architecture
dis_input_size = 784
gen_input_size = 100
dis_hidden_size = 128
gen_hidden_size = 128
#########################
### GRAPH DEFINITION
#########################
g = tf.Graph()
with g.as_default():
# Placeholders for settings
dropout = tf.placeholder(tf.float32, shape=None, name='dropout')
is_training = tf.placeholder(tf.bool, shape=None, name='is_training')
# Input data
dis_x = tf.placeholder(tf.float32, shape=[None, dis_input_size], name='discriminator_input')
gen_x = tf.placeholder(tf.float32, [None, gen_input_size], name='generator_input')
##################
# Generator Model
##################
with tf.variable_scope('generator'):
# linear -> ~~batch norm~~ -> leaky relu -> dropout -> tanh output
gen_hidden = tf.layers.dense(inputs=gen_x, units=gen_hidden_size,
activation=None)
#gen_hidden = tf.layers.batch_normalization(gen_hidden, training=is_training)
gen_hidden = leaky_relu(gen_hidden)
gen_hidden = tf.layers.dropout(gen_hidden, rate=dropout_rate)
gen_logits = tf.layers.dense(inputs=gen_hidden, units=dis_input_size,
activation=None)
gen_out = tf.tanh(gen_logits, 'generator_output')
######################
# Discriminator Model
######################
    def build_discriminator_graph(input_x, reuse=None):
        """Build the discriminator MLP over `input_x`.

        All variables live in the 'discriminator' scope; pass reuse=True to
        share weights between the real-data and fake-data discriminators.
        Reads module globals: dis_hidden_size, dropout_rate.
        Returns (logits, sigmoid(logits)).
        """
        # linear -> ~~batch norm~~ -> leaky relu -> dropout -> sigmoid output
        with tf.variable_scope('discriminator', reuse=reuse):
            hidden = tf.layers.dense(inputs=input_x, units=dis_hidden_size,
                                     activation=None)
            # Batch norm was tried and disabled (performed worse, per notebook notes).
            #hidden = tf.layers.batch_normalization(hidden, training=is_training)
            hidden = leaky_relu(hidden)
            hidden = tf.layers.dropout(hidden, rate=dropout_rate)
            # Single logit: probability that the input is fake (label 1 = fake).
            logits = tf.layers.dense(inputs=hidden, units=1, activation=None)
            out = tf.sigmoid(logits)
            return logits, out
# Create a discriminator for real data and a discriminator for fake data
dis_real_logits, dis_real_out = build_discriminator_graph(dis_x, reuse=False)
dis_fake_logits, dis_fake_out = build_discriminator_graph(gen_out, reuse=True)
#####################################
# Generator and Discriminator Losses
#####################################
# Two discriminator cost components: loss on real data + loss on fake data
# Real data has class label 0, fake data has class label 1
dis_real_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_real_logits,
labels=tf.zeros_like(dis_real_logits))
dis_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_fake_logits,
labels=tf.ones_like(dis_fake_logits))
dis_cost = tf.add(tf.reduce_mean(dis_fake_loss),
tf.reduce_mean(dis_real_loss),
name='discriminator_cost')
# Generator cost: difference between dis. prediction and label "0" for real images
gen_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=dis_fake_logits,
labels=tf.zeros_like(dis_fake_logits))
gen_cost = tf.reduce_mean(gen_loss, name='generator_cost')
#########################################
# Generator and Discriminator Optimizers
#########################################
dis_optimizer = tf.train.AdamOptimizer(learning_rate)
dis_train_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
dis_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='discriminator')
with tf.control_dependencies(dis_update_ops): # required to upd. batch_norm params
dis_train = dis_optimizer.minimize(dis_cost, var_list=dis_train_vars,
name='train_discriminator')
gen_optimizer = tf.train.AdamOptimizer(learning_rate)
gen_train_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator')
gen_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='generator')
with tf.control_dependencies(gen_update_ops): # required to upd. batch_norm params
gen_train = gen_optimizer.minimize(gen_cost, var_list=gen_train_vars,
name='train_generator')
# Saver to save session for reuse
saver = tf.train.Saver()
##########################
### TRAINING & EVALUATION
##########################
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
avg_costs = {'discriminator': [], 'generator': []}
for epoch in range(training_epochs):
dis_avg_cost, gen_avg_cost = 0., 0.
total_batch = mnist.train.num_examples // batch_size
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
batch_x = batch_x*2 - 1 # normalize
batch_randsample = np.random.uniform(-1, 1, size=(batch_size, gen_input_size))
# Train
_, dc = sess.run(['train_discriminator', 'discriminator_cost:0'],
feed_dict={'discriminator_input:0': batch_x,
'generator_input:0': batch_randsample,
'dropout:0': dropout_rate,
'is_training:0': True})
_, gc = sess.run(['train_generator', 'generator_cost:0'],
feed_dict={'generator_input:0': batch_randsample,
'dropout:0': dropout_rate,
'is_training:0': True})
dis_avg_cost += dc
gen_avg_cost += gc
if not i % print_interval:
print("Minibatch: %03d | Dis/Gen Cost: %.3f/%.3f" % (i + 1, dc, gc))
print("Epoch: %03d | Dis/Gen AvgCost: %.3f/%.3f" %
(epoch + 1, dis_avg_cost / total_batch, gen_avg_cost / total_batch))
avg_costs['discriminator'].append(dis_avg_cost / total_batch)
avg_costs['generator'].append(gen_avg_cost / total_batch)
saver.save(sess, save_path='./gan.ckpt')
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(range(len(avg_costs['discriminator'])),
avg_costs['discriminator'], label='discriminator')
plt.plot(range(len(avg_costs['generator'])),
avg_costs['generator'], label='generator')
plt.legend()
plt.show()
####################################
### RELOAD & GENERATE SAMPLE IMAGES
####################################
n_examples = 25
with tf.Session(graph=g) as sess:
saver.restore(sess, save_path='./gan.ckpt')
batch_randsample = np.random.uniform(-1, 1, size=(n_examples, gen_input_size))
new_examples = sess.run('generator/generator_output:0',
feed_dict={'generator_input:0': batch_randsample,
'dropout:0': 0.0,
'is_training:0': False})
fig, axes = plt.subplots(nrows=5, ncols=5, figsize=(8, 8),
sharey=True, sharex=True)
for image, ax in zip(new_examples, axes.flatten()):
ax.imshow(image.reshape((dis_input_size // 28, dis_input_size // 28)), cmap='binary')
plt.show() | _____no_output_____ | MIT | tensorflow1_ipynb/gan/gan.ipynb | tranhoangkhuongvn/deeplearning-models |
Functions used: 1. importing csv files2. Changing directory3. Reading only specific columns into the dataframe4. Using del [ to delete a column ]5. Using .drop() [ to delete multiple columns ]6. Using .set_index() 1. Import The csv files: | import pandas as pd
import numpy as np
link = "C:\\Users\\MAHE\\Desktop\\Data Science\\Projects\\data\\banknifty" | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
2. Get the current working directory to the location of the data: | import os
os.getcwd()
os.chdir(link) | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
3. Read the csv file df = data frame | df = pd.read_csv("all_here.csv", sep = ",")
df.head()
df.tail() | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
4. Reading the "CE" and "PE" from the columns to their resp. df: a. Reading "CE" into df_ce: | df_ce = df[df["Option Type"]=="CE"]
df_ce.head()
df_ce.tail() | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
b. Reading "PE" into df_pe: | df_pe = df[df["Option Type"]=="PE"]
df_pe.head()
df_pe.tail() | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
5. Discarding irrelevant columns: [ keep ,Expiry Date, Strike Price, Open, High, Low, Close ] Deleting only 1 column at a time: [ del ] | del df_ce["Symbol"] | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
Deleting multiple columns at a time: [ .drop([".."],axis = 1) ] | df_ce = df_ce.drop(["Date","LTP","Settle Price","No. of contracts","Turnover in Lacs","Premium Turnover in Lacs","Open Int","Change in OI","Underlying Value"],axis = 1)
df_pe = df_pe.drop(["Date","LTP","Settle Price","No. of contracts","Turnover in Lacs","Premium Turnover in Lacs","Open Int","Change in OI","Underlying Value"],axis = 1) | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
6. Set Expiry date as the index: [ set_index() ] | df_ce.set_index("Expiry",inplace = True)
df_ce.head()
df_pe.set_index("Expiry",inplace = True)
df_pe.head() | _____no_output_____ | MIT | Data-Science-HYD-2k19/Projects/codes/PROJECT 1 (Jupiter ) ( Bank Nifty )/Merge BankNift and Data/BankNifty-Trial1-Copy1.ipynb | Sanjay9921/Python |
Clean data | # Remove rows with zero cases
max_cases = [max(v) for v in df['cases']]
df['max_cases'] = max_cases
df_with_cases = df[df['max_cases'] > 0]
# Shuffle data
shuffled_df = df_with_cases.sample(frac=1)
# Break into train test (random k-fold cross val on the training set is done to pick hyperparams)
train_ratio, val_ratio, test_ratio = .75,0,.25
train_df = shuffled_df[0:int(train_ratio*len(shuffled_df))]
# val_df = shuffled_df[int(train_ratio*len(shuffled_df)):int(val_ratio*len(shuffled_df))+int(train_ratio*len(shuffled_df))]
test_df = shuffled_df[int(train_ratio*len(shuffled_df))+int(val_ratio*len(shuffled_df)):]
def make_auto_regressive_dataset(df, autoreg_window, log=True, deaths=True, cases=False, predict_deaths=True):
    """
    Build an autoregressive dataset from a dataframe with list-valued
    'cases' and 'deaths' columns: each example predicts the target for one
    day from a history of `autoreg_window` preceding days.
    log: apply log(v + 1) to both features and targets
    deaths: include the previous-deaths window in the features
    cases: include the previous-cases window in the features
    predict_deaths: predict deaths; otherwise predict cases
    """
    assert (deaths == True or cases == True)
    case_series = list(df['cases'])
    death_series = list(df['deaths'])
    feature_rows, targets = [], []
    for county_cases, county_deaths in zip(case_series, death_series):
        # The series we both filter on and predict from.
        event_series = county_deaths if predict_deaths else county_cases
        for start in range(len(county_cases) - (autoreg_window + 1)):
            end = start + autoreg_window
            # Skip windows in which no event was ever recorded.
            if sum(event_series[start:end + 1]) <= 0:
                continue
            # NOTE(review): the target sits at end + 1, one day past the last
            # history index (end - 1); preserved from the original behavior.
            target = event_series[end + 1]
            death_hist = county_deaths[start:end]
            case_hist = county_cases[start:end]
            if log:
                death_hist = [np.log(v + 1) for v in death_hist]
                case_hist = [np.log(v + 1) for v in case_hist]
                target = np.log(target + 1)
            row = []
            if deaths == True:
                row.extend(death_hist)
            if cases == True:
                row.extend(case_hist)
            feature_rows.append(row)
            targets.append(target)
    return feature_rows, targets
def evaluate_model(model, eval_pair, metric, exponentiate=False):
    """
    Score a fitted model on held-out data.
    model: fitted estimator exposing .predict
    eval_pair: (features, targets) tuple
    metric: callable(predictions, targets) -> score
    exponentiate: undo a log transform by exponentiating both predictions
        and targets before scoring
    Returns (predictions, score).
    """
    features, targets = eval_pair[0], eval_pair[1]
    preds = model.predict(features)
    if exponentiate:
        preds = [np.exp(p) for p in preds]
        targets = [np.exp(t) for t in targets]
    score = metric(preds, targets)
    return preds, score
model = sklearn.neighbors.KNeighborsRegressor()
param_dist ={
'n_neighbors': [2,4,8,16],
'weights': ['uniform','distance'],
'p': [1,2,4]
}
# model = RandomForestRegressor()
# param_dist ={
# 'n_estimators': [50,100,200,400,1000]
# }
# Number of randomly sampled hyperparams
n_iter = 20
metric = sklearn.metrics.mean_squared_error
# n_jobs = number of cores to parallelize across
random_search = RandomizedSearchCV(model, param_distributions=param_dist,
n_iter=n_iter,n_jobs = 8)
predict_deaths = False
auto_reg_windows = [1,2,4,8]
best_window = None
best_loss = None
for w in auto_reg_windows:
log = False
x_train, y_train = make_auto_regressive_dataset(train_df,w,log=log,predict_deaths=predict_deaths)
x_test, y_test = make_auto_regressive_dataset(test_df,w,log=log,predict_deaths=predict_deaths)
random_search.fit(x_train,y_train)
window_loss = random_search.best_score_
if best_loss is None:
best_window = w
best_loss = window_loss
elif window_loss < best_loss:
best_window = w
best_score = loss
x_train, y_train = make_auto_regressive_dataset(train_df,best_window,log=log)
x_test, y_test = make_auto_regressive_dataset(test_df,best_window,log=log)
random_search.fit(x_train,y_train)
preds, loss = evaluate_model(random_search,(x_test,y_test),metric,exponentiate=True)
# model.fit(x_train,y_train)
random_search.best_params_
best_window
loss
# WARNING: does not yet supported number of previous cases as feature
def get_auto_reg_predictions(model, row, window, teacher_forcing=True, exponentiate=False, predict_deaths=True):
    """
    Roll the model along one row's time series, predicting each day from the
    `window` preceding observed values (teacher forcing: real history is fed
    in at every step). Returns [0, pred_1, pred_2, ...].
    Only teacher_forcing=True is implemented.
    """
    key = 'deaths' if predict_deaths else 'cases'
    series = row[key]
    if not teacher_forcing:
        raise NotImplementedError
    predictions = [0]
    for start in range(len(series) - window):
        history = series[start:start + window]
        step_pred = model.predict([history])
        if exponentiate:
            step_pred = np.exp(step_pred)
        predictions.append(step_pred)
    return predictions
def plot_prediction(model, row, window, exponentiate=False, predict_deaths=True):
    """
    Plots model predictions vs actual values for one row.
    row: dataframe row with list-valued 'cases'/'deaths'
    window: autoregressive window size
    exponentiate: exponentiate model predictions (undo a log transform)
    predict_deaths: plot/predict deaths; otherwise cases
    """
    if predict_deaths:
        key = 'deaths'
    else:
        key = 'cases'
    # BUG FIX: `exponentiate` was previously passed positionally, landing in
    # the `teacher_forcing` parameter of get_auto_reg_predictions. Pass both
    # options by keyword so they reach the right parameters.
    model_predictions = get_auto_reg_predictions(
        model, row, window, exponentiate=exponentiate, predict_deaths=predict_deaths)
    model_predictions = [float(v) for v in model_predictions]
    print(model_predictions)
    # BUG FIX: start_point was unbound when the series never becomes positive;
    # default to plotting from the beginning in that case.
    start_point = 0
    for i, val in enumerate(row[key]):
        if val > 0:
            start_point = i
            break
    plt.plot(row[key][start_point:], label=key)
    plt.plot(model_predictions[start_point:], label='predictions')
    print(model_predictions[start_point:])
    plt.fill_between(list(range(len(row[key][start_point:]))),
                     row[key][start_point:], model_predictions[start_point:])
    plt.legend()
    plt.show()
for i in range(len(test_df)):
row = test_df.iloc[i]
if max(row['deaths'][:-1]) > 1:
plot_prediction(random_search,row,best_window,exponentiate=True,predict_deaths=predict_deaths) | [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.625, 0.625, 3.375, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.625]
[1.0, 0.625, 0.625, 3.375, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.625]
| MIT | modeling/04_prediction_time_series.ipynb | zheng-da/covid19-severity-prediction |
Predict deaths from cases | def create_case_to_death_data(df):
_cases = []
_deaths = []
_y_deaths = []
for i in range(len(df)):
row = df.iloc[i]
deaths = row['deaths']
cases = row['cases']
for j in range(len(deaths)):
if cases[j] > 0:
_cases.append(cases[j])
if j == 0:
_deaths.append(0)
else:
_deaths.append(deaths[j-1])
_y_deaths.append(deaths[j])
return (_cases,_deaths,_y_deaths)
train_cases, train_deaths, train_y_deaths = create_case_to_death_data(train_df)
test_cases, test_deaths, test_y_deaths = create_case_to_death_data(test_df)
model = RandomForestRegressor()
param_dist ={
'n_estimators': [50,100,200,400,1000]
}
metric = sklearn.metrics.mean_squared_error
# n_jobs = number of cores to parallelize across
deaths_random_search = RandomizedSearchCV(model, param_distributions=param_dist,
n_iter=n_iter,n_jobs = 8)
deaths_random_search.fit(list(zip(train_cases,train_deaths)),train_y_deaths)
pred_deaths = deaths_random_search.predict(list(zip(test_cases,test_deaths)))
metric(pred_deaths,test_y_deaths)
row = df.iloc[0]
plt.plot(row['deaths'], label='deaths')
plt.plot(row['cases'], label='cases')
plt.legend()
plt.show() | _____no_output_____ | MIT | modeling/04_prediction_time_series.ipynb | zheng-da/covid19-severity-prediction |
Binary Classification Fairness Assessment TemplateUse this template as a skeleton for comparing fairness and performance measures across a set of trained binary classification models. | # Recommended list of libraries (optional unless otherwise specified)
from fairmlhealth import report, measure # Required
import os
import pandas as pd | _____no_output_____ | MIT | templates/Template-BinaryClassificationAssessment.ipynb | masino-lab/fairMLHealth |
-------- Load (or Generate) Data and ModelsHere you should load (or generate) your test dataset and models. | # < Optional Loading/Cleaning/Training Code Here > | _____no_output_____ | MIT | templates/Template-BinaryClassificationAssessment.ipynb | masino-lab/fairMLHealth |
-------- Evaluate a Single (Baseline) Model Set the Required Variables * X (NumPy array or similar pandas object): test data to be passed to the models to generate predictions. It's recommended that these be separate data from those used to train the model.* y (NumPy array or similar pandas object): target data array corresponding to X. It is recommended that the target is not present in the test data.* PA (NumPy array or similar pandas object): protected attributes corresponding to X, optionally also included in X. Note that values must currently be binary- or Boolean-type.* model: the trained model to be evaluated. | # Set Pointers to be Passed to the Comparison Tools
X = None # <- add your test data (held out from model training)
y = None # <- add your test labels (binary classification target for X)
PA = None # <- add the protected attribute(s) for X (binary/Boolean values)
model = None # <- add a trained, scikit-compatible model (or a dict/list of them)
# ----
# Full fairness/performance comparison report for the model(s).
# (pred_type was corrupted to "classificationmeasure.data(X, y)" -- a merge of
# the "classification" literal with the following statement; fixed here.)
report.compare(X, y, PA, model, pred_type="classification")
# Summary statistics of the test data.
measure.data(X, y)
# Performance measures; uses the template's `model` and `X` (the previous
# `model_1`/`X_test` names were undefined in this template).
measure.performance(X, y, y_pred=model.predict(X), y_prob=model.predict_proba(X),
                    pred_type="classification")
# Group fairness / bias measures for the predictions.
measure.bias(X, y, model.predict(X), pred_type="classification")
Apply to a test image to check performance | # sdir = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/planet/planet_order_242451/20180830_154418_0f3c'
# planet2chips(tiff_directory = sdir, chip_directory = sdir, chip_size = 512) | _____no_output_____ | MIT | notebooks/process_planet_scenes.ipynb | tclavelle/aqua_python |
# Now we need a function to copy the VGG project templates and via.html files
# into each chip directory so that the chips can be labeled.
def process_planet_orders(source_dir, target_dir):
    """Find unique PlanetScope scenes in a directory of Planet order folders
    and process newly added scenes into image chips.

    Parameters
    ----------
    source_dir : str
        Directory with one subdirectory per Planet order; each order
        subdirectory contains one subdirectory per PlanetScope scene.
    target_dir : str
        Directory where per-scene chip folders are written. Scenes that
        already have a folder here are skipped, so re-running is safe.
    """
    # Collect (scene id, scene path) for every scene in every order.
    # Plain list accumulation handles the empty case gracefully
    # (np.concatenate([]) raised ValueError), and joining from order_dir
    # avoids double-joining source_dir, which broke relative paths.
    scene_ids = []
    scene_dirs = []
    for order_name in next(os.walk(source_dir))[1]:
        order_dir = os.path.join(source_dir, order_name)
        for scene_id in next(os.walk(order_dir))[1]:
            scene_ids.append(scene_id)
            scene_dirs.append(os.path.join(order_dir, scene_id))
    print(len(scene_ids))
    # Scenes that already have chip folders in the target directory.
    existing = set(next(os.walk(target_dir))[1])
    # Apply the GeoTiff chipping function to each unprocessed scene.
    for scene_id, scene_dir in zip(scene_ids, scene_dirs):
        if scene_id in existing:
            continue
        print(scene_dir)
        planet2chips(tiff_directory=scene_dir, chip_directory=target_dir, chip_size=512)
Apply the function to process all Planet orders presently in Box | # Run function
# Source: raw Planet order downloads; target: per-scene chip folders.
# NOTE(review): machine-specific Box Sync paths -- update before running elsewhere.
sdir = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/planet'
tdir = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/planet_chips'
# os.path.isdir(sdir)
process_planet_orders(sdir, tdir) | 141
/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/planet/planet_order_236949/20180825_231532_1_0f3c
filename: 20180825_231532_1_0f3c_3B_AnalyticMS_SR.tif
image name: 20180825_231532_1
(4, 4510, 8605)
| MIT | notebooks/process_planet_scenes.ipynb | tclavelle/aqua_python |
Move tiff files for labeled chipsAfter a Planet scene is processed into tiff and png chips, the pngs containing objects are added to a VGG project and labeled. Labels are then saved in a `[batchname]_labels.json` file. The last step prior to uploading the chips to Tana is to create a new directory for the chip containing the raw tiff file and a directory of class specific masks. | # Function to copy the tiffs of PNGs selected for labeling and make directories for each chip
def copy_chip_tiffs(label_dir, chips_dir, prepped_dir):
    """Take a VGG labeling project with PNGs and create a directory
    for each chip in the prepped directory.

    For every PNG in label_dir, copies the matching GeoTiff from
    chips_dir/<scene>/chips/ into prepped_dir/<chip>/image/.
    """
    # Every PNG in the labeling project names one chip; ignore macOS cruft.
    png_names = [f for f in os.listdir(label_dir) if f != '.DS_Store']
    for png_name in png_names:
        # Chip name is the PNG filename without its extension.
        chip_name = png_name.split('.png')[0]
        # Prepped layout: <prepped_dir>/<chip>/image/<chip>.tif
        image_dir = os.path.join(prepped_dir, chip_name, 'image')
        # parents=True creates the chip directory along with 'image'.
        pathlib.Path(image_dir).mkdir(parents=True, exist_ok=True)
        # Scene id is the first three underscore-separated tokens of the chip name.
        parts = chip_name.split('_')
        scene_id = '_'.join([parts[0], parts[1], parts[2]])
        # Locate the source GeoTiff and copy it into the chip's image folder.
        source_tiff = os.path.join(chips_dir, scene_id, 'chips', chip_name + '.tif')
        copy2(source_tiff, image_dir)
Run function to copy tifs for selected PNGs | # Copy tiffs for chile cages
# Labeling-project PNGs (chip names), prepped output, and raw chip source.
# NOTE(review): the surrounding comment says "chile cages" but the path points
# at label_china -- confirm which labeling project is intended.
labels = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/vgg/labeled/label_china/pngs'
prepped_dir = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/prepped_planet'
chips_dir = '/Users/Tyler-SFG/Desktop/Box Sync/SFG Centralized Resources/Projects/Aquaculture/Waitt Aquaculture/aqua-mapping/aqua-mapping-data/aqua-images/planet_chips'
copy_chip_tiffs(label_dir = labels, chips_dir = chips_dir, prepped_dir = prepped_dir) | _____no_output_____ | MIT | notebooks/process_planet_scenes.ipynb | tclavelle/aqua_python |
# Now we need a function to create the class masks for each image
def masks_from_labels(labels, prepped_dir):
    """Write one binary mask GeoTiff per aquaculture class for each annotated chip.

    Parameters
    ----------
    labels : str
        Path to a VGG Image Annotator (VIA) JSON export.
    prepped_dir : str
        Directory containing one folder per chip (with an 'image' subfolder
        holding the chip GeoTiff); masks are written to '<chip>/class_masks/'.
    """
    # Read annotations; use a context manager so the JSON file handle is closed.
    with open(labels) as f:
        annotations = json.load(f)
    annotations = list(annotations.values())  # don't need the dict keys
    # The VIA tool saves images in the JSON even if they don't have any
    # annotations. Skip unannotated images.
    annotations = [a for a in annotations if a['regions']]
    # Loop over chips
    for a in annotations:
        # Get chip and directory
        chip = a['filename'].split('.png')[0]
        chip_dir = os.path.join(prepped_dir, chip)
        # Create a directory to store masks
        masks_dir = os.path.join(chip_dir, 'class_masks')
        pathlib.Path(masks_dir).mkdir(parents=True, exist_ok=True)
        # Read geotiff for chip; the context manager guarantees the dataset is
        # closed even for chips with incomplete annotations (previously one
        # dataset handle was leaked per chip).
        gtiff = chip_dir + '/' + 'image' + '/' + chip + '.tif'
        with rasterio.open(gtiff) as src:
            # Use try to only extract masks for chips with complete annotations and class labels
            try:
                """Code for processing VGG annotations from Matterport balloon color splash sample"""
                # VGG Image Annotator saves each image in the form:
                # { 'filename': '28503151_5b5b7ec140_b.jpg',
                #   'regions': {
                #       '0': {
                #           'region_attributes': {},
                #           'shape_attributes': {
                #               'all_points_x': [...],
                #               'all_points_y': [...],
                #               'name': 'polygon'}},
                #       ... more regions ...
                #   },
                #   'size': 100202
                # }
                # Get the aquaculture class of each polygon
                polygon_types = [r['region_attributes'] for r in a['regions']]
                # Get unique aquaculture classes in annotations
                types = set(val for dic in polygon_types for val in dic.values())
                for t in types:
                    # Keep only the polygons belonging to this class (the x, y
                    # outline coordinates live in shape_attributes; see above).
                    polygons = [r['shape_attributes'] for r in a['regions'] if r['region_attributes']['class'] == t]
                    # Draw mask using height and width of Geotiff
                    mask = np.zeros([src.height, src.width], dtype=np.uint8)
                    for p in polygons:
                        # Get indexes of pixels inside the polygon and set them to 1
                        rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
                        mask[rr, cc] = 1
                    # Open a new GeoTiff data file in which to save the mask,
                    # georeferenced identically to the source chip.
                    with rasterio.open((masks_dir + '/' + chip + '_' + str(t) + '_mask.tif'), 'w', driver='GTiff',
                                       height=src.shape[0], width=src.shape[1], count=1,
                                       dtype=rasterio.ubyte, crs=src.crs,
                                       transform=src.transform) as new_img:
                        # Write the mask as band 1 of the new GeoTiff
                        new_img.write(mask.astype('uint8'), 1)
            except KeyError:
                print(chip + ' missing aquaculture class assignment')
                # write chip name to file for double checking
                continue
| _____no_output_____ | MIT | notebooks/process_planet_scenes.ipynb | tclavelle/aqua_python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.