| Unnamed: 0 | cleaned_code | cleaned_prompt |
|---|---|---|
11,700
|
<ASSISTANT_TASK:>
Python Code:
sent = "Each of us is full of shit in our own special way"
# setup display for demo
%matplotlib inline
import os
os.environ['DISPLAY'] = 'localhost:1'
from stat_parser import Parser
parser = Parser()
parser.parse(sent)
tree = parser.parse(sent) # returns nltk Tree instance
tree
from textblob import TextBlob
blob = TextBlob(sent)
blob.parse()
import nltk
mp = nltk.parse.malt.MaltParser(os.getcwd(),
model_filename="engmalt.linear-1.7.mco")
mp.parse_one(sent.split()).tree()
from pattern.en import parse, pprint
s = parse(sent,
tokenize = True, # Tokenize the input, i.e. split punctuation from words.
tags = True, # Find part-of-speech tags.
chunks = True, # Find chunk tags, e.g. "the black cat" = NP = noun phrase.
relations = True, # Find relations between chunks.
lemmata = True, # Find word lemmata.
light = False)
pprint(s)
from spacy.en import English
parser = English()
parsedData = parser(unicode(sent))
for i, token in enumerate(parsedData):
print("original:", token.orth, token.orth_)
print("lowercased:", token.lower, token.lower_)
print("lemma:", token.lemma, token.lemma_)
print("shape:", token.shape, token.shape_)
print("prefix:", token.prefix, token.prefix_)
print("suffix:", token.suffix, token.suffix_)
print("log probability:", token.prob)
print("Brown cluster id:", token.cluster)
print("----------------------------------------")
if i > 1:
break
from visualize_word_graph import draw_graph
draw_graph("dog")
draw_graph("noise", hypernym=True)
bad_sounds =['The sound in the place is terrible.',
'dining with clatter and the occasional smell of BMW exausts',
'Also, the acoustics are not conducive to having any sort of conversation.']
not_bad_sounds = ["not to sound like a snob",
"at your table and you can tune the sound to whichever game you're interested in",
"oh god I sound old!"]
from pattern.en import parse, pprint
def print_parts(sents):
for sent in sents:
s = parse(sent,
tokenize = True, # Tokenize the input, i.e. split punctuation from words.
tags = True, # Find part-of-speech tags.
chunks = True, # Find chunk tags, e.g. "the black cat" = NP = noun phrase.
relations = True, # Find relations between chunks.
lemmata = True, # Find word lemmata.
light = False)
print sent
pprint(s)
sents = bad_sounds + not_bad_sounds
print_parts(bad_sounds + not_bad_sounds)
from pattern.en import parsetree
from pattern.search import search
for sent in sents:
t = parsetree(sent)
print
print sent
print "Tagged Sent:", t
print "Verbs:", search('VB*', t) # verbs
print "ADJP:", search('ADJP', t) # verbs
print "Nouns:", search('NN', t) # all nouns
from nltk.corpus import wordnet as wn
from pattern.en import parsetree
from pattern.search import taxonomy, WordNetClassifier, search
taxonomy.classifiers.append(WordNetClassifier())
def get_parts(word, pos, recursive=False):
parts = [word, ]
parts += taxonomy.children(word, pos=pos, recursive=recursive)
parts += taxonomy.parents(word, pos=pos, recursive=recursive)
return parts
def word_search(t, word, pos):
parts = get_parts(word, pos)
results = search(pos, t)
for result in results:
# print result.string, parts
if any(x in result.string.split() for x in parts):
return True
return False
def run_a_rule(sent, word, pos):
t = parsetree(sent)
return word_search(t, word, pos)
print "1. 'sound' is a NN"
print run_a_rule(sents[0], 'noise', 'NN')
print "2. clatter is a NN"
print run_a_rule(sents[1], 'noise', 'NN')
print "3. acoustics is NNS and RB Not"
print run_a_rule(sents[2], 'acoustics', 'NNS') and run_a_rule(sents[2], 'not', 'RB')
print "4. sound is a VB"
print run_a_rule(sents[3], 'noise', 'VB*')
print "5. Sounds is JJ"
print run_a_rule(sents[4], 'sound', 'JJ')
print "6. sound is VBP"
print run_a_rule(sents[5], 'noise', 'VB*')
def ext_func(tgt):
return bool(not (run_a_rule(tgt, 'noise', 'VB*') and not run_a_rule(tgt, 'sound', 'JJ'))
and (run_a_rule(tgt, 'noise', 'NN') or run_a_rule(tgt, 'acoustics', 'NNS') or
(run_a_rule(tgt, 'acoustics', 'NNS') and run_a_rule(tgt, 'not', 'RB'))))
print "bad noises in review:"
for sent in bad_sounds:
print "\t" + sent
assert(ext_func(sent) == True)
print
print "no mention of bad noises:"
for sent in not_bad_sounds:
print "\t" + sent
assert(ext_func(sent) == False)
import zipfile
import pickle
from lxml import etree
from StringIO import StringIO
zf = zipfile.ZipFile('nhtsa_as_xml.zip', 'r')
nhtsa_injured = zf.read('nhtsa_injured.xml')
nhtsa_not_injured = zf.read('nhtsa_not_injured.xml')
xml_injured = etree.parse(StringIO(nhtsa_injured))
xml_not_injured = etree.parse(StringIO(nhtsa_not_injured))
def injured(l):
return ['0' != str(x) and 'injured' or 'notinjured' for x in l]
def data(x):
out = [x.xpath("//rows/row/@c1"),
injured(x.xpath("//rows/row/@c8")),
x.xpath("//rows/row/@c2")]
return list(reversed(zip(*out)))
xml_injured_data = data(xml_injured)[:800]
xml_not_injured_data = data(xml_not_injured)[:800]
xml_injured_data[0]
from visualize_word_graph import draw_graph
draw_graph("injury")
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from pattern.search import taxonomy, search
taxonomy.append('dislocated', type='injury')
taxonomy.append('sustained', type='injury')
taxonomy.append('burn', type='injury')
taxonomy.append('injury', type='hurt')
def check_sustained(text):
if len(search('HURT', text)) > 0:
return True
return False
def feats(text):
words = text.replace(".", "").split()
out = dict([(word, True) for word in words])
if 'SUSTAINED' in out:
del out['SUSTAINED']
out['rule(SUSTAINED)'] = check_sustained(text)
return out
negcutoff = len(xml_not_injured_data)*3/4
poscutoff = len(xml_injured_data)*3/4
not_inj_data = xml_not_injured_data[:negcutoff] + xml_injured_data[:poscutoff]
inj_data = xml_not_injured_data[negcutoff:] + xml_injured_data[poscutoff:]
negfeats = [(feats(f[2]), 'not') for f in not_inj_data]
posfeats = [(feats(f[2]), 'injure') for f in inj_data]
negcutoff = len(negfeats)*3/4
poscutoff = len(posfeats)*3/4
trainfeats = negfeats[:negcutoff] + posfeats[:poscutoff]
testfeats = negfeats[negcutoff:] + posfeats[poscutoff:]
print 'train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats))
classifier = NaiveBayesClassifier.train(trainfeats)
print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
classifier.show_most_informative_features(n=100)
classifier.classify(feats("HE SUSTAINED INJURY"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: pyStatParser
Step2: TextBlob
Step3: MaltParser
Step4: Pattern
Step5: spaCy
Step6: <a href="https
Step7: Alice's Yelp Data
Step8: 1. parts of speech for each
Step9: Penn Treebank Project Chunks <a href="tagguide.pdf">guide</a>
Step10: 3. create similar word list (stemming + synsets)
Step11: 3. test
Step12: 4. create a feature extractor function
Step13: Machine Learning Example
|
11,701
|
<ASSISTANT_TASK:>
Python Code:
from threeML import *
import matplotlib.pyplot as plt
%matplotlib inline
%matplotlib notebook
triggerName = 'bn090217206'
ra = 204.9
dec = -8.4
#Data are in the current directory
datadir = os.path.abspath('.')
#Create an instance of the GBM plugin for each detector
#Data files
obsSpectrum = os.path.join( datadir, "bn090217206_n6_srcspectra.pha{1}" )
bakSpectrum = os.path.join( datadir, "bn090217206_n6_bkgspectra.bak{1}" )
rspFile = os.path.join( datadir, "bn090217206_n6_weightedrsp.rsp{1}" )
#Plugin instance
NaI6 = OGIPLike( "NaI6", observation=obsSpectrum, background=bakSpectrum, response=rspFile )
#Choose energies to use (in this case, I exclude the energy
#range from 30 to 40 keV to avoid the k-edge, as well as anything above
#950 keV, where the calibration is uncertain)
NaI6.set_active_measurements( "10.0-30.0", "40.0-950.0" )
NaI6.set_active_measurements?
NaI6
NaI6.display()
NaI6.view_count_spectrum()
NaI6.view_count_spectrum(significance_level=5)
#This declares which data we want to use. In our case, all that we have already created.
data_list = DataList( NaI6 )
powerlaw = Powerlaw()
GRB = PointSource( triggerName, ra, dec, spectral_shape=powerlaw )
model = Model( GRB )
jl = JointLikelihood( model, data_list, verbose=False )
jl.set_minimizer('ROOT')
res = jl.fit()
jl.minimizer
res = jl.get_errors()
res = jl.get_contours(powerlaw.index,-1.3,-1.1,20)
res = jl.get_contours(powerlaw.index,-1.25,-1.1,60,powerlaw.K,1.8,3.4,60)
jl.restore_best_fit()
_=display_spectrum_model_counts(jl)
plot_point_source_spectra(jl.results,flux_unit='erg/(s cm2 keV)')
powerlaw.index.prior = Uniform_prior(lower_bound=-5.0, upper_bound=5.0)
powerlaw.K.prior = Log_uniform_prior(lower_bound=1.0, upper_bound=10)
bayes = BayesianAnalysis(model, data_list)
samples = bayes.sample(n_walkers=50,burn_in=100, n_samples=1000)
fig = bayes.corner_plot()
fig = bayes.corner_plot_cc()
plot_point_source_spectra(bayes.results, flux_unit='erg/(cm2 s keV)',equal_tailed=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple standard analysis
Step2: As we can see, the plugin probes the data to choose the appropriate likelihood for the given observation and background data distribution.
Step3: Signature
Step4: To examine our energy selections, we can view the count spectrum
Step5: Deselected regions are shaded in grey.
Step6: Setup for spectral fitting
Step7: Examining the fitted model
Step8: And to plot the fit in the data space we call the data spectrum plotter
Step9: Or we can examine the fit in model space. Note that we must use the analysis_results of the joint likelihood for the model plotting
Step10: We can go Bayesian too!
|
11,702
|
<ASSISTANT_TASK:>
Python Code:
# This is a sentence
sentence = 'This is a rather long sentence. I want to find the number of words with two letters'
# This is the code you need to find the number of words of length 2 (e.g., is, to, and of)
words = sentence.split(' ') # Split the sentence string into a list of words, the space between the
# quotation marks means that the sentence is
# split at every space character.
# Using sentence.split(',') would instead divide the sentence at commas.
print(words)
# Now count the number of words of length 2
nWords_len2 = 0 # We need this variable for word counting
for word in words: # Go through all the words in the list, one by one
if len(word) == 2: # If the length of the word is equal to 2
nWords_len2 += 1 # This means increasing the value of nWords_len2 by +1.
# Same as nWords_len2 = nWords_len2 + 1
print(nWords_len2) # There are three words with two letters (is, to, of)
def find_len2_words(input_text):
'''Returns the number of words containing two letters
Args:
input_text (str): input text
Returns:
nWords (int): number of words
''' # This is called a doc-string and is shown as help when you try to use the function
words = input_text.split(' ')
nWords = 0
for word in words:
if len(word) == 2:
nWords += 1
return nWords
nWords_with_two_letters = find_len2_words(sentence)
print(nWords_with_two_letters)
def find_lenn_words(input_text, n=2):
'''Returns the number of words containing n letters.
If no input n is given, n = 2.
Args:
input_text (str): input text.
n (int, optional): Defaults to 2.
Returns:
nWords (int): number of words.
'''
words = input_text.split(' ')
nWords = 0
for word in words:
if len(word) == n:
nWords += 1
return nWords
# Test the function
my_input_text = sentence # borrow the sentence defined above as input
nWords = find_lenn_words(my_input_text) # What happens if I don't specify the length n?
nWords2 = find_lenn_words(my_input_text, n = 2)
nWords6 = find_lenn_words(my_input_text, n = 6)
# Print the results
print(nWords, nWords2, nWords6)
variable_definded_outside_function = 12
def myfun1(a, b):
''' Adds two number and returns the result
Args:
a (int)
b (int)
Returns:
c (int)
'''
c = a + b
# Test to print a variable defined outside of the function
print(variable_definded_outside_function) # This works! The function can 'see'
# variables defined outside of the function
return c
# Try the function
a1 = 5
a2 = 4
myres = myfun1(a1, a2)
print(myres)
print(a) # a (and b and c) only exist inside the function (local scope), so trying to print a here raises a NameError.
x = 5 # Define a global variable x = 5
def return_global():
'''Returns global variable'''
return x
def modify_global():
'''Modifies global variable. '''
# A global variable can only be modified if it is defined as global within the function
global x
x = 'global'
return x
def create_local():
'''Creates a local variable x'''
# The local variable 'x' knows nothing about the global variable 'x'.
x = 'local'
return x
# Test the functions
y0 = return_global() # Returns 5
y1 = modify_global() # Returns 'global'
y2 = create_local() # Returns 'local'
y3 = return_global() # Now returns 'global', why?
print(y0, y1, y2, y3)
import matplotlib.pyplot as plt # a library for plotting
import pandas as pd # a library for reading csv files
import numpy as np # A library to work with numbers
# Read data from a csv file and plot them. Data are in a numpy array.
# The data are stored in a folder 'img' in the
# same directory as the script containing this code.
eye_velocity = np.array(pd.read_csv("img/eye_velocity.csv")).flatten()
# Plot the data and annotate the plot
plt.plot(eye_velocity)
plt.xlabel('Time (ms)')
plt.ylabel('Velocity (deg/s)')
plt.show()
# Zoom in on a smaller part of the data
eye_velocity_short = eye_velocity[100:800]
plt.plot(eye_velocity_short)
plt.xlabel('Time (ms)')
plt.ylabel('Velocity (deg/s)')
# Indicate fixations with interval shaded
plt.axvspan(90, 200, color='y', alpha=0.5, lw=0)
plt.axvspan(270, 360, color='y', alpha=0.5, lw=0)
plt.axvspan(415, 700, color='y', alpha=0.5, lw=0)
# Show the results
plt.show()
# Fixations are located where velocity < 30 deg/s
threshold = 30
fixation_samples = (eye_velocity_short < threshold) * 1 # *1 to convert True / False to 1 / 0
plt.plot(fixation_samples)
plt.show()
import numpy as np # Now we need numpy, so let's import it
plt.plot(np.diff(fixation_samples)) # diff means to take the difference between two consecutive
# samples i - (i-1). [0, 1, 0] -> [1, -1]
plt.show()
fixation_samples_0 = np.hstack((0, fixation_samples, 0))
plt.plot(np.diff(fixation_samples_0)) # diff means to take the difference between two consecutive
# samples i - (i-1). [0, 1, 0] -> [1, -1]
plt.show()
# Find the fixation onsets
fix_onsets = np.where(np.diff(fixation_samples_0) == 1)
print(fix_onsets)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imagine this is a task we do every day. Wouldn't it be nice to have a way to perform it without re-typing all this code every time? Something like
Step2: The function (and all functions) starts with def, followed by the name of the function and the input variables within parentheses. In this case, there is one input variable, but there could be two or more.
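As a quick aside (an illustration only, not a cell from the original notebook), a function with two input variables follows exactly the same pattern; the name count_words_of_length is made up for this example:
def count_words_of_length(input_text, n):
    '''Return the number of words in input_text that have exactly n letters.'''
    return sum(1 for word in input_text.split(' ') if len(word) == n)
print(count_words_of_length('This is a rather long sentence', 2))  # prints 1 ('is')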
Step3: We completed the task in just one line of code and got the same result! The function can easily be re-used and even shared with colleagues in your community.
Step4: Local and global variables
Step5: What if I try to print the local variable 'a' outside of the function?
Step6: Consider the code below and try to understand why the output looks the way it does.
Step7: Introduction to Lab 2
Step8: Now we have simplified the problem. Let's find the onsets (positions where the plot changes from 0->1) and offsets (positions where the plot changes from 1->0) of the fixations.
Step9: One problem remains
Step10: Now the problem is made even simpler. To find the number of fixations, we need to find the number of 1s (representing the onsets) or the number of -1s (representing the offsets). The duration of a fixation can be computed by taking the difference between offset and onset locations. To find out where in an array something happens, the 'where' function in numpy can be used. Note that the output is a tuple!
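To make the np.where idea concrete, here is a small sketch (illustrative only, using a made-up 0/1 trace) of counting fixations and computing their durations:
import numpy as np
fixation_samples_0 = np.array([0, 0, 1, 1, 1, 0, 1, 1, 0])  # toy trace, already padded with zeros
d = np.diff(fixation_samples_0)
onsets = np.where(d == 1)[0]    # np.where returns a tuple; take its first element
offsets = np.where(d == -1)[0]
print(len(onsets))              # number of fixations: 2
print(offsets - onsets)         # durations in samples: [3 2]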
|
11,703
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
from numpy import size
def Planckfunc_cgs(freq, temperature):
"""Calculate Planck function.
Inputs:
freq: frequency, in Hz
temperature: temperature in Kelvin
Return:
Intensity: in cgs unit ( erg s^-1 sr^-1 cm^-2 Hz-1 )
"""
# defining physical constants
c_cgs = 29979245800.0 # light speed
h_cgs = 6.62606885e-27 # planck constant
kB_cgs = 1.38064852e-16 # Boltzmann constant
inputsize = size(freq)
if (inputsize ==1):
A = ( 2.0 * h_cgs * (freq**3.0) ) / ( c_cgs ** 2.0 )
B = math.exp( (h_cgs * freq) / (kB_cgs * temperature) )
return A * ( 1.0 / (B - 1.0) )
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
A = ( 2.0 * h_cgs * (freq[id]**3.0) ) / ( c_cgs ** 2.0 )
B = math.exp( (h_cgs * freq[id]) / (kB_cgs * temperature) )
out_array[id] = A * ( 1.0 / (B - 1.0) )
return out_array
def Planckfunc_mks(freq, temperature):
"""Calculate Planck function.
Inputs:
freq: frequency, in Hz
temperature: temperature in Kelvin
Return:
Intensity: in mks unit ( J s^-1 sr^-1 m^-2 Hz-1 )
"""
# defining physical constants
c_mks = 299792458.0 # light speed
h_mks = 6.62607004e-34 # planck constant
kB_mks = 1.38064852e-23 # Boltzmann constant
inputsize = size(freq)
if (inputsize ==1):
A = ( 2.0 * h_mks * (freq**3.0) ) / ( c_mks ** 2.0 )
B = math.exp( (h_mks * freq) / (kB_mks * temperature) )
return A * ( 1.0 / (B - 1.0) )
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
A = ( 2.0 * h_mks * (freq[id]**3.0) ) / ( c_mks ** 2.0 )
B = math.exp( (h_mks * freq[id]) / (kB_mks * temperature) )
out_array[id] = A * ( 1.0 / (B - 1.0) )
return out_array
# free-free emission
def emission_measure(ne, ell):
"""Estimate emission measure, in unit of pc cm^-6
Inputs:
ne: electron number volume-density, in unit of cm^-3
ell: line-of-sight thickness of emission region, in unit of pc
Return:
emission measure (EM), in unit of pc cm^-6
"""
emission_measure = math.pow( ne, 2.0 ) * ell
return emission_measure
def tauff_Mezger67(freq, Te, EM):
"""Calculate electron optical depth for free-free emission,
following the prescription of Mezger & Henderson (1967) and
Keto et al. (2003).
Inputs:
freq: frequency / frequencies, in Hz
Te : electron temperature in Kelvin
EM : emission measure in pc cm^-6
Return:
optical depth (dimension free)
"""
inputsize = size(freq)
if (inputsize ==1):
tauff = 8.235e-2 * math.pow( Te, -1.35 ) * math.pow( freq/1e9, -2.1 ) * EM
return tauff
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
out_array[id] = 8.235e-2 * \
math.pow( Te, -1.35 )* \
math.pow( freq[id]/1e9, -2.1 ) * \
EM
return out_array
# Simplified dust
def dustkappa_cgs(freq, rep_freq, opacity_at_repfreq, opacity_index):
"""Calculate dust opacity at the specified frequency
Inputs:
freq: frequency / frequencies, in Hz
rep_freq: a frequency at which the dust opacity is specified, in Hz
opacity_at_repfreq: opacity at the specified representative frequency, in cm^2 g^-1
opacity_index: dust opacity spectral index (dimension free)
Return:
dust opacity, in units of cm^2 g^-1
"""
inputsize = size(freq)
if (inputsize ==1):
opacity = opacity_at_repfreq * math.pow( (freq / rep_freq ) , opacity_index)
return opacity
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
out_array[id] = opacity_at_repfreq * math.pow( (freq[id] / rep_freq ) , opacity_index)
return out_array
# Modified black body flux
def blackbody_Fnu_cgs(freq, temperature, tau, Omega):
"""Evaluate flux of black body emission, in cgs unit.
Inputs:
freq : frequency / frequencies, in Hz
temperature : temperature, in Kelvin
tau : optical depth / depths, dimensionless
Omega : solid angle, in Sr
Return:
flux in cgs unit
"""
inputsize = size(freq)
if (inputsize ==1):
flux = Planckfunc_cgs(freq, temperature) * \
(1.0 - math.exp(-1.0 * tau) )* \
Omega
return flux
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
out_array[id] = Planckfunc_cgs(freq[id], temperature) * \
(1.0 - math.exp(-1.0 * tau[id]) )* \
Omega
return out_array
print ('electron optical depth: ', tauff_Mezger67(33.0*1e9, 8000.0, 1.0e9), end='\n' )
print ('dust opacity: ', dustkappa_cgs(33.0*1e9, 230.0*1e9, 1.0 ,1.75), end=' cm$^{2}$ g$^{-1}$' )
def freefall_cgs(density):
"""Calculate free-fall timescale.
Input:
density: density, in g cm^-3
Return:
Free fall time ( seconds )
"""
# defining physical constants
G_cgs = 6.674e-8
inputsize = size(density)
if (inputsize ==1):
A = 3.0 * math.pi
B = 32.0 * G_cgs * density
time = math.sqrt( A / B )
return time
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
A = 3.0 * math.pi
B = 32.0 * G_cgs * density[id]
out_array[id] = math.sqrt( A / B )
return out_array
def Jeanslength_cgs(density, temperature, particlemass):
"""Calculate Jeans Length.
Inputs:
density: density, in g cm^-3
temperature: temperature in Kelvin
particlemass: in g, to be used for calculating sound speed
Return:
Jeans length in cgs unit ( cm )
"""
# defining physical constants
kB_cgs = 1.38064852e-16 # Boltzmann constant
G_cgs = 6.674e-8
inputsize = size(density)
if (inputsize ==1):
A = 15.0 * kB_cgs * temperature
B = 4.0 * math.pi * G_cgs * density * particlemass
length = math.sqrt( A / B )
return length
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
A = 15.0 * kB_cgs * temperature
B = 4.0 * math.pi * G_cgs * density[id] * particlemass
length = math.sqrt( A / B )
out_array[id] = length
return out_array
def Jeansmass_cgs(density, temperature, particlemass):
"""Calculate Jeans mass.
Inputs:
density: density, in g cm^-3
temperature: temperature in Kelvin
particlemass: in g, to be used for calculating sound speed
Return:
Jeans mass in cgs unit ( g )
"""
inputsize = size(density)
if (inputsize ==1):
mass = (4.0 / 3.0) * math.pi \
* ( Jeanslength_cgs(density, temperature, particlemass) **3 ) \
* density
return mass
else:
out_array = np.arange(0, inputsize) * 0.0
for id in list(range(0,inputsize)):
out_array[id] = (4.0 / 3.0) * math.pi \
* ( Jeanslength_cgs(density[id], temperature, particlemass) **3 ) \
* density[id]
return out_array
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
freq_array = np.arange(1, 2501) * 1e10 # frequency in Hz
output_array_mks = np.arange(1, 2501) * 0.0
# physical constants
cgsflux_to_Jy = 1e23
mksflux_to_Jy = 1e26
str_to_sqdegree = 3282.80635
str_to_sqarcsecond = 3282.80635 * (3600.0 ** 2.0)
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
plt.axis([0.5, 4.9, -5, 5.2])
# plt.axis([2.0, 3.0, -1, 1])
# evaluate Planck function in CGS unit
temperature = 15.0 # Kelvin
output_array_cgs = Planckfunc_cgs(freq_array, temperature) \
* cgsflux_to_Jy / str_to_sqarcsecond
plt.plot(np.log10( freq_array / 1e9) , np.log10(output_array_cgs), \
color = (0, 0, 1.0, 0.2),
linewidth=8, label = '15 K')
# evaluate Planck function in MKS unit
TCMB = 2.72548 # Kelvin
for id in list(range(0, 2500)):
output_array_mks[id] = Planckfunc_mks(freq_array[id], TCMB) * \
mksflux_to_Jy / str_to_sqarcsecond
plt.plot(np.log10( freq_array / 1e9), np.log10(output_array_mks), \
linestyle = 'dashed',
color = (0.2, 0.6, 0, 0.4),
linewidth =5, label = '$T_{CMB}$')
plt.plot(np.log10( freq_array / 1e9), np.log10(output_array_cgs - output_array_mks), \
linestyle = 'dashed',
color = (1.0, 0, 0, 0.4),
linewidth =5, label = '15 K - $T_{CMB}$')
# evaluate Planck function in CGS unit
temperature = 30.0 # Kelvin
output_array_cgs = Planckfunc_cgs(freq_array, temperature) \
* cgsflux_to_Jy / str_to_sqarcsecond
plt.plot(np.log10( freq_array / 1e9) , np.log10(output_array_cgs), \
color = (0, 0, 1.0, 0.4),
linewidth=8, label = '30 K')
# evaluate Planck function in CGS unit
temperature = 100.0 # Kelvin
output_array_cgs = Planckfunc_cgs(freq_array, temperature) \
* cgsflux_to_Jy / str_to_sqarcsecond
plt.plot(np.log10( freq_array / 1e9) , np.log10(output_array_cgs), \
color = (0, 0, 1.0, 0.6),
linewidth=8, label = '100 K')
# evaluate Planck function in CGS unit
temperature = 300.0 # Kelvin
output_array_cgs = Planckfunc_cgs(freq_array, temperature) \
* cgsflux_to_Jy / str_to_sqarcsecond
plt.plot(np.log10( freq_array / 1e9) , np.log10(output_array_cgs), \
color = (0, 0, 1.0, 0.8),
linewidth=8, label = '300 K')
plt.title('Planck Function')
plt.xlabel('Log$_{10}$(Frequency [GHz])')
plt.ylabel('Log$_{10}$(Intensity [Jy arcsecond$^{-2}$])')
plt.legend(loc=2)
# initializing arrays
freq = np.arange(1, 250) * 1e9 # frequency in Hz # Hz
# initial condition
ell = 1.0 / 2.0626e5 # parsec
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
# evaluate electron (free-free) optical depth
Te = 8000.0 # Kelvin
ne = 1e4 # cm^-3
EM = emission_measure( ne, ell )
output_array = tauff_Mezger67(freq, Te, EM)
label = 'T$_{e}$=8000 K, N$_{e}$=%e' % ( round(ne,0) )
plt.plot(np.log10( freq / 1e9 ), np.log10(output_array), \
color = (0.1, 0.0, 0.4, 0.2),
linewidth=8, label = label)
Te = 8000.0 # Kelvin
ne = 1e5 # cm^-3
EM = emission_measure( ne, ell )
output_array = tauff_Mezger67(freq, Te, EM)
label = 'T$_{e}$=8000 K, N$_{e}$=%e' % ( round(ne,0) )
plt.plot(np.log10( freq / 1e9 ), np.log10(output_array), \
color = (0.1, 0.0, 0.4, 0.4),
linewidth=8, label = label)
Te = 8000.0 # Kelvin
ne = 1e6 # cm^-3
EM = emission_measure( ne, ell )
output_array = tauff_Mezger67(freq, Te, EM)
label = 'T$_{e}$=8000 K, N$_{e}$=%e' % ( round(ne,0) )
plt.plot(np.log10( freq / 1e9 ), np.log10(output_array), \
color = (0.1, 0.0, 0.4, 0.6),
linewidth=8, label = label)
plt.title('Electron (free-free optical depth) for 1 AU scale HII region')
plt.xlabel('Log$_{10}$(Frequency [GHz])')
plt.ylabel('Log$_{10}$(Optical depth)')
plt.legend(loc=1)
# initializing arrays
freq1 = np.arange(1, 50000) * 1e9 # frequency in Hz # Hz
freq2 = np.arange(50000, 150000, 1000) * 1e9
freq = np.concatenate((freq1, freq2), axis=0)
# physical constants
c_mks = 299792458.0 # light speed in m/s
cgsflux_to_Jy = 1e23
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
plt.axis([0.5, 2e5, 3e-5, 1e5])
plt.xscale('log')
plt.yscale('log')
# FU Ori
Te_FUOri = 16000.0 # Kelvin
EM_FUOri = 6.98e9
Omega_ff_FUOri = 1.41e-16 # solid angle
FUOri_ff_flux = blackbody_Fnu_cgs(freq, Te_FUOri, tauff_Mezger67(freq, Te_FUOri, EM_FUOri), Omega_ff_FUOri)
plt.plot( ( freq / 1e9 ), (FUOri_ff_flux * cgsflux_to_Jy * 1e3), \
color = (0.1, 0.0, 1, 0.1),
linestyle = 'dashed',
linewidth=8, label = 'Free-free emission')
T_HID_FUOri = 300.0
kappa230Sigma_FUOri = 103.0
Omega_HID_FUOri = 1.38e-14
betaHID_FUOri = 1.75 # dust opacity spectral index
tauHID_FUOri = dustkappa_cgs(freq, 230.0e9, kappa230Sigma_FUOri, betaHID_FUOri)
FUOri_HID_flux = blackbody_Fnu_cgs(freq, T_HID_FUOri, \
tauHID_FUOri, Omega_HID_FUOri)
plt.plot( ( freq / 1e9 ), (FUOri_HID_flux * cgsflux_to_Jy * 1e3), \
color = (0, 0.7, 0.7, 0.1),
linestyle = 'dashdot',
linewidth=8, label = 'Dust emission from HID')
T_disk_FUOri = 60.0
kappa230Sigma_FUOri = 2.06e-2
Omega_disk_FUOri = 3.88e-12
betadisk_FUOri = 1.75 # dust opacity spectral index
taudisk_FUOri = dustkappa_cgs(freq, 230.0e9, kappa230Sigma_FUOri, betadisk_FUOri)
FUOri_disk_flux = blackbody_Fnu_cgs(freq, T_disk_FUOri, \
taudisk_FUOri, Omega_disk_FUOri)
plt.plot( ( freq / 1e9 ), (FUOri_disk_flux * cgsflux_to_Jy * 1e3), \
color = (0.9, 0.05, 0.05, 0.1),
linestyle = 'dotted',
linewidth=8, label = 'Dust emission from extended disk')
# plot summed model
plt.plot( ( freq / 1e9 ), \
( (FUOri_disk_flux + FUOri_HID_flux + FUOri_ff_flux) * cgsflux_to_Jy * 1e3), \
color = (0.1, 0.1, 0.1, 0.5),
linewidth=2, label = 'Summed emission')
# plot observed data
fuori_jvla_freq = np.array([33.48707, 34.51107, 35.48707, 36.51107, 29.42306, 30.51106, 31.48706, 32.51107])
fuori_jvla_freq = fuori_jvla_freq * 1e9
fuori_jvla_mJy = np.array([205.0, 181.0, 199.0, 215.0, 167.0, 137.0, 165.0, 173.0]) * 1e-3
plt.plot( (fuori_jvla_freq / 1e9) , (fuori_jvla_mJy),
'o',
color = (0, 0, 0.9, 0.9))
fuori_alma_freq = np.array([345.784])
fuori_alma_freq = fuori_alma_freq * 1e9
fuori_alma_mJy = np.array([50.1])
plt.plot( (fuori_alma_freq / 1e9) , (fuori_alma_mJy),
'o',
color = (0, 0, 0.9, 0.9))
plt.title('Flux model for FU Ori')
plt.xlabel('Frequency [GHz]')
plt.ylabel('Flux [mJy]')
plt.legend(loc=2)
# FU Ori S
Te_FUOriS = 16000.0 # Kelvin
EM_FUOriS = 4.85e9
Omega_ff_FUOriS = 1.94e-16 # solid angle
# initializing plotting
fig2 = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 2)
plt.axis([0.5, 2e5, 3e-5, 1e5])
plt.xscale('log')
plt.yscale('log')
FUOriS_ff_flux = blackbody_Fnu_cgs(freq, Te_FUOriS, tauff_Mezger67(freq, Te_FUOriS, EM_FUOriS), Omega_ff_FUOriS)
plt.plot( ( freq / 1e9 ), (FUOriS_ff_flux * cgsflux_to_Jy * 1e3), \
color = (0.1, 0.0, 1, 0.1),
linestyle = 'dashed',
linewidth=8, label = 'Free-free emission')
T_HID_FUOriS = 360.0
kappa230Sigma_FUOriS = 32.0
Omega_HID_FUOriS = 5.19e-15
betaHID_FUOriS = 1.75 # dust opacity spectral index
tauHID_FUOriS = dustkappa_cgs(freq, 230.0e9, kappa230Sigma_FUOriS, betaHID_FUOriS)
FUOriS_HID_flux = blackbody_Fnu_cgs(freq, T_HID_FUOriS, \
tauHID_FUOriS, Omega_HID_FUOriS)
plt.plot( ( freq / 1e9 ), (FUOriS_HID_flux * cgsflux_to_Jy * 1e3), \
color = (0, 0.7, 0.7, 0.1),
linestyle = 'dashdot',
linewidth=8, label = 'Dust emission from HID')
T_disk_FUOriS = 60.0
kappa230Sigma_FUOriS = 3.87e-2
Omega_disk_FUOriS = 1.04e-12
betadisk_FUOriS = 1.75 # dust opacity spectral index
taudisk_FUOriS = dustkappa_cgs(freq, 230.0e9, kappa230Sigma_FUOriS, betadisk_FUOriS)
FUOriS_disk_flux = blackbody_Fnu_cgs(freq, T_disk_FUOriS, \
taudisk_FUOriS, Omega_disk_FUOriS)
plt.plot( ( freq / 1e9 ), (FUOriS_disk_flux * cgsflux_to_Jy * 1e3), \
color = (0.9, 0.05, 0.05, 0.1),
linestyle = 'dotted',
linewidth=8, label = 'Dust emission from extended disk')
# plot summed model
plt.plot( ( freq / 1e9 ), \
( (FUOriS_disk_flux + FUOriS_HID_flux + FUOriS_ff_flux) * cgsflux_to_Jy * 1e3), \
color = (0.1, 0.1, 0.1, 0.5),
linewidth=2, label = 'Summed emission')
# plot observed data
fuoriS_jvla_freq = np.array([33.48707, 34.51107, 35.48707, 36.51107, 29.42306, 30.51106, 31.48706, 32.51107])
fuoriS_jvla_freq = fuoriS_jvla_freq * 1e9
fuoriS_jvla_mJy = np.array([51.7, 104.0, 110.0, 94.0, 78.0, 81.0, 65.0, 88.0]) * 1e-3
plt.plot( (fuoriS_jvla_freq / 1e9) , (fuoriS_jvla_mJy),
'o',
color = (0, 0, 0.9, 0.9))
fuoriS_alma_freq = np.array([345.784])
fuoriS_alma_freq = fuoriS_alma_freq * 1e9
fuoriS_alma_mJy = np.array([21.2])
plt.plot( (fuoriS_alma_freq / 1e9) , (fuoriS_alma_mJy),
'o',
color = (0, 0, 0.9, 0.9))
plt.title('Flux model for FU Ori S')
plt.xlabel('Frequency [GHz]')
plt.ylabel('Flux [mJy]')
plt.legend(loc=2)
# Plot summed SED model for FU Ori and FU Ori S
fig3 = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 2)
plt.axis([0.5, 2e5, 3e-5, 1e5])
plt.xscale('log')
plt.yscale('log')
# plot measurements
sma_freq = np.array([223.7759, 260.3860, 271.2455, 271.7524, 274.3923]) * 1e9
sma_mJy = np.array([17.47, 39.4, 42.5, 42.9, 39.3])
plt.plot( (sma_freq / 1e9) , sma_mJy,
'o',
color = (0, 0, 0.9, 0.9))
# reading Herschel pacs data
pacsfile = 'fuori_pacs_v65_trim.txt'
wavelength_micron = np.loadtxt(pacsfile,
comments='#',
skiprows=0,
usecols=0)
pacs_Jy = np.loadtxt(pacsfile,
comments='#',
skiprows=0,
usecols=1)
pacsfreq = c_mks / ( wavelength_micron * 1e-6 )
plt.plot( (pacsfreq / 1e9) , pacs_Jy * 1e3,
'o',
color = (0, 0, 0.9, 0.9))
# reading Herschel spire data
spirefile = 'fuori_spire_corrected_trim.txt'
wavelength_micron = np.loadtxt(spirefile,
comments='#',
skiprows=0,
usecols=0)
spire_Jy = np.loadtxt(spirefile,
comments='#',
skiprows=0,
usecols=1)
spirefreq = c_mks / ( wavelength_micron * 1e-6 )
plt.plot( (spirefreq / 1e9) , spire_Jy * 1e3,
'o',
color = (0, 0, 0.9, 0.9))
# plot model
plt.plot( ( freq / 1e9 ), \
(FUOri_disk_flux + FUOri_HID_flux + FUOri_ff_flux) \
* cgsflux_to_Jy * 1e3, \
linestyle = 'dashed',
color = (0.1, 0.1, 0.1, 0.2), \
linewidth=4, label = 'FU Ori')
plt.plot( ( freq / 1e9 ), \
(FUOriS_disk_flux + FUOriS_HID_flux + FUOriS_ff_flux) \
* cgsflux_to_Jy * 1e3, \
linestyle = 'dotted',
color = (0.1, 0.1, 0.1, 0.2), \
linewidth=4, label = 'FU Ori S')
plt.plot( ( freq / 1e9 ), \
(FUOri_disk_flux + FUOri_HID_flux + FUOri_ff_flux + \
FUOriS_disk_flux + FUOriS_HID_flux + FUOriS_ff_flux) \
* cgsflux_to_Jy * 1e3, \
color = (0.1, 0.1, 0.1, 0.6), \
linewidth=2, label = 'Summed SED of FU Ori and FU Ori S')
plt.title('Flux model for FU Ori and FU Ori S summed')
plt.xlabel('Frequency [GHz]')
plt.ylabel('Flux [mJy]')
plt.legend(loc=2)
# physical constants
mean_mol_weight = 2.76 # mean molecular weight
mole = 6.02214129e23
year_to_s = 365.0 * 24.0 * 60.0 * 60.0
# initializing arrays
number_density = np.arange(1, 10001, 1) * 1e3
# output_array_cgs = np.arange(1, 10001, 1) * 0.0
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
# plt.axis([4.0, 6.5, 0.01, 0.2])
# evaluate free-fall time
density = number_density * mean_mol_weight / mole
output_array_cgs = freefall_cgs(density) / ( year_to_s * 1e5)
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0.5, 0.0, 0.0, 0.2),
linewidth=8, label = 'Mean molecular weight: 2.76')
plt.title('Free-fall time')
plt.xlabel('Log$_{10}$(Molecular gas number density [cm$^{-3}$])')
plt.ylabel('Time [10$^{5}$ year]')
plt.legend(loc=1)
# physical constants
mean_mol_weight = 2.76 # mean molecular weight
mole = 6.02214129e23
parsec_to_cm = 3.08567758e18
# initializing arrays
number_density = np.arange(1, 10001, 1) * 1e3
output_array_cgs = np.arange(1, 10001, 1) * 0.0
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
plt.axis([4.0, 6.5, 0.01, 0.2])
# initial conditions
particlemass = mean_mol_weight / mole
density = number_density * mean_mol_weight / mole
temperature = 10.0
output_array_cgs = Jeanslength_cgs(density, temperature, particlemass) / parsec_to_cm
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0, 1.0, 0.2),
linewidth=8, label = '10 K')
temperature = 20.0
output_array_cgs = Jeanslength_cgs(density, temperature, particlemass) / parsec_to_cm
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0.5, 0.5, 0.2),
linewidth=8, label = '20 K')
temperature = 30.0
output_array_cgs = Jeanslength_cgs(density, temperature, particlemass) / parsec_to_cm
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0.5, 0.0, 0.2),
linewidth=8, label = '30 K')
temperature = 40.0
output_array_cgs = Jeanslength_cgs(density, temperature, particlemass) / parsec_to_cm
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0.5, 0.5, 0.0, 0.2),
linewidth=8, label = '40 K')
temperature = 50.0
output_array_cgs = Jeanslength_cgs(density, temperature, particlemass) / parsec_to_cm
plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0.5, 0.0, 0.0, 0.2),
linewidth=8, label = '50 K')
plt.title('Jeans length')
plt.xlabel('Log$_{10}$(Molecular gas number density [cm$^{-3}$])')
plt.ylabel('Jeans length [pc]')
plt.legend(loc=1)
# physical constants
mean_mol_weight = 2.76 # mean molecular weight
mole = 6.02214129e23
parsec_to_cm = 3.08567758e18
solar_mass_cgs = 1.9891e33
# initializing arrays
number_density = np.arange(1, 10001, 1) * 1e3
output_array_cgs = np.arange(1, 10001, 1) * 0.0
# initializing plotting
fig = plt.figure(figsize=(9, 14))
plt.subplot(2, 1, 1)
plt.axis([4.0, 6.5, 0.01, 15])
# initial conditions
particlemass = mean_mol_weight / mole
density = number_density * mean_mol_weight / mole
temperature = 10.0
output_array_cgs = Jeansmass_cgs(density, temperature, particlemass) / solar_mass_cgs
line10K = plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0, 1.0, 0.2), \
linewidth=8, \
label = '10 K')
temperature = 20.0
output_array_cgs = Jeansmass_cgs(density, temperature, particlemass) / solar_mass_cgs
line20K = plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0.5, 0.5, 0.2), \
linewidth=8, \
label = '20 K')
temperature = 30.0
output_array_cgs = Jeansmass_cgs(density, temperature, particlemass) / solar_mass_cgs
line30K = plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0, 0.5, 0.0, 0.2), \
linewidth=8, \
label = '30 K')
temperature = 40.0
output_array_cgs = Jeansmass_cgs(density, temperature, particlemass) / solar_mass_cgs
line40K = plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0.5, 0.5, 0.0, 0.2), \
linewidth = 8, \
label = '40 K')
temperature = 50.0
output_array_cgs = Jeansmass_cgs(density, temperature, particlemass) / solar_mass_cgs
line50K = plt.plot(np.log10( number_density ), output_array_cgs, \
color = (0.5, 0.0, 0.0, 0.2), \
linewidth = 8, \
label = '50 K')
plt.title('Jeans mass')
plt.xlabel('Log$_{10}$(Molecular gas number density [cm$^{-3}$])')
plt.ylabel('Jeans mass [$M_{\odot}$]')
plt.legend(loc=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Defining function
Step8: 2. Opacity
Step10: Motions
Step13: 2. Jeans Length and Jeans mass
Step14: 3. Toomre Q parameter
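The Toomre Q step has no corresponding cell in this excerpt; purely as an illustration (not the notebook's own code), the standard definition Q = c_s * kappa / (pi * G * Sigma) could be sketched in the same cgs style as the functions above:
import math
def toomreQ_cgs(sound_speed, epicyclic_freq, surface_density):
    '''Toomre Q for a gas disk; all inputs in cgs units.'''
    G_cgs = 6.674e-8  # gravitational constant, cm^3 g^-1 s^-2
    return (sound_speed * epicyclic_freq) / (math.pi * G_cgs * surface_density)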
Step15: Plot Planck function in cgs and mks unit
Step16: Plot electron (free-free) optical depth as a function of emission measure
Step17: Plotting the measurements for FU Ori as a practical example. Reference: Liu, H. B. et al. (2017) [arXiv
Step18: Plot free-fall time as a function of cloud particle number density
Step19: Plot Jeans length as a function of cloud particle number density
Step20: Plot Jeans mass as a function of cloud particle number density
|
11,704
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import matplotlib
from scipy import optimize
import astropy.io.fits
matplotlib.rcParams.update({'font.size': 18})
matplotlib.rcParams.update({'figure.figsize': [12,8]} )
noise_image = "../data/fits/noise_image.fits"
with astropy.io.fits.open(noise_image) as hdu:
data = hdu[0].data[0,0,...]
fig, (image, hist) = plt.subplots(1, 2, figsize=(18,6))
histogram, bins = np.histogram(data.flatten(), bins=401)
dmin = data.min()
dmax = data.max()
x = np.linspace(dmin, dmax, 401)
im = image.imshow(data)
mean = data.mean()
sigma = data.std()
peak = histogram.max()
gauss = lambda x, amp, mean, sigma: amp*np.exp( -(x-mean)**2/(2*sigma**2))
fitdata = gauss(x, peak, mean, sigma)
plt.plot(x, fitdata)
plt.plot(x, histogram, "o")
plt.yscale('log')
plt.ylim(1)
noise_image = "../data/fits/star_model_image.fits"
with astropy.io.fits.open(noise_image) as hdu:
data = hdu[0].data[0,0,...]
fig, (image, hist) = plt.subplots(1, 2, figsize=(18,6))
histogram, bins = np.histogram(data.flatten(), bins=101)
dmin = data.min()
dmax = data.max()
x = np.linspace(dmin, dmax, 101)
im = image.imshow(data)
mean = data.mean()
sigma_std = data.std()
peak = histogram.max()
gauss = lambda x, amp, mean, sigma: amp*np.exp( -(x-mean)**2/(2*sigma**2))
fitdata_std = gauss(x, peak, mean, sigma_std)
plt.plot(x, fitdata_std, label="STD DEV")
plt.plot(x, histogram, "o", label="Data")
plt.legend(loc=1)
plt.yscale('log')
plt.ylim(1)
mean = data.mean()
sigma_std = data.std()
sigma_neg = data[data<0].std() * 2
mad_mean = lambda a: np.mean( abs(a - np.mean(a) ))
sigma_mad_median = np.median( abs(data - np.median(data) ))
mad_mean = lambda a: np.mean( abs(a - np.mean(a) ))
sigma_mad_mean = mad_mean(data)
peak = histogram.max()
gauss = lambda x, amp, mean, sigma: amp*np.exp( -(x-mean)**2/(2*sigma**2))
fitdata_std = gauss(x, peak, mean, sigma_std)
fitdata_mad_median = gauss(x, peak, mean, sigma_mad_median)
fitdata_mad_mean = gauss(x, peak, mean, sigma_mad_mean)
fitdata_neg = gauss(x, peak, mean, sigma_neg)
plt.plot(x, fitdata_std, label="STD DEV")
plt.plot(x, fitdata_mad_median, label="MAD Median")
plt.plot(x, fitdata_mad_mean, label="MAD Mean")
plt.plot(x, fitdata_neg, label="Negative STD DEV")
plt.plot(x, histogram, "o", label="Data")
plt.legend(loc=1)
plt.yscale('log')
plt.ylim(1)
def gauss2D(x, y, amp, mean_x, mean_y, sigma_x, sigma_y):
"""Generate a 2D Gaussian image"""
gx = -(x - mean_x)**2/(2*sigma_x**2)
gy = -(y - mean_y)**2/(2*sigma_y**2)
return amp * np.exp( gx + gy)
def err(p, xx, yy, data):
"""2D Gaussian error function"""
return gauss2D(xx.flatten(), yy.flatten(), *p) - data.flatten()
def fit_gaussian(data, psf_pix):
"""Fit a gaussian to a 2D data set"""
width = data.shape[0]
mean_x, mean_y = width/2, width/2
amp = data.max()
sigma_x, sigma_y = psf_pix, psf_pix
params0 = amp, mean_x, mean_y, sigma_x,sigma_y
npix_x, npix_y = data.shape
x = np.linspace(0, npix_x, npix_x)
y = np.linspace(0, npix_y, npix_y)
xx, yy = np.meshgrid(x, y)
params, pcov, infoDict, errmsg, sucess = optimize.leastsq(err,
params0, args=(xx.flatten(), yy.flatten(),
data.flatten()), full_output=1)
perr = abs(np.diagonal(pcov))**0.5
model = gauss2D(xx, yy, *params)
return params, perr, model
def source_finder(data, peak, boundary, width, psf_pix):
"""A simple source finding tool"""
# first we make an estimate of the noise. Lets use the MAD mean
sigma_noise = mad_mean(data)
# Use noise estimate to set peak and boundary thresholds
peak_sigma = sigma_noise*peak
boundary_sigma = sigma_noise*boundary
# Pad the image to avoid hitting the edge of the image
pad = width*2
residual = np.pad(data, pad_width=((pad, pad), (pad, pad)), mode="constant")
model = np.zeros(residual.shape)
# Create slice to remove the padding later on
imslice = [slice(pad, -pad), slice(pad,-pad)]
catalog = []
# We will need to convert the fitted sigma values to a width
FWHM = 2*np.sqrt(2*np.log(2))
while True:
# Check if the brightest pixel is at least as bright as the sigma_peak
# Otherwise stop.
max_pix = residual.max()
if max_pix<peak_sigma:
break
xpix, ypix = np.where(residual==max_pix)
xpix = xpix[0] # Get first element
ypix = ypix[0] # Get first element
# Make slice that selects box of size width centred around bright brightest pixel
subim_slice = [ slice(xpix-width/2, xpix+width/2),
slice(ypix-width/2, ypix+width/2) ]
# apply slice to get subimage
subimage = residual[subim_slice]
# blank out pixels below the boundary threshold
mask = subimage > boundary_sigma
# Fit gaussian to submimage
params, perr, _model = fit_gaussian(subimage*mask, psf_pix)
amp, mean_x, mean_y, sigma_x,sigma_y = params
amp_err, mean_x_err, mean_y_err, sigma_x_err, sigma_y_err = perr
# Remember to reposition the source in original image
pos_x = xpix + (width/2 - mean_x) - pad
pos_y = ypix + (width/2 - mean_y) - pad
# Convert sigma values to FWHM lengths
size_x = FWHM*sigma_x
size_y = FWHM*sigma_y
# Add modelled source to model image
model[subim_slice] = _model
# create new source
source = (
amp,
pos_x,
pos_y,
size_x,
size_y
)
# add source to catalogue
catalog.append(source)
# update residual image
residual[subim_slice] -= _model
return catalog, model[imslice], residual[imslice], sigma_noise
test_image = "../data/fits/star_model_image.fits"
with astropy.io.fits.open(test_image) as hdu:
data = hdu[0].data[0,0,...]
catalog, model, residual, sigma_noise = source_finder(data, 5, 2, 50, 10)
print "Peak_Flux Pix_x Pix_y Size_x Size_y"
for source in catalog:
print " %.4f %.1f %.1f %.2f %.2f"%source
fig, (img, mod, res) = plt.subplots(1, 3, figsize=(24,12))
vmin, vmax = sigma_noise, data.max()
im = img.imshow(data, vmin=vmin, vmax=vmax)
img.set_title("Data")
mod.imshow(model, vmin=vmin, vmax=vmax)
mod.set_title("Model")
res.imshow(residual, vmin=vmin, vmax=vmax)
res.set_title("Residual")
cbar_ax = fig.add_axes([0.92, 0.25, 0.02, 0.5])
fig.colorbar(im, cax=cbar_ax, format="%.2g")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 6.5 Source Finding
Step2: Now, in reality the noise has to be measured in the presence of astrophysical emission. Furthermore, radio images are also contaminated by various instrumental effects which can manifest as spurious emission in the image domain. All these factors make it difficult to characterize the noise in a synthesized image. Since the noise generally dominates the images, the mean and standard deviation of the entire image are still fairly good approximations of the noise. Let us now insert a few sources (image and flux distribution shown below) in the noise image from earlier and then try to estimate the noise.
Step3: The pixel statistics of the image are no longer Gaussian, as is apparent from the long tail of the flux distribution. Constructing a Gaussian model from the mean and standard deviation results in a poor fit (blue line in the figure on the right). A better method to estimate the variance is to measure the dispersion of the data points about the mean (or median); this is the mean/median absolute deviation (MAD) technique. We will refer to the median absolute deviation as the MAD Median, and the mean absolute deviation as the MAD Mean. A synthesis-imaging-specific method to estimate the variance of the noise is to only consider the negative pixels. This works under the assumption that all the astrophysical emission (at least in Stokes I) has a positive flux density. The figure below shows noise estimates from the methods mentioned above.
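A compact sketch of the estimators described above, run on synthetic data (illustrative only, not part of the notebook):
import numpy as np
toy = np.random.normal(0.0, 2.0, 10000)   # pure Gaussian noise with sigma = 2
toy[:50] += 50.0                          # contaminate with a few bright 'source' pixels
print(toy.std())                                  # inflated by the bright pixels
print(np.mean(np.abs(toy - np.mean(toy))))        # MAD Mean, much less affected
print(np.median(np.abs(toy - np.median(toy))))    # MAD Median, essentially unaffected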
Step8: The MAD and negative-value standard deviation methods produce a better estimate of the noise distribution in the presence of sources.
Step9: Using this source finder we can produce a sky model which contains all 17 sources in our test image from earlier in the section.
|
11,705
|
<ASSISTANT_TASK:>
Python Code:
import csv
from datetime import datetime
from IPython.display import display, Markdown, Latex, HTML
import json
import math
import pandas as pd
from pathlib import Path
site: str
arm: str
def get_special_columns(file_path):
f = open(file_path, "r")
data = json.load(f)
return data[0]
new_column_names = get_special_columns("/fs/ncanda-share/beta/chris/ncanda-data-integration/scripts/dashboards/reference/redcap_col_names.json")
def create_dictionary(sites, arms):
# Create blank dictionary
subjects = {}
form_statuses = {}
completeness_statuses = {}
visit_dates = {}
missingness_statuses = {}
# Parse through all sites, arms, and forms
for site in sites:
p = Path('/fs/ncanda-share/log/make_all_inventories/inventory_by_site/' + str(site))
for arm_year in p.iterdir():
if (arm_year.is_dir() and arm_year.stem in arms):
for form in arm_year.iterdir():
dates_df = pd.read_csv("/fs/ncanda-share/log/status_reports/inventory_dates/" + arm_year.stem + ".csv")
if (form.is_dir() == False):
df = pd.read_csv(form)
# Create blank dictionary if not already a part, or simply set the count
for index, row in df.iterrows():
missing_status: float # For use in setting the missing status
# Define a new row in each dictionary if not already been parsed
if row['study_id'] not in subjects:
date_row = (dates_df.loc[dates_df['study_id'] == row['study_id']])
missingness_statuses[row['study_id']] = {
'year': arm_year.stem
}
form_statuses[row['study_id']] = {
'year': arm_year.stem
}
subjects[row['study_id']] = {
'year': arm_year.stem,
'date': (date_row['visit_date'].values[0])
}
completeness_statuses[row['study_id']] = {
'year': arm_year.stem
}
# Update all individual fields
subjects[row['study_id']][new_column_names[form.stem]] = int(row['non_nan_count'])
form_statuses[row['study_id']][new_column_names[form.stem]] = row['status']
completeness_statuses[row['study_id']][new_column_names[form.stem]] = row['complete']
# Try to get missing status
try:
if (type(row['missing']) == str):
missing_status = 0
try:
missing_status = int(row['missing'])
except ValueError:
missing_status = 0
except KeyError:
missing_status = 0
# Resolve nan errors
if (missing_status != missing_status):
missing_status = 0
missingness_statuses[row['study_id']][new_column_names[form.stem]] = missing_status
return subjects, form_statuses, completeness_statuses, visit_dates, missingness_statuses
success_color = "#39b33f"
light_success_color = "#29f705"
neutral_color = "#b8b8b8"
failure_color = "#e03647"
def determine_cell_style(v, completeness, missingness, form_name):
style_str: str = "border-style: solid; border-width: thin;" # Initial style string
# All cases regarding status
if ('PRESENT' in v): # If Present, Color Green
style_str += " background-color: " + success_color + ";"
elif ('EMPTY' in v): # If Empty, mark Gray
style_str += " background-color: " + neutral_color + ";"
elif ('MISSING' in v and 0 in v): # If Missing with no content, mark light green
style_str += " background-color: " + light_success_color + ";"
elif ('MISSING' in v and 0 not in v ): # If Missing with content, mark Red
style_str += " background-color: " + failure_color + ";"
elif ('EXCLUDED' in v and 0 not in v and (1 in v or 2 in v) and form_name == "visit_date"): # If visit_date is excluded with 1, light green
style_str += " background-color: " + light_success_color + ";"
elif ('EXCLUDED' in v and 0 not in v): # If Excluded and non-zero, mark Red
style_str += " background-color: " + failure_color + ";"
elif ('EXCLUDED' in v and 0 in v): # If Excluded and zero, mark light Green
style_str += " background-color: " + light_success_color + ";"
return style_str
def style_row(x, form_statuses, completeness_statuses, missingness_statuses):
# Create array of style as well as dataframe for data
style = [None] * x.size
sample_df = pd.DataFrame(data=x)
# Initialize beginning to be clear background
style[0] = 'background-color: #ffffff;'
# Loop through and change each style
for i in range(1, x.size):
# Grab value and form name
value = sample_df.iloc[i][0]
form_name = sample_df.iloc[i].name
# Get status, completeness, and missingness
form_status = form_statuses[x.name][form_name]
completeness = completeness_statuses[x.name][form_name]
missingness = missingness_statuses[x.name][form_name]
style[i] = determine_cell_style((value, form_status), completeness, missingness, form_name)
return style
def display_table(subjects, form_statuses, completeness_statuses, missingness_statuses):
# Convert to Pandas Dataframe and sort by date
final_df = pd.DataFrame(data=subjects).T
final_df['date'] = pd.to_datetime(final_df['date'], format='%Y-%m %d', errors='ignore')
final_df = final_df.sort_values(by="date", ascending=False)
# Sort columns
first_columns = ['year', 'date', 'visit_date']
column_order = first_columns + [x for x in sorted(final_df.columns.tolist()) if x not in first_columns]
final_df = final_df[column_order]
final_df = final_df.rename(columns=new_column_names)
final_df = final_df.drop(['date'], axis=1)
final_df = final_df.applymap(lambda x: x[:2] if "_visit_arm_1" in str(x) else x)
# Add Pandas.style
s3 = final_df.style.apply(lambda x: style_row(x, form_statuses, completeness_statuses, missingness_statuses), axis=1).set_table_styles(
[dict(selector="th",props=[('max-width', '50px')]),
dict(selector="th.col_heading",props=[("writing-mode", "vertical-lr")]),
dict(selector="td", props=[('margin', '3px')])])
# Display Dashboard Heading
display(Markdown('## Sample Dashboard'))
display(Markdown('For ' + site.capitalize() + ' and arm ' + arm))
# Display Cell Coloring Key
display(Markdown('### Cell Coloring Key'))
display(HTML('<div style="background-color: ' + success_color + '; float: left;">"PRESENT"</div>'))
display(HTML('<div style="background-color: ' + neutral_color + '; float: left;">"EMPTY"</div>'))
display(HTML('<div style="background-color: ' + failure_color + '; float: left;">"MISSING"</div>'))
display(HTML('<div style="background-color: ' + light_success_color + '; float: left;">"MISSING WITH NO CONTENT"</div>'))
display(HTML('<div style="background-color: ' + failure_color + '; float: left;">"EXCLUDED WITH CONTENT"</div>'))
display(HTML('<div style="background-color: ' + light_success_color + '; float: left;">"EXCLUDED WITH NO CONTENT"</div>'))
display(HTML('<div style="background-color: ' + light_success_color + '; float: left;">"EXCLUDED FOR VISIT_DATE WITH 1 OR 2 VALUES"</div>'))
# Display
display(Markdown('### Table'))
display(s3)
def main():
sites = [site]
arms = [arm]
subjects, form_statuses, completeness_statuses, visit_dates, missingness_statuses = create_dictionary(sites, arms)
display_table(subjects, form_statuses, completeness_statuses, missingness_statuses)
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parametrization
Step2: Create Special Columns
Step3: Create Data Dictionary
Step4: Functions to Style Table
Step5: Display Table
Step6: Main Function
Step7: Run main
|
11,706
|
<ASSISTANT_TASK:>
Python Code:
# BE SURE TO RUN THIS CELL BEFORE ANY OF THE OTHER CELLS
import psycopg2
import pandas as pd
# query database
statement = '''
SELECT DISTINCT text, COUNT(*)
FROM
(SELECT text
FROM twitter.hashtag
LIMIT 10000) AS hashtag_text
GROUP BY text
ORDER BY count DESC
'''
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
# query database
statement = '''
SELECT DISTINCT lower(text), COUNT(*)
FROM
(SELECT h.text
FROM twitter.hashtag h, twitter.tweet t, twitter.job j
WHERE h.tweet_id = t.tweet_id_str AND t.job_id = j.job_id AND j.description LIKE 'Provo%'
LIMIT 10000) AS hashtag_text
GROUP BY lower(text)
ORDER BY count DESC
'''
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
statement = '''
SELECT lower(word), COUNT(DISTINCT rn) AS num_rows
FROM(
SELECT UNNEST(STRING_TO_ARRAY(text, ' ')) AS word,
ROW_NUMBER() OVER(ORDER BY text) AS rn
FROM
(SELECT text FROM
twitter.tweet LIMIT 10000) y
) x
GROUP BY lower(word)
ORDER BY num_rows DESC'''
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
statement = '''
SELECT UNNEST(STRING_TO_ARRAY(text, ' ')) AS word,
ROW_NUMBER() OVER(ORDER BY text) AS rn
FROM
(SELECT text FROM
twitter.tweet LIMIT 1000) y
'''
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
import nltk
from nltk.corpus import stopwords
stops = stopwords.words('english')
print(stops)
statement = '''
SELECT lower(word), COUNT(DISTINCT rn) AS num_rows
FROM
(SELECT * FROM
(SELECT UNNEST(STRING_TO_ARRAY(text, ' ')) AS word,
ROW_NUMBER() OVER(ORDER BY text) AS rn
FROM
(SELECT text FROM
twitter.tweet LIMIT 1000) y ) x) z
WHERE lower(word) NOT IN ('rt',{})
GROUP BY lower(word)
ORDER BY num_rows DESC;
'''.format(', '.join(map("'{}'".format, stops)))
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
statement = '''
SELECT lower(word), COUNT(DISTINCT rn) AS num_rows
FROM
(SELECT * FROM
(SELECT UNNEST(STRING_TO_ARRAY(text, ' ')) AS word,
ROW_NUMBER() OVER(ORDER BY text) AS rn
FROM
(SELECT t.text FROM
twitter.tweet t, twitter.hashtag h
WHERE t.tweet_id_str = h.tweet_id AND lower(h.text) = 'tornado'
LIMIT 1000) y ) x) z
WHERE lower(word) NOT IN ('rt',{})
GROUP BY lower(word)
ORDER BY num_rows DESC;
'''.format(', '.join(map("'{}'".format, stops)))
try:
    connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu' password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job = {}
for i in list(range(len(column_names))):
job['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(job)
# Your code here
# --------------
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twitter
Step3: Now, let's find the most popular hashtags for the city of Provo, Utah!
Step4: Notice that we used lower(text) in our group by. Again, hashtags can be typed out in different ways, but they all refer to the same hashtag. For example, #GameOfThrones is the same as #gameofthrones.
Step5: There are a couple of things going on here so we will break it down piece by piece and then put the components back together again.
Step6: The problem with just counting words is that some words don't hold a lot of meaning (or they aren't that interesting when doing analysis). These are words like 'I', 'your', 'is', etc... These are known as stop words. We can actually load in a list of stop words from the Natural Language Toolkit (nltk) library. Now, this list is tokenized and we haven't tokenized the text of our tweets, so some probably won't apply, but it will allow us to remove a lot of these stop words from the scope of this analysis.
Step7: We are going to integrate this list into our statement. If the word is in this list, we don't want it returned.
Step8: So this isn't perfect in terms of cleaning up our results, but it did remove quite a bit of unwanted information. It is important to keep in mind that Twitter is very messy, more so than other natural language documents. That is because it is informal and users are constrained to write tweets within 140 characters. This lends itself to some creative spelling.
Step9: We chose to see what words most commonly occur with "#tornado" and the results make sense.
|
11,707
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize']=15,10
df = pd.read_csv('data/data.csv')
df.head()
df.shape
df_model = pd.DataFrame(df.model.unique(),columns=['model'])
df_model.head()
df_model.count()[0]
print "Total number of distinct models : "+ str(df_model.count()[0])
# Exercise 1: Find the distinct number of serial numbers
# Exercise 2: Find the distinct number of capacity bytes
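# A possible sketch for Exercises 1 and 2 above, assuming the columns are
# named 'serial' and 'capacity' as they are used later in this notebook:
# print "Total number of distinct serial numbers : " + str(df.serial.nunique())
# print "Total number of distinct capacity bytes : " + str(df.capacity.nunique())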
df_model_serial = pd.DataFrame(df.groupby(['model']).serial.nunique())
df_model_serial.head()
df_model_serial = df_model_serial.reset_index()
df_model_serial.head()
df_model_serial.columns = ['model','total_HD']
df_model_serial.head(39)
df_model_serial.plot(kind="barh",x="model",y="total_HD")
df_model_serial.sort_values(by='total_HD',inplace=True)
df_model_serial.plot(kind="barh",x="model",y="total_HD")
#Exercise 3: Find the count of different capacity bytes for a model and plot with and without sorting
df_fail = pd.DataFrame(df.groupby('model').failure.sum())
df_fail.head()
df_fail = df_fail.reset_index()
df_fail.head()
df_fail.plot(kind="barh",x="model",y="failure",figsize=(18,10))
# Exercise 4 : sort the above data frame and plot it
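# A possible sketch for Exercise 4, mirroring the sort done for total_HD above:
# df_fail.sort_values(by='failure').plot(kind="barh", x="model", y="failure", figsize=(18,10))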
merged_df = df_model_serial.merge(df_fail,how='inner',on='model')
merged_df.head()
merged_df['success'] = merged_df.total_HD - merged_df.failure
merged_df.head()
merged_df.plot(kind="bar",x="model",y=["failure","success"],subplots=True)
merged_df['ratio_failure'] = merged_df.failure / merged_df.total_HD
merged_df.head(25)
merged_df.sort_values(by="ratio_failure",ascending=False,inplace=True)
merged_df.head()
merged_df.plot(kind="bar",x="model",y="ratio_failure")
#Exercise: Find ratio of success and plot it
#Exercise : Plot multiple bar charts comparing ratio of success and failure
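# A possible sketch for the two exercises above:
# merged_df['ratio_success'] = merged_df.success / merged_df.total_HD
# merged_df.plot(kind="bar", x="model", y=["ratio_failure", "ratio_success"], subplots=True)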
df_capacity = pd.DataFrame(df.capacity.unique(),columns=['capacity'])
df_capacity.head()
df_capacity.shape
#Exercise : For a given capacity bytes, find the total number of failures and plot it
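# A possible sketch for the exercise above:
# df.groupby('capacity').failure.sum().plot(kind="bar")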
df_fail_mod_cap = pd.DataFrame(df.groupby(['model','capacity']).failure.sum())
df_fail_mod_cap.head()
df_fail_mod_cap = df_fail_mod_cap.reset_index()
df_fail_mod_cap.head(25)
df_fail_mod_cap.plot(x="capacity",y="failure",kind="bar",figsize=(20,5))
df_fail_mod_cap.head()
df_fail_mod_cap_pivot = df_fail_mod_cap.pivot("model","capacity","failure")
df_fail_mod_cap_pivot.head()
df_fail_mod_cap_pivot.fillna(0,inplace=True)
df_fail_mod_cap_pivot.head()
sns.heatmap(df_fail_mod_cap_pivot)
#Exercise : Find count of success for a model with different capacities and plot it
df_days = pd.DataFrame(df.groupby(['capacity','serial']).date.count())
df_days = df_days.reset_index()
df_days.head()
df_days.columns = ['capacity','serial','total_days']
df_days.head()
df_days.capacity.value_counts()
df_days.shape
df_days_pivot = df_days.pivot('capacity','serial','total_days')
df_days_pivot.head()
df_days_pivot.fillna(0,inplace=True)
df_days_pivot.head()
# Exercise : Visualize the above dataframe
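# A possible sketch for the exercise above (a heatmap as used earlier;
# note the pivot is wide, with one column per serial number):
# sns.heatmap(df_days_pivot)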
df_fail_days = pd.DataFrame(df[['capacity','serial','failure']].loc[df['failure'] == 1 ])
df_fail_days.head()
df_fail_count = df_days.merge(df_fail_days,how="left",on=['capacity','serial'])
df_fail_count.head()
df_fail_count.fillna(0,inplace=True)
df_fail_count.head()
df_fail_count.dtypes
g = sns.FacetGrid(df_fail_count, col="failure",hue='failure',size=5,aspect=1.5)
g.map_dataframe(plt.scatter,x='capacity',y='total_days')
df_fail_count_avg = pd.DataFrame(df_fail_count.groupby(['capacity','failure']).total_days.mean())
df_fail_count_avg.head()
df_fail_count_avg = df_fail_count_avg.reset_index()
df_fail_count_avg.head()
df_fail_count_avg_pivot = df_fail_count_avg.pivot('capacity','failure','total_days')
df_fail_count_avg_pivot.head()
df_fail_count_avg_pivot.plot(kind="bar")
df_hours = df[['serial','capacity','failure','smart_9']]
df_hours.head()
df_hours.shape
df_hours_max = pd.DataFrame(df_hours.groupby(['serial','capacity']).smart_9.max())
df_hours_max.head()
df_hours_max.shape
df_hours_max = df_hours_max.reset_index()
df_hours_max_merge = df_hours_max.merge(df_hours,on=['serial','capacity','smart_9'],how='inner')
df_hours_max_merge.head()
df_hours_max_merge_pivot = pd.pivot_table(df_hours_max_merge,index='capacity',columns='failure',values='smart_9'
,aggfunc='mean')
df_hours_max_merge_pivot.head()
df_hours_max_merge_pivot.plot(kind='bar')
df_model_capacity_hours = df[['model','capacity','failure','smart_9']]
df_model_capacity_hours.head()
df_model_capacity_hours.capacity = df_model_capacity_hours.capacity / 1024 ** 3
df_model_capacity_hours.head()
df_model_capacity_hours.capacity = df_model_capacity_hours.capacity.astype(np.int64)
df_model_capacity_hours.head()
df_model_capacity_hours_pivot = pd.pivot_table(data=df_model_capacity_hours,index='model',columns=['failure','capacity'],
values='smart_9',aggfunc='mean')
df_model_capacity_hours_pivot.head()
df_model_capacity_hours_pivot.fillna(0,inplace=True)
df_model_capacity_hours_pivot.head()
df_model_capacity_hours_pivot.plot(kind="barh")
sns.heatmap(df_model_capacity_hours_pivot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us take a sneak peek at the data
Step2: What is the size of the dataset?
Step3: Now we see that there are different models of hard disks, let us list them
Step4: let us see how many models there are in total
Step5: (inline image omitted)
Step6: Sort and plot
Step7: Question 2. Find failures for a hard disk models
Step8: Question 3. How do you compute failure rate for a model
Step9: for each model, let us see how many of its hard disks failed and how many did not
Step10: now let us compute each model's failure ratio (number of failures divided by total hard disks)
Step11: The higher the ratio value is , the model is prone to failure
Step12: Now we know which models fail the most, let us introduce a new feature in our analysis, capacity.
Step13: Question 4. Given a model and capacity bytes, what does failure count look like
Step14: Looking at this chart, can you tell what is not being represented correctly?
Step15: we see that for some models and their respective capacities we do not have a fail count; let's fill it with 0
Step16: This heat map gives us a better understanding of model, capacity vs failure
Step17: The above charts show which models failed the most, which models had the greatest number of hard disks running, and the failure ratio of each model
Step18: Question 6. Find the average running time for failed hard disks and average running time for hard disks that have not failed
Step19: (inline image omitted)
Step20: Now what can we do with this data? Is this useful? What can we generate from the above data that gives us more insight?
Step21: Question 7. How about using hours (SMART_9) column now and co-relate it with failure
Step22: Now, for a given hard disk and capacity, we want to know how long the hard disk ran
Step23: Question 8. Given the data , identify the model and capacity of the hard disk to buy based on how long it runs
Step24: Let us convert bytes to gigabytes and round it to the nearest number
Step25: The above visualization is confusing, as the bars reflect a combination of failure and hours counts
|
11,708
|
<ASSISTANT_TASK:>
Python Code:
This code file creates homework assignment #2
Gary Gregg
DATA 512A
University of Washington
Autumn 2017
import numpy as np
import csv
import matplotlib.pyplot as plt
plt.rcdefaults()
import os.path
import requests
# Country Map
COUNTRY_MAP = {
"East Timorese" : "Timor-Leste",
"Hondura" : "Honduras",
"Rhodesian" : "Zimbabwe",
"Salvadoran" : "El Salvador",
"Samoan" : "Samoa",
"São Tomé and Príncipe" : "Sao Tome and Principe",
"Somaliland" : "Somalia",
"South African Republic" : "South Africa",
"South Korean" : "Korea, South"
}
# Augmented Page Data Fields
AUG_COUNTRY = 0
AUG_PAGE = 1
AUG_REVISION_ID = 2
AUG_QUALITY = 3
AUG_POPULATION = 4
# Count Fields
CNT_COUNTRY = 0
CNT_POPULATION = 1
CNT_ARTICLES = 2
CNT_ARTICLE_PCT = 3
CNT_HQ_ARTICLES = 4
CNT_HQ_ARTICLE_PCT = 5
# Page Data Fields
PDT_COUNTRY = 0
PDT_PAGE = 1
PDT_REVISION_ID = 2
# Population Fields
POP_COUNTRY = 0
POP_CNT_TYPE = 1
POP_TIMEFRAME = 2
POP_DATATYPE = 3
POP_DATA = 4
POP_FOOTNOTES = 5
# Miscellaneous Constants
AUGMENTED_PAGE_DATA_PATH = 'data-512-a2.csv'
DEFAULT_ROW_COUNT = 2
MODEL = 'wp10'
PAGE_DATA_PATH = 'page_data.csv'
POPULATION_DATA_PATH = 'Population Mid-2015.csv'
PER_CALL = 140
PROJECT = 'enwiki'
def augment_page_data(page_data, quality_dictionary, population_dictionary):
Augments page data with article quality and the population of the country
in which the subject resides.
@param page_data: The original page data
@type page_data: list
@param quality_dictionary: An article quality dictionary, indexed by
revision ID
@type quality_dictionary: dict
@param population_dictionary: A population dictionary, indexed by country
@type population_dictionary: dict
@return: Page data augmented with article quality and the population of the
country in which the subject resides
@rtype: list
# Declare and initialize a dictionary of missing countries, and a list to
# received the augmented page data.
missing = {}
new_page_data = [['country',
'article_name',
'revision_id',
'article_quality',
'population']]
# Cycle for each row in the page data.
for index in range(1, len(page_data)):
# Get the indexed row. Get the article revision and country name
# for the first/next row.
row = page_data[index]
article_revision = row[PDT_REVISION_ID]
country_name = get_country(row[PDT_COUNTRY])
# Write a message if the article revision is not in the quality
# dictionary. This really should not happen.
if article_revision not in quality_dictionary:
print('Missing quality entry for revision ID \'%s\'.' %
article_revision)
# The article revision is in the quality dictionary.
else:
# Initialize, or increment the count of articles for the
# given country name if the country name is not in the
# population dictionary.
if country_name not in population_dictionary:
if country_name not in missing:
missing[country_name] = 1
else:
missing[country_name] += 1
# The country is in the population dictionary. Create
# an augmented page data row.
else:
new_page_data.append([country_name,
row[PDT_PAGE],
article_revision,
quality_dictionary[article_revision],
population_dictionary[country_name]])
# Describe the counts of articles for 'countries' that were missing
# a population in the population dictionary. Return the augmented page
# data.
print('The following is the counts of articles about persons in countries '
'that are missing a registered population: %s' % missing)
return new_page_data
def build_country_to_population(country_data):
Builds a dictionary of countries to their populations.
@param country_data: A list of countries with name as the first field,
and population as the fifth field
@type country_data: list
@return: A dictionary of countries to their population
@rtype: dict
# Declare and initialize the population dictionary, and cycle for each
# country in the list.
population_dictionary = {}
for index in range(3, len(country_data) - 1):
# Add a new dictionary for the first/next country.
population_dictionary[country_data[index][POP_COUNTRY]] =\
int(country_data[index][POP_DATA].replace(',', ''))
# Return the population dictionary.
return population_dictionary
def calculate_percentages(counts):
Calculates the percentage of articles per population, and the percentage of
high-quality articles for a country dictionary, list or tuple.
@param counts: A country dictionary, list or tuple
@type counts: dict, list or tuple
@return: None
@rtype: None
# Declare and initialize a percent multiplier. Cycle for each country.
percent = 100.
for country in counts:
# Get the value list for the first/next country. Get the article count
# and population from the list.
value = counts[country]
article_count = value[CNT_ARTICLES]
population = value[CNT_POPULATION]
# Calculate the percentage of articles per population if the population
# is greater than zero.
if population > 0:
value[CNT_ARTICLE_PCT] = article_count / population * percent
# Calculate the percentage of high-quality articles if there are one or
# more articles.
if article_count > 0:
value[CNT_HQ_ARTICLE_PCT] = value[CNT_HQ_ARTICLES] / article_count * percent
# Done, so return.
return
def construct_display_values(value_list, horizontal, vertical):
Constructs two lists of display values, one for the horizontal axis, and
one for the vertical axis.
@param value_list: A list containing the display values
@type value_list: list or tuple
@param horizontal: The index of the horizontal display values
@type horizontal: int
@param vertical: The index of the vertical display values
@type vertical: int
@return: Two lists
@rtype: list
# Declare and initialize the lists to be returned. Cycle for the number of
# items to be displayed.
horizontal_list = []
vertical_list = []
for i in range(0, 10):
# Add the values for the first/next item in the list.
horizontal_list.append(value_list[i][horizontal])
vertical_list.append(value_list[i][vertical])
return horizontal_list, vertical_list
def create_assignment_2():
Creates homework assignment #2. No input parameters or return value.
Everything this function does is a side-effect.
@return: None
@rtype: None
# Create the country list.
country_list = list(create_country_dictionary().values())
# Sort the country list by descending article/population percentage, and
# graph it.
sort_and_display(country_list, get_article_percentage, CNT_ARTICLE_PCT,
True,
'Highest-Ranked Countries in Terms of Number of Politician '
'Articles as a Proportion of Country Population')
# Sort the country list by ascending article/population percentage, and
# graph it.
sort_and_display(country_list, get_article_percentage, CNT_ARTICLE_PCT,
False,
'Lowest-Ranked Countries in Terms of Number of Politician '
'Articles as a Proportion of Country Population')
# Sort the country list by descending high-quality/all-article percentage,
# and graph it.
sort_and_display(country_list, get_hq_article_percentage, CNT_HQ_ARTICLE_PCT,
True,
'Highest-Ranked Countries in Terms of Number of GA and '
'FA-Quality Articles as a Proportion of all Articles '
'About Politicians from that Country')
# Sort the country list by ascending high-quality/all-article percentage,
# and graph it.
sort_and_display(country_list, get_hq_article_percentage, CNT_HQ_ARTICLE_PCT,
False,
'Lowest-Ranked Countries in Terms of Number of GA and '
'FA-Quality Articles as a Proportion of all Articles '
'About Politicians from that Country')
def create_augmented_page_data():
Creates the augmented page data file.
@return: None
@rtype: None
# Read the page data from CSV. Create the page quality map, and the
# country-to-population map. Using all of these, create the augmented
# page data and write it to CSV.
page_data = read_from_csv(PAGE_DATA_PATH)
write_to_csv(AUGMENTED_PAGE_DATA_PATH,
augment_page_data(page_data,
get_quality_all(page_data, 101),
build_country_to_population(
read_from_csv(POPULATION_DATA_PATH))))
def create_country_dictionary():
Creates a dictionary of countries, and statistics about them.
Precondition: The augmented page data file exists, and is formatted
correctly.
@return: A dictionary of countries, and statistics about them
@rtype: dict
# Here is the current list of fields for values in the dictionary:
#
# CNT_COUNTRY
# CNT_POPULATION
# CNT_ARTICLES
# CNT_ARTICLE_PCT
# CNT_HQ_ARTICLES
# CNT_HQ_ARTICLE_PCT
# Initialize an empty country dictionary. Read rows from the augmented
# page data file.
country_dictionary = {}
augmented_page_data = read_augmented_csv()
# Delete the header row from the augmented page data. Cycle for each
# remaining row in the file.
del augmented_page_data[0]
for data_row in augmented_page_data:
# Extract the country name from the row. Is there an existing entry
# in the country dictionary. Get it if so.
country = data_row[AUG_COUNTRY]
if country in country_dictionary:
country_row = country_dictionary[country]
# There is no existing entry in the country dictionary. Create one
# with initial values.
else:
country_row = [country, int(data_row[AUG_POPULATION]),
0, 0., 0, 0.]
# Increment the count of articles for the given country.
country_row[CNT_ARTICLES] += 1
# Get the quality from the data row. Increment the count of high-
# quality articles if the article has a high-quality rating.
quality = data_row[AUG_QUALITY]
if quality == 'FA' or quality == 'GA':
country_row[CNT_HQ_ARTICLES] += 1
        # Add (or update) the country value in the country dictionary,
        # indexed by country.
country_dictionary[country] = country_row
# Calculate the percentage of articles per population, and the percentage
# of high-quality articles.
calculate_percentages(country_dictionary)
return country_dictionary
def display_barchart(horizontal_values, vertical_values, title):
Displays a barchart of country to percentage.
@param horizontal_values: Country names to be displayed along the
horizontal axis
@type horizontal_values: str
@param vertical_values: Percentages to be displayed along the vertical axis
@type vertical_values: float
@param title: Title for the graph
@type title: str
@return: None
@rtype: None
# Set the figure size. Declare and initialize an array of evenly spaced
# values. Construct the plot.
plt.figure(figsize=(20, 10))
y_position = np.arange(len(horizontal_values))
plt.bar(y_position, vertical_values, align='center', alpha=1.0,
color=['#66cdaa']) # Color is Medium Aquamarine
# Set the x-ticks, the x-label and the y-label.
plt.xticks(y_position, horizontal_values)
plt.xlabel('Country Name')
plt.ylabel('Percentage')
# Set the title, and show the graph.
plt.title(title)
plt.show()
def get_article_percentage(country):
Gets the percentage of articles to population from a list.
@param country: A country attributes entry
@type country: list
@return: Percentage of articles to population
@rtype: float
return country[CNT_ARTICLE_PCT]
def get_article_quality(article_quality, revision_ids):
Gets predicted article quality for a series of revision IDs. Returns a dictionary
indexed by revision ID. Possible values for each revision ID are:
FA - Featured article
GA - Good article
B - B-class article
C - C-class article
Start - Start-class article
Stub - Stub-class article
@param article_quality: An existing dictionary of revision IDs to
article quality
@type article_quality: dictionary
@param revision_ids: A series of revision IDs
@type revision_ids: list or tuple
@return: article_quality
@rtype: dict
# Hardcoded endpoint for the ORES API
endpoint = 'https://ores.wikimedia.org/v3/scores/{project}/?models={model}&revids={revids}'
# The parameters to be passed to the ORES API
params = {'project': PROJECT,
'model': MODEL,
'revids': '|'.join(str(x) for x in revision_ids)
}
# Call the API, and return the response as JSON.
api_call = requests.get(endpoint.format(**params))
response = api_call.json()
    # Build a dictionary of article quality predictions indexed by
    # revision ID, and return it.
for key, value in response[PROJECT]['scores'].items():
article_quality[key] = value[MODEL]['score']['prediction']
return article_quality
def get_country(country):
Determines if a given country is mapped to another name.
@param country: A given country
@type country: str
@return: A mapped country name if a name exists in the country map,
the unmapped parameter otherwise
@rtype: str
# Reset the country name if a name exists in the country map, and
# return the country.
if country in COUNTRY_MAP:
country = COUNTRY_MAP[country]
return country
def get_hq_article_percentage(country):
Gets the percentage of high-quality articles from a list.
@param country: A country attributes entry
@type country: list
@return: Percentage of high-quality articles from a list
@rtype: float
return country[CNT_HQ_ARTICLE_PCT]
def get_quality_all(page_data, last_index=DEFAULT_ROW_COUNT):
Gets article quality for all revision IDs in a page data list, up
to a given maximum.
@param page_data: A page data list, formatted with revision ID as the
third element in each row
@type page_data: list or tuple
@param last_index: The last index to consider
@type last_index: int
@return: article_quality
@rtype: dict
    # Use the full length of the page data if the last index is less than
# a minimum number of rows.
if last_index <= DEFAULT_ROW_COUNT:
last_index = len(page_data)
# Declare and initialize the quality dictionary, and determine the number
# of iterative calls.
quality_dictionary = {}
calls = last_index // PER_CALL
# Declare and initialize the base index, and cycle for the given number of
# full calls required to retrieve the indicated number of rows.
base = 1
for _ in range(0, calls):
# Calculate the last index, and print a message.
count = base + PER_CALL
print('Retrieving quality rating for articles %d to %d...'
% (base, count - 1))
# Update the quality dictionary.
quality_dictionary = make_quality_call(quality_dictionary,
page_data,
base,
count)
# Update the base index.
base = count
# Is the base index less than the last index? If so, there is
# a remaining number of rows...
if base < last_index:
# Print a message.
print('Retrieving quality rating for articles %d to %d...' %
(base, last_index - 1))
# Update the quality dictionary with the remaining number of rows.
quality_dictionary = make_quality_call(quality_dictionary,
page_data,
base,
last_index)
    # Describe how long the dictionary is, and return it.
print('Length of quality dictionary is %d' % len(quality_dictionary))
return quality_dictionary
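# Worked example of the batching above (hypothetical size): with
# PER_CALL = 140 and last_index = 1000, calls = 7, so the loop covers
# rows 1-980 in seven full calls and the final block covers rows 981-999.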
def make_quality_call(existing_dictionary, page_data, start, stop):
Makes a call to get article quality for a given set of indices into a page
data list.
@param existing_dictionary: An existing dictionary of quality entries
indexed by revision ID
@type existing_dictionary: dictionary
@param page_data: A page data list, formatted with revision ID as the
third element in each row
@type page_data: list or tuple
@param start: The first index to use, inclusive
@type start: int
@param stop: The last index, exclusive
@type stop: int
@return: article_quality
@rtype: dict
# Declare and initialize an empty list of revision IDs. Cycle for each row
# in the given range. Append the first/next ID to the list.
ids = []
for row in range(start, stop):
ids.append(page_data[row][PDT_REVISION_ID])
# Get article quality for the selected revision IDs.
return get_article_quality(existing_dictionary, ids)
def read_augmented_csv():
Reads fields from the augmented page data file.
@return: The rows read from the file
@rtype: list
# Create the augmented page data file if it does not already exist.
if not os.path.isfile(AUGMENTED_PAGE_DATA_PATH):
create_augmented_page_data()
# Read the file, and return the rows.
return read_from_csv(AUGMENTED_PAGE_DATA_PATH)
def read_from_csv(file_name):
Reads fields from a CSV file.
@param file_name: A file path.
@type file_name: str
@return: The rows read from the file
@rtype: list
    # Declare and initialize an empty row list. Open a CSV reader using the
# given file name.
row_list = []
with (open(file_name)) as csvfile:
reader = csv.reader(csvfile)
# Append the row for each row read by the reader.
for row in reader:
row_list.append(row)
# Return the row list.
return row_list
def sort_and_display(value_list, sorter, percentage_index, descending, title):
Sorts a values list, and displays a barchart.
@param value_list: A list of values
@type value_list: list
@param sorter: The key function to use for sort
@type sorter: function
@param percentage_index: The index of the desired percentage in the value
list
@type percentage_index: int
@param descending: True to sort largest to smallest, false otherwise
@type descending: bool
@param title: The title of the resulting graph
@type title: str
@return: None
@rtype: None
# Sort the value list. Extract values for the horizontal and vertical
# axes.
value_list = sorted(value_list, key=sorter, reverse=descending)
horizontal, vertical = construct_display_values(value_list,
CNT_COUNTRY,
percentage_index)
# Display a barchart with the extracted values and the given title.
display_barchart(horizontal, vertical, title)
def write_to_csv(file_name, row_list):
Writes fields to a CSV file.
@param file_name: A file path.
@type file_name: str
@param row_list: The rows to write to the file
@type row_list: list
# Open a CSV writer using the given file name. Write the given rows.
with(open(file_name, 'w')) as csvfile:
writer = csv.writer(csvfile)
writer.writerows(row_list)
# Create the products for homework assignment #2.
create_assignment_2()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declare all required import packages.
Step2: Although they refer to the same country, some of the country names in the article data file do not match those in the population data file. Create a map from one to the other.
Step3: Declare the field offsets below for each kind of record list we use. The offsets for the 'augmented' data file will be the same as those for the article (or page) data file, except that they append article quality and country population. The count record will contain synthesized data that needs to be graphed, including the two required percentage values. The article (or page) data file is one of our input files, as is the population data file.
Step4: Declare and initialize global constants.
Step6: The augment_page_data function augments rows from the article data file with a quality rating and a country population. The quality rating is supplied by a quality dictionary indexed by revision ID, and the population dictionary is indexed by country name.
Step8: The build_country_to_population function builds a dictionary of country names to the corresponding country's population.
Step10: The calculate_percentages function calculates the article count to population percentage, and the good article count to article count percentage for each country.
Step12: The construct_display_values function constructs two lists, one for the top (or bottom) ten values to be displayed along the horizontal axis (which will be country names), and the top (or bottom) ten values to be displayed along the vertical axis (which will be percentages). The function receives the indices of the display fields as its last two arguments.
Step14: The function create_assignment_2 is the starting point for this show. It creates the augmented article (or page) data file, calculates the percentages to be displayed, then creates the four bar graphs required by the assignment.
Step16: The function create_augmented_page_data creates the augmented page data file from the article data file and the population data file.
Step18: The function create_country_dictionary creates a dictionary of statistics about a country, indexed by the country name.
Step20: The function display_barchart displays a barchart given the horizontal axis values, the vertical axis values, and a graph title.
Step22: The function get_article_percentage gets the article percentage field from a row in a country statistics list. This method is used by 'sorted' to sort the country statistics list by the article percentage field.
Step24: The function get_article_quality uses the Wikimedia ORES API to retrieve article quality for a series of articles given by revision ID.
Step26: The function get_country uses the COUNTRY_MAP to return possibly alternate country names used in the population data file. If the names used are different, it returns the different name. Otherwise it returns the name used in the article data file.
Step28: The function get_hq_article_percentage gets the high-quality article percentage field from a row in a country statistics list. This method is used by 'sorted' to sort the country statistics list by the high-quality article percentage field.
Step30: I discovered that the Wikimedia ORES API will not successfully return quality statistics for more than about 140 articles at a time. The get_quality_all function will get quality statistics for all rows in the article data file by calling the ORES API as many times as it needs to, asking for no more than 140 articles at a time.
Step32: The function make_quality_call assists in batch calling of the ORES API by creating a list of only the desired 140 revision IDs. It then calls ORES with these revision IDs, and adds the results to an existing article quality dictionary, which is slowly built up until the quality ratings have been retrieved for all the articles.
Step34: The read_augmented_csv function reads the augmented data file, which is the article data file with quality ratings and country population counts appended to each row. Note that the function will use an existing augmented data file if it exists, or create a new one if it does not.
Step36: The function read_from_csv reads rows from a CSV file.
Step38: The function sort_and_display sorts a country statistics list using a supplied sort function, then displays the top (or bottom) rows of the list with a percentage indicated by the percentage_index argument. The sort can occur either ascending or descending, and the resulting display has the indicated title.
Step40: The function write_to_csv writes a row list in CSV format to a file with the indicated name. In particular, this function is used to create the augmented data file.
Step41: Here is where the whole thing starts
|
11,709
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from pathlib import Path
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
takeo = takeo.as_greyscale(mode='luminosity')
# Use a bounding box rather than the facial shape
takeo.landmarks['bounding_box'] = takeo.landmarks['PTS'].lms.bounding_box()
takeo.view();
template = takeo.crop_to_landmarks(group='bounding_box')
template.view();
from menpofit.lk import LucasKanadeFitter
fitter = LucasKanadeFitter(template, group='bounding_box')
from menpofit.fitter import noisy_shape_from_bounding_box
gt_bb = takeo.landmarks['bounding_box'].lms
# generate perturbed bounding box
init_bb = noisy_shape_from_bounding_box(fitter.reference_shape, gt_bb)
# fit image
fr = fitter.fit_from_bb(takeo, init_bb, gt_shape=gt_bb)
# print fitting error
print(fr)
fr.view_widget()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lucas-Kanade methods align a given template onto a provided image. Therefore, we must create a template that we will seek within a given input image. For example, a sensible template for the Takeo image might be the facial region.
Step2: 2. Build a Lucas Kanade fitter
Step3: 3. Fit using the LK fitter
|
11,710
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from fbprophet import Prophet
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize']=(20,10)
plt.style.use('ggplot')
sales_df = pd.read_csv('../examples/retail_sales.csv', index_col='date', parse_dates=True)
sales_df.head()
df = sales_df.reset_index()
df.head()
df=df.rename(columns={'date':'ds', 'sales':'y'})
df.head()
df.set_index('ds').y.plot()
promotions = pd.DataFrame({
'holiday': 'december_promotion',
'ds': pd.to_datetime(['2009-12-01', '2010-12-01', '2011-12-01', '2012-12-01',
'2013-12-01', '2014-12-01', '2015-12-01']),
'lower_window': 0,
'upper_window': 0,
})
promotions
df['y'] = np.log(df['y'])
df.tail()
model = Prophet(holidays=promotions)
model.fit(df);
future = model.make_future_dataframe(periods=24, freq = 'm')
future.tail()
forecast = model.predict(future)
forecast.tail()
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
model.plot(forecast);
model.plot_components(forecast);
model_no_holiday = Prophet()
model_no_holiday.fit(df);
future_no_holiday = model_no_holiday.make_future_dataframe(periods=24, freq = 'm')
future_no_holiday.tail()
forecast_no_holiday = model_no_holiday.predict(future)
forecast.set_index('ds', inplace=True)
forecast_no_holiday.set_index('ds', inplace=True)
compared_df = forecast.join(forecast_no_holiday, rsuffix="_no_holiday")
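# np.exp undoes the log transform applied before fitting, putting the yhat
# columns back on the original sales scale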
compared_df = np.exp(compared_df[['yhat', 'yhat_no_holiday']])
compared_df['diff_per'] = 100*(compared_df['yhat'] - compared_df['yhat_no_holiday']) / compared_df['yhat_no_holiday']
compared_df.tail()
compared_df['diff_per'].mean()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in the data
Step2: Prepare for Prophet
Step3: Let's rename the columns as required by fbprophet. Additionally, fbprophet doesn't like the index to be a datetime...it wants to see 'ds' as a non-index column, so we won't set an index differently than the integer index.
Step4: Now's a good time to take a look at your data. Plot the data using pandas' plot function
Step5: Reviewing the Data
Step6: To continue, we need to log-transform our data
Step7: Running Prophet
Step8: We've instantiated the model, now we need to build some future dates to forecast into.
Step9: To forecast this future data, we need to run it through Prophet's model.
Step10: The resulting forecast dataframe contains quite a bit of data, but we really only care about a few columns. First, let's look at the full dataframe
Step11: We really only want to look at yhat, yhat_lower and yhat_upper, so we can do that with
Step12: Plotting Prophet results
Step13: Personally, I'm not a fan of this visualization but I'm not going to build my own...you can see how I do that here
Step14: Comparing holidays vs no-holidays forecasts
Step15: Let's compare the two forecasts now. Note
Step16: We are only really interested in the yhat values, so let's remove all the rest and convert the logged values back to their original scale.
Step17: Now, let's take the percentage difference and the average difference for the model with holidays vs that without.
|
11,711
|
<ASSISTANT_TASK:>
Python Code:
import os
print(os.getcwd())
os.chdir(os.getcwd() + "/Physique/") # change current working directory
print(os.getcwd())
%run -i ./Scripts/Refresh.py # this is the main, important, command to run
import Physique
import sys
sys.executable # Check which Python you are running in case you have ImportError's
print(dir(Physique))
from Physique import FundamentalPhysicalConstants as FPC
print(FPC.columns)
print(FPC)
g_0pd = FPC[FPC["Quantity"].str.contains("gravity") ]
# standard acceleration of gravity as a panda DataFrame
print(g_0pd)
g_0 = g_0pd["Value"].values[0]
print(type(g_0))
print(g_0)
# access the values you're interested in
print(g_0pd.Quantity)
print(g_0pd.Value.get_values()[0])
print(g_0pd.Unit.get_values()[0])
# you can also grab just the 1 entry from this DataFrame using the .loc module
FPC[FPC["Quantity"].str.contains("Boltzmann")].loc[49,:]
g_0pd.loc[303,:]
from Physique import Conversions
print(Conversions.columns)
Conversions[Conversions['Toconvertfrom'].str.contains("pound-force ")]
Conversions[Conversions['to'].str.contains("newton ")]
lbf2N = Conversions.loc[340,:];
print(lbf2N)
print(lbf2N.Toconvertfrom)
print(lbf2N.to)
print(lbf2N.Multiplyby)
print(470000 * lbf2N.Multiplyby, lbf2N.to)
Conversions[Conversions['Toconvertfrom'].str.match("psi")]
psi2Pa = Conversions.loc[372,:]
print(3028 * psi2Pa.Multiplyby, psi2Pa.to)
Conversions[Conversions['Toconvertfrom'].str.match("atm")]
atm2Pa = Conversions.loc[15,:]
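# psi -> Pa via the psi Multiplyby, then Pa -> atm by dividing by the atm Multiplyby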
print(3028 * psi2Pa.Multiplyby / atm2Pa.Multiplyby, atm2Pa.Toconvertfrom)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NIST Fundamental Constants
Step2: Find a Fundamental Constant you are interested in using the usual pandas modules
Step3: NIST Official Conversions (to metric)
Step4: From the list of columns, search for the quantity you desire by trying out different search terms
Step5: Or we can look up the SI unit we want to convert to.
Step6: Look at what you want and see the index; it happens to be 340 in this example.
Step7: Then the attributes can accessed by the column names.
Step8: So for example, the reusable SSME delivers a vacuum thrust of 470000 lb or
Step9: To obtain the conversion for pressure in psia, which we search for with "psi"
Step10: So for a chamber pressure of 3028 psia for the SSME,
Step11: Also, get the conversion for atmospheres (atm)
|
11,712
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
def polynomial_sframe(feature, degree):
# assume that degree >= 1
# initialize the SFrame:
poly_sframe = graphlab.SFrame()
# and set poly_sframe['power_1'] equal to the passed feature
poly_sframe['power_1'] = feature
    # first check if degree > 1
if degree > 1:
# then loop over the remaining degrees:
# range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree
for power in range(2, degree+1):
# first we'll give the column a name:
name = 'power_' + str(power)
# then assign poly_sframe[name] to the appropriate power of feature
#name = name.apply(lambda name: names**power)
poly_sframe[name] = feature #poly_sframe.apply(lambda name : name**power)
tmp = poly_sframe[name]
tmp_power = tmp.apply(lambda x: x**power)
poly_sframe[name] = tmp_power
#print tmp_power
return poly_sframe
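# Quick illustrative check of polynomial_sframe (values worked by hand):
# polynomial_sframe(graphlab.SArray([1., 2., 3.]), 3) should produce
# power_1 = [1, 2, 3], power_2 = [1, 4, 9], power_3 = [1, 8, 27]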
import matplotlib.pyplot as plt
%matplotlib inline
sales = graphlab.SFrame('kc_house_data.gl/')
sales = sales.sort(['sqft_living','price'])
l2_small_penalty = 1e-5
poly15_data = polynomial_sframe(sales['sqft_living'], 15)
my_features15 = poly15_data.column_names() # get the name of the features
poly15_data['price'] = sales['price'] # add price to the data since it's the target
model15 = graphlab.linear_regression.create(poly15_data, target = 'price', features = my_features15,
l2_penalty=l2_small_penalty,
validation_set = None)
model15.get('coefficients')
(semi_split1, semi_split2) = sales.random_split(.5,seed=0)
(set_1, set_2) = semi_split1.random_split(0.5, seed=0)
(set_3, set_4) = semi_split2.random_split(0.5, seed=0)
poly15_set1 = polynomial_sframe(set_1['sqft_living'], 15)
my_features15 = poly15_set1.column_names() # get the name of the features
poly15_set1['price'] = set_1['price'] # add price to the data since it's the target
modelset1 = graphlab.linear_regression.create(poly15_set1, target = 'price', features = my_features15,
l2_penalty=l2_small_penalty,
validation_set = None)
modelset1.get('coefficients')
plt.plot(poly15_set1['power_1'],poly15_set1['price'],'.',
poly15_set1['power_1'], modelset1.predict(poly15_set1),'-')
poly15_set2 = polynomial_sframe(set_2['sqft_living'], 15)
my_features15 = poly15_set2.column_names() # get the name of the features
poly15_set2['price'] = set_2['price'] # add price to the data since it's the target
modelset2 = graphlab.linear_regression.create(poly15_set2, target = 'price', features = my_features15,
l2_penalty=l2_small_penalty,
validation_set = None)
modelset2.get('coefficients')
plt.plot(poly15_set2['power_1'],poly15_set2['price'],'.',
poly15_set2['power_1'], modelset2.predict(poly15_set2),'-')
poly15_set3 = polynomial_sframe(set_3['sqft_living'], 15)
my_features15 = poly15_set3.column_names() # get the name of the features
poly15_set3['price'] = set_3['price'] # add price to the data since it's the target
modelset3 = graphlab.linear_regression.create(poly15_set3, target = 'price', features = my_features15,
l2_penalty=l2_small_penalty,
validation_set = None)
modelset3.get('coefficients')
plt.plot(poly15_set3['power_1'],poly15_set3['price'],'.',
poly15_set3['power_1'], modelset3.predict(poly15_set3),'-')
poly15_set4 = polynomial_sframe(set_4['sqft_living'], 15)
my_features15 = poly15_set4.column_names() # get the name of the features
poly15_set4['price'] = set_4['price'] # add price to the data since it's the target
modelset4 = graphlab.linear_regression.create(poly15_set4, target = 'price', features = my_features15,
l2_penalty=l2_small_penalty,
validation_set = None)
modelset4.get('coefficients')
plt.plot(poly15_set4['power_1'],poly15_set4['price'],'.',
poly15_set4['power_1'], modelset4.predict(poly15_set4),'-')
poly15_set1 = polynomial_sframe(set_1['sqft_living'], 15)
my_features15 = poly15_set1.column_names() # get the name of the features
poly15_set1['price'] = set_1['price'] # add price to the data since it's the target
modelset1 = graphlab.linear_regression.create(poly15_set1, target = 'price', features = my_features15,
l2_penalty=1e5,
validation_set = None)
modelset1.get('coefficients')
plt.plot(poly15_set1['power_1'],poly15_set1['price'],'.',
poly15_set1['power_1'], modelset1.predict(poly15_set1),'-')
poly15_set2 = polynomial_sframe(set_2['sqft_living'], 15)
my_features15 = poly15_set2.column_names() # get the name of the features
poly15_set2['price'] = set_2['price'] # add price to the data since it's the target
modelset2 = graphlab.linear_regression.create(poly15_set2, target = 'price', features = my_features15,
l2_penalty=1e5,
validation_set = None)
modelset2.get('coefficients')
plt.plot(poly15_set2['power_1'],poly15_set2['price'],'.',
poly15_set2['power_1'], modelset2.predict(poly15_set2),'-')
poly15_set3 = polynomial_sframe(set_3['sqft_living'], 15)
my_features15 = poly15_set3.column_names() # get the name of the features
poly15_set3['price'] = set_3['price'] # add price to the data since it's the target
modelset3 = graphlab.linear_regression.create(poly15_set3, target = 'price', features = my_features15,
l2_penalty=1e5,
validation_set = None)
modelset3.get('coefficients')
plt.plot(poly15_set3['power_1'],poly15_set3['price'],'.',
poly15_set3['power_1'], modelset3.predict(poly15_set3),'-')
poly15_set4 = polynomial_sframe(set_4['sqft_living'], 15)
my_features15 = poly15_set4.column_names() # get the name of the features
poly15_set4['price'] = set_4['price'] # add price to the data since it's the target
modelset4 = graphlab.linear_regression.create(poly15_set4, target = 'price', features = my_features15,
l2_penalty=1e5,
validation_set = None)
modelset4.get('coefficients')
plt.plot(poly15_set4['power_1'],poly15_set4['price'],'.',
poly15_set4['power_1'], modelset4.predict(poly15_set4),'-')
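# The eight fit-and-plot cells above differ only in the data subset and the
# L2 penalty. A possible helper sketch capturing the repeated pattern:
def fit_and_plot(data_set, l2_penalty):
    poly15 = polynomial_sframe(data_set['sqft_living'], 15)
    features = poly15.column_names()
    poly15['price'] = data_set['price']
    model = graphlab.linear_regression.create(poly15, target='price', features=features,
                                              l2_penalty=l2_penalty, validation_set=None)
    plt.plot(poly15['power_1'], poly15['price'], '.',
             poly15['power_1'], model.predict(poly15), '-')
    return model
# e.g. fit_and_plot(set_1, 1e5)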
(train_valid, test) = sales.random_split(.9, seed=1)
train_valid_shuffled = graphlab.toolkits.cross_validation.shuffle(train_valid, random_seed=1)
n = len(train_valid_shuffled)
k = 10 # 10-fold cross-validation
for i in xrange(k):
start = (n*i)/k
end = (n*(i+1))/k-1
print i, (start, end)
train_valid_shuffled[0:10] # rows 0 to 9
n = len(train_valid_shuffled)
k = 10 # 10-fold cross-validation
validation4 = graphlab.SFrame()
for i in xrange(k):
if i<4:
start = (n*i)/k
end = (n*(i+1))/k-1
validation4 = validation4.append(train_valid_shuffled[start:end+1])
print i, (start, end)
#validation4 = validation4[:-1]
validation4 = validation4[5818:7758]
print int(round(train_valid_shuffled[5818:7757+1]['price'].mean(), 0))
print int(round(validation4['price'].mean(), 0))
n = len(train_valid_shuffled)
first_two = train_valid_shuffled[0:2]
last_two = train_valid_shuffled[n-2:n]
#print first_two.append(last_two)
n = len(train_valid_shuffled)
first_segment = train_valid_shuffled[0:5818] ## before the segment 3
last_segment = train_valid_shuffled[7758:n] ## after the segment 3
train4 = first_segment.append(last_segment) ## train4 dataset contain all the data excluding fourth
print int(round(train4['price'].mean(), 0))
def k_fold_cross_validation(k, l2_penalty, data, output_name, features_list):
n = len(data)
validation_errors = []
for i in xrange(k):
start = (n*i)/k
end = (n*(i+1))/k-1
validation_set = data[start:end+1]
training_set = data[end+1:n].append(data[0:start])
## train a linear model using training set just formend, with given l2_penalty
model = graphlab.linear_regression.create(training_set,
target = output_name,
features = features_list,
l2_penalty=l2_penalty,
validation_set = None,
verbose = False)
# predict on validation set
pred = model.predict(validation_set)
validation_error = pred - validation_set['price']
RSS = sum(validation_error * validation_error)
validation_errors.append(RSS)
return sum(validation_errors)/len(validation_errors)
import numpy as np
poly15_train_valid_shuffled = polynomial_sframe(train_valid_shuffled['sqft_living'], 15)
my_features15 = poly15_train_valid_shuffled.column_names() # get the name of the features
poly15_train_valid_shuffled['price'] = train_valid_shuffled['price'] # add price to the data since it's the target
l2_penalty_list = np.logspace(1, 7, num=13)
min_error = None
best_l2_penalty = None
cross_validation_errors = []
for l2_penalty in l2_penalty_list:
    avg_val_error = k_fold_cross_validation(10, l2_penalty, poly15_train_valid_shuffled, 'price', my_features15)
print "For l2_penalty: " + str(l2_penalty) + " ---> Avg validation penalty : " + str(avg_val_error)
cross_validation_errors.append(avg_val_error)
if min_error is None or avg_val_error < min_error:
min_error = avg_val_error
best_l2_penalty = l2_penalty
print "Best l2_penalty --->: " + str(best_l2_penalty)
poly15_sales = polynomial_sframe(sales['sqft_living'], 15)
my_features15 = poly15_sales.column_names() # get the name of the features
poly15_sales['price'] = sales['price'] # add price to the data since it's the target
modelset4 = graphlab.linear_regression.create(poly15_sales,
target = 'price',
features = my_features15,
l2_penalty=best_l2_penalty,
validation_set = None)
modelset4.get('coefficients')
plt.plot(poly15_sales['power_1'],poly15_sales['price'],'.',
poly15_sales['power_1'], modelset4.predict(poly15_sales),'-')
test_data = polynomial_sframe(test['sqft_living'], 15)
predictions_test = modelset4.predict(test_data)
test_errors = predictions_test - test['price']
RSS_test = sum(test_errors * test_errors)
print RSS_test
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polynomial regression, revisited
Step2: Let's use matplotlib to visualize what a polynomial regression looks like on the house data.
Step3: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
Step4: Let us revisit the 15th-order polynomial model using the 'sqft_living' input. Generate polynomial features up to degree 15 using polynomial_sframe() and fit a model with these features. When fitting the model, use an L2 penalty of 1e-5
Step5: Note
Step6: QUIZ QUESTION
Step7: Observe overfitting
Step8: Next, fit a 15th degree polynomial on set_1, set_2, set_3, and set_4, using 'sqft_living' to predict prices. Print the weights and make a plot of the resulting model.
Step9: The four curves should differ from one another a lot, as should the coefficients you learned.
Step10: These curves should vary a lot less, now that you applied a high degree of regularization.
Step11: Once the data is shuffled, we divide it into equal segments. Each segment should receive n/k elements, where n is the number of observations in the training set and k is the number of segments. Since the segment 0 starts at index 0 and contains n/k elements, it ends at index (n/k)-1. The segment 1 starts where the segment 0 left off, at index (n/k). With n/k elements, the segment 1 ends at index (n*2/k)-1. Continuing in this fashion, we deduce that the segment i starts at index (n*i/k) and ends at (n*(i+1)/k)-1.
Step12: Let us familiarize ourselves with array slicing with SFrame. To extract a continuous slice from an SFrame, use colon in square brackets. For instance, the following cell extracts rows 0 to 9 of train_valid_shuffled. Notice that the first index (0) is included in the slice but the last index (10) is omitted.
Step13: Now let us extract individual segments with array slicing. Consider the scenario where we group the houses in the train_valid_shuffled dataframe into k=10 segments of roughly equal size, with starting and ending indices computed as above.
Step14: To verify that we have the right elements extracted, run the following cell, which computes the average price of the fourth segment. When rounded to nearest whole number, the average should be $536,234.
Step15: After designating one of the k segments as the validation set, we train a model using the rest of the data. To choose the remainder, we slice (0
Step16: Extract the remainder of the data after excluding fourth segment (segment 3) and assign the subset to train4.
Step17: To verify that we have the right elements extracted, run the following cell, which computes the average price of the data with fourth segment excluded. When rounded to nearest whole number, the average should be $539,450.
Step18: Now we are ready to implement k-fold cross-validation. Write a function that computes k validation errors by designating each of the k segments as the validation set. It accepts as parameters (i) k, (ii) l2_penalty, (iii) dataframe, (iv) name of output column (e.g. price) and (v) list of feature names. The function returns the average validation error using k segments as validation sets.
Step19: Once we have a function to compute the average validation error for a model, we can write a loop to find the model that minimizes the average validation error. Write a loop that does the following
Step20: QUIZ QUESTIONS
Step21: QUIZ QUESTION
|
11,713
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed below
import requests
from bs4 import BeautifulSoup

# Grab the NYT's homepage
response = requests.get("http://nytimes.com")
doc = BeautifulSoup(response.text, "html.parser")
# Snag all of the headlines (h3 tags with 'story-heading' class)
headlines = doc.find_all("h3", {'class': 'story-heading'})
# Getting the headline text out using list comprehensions
# is a lot more fun but I guess you just learned those
# like a day ago, so we'll go ahead and use a for loop.
# But for the curious:
# [headline.text.strip() for headline in headlines]
# Print the text of the headlines
for headline in headlines:
    print(headline.text.strip())
from selenium import webdriver
# Just in case you didn't run it up there, I'll import again
import requests
# Using .select instead of .find is a little more
# readable to people from the web dev world, maybe?
# Imports, of course
!pip install selenium
# Initialize a Firefox webdriver
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
driver = webdriver.Firefox()
# Grab the web page
# You'll use selenium.webdriver.support.ui.Select
# that we imported above to grab the Seelct element called
# t_web_lookup__license_type_name, then select Acupuncturists
# We use .find_element_by_name here because we know the name
# We use .find_element_by_id here because we know the id
# Then we'll fake typing into it
# Now we can grab the search button and click it
# Instead of using requests.get, we just look at .page_source of the driver
# We can feed that into Beautiful Soup
# It's a tricky table, but this grabs the linked names inside of the A
#rows = doc.select("#datagrid_results tr")
# Close the webdriver
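# A possible end-to-end sketch of the workflow the comments above describe.
# The URL, the last-name field id, the button id, and the sample query are
# assumptions and would need to be checked against the live lookup page:
# driver.get("http://search.example.gov/lookup")  # hypothetical URL
# dropdown = Select(driver.find_element_by_name("t_web_lookup__license_type_name"))
# dropdown.select_by_visible_text("Acupuncturists")
# driver.find_element_by_id("t_web_lookup__last_name").send_keys("Smith")  # id and query assumed
# driver.find_element_by_id("sch_button").click()  # id assumed
# doc = BeautifulSoup(driver.page_source, "html.parser")
# names = [a.text for a in doc.select("#datagrid_results tr td a")]
# driver.quit()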
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: But... forms!
Step2: Submitting forms with requests
Step3: It's magic, I swear!
Step4: Closing the webdriver
|
11,714
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import logging
from scipy.io.matlab import loadmat
from scipy.sparse import csr_matrix
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
import rescal
from almc.bayesian_rescal import BayesianRescal
%matplotlib inline
#logger = logging.getLogger()
#logger.setLevel(logging.DEBUG)
dataset = 'kinship'
mat = loadmat('../data/%s/alyawarradata.mat' % (dataset))
T = np.array(mat['Rs'], np.float32)
max_iter = 20
n_dim = 10
T = np.swapaxes(T, 1, 2)
T = np.swapaxes(T, 0, 1) # [relation, entity, entity]
n_relation, n_entity, _ = T.shape
print(T.shape)
import itertools
trainT = np.zeros_like(T)
p = 0.1
train_mask = np.random.binomial(1, p, T.shape)
trainT[train_mask==1] = T[train_mask==1]
test_mask = np.ones_like(T)
test_mask[train_mask==1] = 0
print('training size %d' % np.sum(trainT))
print('test size %d' % np.sum(T[test_mask==1]))
var_list = [0.01, 0.001]
var_e = 1
var_r = 1
best_roc = 0
for var_x in var_list:
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r)
model.fit(trainT, max_iter=max_iter)
_T = model._reconstruct()
score = roc_auc_score(T[test_mask==1], _T[test_mask==1])
print('var_x:{0:3.3f}, var_e:{1:3.3f}, var_r:{2:3.3f}, AUC-ROC:{3:.3f}'.format(var_x, var_e, var_r, score))
if score > best_roc:
best_vars = (var_x, var_e, var_r)
best_model = model
best_roc = score
var_x, var_e, var_r = best_vars
print(best_vars)
X = list()
for k in range(n_relation):
X.append(csr_matrix(trainT[k]))
A, R, f, itr, exectimes = rescal.rescal_als(X, n_dim)
_T = best_model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
_X = np.zeros_like(T)
for k in range(T.shape[0]):
_X[k] = np.dot(np.dot(A, R[k]), A.T)
print(roc_auc_score(T[test_mask==1], _X[test_mask==1]))
A, R, f, itr, exectimes = rescal.rescal_als(X, n_dim)
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r)
model.n_relations = n_relation
model.n_entities = n_entity
model.E = A
model.R = np.zeros([n_relation, n_dim, n_dim])
for k in range(n_relation):
model.R[k] = R[k]
model._gibbs(trainT, max_iter=5)
_T = model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
observed_var = 0.001
unobserved_var = 1.
n_trial = 1
for i in range(n_trial):
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r, controlled_var=True,
obs_var=observed_var, unobs_var=unobserved_var)
model.fit(trainT, max_iter=max_iter)
_T = model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
observed_var = 0.01
unobserved_var = 1.
for i in range(n_trial):
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r, controlled_var=True,
obs_var=observed_var, unobs_var=unobserved_var)
model.fit(trainT, max_iter=max_iter)
_T = model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
observed_var = 0.001
unobserved_var = 10.
for i in range(n_trial):
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r, controlled_var=True,
obs_var=observed_var, unobs_var=unobserved_var)
model.fit(trainT, max_iter=max_iter)
_T = model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
observed_var = 0.001
unobserved_var = 100.
for i in range(n_trial):
model = BayesianRescal(n_dim, var_e=var_e, var_x=var_x, var_r=var_r, controlled_var=True,
obs_var=observed_var, unobs_var=unobserved_var)
model.fit(trainT, max_iter=max_iter)
_T = model._reconstruct()
print(roc_auc_score(T[test_mask==1], _T[test_mask==1]))
var_list = [0.001, 0.01, 0.1, 1., 10., 100.]
best_roc = 0
for (var_x, var_e, var_r) in itertools.product(var_list,repeat=3):
A, R, f, itr, exectimes = rescal.rescal_als(X, n_dim, lambda_A=var_x, lambda_R=var_e, lambda_V=var_r)
_X = np.zeros_like(T)
for k in range(T.shape[0]):
_X[k] = np.dot(np.dot(A, R[k]), A.T)
score = roc_auc_score(T.flatten(), _X.flatten())
print('var_x:{0:3.3f}, var_e:{1:3.3f}, var_r:{2:3.3f}, AUC-ROC:{3:.3f}'.format(var_x, var_e, var_r, score))
if score > best_roc:
best_vars = (var_x, var_e, var_r)
        best_A, best_R = A, R  # keep the ALS factors; `model` here would be a stale object from an earlier cell
best_roc = score
lambda_a, lambda_r, lambda_v = best_vars
print(best_vars, best_roc)
import numpy as np
import os
import itertools
from scipy.sparse import csr_matrix
import logging
import pickle
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
import time
import rescal
from brescal import BayesianRescal
from runner import load_dataset
from seq_brescal import PFBayesianRescal
from seq_logit_brescal import PFBayesianLogitRescal
%matplotlib inline
logger = logging.getLogger()
for handler in logger.handlers:
logger.removeHandler(handler)
datasets = ['kinship', 'umls', 'nation']
result = dict()
train_test_validation = dict()
n_trial = 10
max_iter = 20
n_dim = 10
models = ['rescal', 'brescal', 'blogit']
for dataset in datasets:
train_test_validation[(dataset)] = list()
for model, dataset in itertools.product(models, datasets):
result[(dataset, model)] = list()
if not os.path.exists('../result/rescal_vs_brescal.pkl'):
for nt in range(n_trial):
tic = time.time()
for dataset in datasets:
T = load_dataset(dataset)
n_relation, n_entity, _ = T.shape
samples = [(i, j, k) for i, j, k in itertools.product(range(n_relation), range(n_entity), range(n_entity))]
total = np.prod(T.shape)
np.random.shuffle(samples)
train_p = 0.1
validation_p = 0.3
test_p = 1. - validation_p - train_p
train_test_validation[dataset].append(samples)
train_mask = np.zeros_like(T)
validation_mask = np.zeros_like(T)
test_mask = np.zeros_like(T)
for idx in range(int(total * train_p)):
i, j, k = samples[idx]
train_mask[i, j, k] = 1
for idx in range(int(total * validation_p)):
i, j, k = samples[int(total * train_p) + idx]
validation_mask[i, j, k] = 1
for idx in range(int(total * test_p)):
i, j, k = samples[int(total * train_p) + int(total * validation_p) + idx]
test_mask[i, j, k] = 1
trainT = np.zeros_like(T)
trainT[train_mask == 1] = T[train_mask == 1]
lambda_As = [0.01, 0.1, 1.]
lambda_Rs = [0.01, 0.1, 1.]
best_roc = 0
for lambda_A, lambda_R in itertools.product(lambda_As, lambda_Rs):
X = list()
for k in range(n_relation):
X.append(csr_matrix(trainT[k]))
A, R, f, itr, exectimes = rescal.rescal_als(X, n_dim, lambda_A=lambda_A, lambda_R=lambda_R)
_X = np.zeros_like(T)
for k in range(T.shape[0]):
_X[k] = np.dot(np.dot(A, R[k]), A.T)
score = roc_auc_score(T[validation_mask == 1], _X[validation_mask == 1])
if score > best_roc:
best_vars = (lambda_A, lambda_R)
best_A, best_R = A, R
best_roc = score
_X = np.zeros_like(T)
for k in range(T.shape[0]):
_X[k] = np.dot(np.dot(best_A, best_R[k]), best_A.T)
score = roc_auc_score(T[test_mask == 1], _X[test_mask == 1])
print('RESCAL', dataset, best_vars, score)
result[(dataset, models[0])].append(score)
var_list = [0.001, 0.01, 0.1]
var_e = 1
var_r = 1
best_roc = 0
for var_x in var_list:
_model = PFBayesianRescal(n_dim, var_x=var_x, n_particles=1, compute_score=False,
parallel=False, sample_all=True)
seq = _model.fit(T, obs_mask=train_mask.copy(), max_iter=0)
_T = _model._reconstruct(_model.E[0], _model.R[0])
score = roc_auc_score(T[validation_mask == 1], _T[validation_mask == 1])
if score > best_roc:
best_vars = (var_x, var_e, var_r)
best_model = _model
best_roc = score
_T = best_model._reconstruct(best_model.E[0], best_model.R[0])
score = roc_auc_score(T[test_mask == 1], _T[test_mask == 1])
print('BRESCAL', dataset, best_vars, score)
result[(dataset, models[1])].append(score)
# observed_vars = [0.01, 0.001]
# unobserved_vars = [1., 10.]
# best_roc = 0
# for observed_var, unobserved_var in itertools.product(observed_vars, unobserved_vars):
# model = BayesianRescal(n_dim, var_e=var_e, var_r=var_r, controlled_var=True,
# obs_var=observed_var, unobs_var=unobserved_var)
# model.fit(trainT, max_iter=max_iter)
# _T = model._reconstruct()
# score = roc_auc_score(T[validation_mask == 1], _T[validation_mask == 1])
# if score > best_roc:
# best_vars = (observed_var, unobserved_var)
# best_model = model
# best_roc = score
# _T = best_model._reconstruct()
# score = roc_auc_score(T[test_mask == 1], _T[test_mask == 1])
# print('BRESCAL_CTRL', dataset, score)
# result[(dataset,'brescal_ctrl')].append(score)
_model = PFBayesianLogitRescal(n_dim, n_particles=1, compute_score=False)
seq = _model.fit(T, obs_mask=train_mask.copy(), max_iter=0)
particle = _model.p_weights.argmax()
_T = _model._reconstruct(_model.E[0], _model.R[0])
score = roc_auc_score(T[test_mask == 1], _T[test_mask == 1])
print('BRESCAL_LOGIT', dataset, score)
result[(dataset, models[2])].append(score)
print(time.time() - tic)
pickle.dump([result, train_test_validation], open('../result/rescal_vs_brescal.pkl', 'wb'))
result, samples = pickle.load(open('../result/rescal_vs_brescal.pkl', 'rb'))
plt.figure(figsize=(8, 2.4))
width = 1
title_size = 14
label_size = 12
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
for plt_no, dataset in enumerate(datasets):
plt.subplot(1, len(datasets), plt_no+1)
for model in models:
mean = np.mean(result[(dataset,model)])
std = np.std(result[(dataset,model)])
label = model.upper()
if model == 'brescal_logit':
label = 'BLOGIT'
plt.bar(models.index(model)+0.15, mean, width-0.3, color=tableau20[models.index(model)], yerr=std, label=label)
plt.xticks(np.arange(len(models))+0.50, ['%s'% (model.upper()) for model in models], rotation=45)
if plt_no == 0:
# plt.legend(loc='lower left')
# plt.setp(plt.gca().get_legend().get_texts(), fontsize='10')
plt.ylabel('ROC-AUC score', size=label_size)
# plt.tick_params(
# axis='x', # changes apply to the x-axis
# which='both', # both major and minor ticks are affected
# bottom='off', # ticks along the bottom edge are off
# top='off', # ticks along the top edge are off
# labelbottom='off') # labels along the bottom edge are off
plt.title('%s' % dataset.upper(), size=title_size)
plt.ylim((0.4,1))
# plt.savefig('../paper/images/rescal_vs_brescal.pdf', format='PDF', bbox_inches='tight', pad_inches=0.1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Splitting the kinship dataset into train/test sets
Step2: 3. Training BayesianRESCAL
Step3: 3.2. Training RESCAL
Step4: 3.3. Compare both models by ROC-AUC
Step5: ROC-AUC score of RESCAL
Step6: 4. Initialize Bayesian_RESCAL with RESCAL
Step7: ROC-AUC score of BRESCAL initialized by RESCAL
Step8: 5. Place different variances on observed / unobserved data points
Step9: A. Parameter optimization for RESCAL
Step10: For paper
|
11,715
|
<ASSISTANT_TASK:>
Python Code:
import os
import csv
import pandas as pd
import numpy as np
from scipy import stats
data_path = '../../data'
tmp_path = '../../tmp'
feature_path = os.path.join(data_path, 'evaluation/semcor/tsvetkov_semcor.csv')
subset = pd.read_csv(feature_path, index_col=0)
subset.columns = [c.replace('semcor.', '') for c in subset.columns]
subset.head()
subset.set_index('words', inplace=True)
#subset.drop('count_in_semcor', inplace=True, axis=1)
subset = subset.T
subset.head()
size = 50
fname = 'embeddings/glove.6B.{}d.txt'.format(size)
embedding_path = os.path.join(data_path, fname)
embeddings = pd.read_csv(embedding_path, sep=' ', header=None, index_col=0, quoting=csv.QUOTE_NONE).T
embeddings.head()
common_words = embeddings.columns.intersection(subset.columns)
embeddings = embeddings[common_words]
fname = os.path.join(tmp_path, 'glove_embeddings.csv')
embeddings.to_csv(fname)
from sklearn.metrics.pairwise import cosine_similarity as cos
pairwise = cos(embeddings.T)
distances = pd.DataFrame(pairwise, columns=common_words, index=common_words)
distances.to_csv(os.path.join(data_path, 'pairwise_sim.csv'))
S = subset[common_words]
X = embeddings[common_words]
correlations = pd.DataFrame({i:X.corrwith(S.iloc[i], axis=1) for i in range(len(S))})
correlations.columns = S.index
alignments = correlations.idxmax(axis=1)
correlations.max(axis=1).head()
qvec = correlations.max(axis=1).sum()
qvec
A = pd.DataFrame(0, index=range(len(X)), columns=S.index)
for dim, feat in alignments.iteritems():
A[feat][dim] = 1
A.head()
from sklearn.cross_decomposition import CCA
cca = CCA(n_components=1)
cca = cca.fit(X.T, S.T)
a = np.dot(X.T, cca.x_weights_)
b = np.dot(S.T, cca.y_weights_)
stats.pearsonr(a, b)
def qvec(features, embeddings):
    """Returns correlations between columns of `features` and `embeddings`.
    The aligned feature is the one with the highest correlation.
    The qvec score is the sum of correlations of aligned features.
    """
    common_words = embeddings.columns.intersection(features.columns)
S = features[common_words]
X = embeddings[common_words]
correlations = pd.DataFrame({i:X.corrwith(S.iloc[i], axis=1) for i in range(len(S))})
correlations.columns = S.index
return correlations
qvec(subset, embeddings).head()
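# The qvec score itself (the sum of the per-dimension best correlations) can
# then be recovered from the returned frame:
qvec(subset, embeddings).max(axis=1).sum()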
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: QVEC model
Step2: Learnt word embeddings
Step3: The Python variables S and X refer to $S$ and $X$ exactly as above.
Step4: Now we want the correlation between the rows of S and the rows of X. This may not be the easiest way to do it but it works.
Step5: For each row of this correlation matrix (i.e. for each of the dimensions of the embeddings), we want the linguistic feature that it is most correlated with. We also get the value of that correlation.
Step6: The score of the embeddings relative to the linguistic features is the sum of the maximum correlations. Note how this value depends on how many dimensions the embeddings have. For 300-dimension vectors trained (by them) from GloVe, the authors get 34.4, while I get 32.4. Note that our linguistic features are still different, so the fact that the discrepancy here is not too big is encouraging.
Step7: We don't really need it, but just to be explicit let's get the matrix $A$ of alignments.
Step8: The rest of the paper is a series of experiments training large models and evaluating them on both instrinic and extrinsic tasks, including QVEC. I'm not going to replicate that here, but the QVEC implementation is complete.
Step9: I believe the linear combinations I want are stored in the x_weights_ and y_weights_ attributes.
Step11: Succinct implementation
|
11,716
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.time import Time
from toolkit import EchelleSpectrum
kic8462852_1_url = 'http://staff.washington.edu/bmmorris/docs/KIC8462852.0001.wfrmcpc.fits'
kic8462852_2_url = 'http://staff.washington.edu/bmmorris/docs/KIC8462852.0003.wfrmcpc.fits'
kic8462852_3_url = 'http://staff.washington.edu/bmmorris/docs/KIC8462852.0065.wfrmcpc.fits'
spectroscopic_standard_url = 'http://staff.washington.edu/bmmorris/docs/BD28_4211.0034.wfrmcpc.fits'
telluric_standard_url = 'http://staff.washington.edu/bmmorris/docs/HR7916.0002.wfrmcpc.fits'
target_spectrum_1 = EchelleSpectrum.from_fits_url(kic8462852_1_url)
target_spectrum_2 = EchelleSpectrum.from_fits_url(kic8462852_2_url)
target_spectrum_3 = EchelleSpectrum.from_fits_url(kic8462852_3_url)
spectroscopic_standard = EchelleSpectrum.from_fits_url(spectroscopic_standard_url)
telluric_standard = EchelleSpectrum.from_fits_url(telluric_standard_url)
only_orders = np.arange(len(target_spectrum_1.spectrum_list))
target_spectrum_1.continuum_normalize(spectroscopic_standard,
polynomial_order=10,
only_orders=only_orders,
plot_masking=False)
only_orders = np.arange(len(target_spectrum_2.spectrum_list))
target_spectrum_2.continuum_normalize(spectroscopic_standard,
polynomial_order=10,
only_orders=only_orders,
plot_masking=False)
only_orders = np.arange(len(target_spectrum_3.spectrum_list))
target_spectrum_3.continuum_normalize(spectroscopic_standard,
polynomial_order=10,
only_orders=only_orders,
plot_masking=False)
telluric_standard.continuum_normalize(spectroscopic_standard,
polynomial_order=10,
only_orders=only_orders,
plot_masking=False)
rv_shifts = u.Quantity([target_spectrum_1.rv_wavelength_shift(order)
for order in only_orders])
median_rv_shift = np.median(rv_shifts)
target_spectrum_1.offset_wavelength_solution(median_rv_shift)
rv_shifts = u.Quantity([target_spectrum_2.rv_wavelength_shift(order)
for order in only_orders])
median_rv_shift = np.median(rv_shifts)
target_spectrum_2.offset_wavelength_solution(median_rv_shift)
rv_shifts = u.Quantity([target_spectrum_3.rv_wavelength_shift(order)
for order in only_orders])
median_rv_shift = np.median(rv_shifts)
target_spectrum_3.offset_wavelength_solution(median_rv_shift)
rv_shifts = u.Quantity([telluric_standard.rv_wavelength_shift(order)
for order in only_orders])
median_rv_shift = np.median(rv_shifts)
telluric_standard.offset_wavelength_solution(median_rv_shift)
from toolkit import get_phoenix_model_spectrum
phoenix_6800_40 = get_phoenix_model_spectrum(6800, 4.0)
def get_nearest_order(feature_wavelength):
return np.argmin([np.abs(feature_wavelength - target_spectrum_1.get_order(i).wavelength.mean().value)
for i in range(len(target_spectrum_1.spectrum_list))])
def plot_spectral_feature(spectrum, center_wavelength, width_angstroms,
phoenix_model=phoenix_6800_40, plot_model=True,
label=None, title=None, ax=None, legend=True,
spectrum_kwargs=None, model_kwargs=None):
    """Plot the spectrum, centered on wavelength ``center_wavelength``,
    with width ``width_angstroms``.
    """
if ax is None:
ax = plt.gca()
if title is None:
title = 'APO/ARCES (Brett Morris)'
if spectrum_kwargs is None:
spectrum_kwargs = dict(lw=1, color='k')
if model_kwargs is None:
model_kwargs = dict(label='PHOENIX model', color='r', alpha=0.5)
feature_order = spectrum.get_order(get_nearest_order(center_wavelength))
normed_flux = feature_order.masked_flux / np.median(feature_order.masked_flux)
ax.plot(feature_order.masked_wavelength, normed_flux,
label=spectrum.name, **spectrum_kwargs)
model_already_plotted = any([line.get_label().startswith('PHOENIX')
for line in ax.get_lines()])
if plot_model and not model_already_plotted:
        model_wavelength_range = ((phoenix_model.wavelength.value < center_wavelength + width_angstroms/2) &
                                  (phoenix_model.wavelength.value > center_wavelength - width_angstroms/2))
        normed_model_flux = phoenix_model.flux / np.median(phoenix_model.flux)
        normed_model_flux *= (np.median(normed_flux) /
                              normed_model_flux[model_wavelength_range].max())
        ax.plot(phoenix_model.wavelength, normed_model_flux,
**model_kwargs)
ax.set_title(title)
ax.set_xlabel('Wavelength [Angstrom]')
ax.set_ylabel('Flux')
ax.set_xlim([center_wavelength - width_angstroms/2,
center_wavelength + width_angstroms/2])
ax.set_ylim([0, 1.1*normed_flux.max()])
if legend:
ax.legend(fontsize=10, bbox_to_anchor=(0, 1, 1.4, 0))
ax.get_xaxis().get_major_formatter().set_useOffset(False)
return ax
obs_time = Time(target_spectrum_1.header['DATE-OBS'], format='isot')
print("Date of observation (UTC): ", obs_time.datetime)
target_spectrum_1.name += " " + str(obs_time.datetime.date())
obs_time = Time(target_spectrum_2.header['DATE-OBS'], format='isot')
print("Date of observation (UTC): ", obs_time.datetime)
target_spectrum_2.name += " " + str(obs_time.datetime.date())
obs_time = Time(target_spectrum_3.header['DATE-OBS'], format='isot')
print("Date of observation (UTC): ", obs_time.datetime)
target_spectrum_3.name += " " + str(obs_time.datetime.date())
plot_spectral_feature(target_spectrum_1, 5892, 10, spectrum_kwargs=dict(lw=1.5, color='#36C400'))
plot_spectral_feature(target_spectrum_2, 5892, 10, spectrum_kwargs=dict(lw=1.5, color='#2D0CE8'))
plot_spectral_feature(target_spectrum_3, 5892, 10, spectrum_kwargs=dict(lw=1.5, color='#EA00FF'))
plot_spectral_feature(telluric_standard, 5892, 10, spectrum_kwargs=dict(lw=1.5, color='#0B70E8'))
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
# H-alpha
plot_spectral_feature(target_spectrum_1, 6562.3, 20, ax=ax[0], legend=False)
plot_spectral_feature(target_spectrum_2, 6562.3, 20, ax=ax[0], legend=False)
plot_spectral_feature(target_spectrum_3, 6562.3, 20, ax=ax[0], legend=False)
plot_spectral_feature(telluric_standard, 6562.3, 20, ax=ax[0],
spectrum_kwargs=dict(lw=1, color='b'), legend=False,
title=r'H$\alpha$')
# H-beta
plot_spectral_feature(target_spectrum_1, 4861, 20, ax=ax[1], legend=False)
plot_spectral_feature(target_spectrum_2, 4861, 20, ax=ax[1], legend=False)
plot_spectral_feature(target_spectrum_3, 4861, 20, ax=ax[1], legend=False)
plot_spectral_feature(telluric_standard, 4861, 20, ax=ax[1],
spectrum_kwargs=dict(lw=1, color='b'), legend=True,
title=r'H$\beta$')
from scipy.optimize import fmin_powell
def gaussian(x, amp, mean, sigma):
return amp * np.exp(-0.5 * (mean - x)**2 / sigma**2)
def four_gaussians(wavelength, amp1, amp2, amp3, amp4,
mean1, mean2, mean3, mean4,
sig1, sig2, sig3, sig4):
return (1 + gaussian(wavelength, amp1, mean1, sig1) +
gaussian(wavelength, amp2, mean2, sig2) +
gaussian(wavelength, amp3, mean3, sig3) +
gaussian(wavelength, amp4, mean4, sig4))
def chi2(params, wavelength, flux):
return np.sum((four_gaussians(wavelength, *params) - flux)**2)
# Get the spectral order with the NaD feature
na_feature_wavelength = 5892
na_order_target = target_spectrum_1.get_order(get_nearest_order(na_feature_wavelength))
na_order_telluric = telluric_standard.get_order(get_nearest_order(na_feature_wavelength))
# Normalize the spectrum by a low order polynomial
lam = na_order_target.masked_wavelength.value
flux = na_order_target.masked_flux.value / na_order_telluric.masked_flux.value
continuum_params = np.polyfit(lam-lam.mean(), flux, 2)
flux /= np.polyval(continuum_params, lam-lam.mean())
# Initial gaussian amplitudes, means, and standard deviations
init_params = [-0.8, -0.5, -0.5, -0.4,
5889.5, 5889.2, 5895.5, 5895.2,
0.1, 0.1, 0.1, 0.1]
# Minimize the chi2
best_params = fmin_powell(chi2, init_params, args=(lam, flux))
# Plot the results
fig, ax = plt.subplots(1, 2, figsize=(12, 5))
ax[0].plot(lam, flux, 'k-', lw=2, label='ARCES')
ax[0].set_xlim([5888.5, 5890.5])
ax[0].plot(lam, four_gaussians(lam, *best_params), 'r', label='Simple fit')
ax[0].set_xlabel('Wavelength')
ax[0].set_ylabel('Flux')
ax[1].plot(lam, flux, 'k-', lw=2, label='ARCES')
ax[1].set_xlim([5894, 5896.5])
ax[1].plot(lam, four_gaussians(lam, *best_params), 'r', label='Simple fit')
ax[1].set_xlabel('Wavelength')
ax[1].set_ylabel('Flux')
ax[1].legend(loc='lower right')
fig.suptitle('NaD absorption features')
for axis in ax:
axis.get_xaxis().get_major_formatter().set_useOffset(False)
from scipy.integrate import quad
a = 5888.5
b = 5890
c = 5894.5
d = 5896.5
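# Equivalent width: W = ∫(1 - F/F_c) dλ. With the continuum normalized to 1
# and the fitted profile integrated over [a, b], this reduces to
# W = (b - a) - ∫F dλ, which is what is computed below.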
integral1, err = quad(lambda x: four_gaussians(x, *best_params), a, b)
integral2, err = quad(lambda x: four_gaussians(x, *best_params), c, d)
equivalent_width1 = ((b-a) - integral1)
equivalent_width2 = ((d-c) - integral2)
print("NaD equivalent width (at 5889.5 A):", equivalent_width1, "Angstrom")
print("NaD equivalent width (at 5895.5 A):", equivalent_width2, "Angstrom")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and cache spectra
Step2: Fit a polynomial of order polynomial_order to each spectral order of the spectrum of spectroscopic_standard, then normalize each spectral order by that polynomial to remove the blaze function.
Step3: Calculate the wavelength offset necessary to shift the spectra into the star's rest frame, then shift the wavelengths accordingly.
Step4: Download a PHOENIX model atmosphere (Husser 2013) for comparison with Boyajian's star, with $T_{eff}=6800$ K and $\log g = 4.0$.
Step6: Define some convenience functions for plotting spectral features
Step7: Plot the NaD absorption features.
Step8: Plot the spectrum at H$\alpha$ and H$\beta$.
Step9: Fit the NaD absorption features to measure the approximate equivalent width, for comparison with these results Jason Curtis on Jason Wright's blog
|
11,717
|
<ASSISTANT_TASK:>
Python Code:
#Create references to important directories we will use over and over
import os, sys
DATA_HOME_DIR = '/home/nathan/olin/spring2017/line-follower/line-follower/data'
#import modules
import numpy as np
from glob import glob
from PIL import Image
from tqdm import tqdm
import bcolz
from matplotlib import pyplot as plt
import seaborn as sns
%matplotlib inline
from keras.layers import *
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers.wrappers import TimeDistributed
from keras.metrics import categorical_crossentropy, categorical_accuracy
%cd $DATA_HOME_DIR
path = DATA_HOME_DIR
train_path1=path + '/sun_apr_16_office_full_line_1'
train_path2=path + '/qea_blob_1'
# valid_path1=path + '/sun_apr_16_office_full_line_2'
# valid_path2=path + '/qea_blob_2'
valid_path1=path + '/qea-square_3'#+ '/sun_apr_16_office_full_line_2'
# train_path=path + '/qea_blob_1'
# valid_path=path + '/qea_blob_2'
INPUT_LEN = 512 # The number of columns in the CSV
WINDOW_SIZE = 16
def load_array(fname):
return bcolz.open(fname)[:]
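# `windows` turns the frame-by-frame data into overlapping sequences of
# length `seq_len`; the training target for each window is the steering
# command at its final frame.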
def windows(X, Y, seq_len=10):
assert len(X) == len(Y)
result = []
for index in range(X.shape[0] - seq_len):
result.append([X[index:index+seq_len], Y[index+seq_len-1]])
result = np.array(result)
# np.random.shuffle(result)
return np.array(list(result[:,0])), np.array(list(result[:,1]))
def get_data(paths):
Y_return = []
for path in paths:
%cd $path
Y_train = np.genfromtxt('cmd_vel.csv', delimiter=',')[:,1] # only use turning angle
Y_train = np.concatenate((Y_train, Y_train*-1))
Y_return.extend(Y_train)
X_all = load_array(paths[-1]+'/X_train_features3.b')
X_all = np.reshape(X_all, (len(X_all), INPUT_LEN))
print (len(X_all), len(Y_return))
X_windowed, Y_windowed = windows(X_all, Y_return, WINDOW_SIZE)
return np.array(X_windowed), np.array(Y_windowed)
X_train, Y_train = get_data([train_path1, train_path2])
X_valid, Y_valid = get_data([valid_path1])
X_train.shape
in_shape = (WINDOW_SIZE, INPUT_LEN)
def get_model():
model = Sequential([
LSTM(2048, return_sequences=False, input_shape=in_shape),
Dropout(0.5),
# Flatten(input_shape=in_shape),
Dense(512, activation='relu'),
# Dense(512, activation='relu'),
# Dense(512, activation='relu'),
Dropout(0.5),
Dense(1)
])
model.compile(loss='mean_absolute_error', optimizer='adam')
return model
model = get_model()
model.summary()
%cd $DATA_HOME_DIR
model.load_weights('LSTM_postprocessor_v1.h5')
history = model.fit(X_train, Y_train,#X_train[:,-1],
batch_size = 96,
nb_epoch=150,
validation_data=(X_valid, Y_valid),
verbose=True)
conv_predictions = X_train[:,-1]
recurrent_predictions = model.predict(X_train)
ground_truth = Y_train
for x,y,z in zip(conv_predictions[:,0], ground_truth, recurrent_predictions[:,0]):
print ("{:07f}\t{:07f}\t{:07f}\t".format(x,y,z))
conv_predictions = X_valid[:,-1]
recurrent_predictions = model.predict(X_valid)
ground_truth = Y_valid
for x,y,z in zip(conv_predictions[:,0], ground_truth, recurrent_predictions[:,0]):
print ("{:07f}\t{:07f}\t{:07f}\t".format(x,y,z))
plt.plot(ground_truth[:100])
sns.tsplot(history.history['val_loss'])
for x,y in zip(X_valid, Y_valid):
print ("{}\t{}".format(np.reshape(x,(10,)),y))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gather data
Step2: Network
Step3: Train the model
Step4: Analyze training
|
11,718
|
<ASSISTANT_TASK:>
Python Code:
import json
import pandas as pd
import os
from os.path import join
import numpy as np
from joblib import Parallel, delayed
import sys
cwd = os.getcwd()
data_path = join(cwd, '..', 'Data storage')
file_date = '2018-03-06'
%load_ext watermark
%watermark -iv -v
# Load the "autoreload" extension
%load_ext autoreload
# always reload modules marked with "%aimport"
%autoreload 1
# add the 'src' directory as one where we can import modules
src_dir = join(os.getcwd(), os.pardir, 'src')
sys.path.append(src_dir)
%aimport Data.data_extraction
from Data.data_extraction import facility_line_to_df
%aimport Analysis.index
from Analysis.index import add_datetime, add_quarter
path = join(data_path, 'Raw EIA bulk', '{} ELEC.txt'.format(file_date))
with open(path, 'r') as f:
raw_txt = f.readlines()
gen_rows = [row for row in raw_txt if 'ELEC.PLANT.GEN' in row
and 'series_id' in row
and 'ALL.M' in row
and 'ALL-' not in row]
total_fuel_rows = [row for row in raw_txt if 'ELEC.PLANT.CONS_TOT_BTU' in row
and 'series_id' in row
and 'ALL.M' in row
and 'ALL-' not in row]
eg_fuel_rows = [row for row in raw_txt if 'ELEC.PLANT.CONS_EG_BTU' in row
and 'series_id' in row
and 'ALL.M' in row
and 'ALL-' not in row]
if __name__ == '__main__':
exception_list = []
facility_gen = pd.concat(Parallel(n_jobs=-1)(delayed(facility_line_to_df)(json.loads(row)) for row in gen_rows))
facility_gen.reset_index(drop=True, inplace=True)
facility_gen.rename({'value':'generation (MWh)'}, axis=1, inplace=True)
facility_gen.loc[:,'lat'] = facility_gen.loc[:,'lat'].astype(float)
facility_gen.loc[:,'lon'] = facility_gen.loc[:,'lon'].astype(float)
facility_gen.loc[:, 'plant id'] = facility_gen.loc[:, 'plant id'].astype(int)
#drop
facility_gen.tail()
if __name__ == '__main__':
exception_list = []
facility_all_fuel = pd.concat(Parallel(n_jobs=-1)(delayed(facility_line_to_df)(json.loads(row)) for row in total_fuel_rows))
facility_all_fuel.reset_index(drop=True, inplace=True)
facility_all_fuel.rename({'value':'total fuel (mmbtu)'}, axis=1, inplace=True)
facility_all_fuel.loc[:,'lat'] = facility_all_fuel.loc[:,'lat'].astype(float)
facility_all_fuel.loc[:,'lon'] = facility_all_fuel.loc[:,'lon'].astype(float)
facility_all_fuel.loc[:,'plant id'] = facility_all_fuel.loc[:,'plant id'].astype(int)
if __name__ == '__main__':
exception_list = []
facility_eg_fuel = pd.concat(Parallel(n_jobs=-1)(delayed(facility_line_to_df)(json.loads(row)) for row in eg_fuel_rows))
facility_eg_fuel.reset_index(drop=True, inplace=True)
facility_eg_fuel.rename({'value':'elec fuel (mmbtu)'}, axis=1, inplace=True)
facility_eg_fuel.loc[:,'lat'] = facility_eg_fuel.loc[:,'lat'].astype(float)
facility_eg_fuel.loc[:,'lon'] = facility_eg_fuel.loc[:,'lon'].astype(float)
facility_eg_fuel.loc[:,'plant id'] = facility_eg_fuel.loc[:,'plant id'].astype(int)
keep_cols = ['fuel', 'generation (MWh)', 'month', 'plant id', 'prime mover', 'year',
'geography', 'lat', 'lon', 'last_updated']
merge_cols = ['fuel', 'month', 'plant id', 'year']
gen_total_fuel = facility_all_fuel.merge(facility_gen.loc[:,keep_cols],
how='outer', on=merge_cols)
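# An outer merge on (fuel, month, plant id, year) keeps rows present in either
# frame; overlapping columns come back with _x/_y suffixes, which
# fill_missing() below collapses into single columns.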
def fill_missing(df):
cols = [col[:-2] for col in df.columns if '_x' in col]
# Create new column from the _x version, fill missing values from the _y version
for col in cols:
df[col] = df.loc[:, col + '_x']
df.loc[df[col].isnull(), col] = df.loc[df[col].isnull(), col + '_y']
df.drop([col+'_x', col+'_y'], axis=1, inplace=True)
fill_missing(gen_total_fuel)
keep_cols = ['fuel', 'elec fuel (mmbtu)', 'month', 'plant id', 'prime mover', 'year',
'geography', 'lat', 'lon', 'last_updated']
all_facility_data = gen_total_fuel.merge(facility_eg_fuel.loc[:,keep_cols],
how='outer', on=merge_cols)
fill_missing(all_facility_data)
all_facility_data.drop(['units', 'series_id'], axis=1, inplace=True)
all_facility_data.head()
add_quarter(all_facility_data)
path = join(data_path, 'Final emission factors.csv')
ef = pd.read_csv(path, index_col=0)
fossil_factors = dict(zip(ef.index, ef['Fossil Factor']))
total_factors = dict(zip(ef.index, ef['Total Factor']))
fossil_factors, total_factors
# Start with 0 emissions in all rows
# For fuels where we have an emission factor, replace the 0 with the calculated value
all_facility_data['all fuel fossil CO2 (kg)'] = 0
all_facility_data['elec fuel fossil CO2 (kg)'] = 0
all_facility_data['all fuel total CO2 (kg)'] = 0
all_facility_data['elec fuel total CO2 (kg)'] = 0
for fuel in total_factors.keys():
# All fuel CO2 emissions
all_facility_data.loc[all_facility_data['fuel']==fuel,'all fuel fossil CO2 (kg)'] = \
all_facility_data.loc[all_facility_data['fuel']==fuel,'total fuel (mmbtu)'] * fossil_factors[fuel]
all_facility_data.loc[all_facility_data['fuel']==fuel,'all fuel total CO2 (kg)'] = \
all_facility_data.loc[all_facility_data['fuel']==fuel,'total fuel (mmbtu)'] * total_factors[fuel]
# Electric fuel CO2 emissions
all_facility_data.loc[all_facility_data['fuel']==fuel,'elec fuel fossil CO2 (kg)'] = \
all_facility_data.loc[all_facility_data['fuel']==fuel,'elec fuel (mmbtu)'] * fossil_factors[fuel]
all_facility_data.loc[all_facility_data['fuel']==fuel,'elec fuel total CO2 (kg)'] = \
all_facility_data.loc[all_facility_data['fuel']==fuel,'elec fuel (mmbtu)'] * total_factors[fuel]
# Fossil CO2
all_facility_data.loc[~(all_facility_data['all fuel fossil CO2 (kg)']>=0),
'all fuel fossil CO2 (kg)'] = 0
all_facility_data.loc[~(all_facility_data['elec fuel fossil CO2 (kg)']>=0),
'elec fuel fossil CO2 (kg)'] = 0
# Total CO2
all_facility_data.loc[~(all_facility_data['all fuel total CO2 (kg)']>=0),
'all fuel total CO2 (kg)'] = 0
all_facility_data.loc[~(all_facility_data['elec fuel total CO2 (kg)']>=0),
'elec fuel total CO2 (kg)'] = 0
path = join(data_path, 'Derived data',
'Facility gen fuels and CO2 {}.csv'.format(file_date))
all_facility_data.to_csv(path, index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Date string for filenames
Step2: Read ELEC.txt file
Step3: Filter lines to only include facility generation
Step4: Combine generation into one large dataframe
Step5: Combine total fuel use into one large dataframe
Step6: Combine total fuel use for electricity into one large dataframe
Step7: Merge dataframes
Step8: Fill in missing values from the first merge
Step9: Fill in missing values from second merge and drop units/series_id columns
Step10: Add datetime and quarter columns
Step11: Load emission factors
Step12: Apply factors to facility generation
Step13: Apply emission factors
Step14: Set nan and negative emissions to 0
Step15: Export
|
11,719
|
<ASSISTANT_TASK:>
Python Code:
import keras
import numpy as np
path = keras.utils.get_file(
'nietzsche.txt',
origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
text = open(path).read().lower()
print('Corpus length:', len(text))
# Length of extracted character sequences
maxlen = 60
# We sample a new sequence every `step` characters
step = 3
# This holds our extracted sequences
sentences = []
# This holds the targets (the follow-up characters)
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('Number of sequences:', len(sentences))
# List of unique characters in the corpus
chars = sorted(list(set(text)))
print('Unique characters:', len(chars))
# Dictionary mapping unique characters to their index in `chars`
char_indices = dict((char, chars.index(char)) for char in chars)
# Next, one-hot encode the characters into binary arrays.
print('Vectorization...')
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
from keras import layers
model = keras.models.Sequential()
model.add(layers.LSTM(128, input_shape=(maxlen, len(chars))))
model.add(layers.Dense(len(chars), activation='softmax'))
optimizer = keras.optimizers.RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
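# `sample` below reweights the predicted distribution with a temperature T:
# p_i' ∝ exp(log(p_i) / T). T < 1 sharpens the distribution (safer, more
# repetitive text); T > 1 flattens it (more surprising text, more mistakes).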
def sample(preds, temperature=1.0):
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
import random
import sys
for epoch in range(1, 60):
print('epoch', epoch)
# Fit the model for 1 epoch on the available training data
model.fit(x, y,
batch_size=128,
epochs=1)
# Select a text seed at random
start_index = random.randint(0, len(text) - maxlen - 1)
generated_text = text[start_index: start_index + maxlen]
print('--- Generating with seed: "' + generated_text + '"')
for temperature in [0.2, 0.5, 1.0, 1.2]:
print('------ temperature:', temperature)
sys.stdout.write(generated_text)
# We generate 400 characters
for i in range(400):
sampled = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(generated_text):
sampled[0, t, char_indices[char]] = 1.
preds = model.predict(sampled, verbose=0)[0]
next_index = sample(preds, temperature)
next_char = chars[next_index]
generated_text += next_char
generated_text = generated_text[1:]
sys.stdout.write(next_char)
sys.stdout.flush()
print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we will extract partially-overlapping sequences of length maxlen, one-hot encode them and pack them in a 3D Numpy array x of shape (sequences, maxlen, unique_characters).
Step2: Building the network
Step3: Since our targets are one-hot encoded, we will use categorical_crossentropy as the loss to train the model
Step4: Training the language model and sampling from it
Step5: Finally, this is the loop where we repeatedly train and generate text. We start generating text using a range of different temperatures
|
11,720
|
<ASSISTANT_TASK:>
Python Code:
# To better match the math equations above, collection starts at index 1 instead of 0
def partition(collection, n, k):
if n == 0:
return "No elements in collection to partition"
    # initialize matrices: m[i][j] = min cost of splitting the first i items
    # into j ranges; d[i][j] = position of the last divider for that optimum
    m = [[float('inf')] * (k+1) for _ in range(n+1)]
    d = [[-1] * (k+1) for _ in range(n+1)]
    # create prefix sums
    prefix_sum = [0] * (n+1)
    for i in range(1, n+1):
        prefix_sum[i] = prefix_sum[i-1] + collection[i]
    # Base case from eq (1) above: one element, any number of ranges
    for j in range(1, k+1):
        m[1][j] = collection[1]
    # Base case from eq (2) above: a single range holds the first i elements
    for i in range(1, n+1):
        m[i][1] = prefix_sum[i]
    # eq (0): try every position x for the last divider
    for i in range(2, n+1):
        for j in range(2, k+1):
            for x in range(1, i):
                cost = max(m[x][j-1], prefix_sum[i] - prefix_sum[x])
                if m[i][j] > cost:
                    m[i][j] = cost
                    d[i][j] = x
    reconstruct_partition(collection, d, n, k)

def reconstruct_partition(collection, dividers, n, k):
    # Walk the divider matrix backwards, printing each of the k ranges
    if k == 1:
        print_items(collection, 1, n)
    else:
        reconstruct_partition(collection, dividers, dividers[n][k], k - 1)
        print_items(collection, dividers[n][k] + 1, n)

def print_items(collection, start, end):
    print(" ".join(str(collection[i]) for i in range(start, end + 1)))
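# A hypothetical usage sketch: the text indexes collections from 1, so pad
# index 0 with a placeholder.
partition([None, 1, 2, 3, 4, 5, 6, 7, 8, 9], 9, 3)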
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Less imperative
|
11,721
|
<ASSISTANT_TASK:>
Python Code:
import processing_tools as pt
filepath = './example/example.h5'
data = pt.ParticleDistribution(filepath)
data.su2si
data.dict['x']
panda_data = data.DistFrame()
panda_data[0:5]
import matplotlib.pyplot as plt
plt.style.use('ggplot')  # optional
x_axis = 'py'
y_axis = 'px'
plot = panda_data.plot(kind='scatter',x=x_axis,y=y_axis)
#sets axis limits
plot.set_xlim([panda_data[x_axis].min(),panda_data[x_axis].max()])
plot.set_ylim([panda_data[y_axis].min(),panda_data[y_axis].max()])
plt.show()
stats = pt.Statistics(filepath)
#preparing the statistics
stats.slice(100)
stats.calc_emittance()
stats.calc_CoM()
stats.calc_current()
#display pandas example
panda_stats = stats.StatsFrame()
panda_stats[0:5]
ax = panda_stats.plot(x='z_pos',y='CoM_y')
panda_stats.plot(ax=ax, x='z_pos',y='std_y',c='b') #first option allows shared axes
plt.show()
FEL = pt.ProcessedData(filepath,num_slices=100,undulator_period=0.00275,k_fact=2.7)
panda_FEL = FEL.FELFrame()
panda_stats= FEL.StatsFrame()
panda_FEL[0:5]
import pandas as pd
cat = pd.concat([panda_FEL,panda_stats], axis=1, join_axes=[panda_FEL.index]) #joins the two if you need to plot
#FEL parameters as well as slicel statistics on the same plot
cat['1D_gain']=cat['1D_gain']*40000000000 #one can scale to allow for visual comparison if needed
az = cat.plot(x='z_pos',y='1D_gain')
cat.plot(ax=az, x='z_pos',y='MX_gain',c='b')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The module consists of a class 'ParticleDistribution' that, given a filepath, initializes a dictionary containing the following entries
Step2: Alternatively one can ask for a pandas dataframe where each column is one of the above properties of a macroparticle per row.
Step3: This allows for quick plotting using the inbuilt pandas methods
Step4: If further statistical analysis is required, the class 'Statistics' is provided. This contains methods to process standard properties of the electron bunch. This is called by giving a filepath to 'Statistics' The following operations can be performed
Step5: And finally there is the FEL_Approximations which calculate simple FEL properties per slice. This is a subclass of statistics and as such every method described above is callable.
Step6: If it is important to plot the statistical data alongside the FEL data, that can be easily achieved by concatinating the two sets as shown below
|
11,722
|
<ASSISTANT_TASK:>
Python Code:
from shapely.geometry import Point
import pyproj
import geopandas as gpd
proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
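# `sqlContext` below is assumed to be an already-initialized PySpark
# SQLContext; it is not created anywhere in this excerpt.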
entr_points = sqlContext.read.load('../why_yellow_taxi/Data/2016_(May)_New_York_City_Subway_Station_Entrances.json', \
format='json', header=True, inferSchema=True).collect()[0].asDict()['features']
routes = ['route_'+str(i) for i in range(1,12)]
entr_geo = gpd.GeoDataFrame(columns=['geometry', 'lines'])
for i in range(len(entr_points)):
entr_coor = entr_points[i].asDict()['geometry'].asDict()['coordinates']
entr_buffer = Point(proj(float(entr_coor[0]), float(entr_coor[1]))).buffer(100)
entr_prop = entr_points[i].asDict()['properties'].asDict()
entr_lines = [entr_prop[r] for r in routes if entr_prop[r]]
entr_geo = entr_geo.append({'geometry':entr_buffer, 'lines':entr_lines}, ignore_index=True)
shp = gpd.read_file('../why_yellow_taxi/Buffer/entr_buffer_100_feet_epsg4269_nad83/entr_buffer_100_feet_epsg4269_nad83.shp')
entr_geo.head(2)
shp.head(2)
entr_geo.head(2).geometry[1] == shp.head(2).geometry[1]
shp.head(2).geometry[0].centroid.x
shp.head(2).geometry[0].centroid.y
entr_geo.head(2).geometry[0].centroid.x
entr_geo.head(2).geometry[0].centroid.y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List
Step2: Identical or Not?
Step3: Detail
|
11,723
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pynucastro as pyrl
files = ["p-p-d-ec",
"d-pg-he3-de04",
"he3-he3pp-he4-nacr",
"c12-pg-n13-ls09",
"c13-pg-n14-nacr",
"n13--c13-wc12",
"n13-pg-o14-lg06",
"n14-pg-o15-im05",
"n15-pa-c12-nacr",
"o14--n14-wc12",
"o15--n15-wc12",
"o14-ap-f17-Ha96c",
"f17-pg-ne18-cb09",
"ne18--f18-wc12",
"f18-pa-o15-il10"]
rc = pyrl.RateCollection(files)
comp = pyrl.Composition(rc.get_nuclei())
comp.set_solar_like()
re = pyrl.Explorer(rc, comp, size=(1000,1000),
ydot_cutoff_value=1.e-20,
always_show_alpha=True)
re.explore()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This collection of rates has the main CNO rates plus a breakout rate into the hot CNO cycle
Step2: To evaluate the rates, we need a composition. This is defined using a list of Nuceli objects.
Step3: Interactive exploration is enabled through the Explorer class, which takes a RateCollection and a Composition
|
11,724
|
<ASSISTANT_TASK:>
Python Code:
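# The imports and unit helpers below are not shown in this excerpt; these are
# plausible stand-ins (assumptions) so the cells run. The conversion assumes
# k_B ≈ 0.0861733 meV/K, which matches T_KT = 25 meV ↔ 290 K used further
# down. DWaveModel / MCMCDriver come from the project's own modules (not shown).
import pickle
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as cst
from math import pi
from scipy.optimize import curve_fit

def meV_to_K(energy_mev):
    return energy_mev / 0.0861733

def K_to_meV(temperature_k):
    return temperature_k * 0.0861733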
Tc_mf = meV_to_K(0.5*250)
print meV_to_K(pi/2.0)
print 1.0/0.89
print cst.physical_constants["Boltzmann constant"]
print '$T_c^{MF} = $', Tc_mf, "K"
T_KT = meV_to_K(0.1*250)
print r"$T_{KT} = $", T_KT, "K"
T_CST = 0.25
BCS_PARAMS = {"width":4, "chem_potential": 0.0,
"hopping_constant": T_CST, "J_constant": 0.1 * T_CST / 0.89,
"g_constant": 0.25, "delta": 1.0 * T_CST, "use_assaad": True,
"uniform_phase": True, "temperature": 100}
MY_DWAVE_MODEL = DWaveModel(BCS_PARAMS)
print MY_DWAVE_MODEL
BCS_PARAMS = {"width":20, "use_assaad": True,
"uniform_phase": True, "temperature": 1.75*145.0}
MY_DWAVE_MODEL.set_params(BCS_PARAMS)
print MY_DWAVE_MODEL
print "temp: ", K_to_meV(MY_DWAVE_MODEL.temperature), "meV"
BCS_PARAMS = {"width":20, "use_assaad": True,
"uniform_phase": False, "temperature": 1.75*145.0}
MY_DWAVE_MODEL.set_params(BCS_PARAMS)
print MY_DWAVE_MODEL._uniform_phase
MC_Params = {"seed": 222315, "intervals": 100,
"target_snapshots": 15, "observable_list":["correlation_length"]}
MY_DRIVER = MCMCDriver(MY_DWAVE_MODEL, MC_Params)
MC_PARAMS_MP = {"intervals": BCS_PARAMS["width"]**2 / 2,
"target_snapshots": 25,
"algorithm":"metropolis"}
MC_PARAMS_CLUSTER = {"intervals": 5,
"target_snapshots": 25,
"algorithm":"cluster"}
MY_DRIVER.set_params(MC_PARAMS_CLUSTER)
print MY_DWAVE_MODEL._uniform_phase
print MY_DRIVER
print MY_DRIVER.params
#MY_DRIVER.mc_object.set_params({"temperature": 2.0 * 145.0})
#MY_DRIVER.thermalize(20000)
MY_DRIVER.mc_object.set_params({"temperature": 1.05 * 290.0 * 1.05 / 1.12})
MY_DRIVER.thermalize(50)
MY_DRIVER.execute()
result = MY_DRIVER.result
data = result.observable_results["correlation_length"]
print data["length_values"]
print data["correlation_values"]
print result
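# The fit functions used below are not defined in this excerpt. Plausible
# forms (assumptions), chosen to be consistent with how the fitted parameters
# are used (1/popt[1] read as a correlation length, -popt[1] as the exponent
# eta, and a three-parameter variant with an additive offset):
def func_short(x, a, b):
    return a * np.exp(-b * x)        # pure exponential decay
def func_full(x, a, b, c):
    return a * np.exp(-b * x) + c    # exponential decay plus offset
def func(x, a, b, c):
    return a * np.exp(-b * x) + c    # same form, used further down
def func_power(x, a, b):
    return a * x**b                  # power-law decay, eta = -b
def func_exponent(x, a, nu):
    return a * x**nu                 # assumed power law in temperature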
x_data = np.sqrt(data["length_values"])
y_data = data["correlation_values"]
fig, ax = plt.subplots(figsize = (10, 8), dpi=100, frameon=False)
ymin = 0.0
ymax = 1.0
ax.plot(x_data, y_data)
ax.set_ylim([ymin, ymax])
popt, pcov = curve_fit(func_short, x_data[1:], y_data[1:])
print popt
ax.plot(x_data, func_short(x_data, 2.0*popt[0], 2.0*popt[1]))
print "corr length:", 1.0/popt[1]
fig, ax = plt.subplots(figsize = (10, 8), dpi=100, frameon=False)
ax.plot(x_data[1:], np.log(y_data[1:]))
ax.plot(x_data[1:], np.log(popt[0]) - popt[1] * x_data[1:])
fig, ax = plt.subplots(figsize = (10, 8), dpi=100, frameon=False)
plt.imshow(MY_DRIVER.mc_object.xy_lattice, cmap=plt.cm.hot, interpolation='none')
plt.colorbar()
#Cf http://matplotlib.org/examples/pylab_examples/quiver_demo.html
fig, ax = plt.subplots(figsize = (10, 8), dpi=100, frameon=False)
plt.quiver(np.cos(MY_DRIVER.mc_object.xy_lattice), np.sin(MY_DRIVER.mc_object.xy_lattice))
MY_DRIVER.mc_object.make_wolff_step(np.pi/2.0)
dimension = MY_DRIVER.mc_object.xy_lattice.shape[0]
cluster = np.reshape(MY_DRIVER.mc_object.cluster, (dimension, dimension))
plt.imshow(cluster, cmap=plt.cm.hot, interpolation='none')
plt.colorbar()
print pi
print 5.65226755763 / (2.0 * pi), 3.77251040313 / (2.0 * pi)
neigh = MY_DRIVER.mc_object.lattice.get_neighbors()
results = pickle.load( open( "data_new.txt", "rb" ) )
data = results[0].observable_results["correlation_length"]
datas = {}
temps =np.array([])
for elem in results:
temps = np.append(temps, elem.bcs_params['temperature'])
temps = np.unique(temps)
for temp in temps:
datas[temp] = np.array([elem for elem in results if elem.bcs_params['temperature']==temp])
x_datas = {}
y_datas = {}
for temp in temps:
x_datas[temp] = np.sqrt(datas[temp][0].observable_results["correlation_length"]["length_values"])
y_datas[temp] = np.zeros((x_datas[temp].size))
total_sum = 0
for elem in datas[temp]:
y_datas[temp] +=\
elem.observable_results["correlation_length"]["correlation_values"]
y_datas[temp] /= datas[temp].size
fig, ax = plt.subplots(figsize = (14, 12), dpi=100, frameon=False)
corr_lens = {}
chosen_fun = func_power
for temp in temps[:1]:
x_data = x_datas[temp]
y_data = y_datas[temp]
print temp
ax.plot(x_data, y_data, label=str(temp))
#popt, pcov = curve_fit(func, x_data[0:], y_data[0:])
popt, pcov = curve_fit(chosen_fun, x_data[1:], y_data[1:])
print "temp: ", temp, "params: ", popt, r"$\eta$: ", -popt[1]
corr_lens[temp] = 1.0/popt[1]
ax.plot(x_data, chosen_fun(x_data, popt[0], popt[1]))
chosen_fun = func_full
for temp in temps[1:]:
x_data = x_datas[temp]
y_data = y_datas[temp]
print temp
ax.plot(x_data, y_data, label=str(temp))
#popt, pcov = curve_fit(func, x_data[0:], y_data[0:])
popt, pcov = curve_fit(chosen_fun, x_data[1:], y_data[1:])
print "temp: ", temp, "params: ", popt, "length: ", 1.0/popt[1], 1.0/popt[-1]
corr_lens[temp] = 1.0/popt[1]
ax.plot(x_data, chosen_fun(x_data, popt[0], popt[1], popt[2]))
ax.legend()
plt.savefig("transition.pdf")
fig, ax = plt.subplots(figsize = (14, 12), dpi=100, frameon=False)
x_es = np.sort(np.array(corr_lens.keys()))
y_es = np.array([corr_lens[elem] for elem in x_es])
ax.plot(x_es[1:], y_es[1:])
ax.grid(True)
plt.savefig("corr_length.pdf")
popt, pcov = curve_fit(func_exponent, x_es[1:], y_es[1:])
nu = popt[1]
print "nu", popt[1]
fig, ax = plt.subplots(figsize = (14, 12), dpi=100, frameon=False)
ax.plot(x_es[1:], y_es[1:])
ax.plot(x_es[1:-1], func_exponent(x_es[1:-1], popt[0], popt[1]))
ax.grid(True)
fig, ax = plt.subplots(figsize = (10, 8), dpi=100, frameon=False)
temp = 275.0
x_data = x_datas[temp]
y_data = y_datas[temp]
print x_data.shape
print y_data.shape
print x_es.shape
print data["length_values"].shape
l_values = y_data
popt, pcov = curve_fit(func, x_data, y_data)
ax.plot(np.sqrt(data["length_values"]), np.log(l_values - popt[2]))
ax.plot(np.sqrt(data["length_values"]), np.log(popt[0]) - popt[1] * np.sqrt(data["length_values"]))
results = pickle.load( open( "dos_alltemps.txt", "rb" ) )
datas = {}
temps =np.array([])
for elem in results:
temps = np.append(temps, elem.bcs_params['temperature'])
temps = np.unique(temps)
for temp in temps:
datas[temp] = np.array([elem for elem in results if elem.bcs_params['temperature']==temp])
print temps
print datas[275.0][0]
x_datas = {}
y_datas = {}
for temp in temps:
x_datas[temp] = datas[temp][0].observable_results["DOS"]["omega_mesh"]
y_datas[temp] = np.zeros((x_datas[temp].size))
total_sum = 0
for elem in datas[temp]:
y_datas[temp] +=\
elem.observable_results["DOS"]["DOS_values"]
y_datas[temp] /= datas[temp].size
fig, ax = plt.subplots(figsize = (8, 14), dpi=100, frameon=False)
#for i in range(len(temps)):
selected_temps = [0,1, 2, 3, 4, 6, 8]
for i in range(len(selected_temps)):
temp = temps[selected_temps[i]]
x_data = x_datas[temp]
y_data = y_datas[temp] + i * 0.7
ax.plot(x_data, y_data, label=(r'T={:3.2f}$T_{{KT}}$').format(temp/T_KT))
ax.legend()
plt.savefig("all_dos.pdf")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: d Wave
Step2: Modification
Step3: MC Driver
Step4: Modification
|
11,725
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
import numpy as np
from ipywidgets import widgets, fixed
from ipywidgets import interact
from exact_solvers import nonconvex
from exact_solvers import nonconvex_demos
nonconvex_demos.demo1()
f = lambda q: q*(1-q)
q_left = 0.1
q_right = 0.6
nonconvex_demos.plot_flux(f, q_left, q_right)
a = 0.5
f_buckley_leverett = lambda q: q**2 / (q**2 + a*(1-q)**2)
q_left = 1.
q_right = 0.
nonconvex_demos.plot_flux(f_buckley_leverett, q_left, q_right)
q_left = 1.
q_right = 0.
plot_function = nonconvex_demos.make_plot_function(f_buckley_leverett,
q_left, q_right, xi_left=-2, xi_right=2)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0,max=.9),
fig=fixed(0));
q_left = 0.
q_right = 1.
plot_function = nonconvex_demos.make_plot_function(f_buckley_leverett,
q_left, q_right, xi_left=-2, xi_right=2)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0,max=.9),
fig=fixed(0));
f_BL_leftward = lambda q: -f_buckley_leverett(q)
q_left = 0.
q_right = 1.
plot_function = nonconvex_demos.make_plot_function(f_BL_leftward,
q_left, q_right, xi_left=-2, xi_right=2)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0,max=.9),
fig=fixed(0));
f_sin = lambda q: np.sin(q)
q_left = np.pi/4.
q_right = 15*np.pi/4.
nonconvex_demos.plot_flux(f_sin, q_left, q_right)
plot_function = \
nonconvex_demos.make_plot_function(f_sin, q_left, q_right, -1.5, 1.5)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0.,max=.9),
fig=fixed(0));
f_sin = lambda q: np.sin(q)
q_left = 15*np.pi/4.
q_right = np.pi/4.
plot_function = \
nonconvex_demos.make_plot_function(f_sin, q_left, q_right,
-1.5, 1.5)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0.,max=.9),
fig=fixed(0));
f = lambda q: 0.25*(1. - q)*np.sin(1.5*q)
plot_function = nonconvex_demos.make_plot_function_qsliders(f)
interact(plot_function,
t=widgets.FloatSlider(value=0.8,min=0.,max=.9),
q_left=widgets.FloatSlider(value=-3.5,min=-4,max=4),
q_right=widgets.FloatSlider(value=3.5,min=-4,max=4),
fig=fixed(0));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you wish to examine the Python code for this chapter, please see
Step2: The plot on the left above shows a case where the solution is a rarefaction wave that can be computed by tracing characteristics. On the right we see the case for which tracing characteristics would give a multivalued solution (as a dashed line) whereas the correct Riemann solution consists of a shock wave (solid line). As discussed in Traffic_flow, the shock location is determined by the Rankine-Hugoniot condition, or, equivalently, by the equal area condition discussed in Burgers.
Step3: Buckley-Leverett Equation
Step4: Again the third plot above shows $q$ on the vertical axis and $f'(q)$ on the horizontal axis (it's the middle figure turned sideways). You can think of this as showing the characteristic velocity for each point on a jump discontinuity from $q=0$ to $q=1$ (indicated by the dashed line), and hence a triple valued solution of the Riemann problem at $t=1$ when each $q$ value has propagated this far.
Step5: Note from the plot on the left above that the triple-valued solution suggested by tracing characteristics (the dashed line) has been partially replaced by a shock wave. By conservation, the areas of the two regions cut off by the shock must cancel out. Moreover, the shock speed coincides with the characteristic speed at the edge of the rarefaction wave that ends at the shock. In terms of the flux function shown by the dashed curve in the right-most figure above, we see that the shock wave connects $q_r=0$ to the point where the slope of the linear segment of the solid line (which is the shock speed, by the Rankine-Hugoniot condition) agrees with the slope of the flux function (which is the characteristic speed at this edge of the rarefaction wave).
Step6: Again the shock replaces the triple-valued part of the solution in such a way that mass is conserved (equal areas are cut off), and the shock speed again agrees with the limiting characteristic speed at edge of the adjacent rarefaction wave. Since $q_\ell < q_r$ in this case, the correct Riemann solution corresponds to the lower convex hull of the flux function, i.e. the convex hull of the set of points lying above $f(q)$.
Step7: Sinusoidal flux
Step8: Below is a recreation of Figure 16.4 of <cite data-cite="fvmhp"><a href="riemann.html#fvmhp">(LeVeque 2002)</a></cite>, illustrating where shocks must be inserted to make the Riemann solution single valued. In the live notebook you can see how the solution evolves with time.
Step9: In the figure above, note that the shocks in the Riemann solution correspond to linear segments of the lower convex hull the flux function $f(q)$. This is because we chose $q_\ell < q_r$ in this example.
Step10: Yet another example
|
11,726
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
a_true = 0.5
b_true = 2.0
c_true = -4.0
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this cell for grading the raw data generation and plot
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
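# A minimal sketch of one possible solution. The exact dataset spec (number
# of points, x-range, noise level) is not in this excerpt, so N=30,
# x in [-5, 5] and sigma=2.0 below are assumptions:
# N = 30
# xdata = np.linspace(-5, 5, N)
# dy = 2.0
# ydata = a_true*xdata**2 + b_true*xdata + c_true + np.random.normal(0.0, dy, N)
# def model(x, a, b, c):
#     return a*x**2 + b*x + c
# theta, cov = opt.curve_fit(model, xdata, ydata, sigma=dy*np.ones(N))
# for name, val, err in zip('abc', theta, np.sqrt(np.diag(cov))):
#     print('{} = {:.3f} +/- {:.3f}'.format(name, val, err))
# plt.errorbar(xdata, ydata, dy, fmt='.k')
# plt.plot(xdata, model(xdata, *theta), 'r-')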
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a quadratic curve
Step2: First, generate a dataset from this model with these parameters and the following characteristics
Step3: Now fit the model to the dataset to recover estimates for the model's parameters
|
11,727
|
<ASSISTANT_TASK:>
Python Code:
from qrays import Vector # see Chapter 6
class Polyhedron:
def __init__(self, name, volume, faces : set,
vertexes : dict, center = Vector((0,0,0))):
self.name = name
self.vertexes = vertexes
self.volume = volume
self.faces = faces
self.edges = self._distill() # derive unique edges from faces
self.center = center
def _distill(self):
        """get all edge pairs from faces"""
edges = set()
for face in self.faces:
new_pairs = {tuple(sorted([a, b])) # sorting pairs keeps them unique
for a,b in zip(face, face[1:] + (face[0],))}
# ('B','A') won't sneak in as another ('A','B')
edges = edges.union(new_pairs)
return edges
def translate(self, slider : Vector):
        """slide to a new position, keep track of the center"""
pass
def rotate(self, degrees, axis):
        """We will use a matrix to rotate around x, y or z axis"""
pass
def scale(self, scale_factor):
        """resize by scale_factor and emit a new polyhedron
        Volume changes as a 3rd power of scale_factor.
        """
pass
def render(self, color, filename = "output.pov"):
        """build a povray file in Scene Description Language"""
pass
def __repr__(self):
return "Polyhedron({})".format(self.name)
from qrays import Qvector, Vector
tet1 = {}
tet1['A'] = Qvector((1,0,0,0))
tet1['B'] = Qvector((0,1,0,0))
tet1['C'] = Qvector((0,0,1,0))
tet1['D'] = Qvector((0,0,0,1))
for v in tet1:
print(tet1[v].xyz())
Tet1 = Polyhedron("Tetrahedron", volume = 1,
faces = {('A', 'B', 'C'), ('A', 'B', 'D'),
('A', 'C', 'D'), ('B', 'C', 'D')},
vertexes = tet1)
Tet1.edges # lets make sure _distill() did its job
tet2 = {} # now for the dual tetrahedron
tet2['E'] = -tet1['A']
tet2['F'] = -tet1['B']
tet2['G'] = -Qvector((0,0,1,0)) # same as -tet1['C']
tet2['H'] = -Qvector((0,0,0,1)) # same as -tet1['D']
Tet2 = Polyhedron("Dual Tetrahedron", volume = 1,
faces = {('E', 'F', 'G'), ('E', 'F', 'H'),
('E', 'G', 'H'), ('F', 'G', 'H')},
vertexes = tet2)
Tet2.edges
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Oregon Curriculum Network <br />
Step6: The polyhedrons we've talked about will be instances of our Polyhedron class. Once instantiated, they each keep their vertex labels and pass them on to their progeny. To transform a polyhedron, as we will do in the next chapter, is to create a new one, not to change an existing one in place. Once defined, a polyhedron should be considered immutable.
Step7: Qvector A is in the all-positive XYZ octant (+,+,+), whereas Qvector B is still above the XY plane but kitty-corner in octant (-,-,+). The other two points form a segment below the XY plane and perpendicular to the first, in (-,+,-) and (+,-,-) respectively.
|
11,728
|
<ASSISTANT_TASK:>
Python Code:
from regraph import NXGraph, Rule
from regraph import plot_graph, plot_instance, plot_rule
%matplotlib inline
# Create an empty graph object
graph = NXGraph()
# Add a list of nodes, optionally with attributes
graph.add_nodes_from(
[
'Alice',
('Bob', {'age': 15, 'gender': 'male'}),
('Jane', {'age': 40, 'gender': 'female'}),
('Eric', {'age': 55, 'gender': 'male'})
])
# Add a list of edges, optionally with attributes
graph.add_edges_from([
("Alice", "Bob"),
("Jane", "Bob", {"type": "parent", "since": 1993}),
("Eric", "Jane", {"type": "friend", "since": 1985}),
("Eric", "Alice", {"type": "parent", "since": 1992}),
])
# Print a list of nodes and edges with data attached to them
print("List of nodes: ")
for n, attrs in graph.nodes(data=True):
print("\t", n, attrs)
print("List of edges: ")
for s, t, attrs in graph.edges(data=True):
print("\t{}->{}".format(s, t), attrs)
# Add individual nodes and edges
graph.add_node('Sandra', {'age': 45, 'gender': 'female'})
graph.add_edge("Sandra", "Eric", {"type": "spouse", "since": 1990})
graph.add_edge("Eric", "Sandra", {"type": "spouse", "since": 1990})
graph.add_edge("Sandra", "Alice", {"type": "parent", "since": 1992})
# Add node and edge attributes
graph.add_node_attrs("Alice", {"age": 18, "gender": "female"})
graph.add_edge_attrs("Alice", "Bob", {"type": "friend", "since": 2004})
# Get attributes of nodes and edges
print("New Alice attibutes: ", graph.get_node("Alice"))
print("New Alice->Bob attributes: ", graph.get_edge("Alice", "Bob"))
for k, v in graph.get_node("Alice").items():
print(k, ": ", v, ", type: ", type(v))
positioning = plot_graph(graph)
graph.to_json()
# Initialize a pattern graph
pattern = NXGraph()
pattern.add_nodes_from(["x", "y", "z"])
pattern.add_edges_from([
("x", "y"),
("z", "y")
])
# Find matchings of the pattern in the graph
instances = graph.find_matching(pattern)
print(instances)
pattern.add_edge_attrs("x", "y", {"type": "parent"})
pattern.add_edge_attrs("z", "y", {"type": "parent"})
instances = graph.find_matching(pattern)
print(instances)
print("Instances:")
for instance in instances:
print(instance)
plot_instance(graph, pattern, instance, parent_pos=positioning) #filename=("instance_example_%d.png" % i))
rule = Rule.from_transform(pattern)
rule.inject_add_edge("y", "x", {"type": "child_of"})
rule.inject_add_edge("y", "z", {"type": "child_of"})
plot_rule(rule)
# Back-up the graph
graph_backup = NXGraph.copy(graph)
# Rewrite using the first instances
rhs_graph = graph.rewrite(rule, instances[0])
# Plot old instances in the backed-up graph
plot_instance(graph_backup, rule.lhs, instances[0], parent_pos=positioning)
# Plot RHS instance in the transformed graph
new_pos = plot_instance(graph, rule.rhs, rhs_graph, parent_pos=positioning)
# Create a pattern
pattern = NXGraph()
pattern.add_nodes_from(["x", "y"])
pattern.add_edge("x", "y", {"type": "parent"})
# Initialize a rule that clones `x`; note that the variable `rhs_clone_id`
# corresponds to the ID of the newly produced clone in the RHS of the rule
rule = Rule.from_transform(pattern)
_, rhs_clone_id = rule.inject_clone_node("x")
rule.inject_add_edge("x", rhs_clone_id, {"type": "spouse"})
rule.inject_add_edge(rhs_clone_id, "x", {"type": "spouse"})
plot_rule(rule)
# Find matching in the graph
instances = graph.find_matching(rule.lhs)
print(instances)
# Let us fix an instace
instance = {'x': 'Jane', 'y': 'Bob'}
new_pos = plot_instance(graph, rule.lhs, instance, parent_pos=new_pos)
rhs_graph = graph.rewrite(rule, instance)
new_pos = plot_instance(graph, rule.rhs, rhs_graph, parent_pos=new_pos)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Creating and modifying a graph object
Step2: Note that the attributes of the nodes/edges are converted to regraph.attribute_sets.FiniteSet objects. See the tutorial on advanced attribute values (Tutorial_advanced_attributes.ipynb) for more details on the underlying data structures.
Step3: ReGraph provides some utils for plotting NetworkX-based graphs
Step4: Graph objects can be dumped to dictionaries following the JSON format (note how the attribute values are encoded).
Step5: 2. Finding graph patterns
Step6: We can equip pattern nodes and edges with attributes; ReGraph will then look for all subgraphs that match the structure of the pattern and whose elements contain the respective attributes.
Step7: We can plot matchings inside the graph using plot_instance.
Step8: 3. Rewriting graph objects
Step9: Graph rewriting can be performed with the rewrite method of NXGraph. It takes as an input a rule and an instance of this rule. Rewriting is performed in-place, the provided graph object is modified and a dictionary corresponding to the $RHS$ matching in the rewritten graph ($RHS \rightarrowtail G'$) is returned.
Step10: Let us consider another example of a rewriting rule
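As one more hypothetical illustration (not part of the original tutorial), the same machinery can be applied to a single-node pattern; only API calls already demonstrated above are used, and the choice of the 'Eric' node is an assumption.
# Hypothetical extra example: clone a single matched node.
pattern = NXGraph()
pattern.add_nodes_from(["p"])
rule = Rule.from_transform(pattern)
_, rhs_clone_id = rule.inject_clone_node("p")
plot_rule(rule)
instance = {"p": "Eric"}     # assumes the node 'Eric' is still present in the graph
rhs_graph = graph.rewrite(rule, instance)
print(rhs_graph)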
|
11,729
|
<ASSISTANT_TASK:>
Python Code:
import urllib2
page = urllib2.urlopen("http://beans-r-us.appspot.com/prices.html")
text_str = page.read()
text_str
type(text_str)
text = text_str.decode("utf8")
type(text)
text
print(text)
a_food = "kebap"
a_food[0]
a_food[1]
a_food[2]
a_food[-1]
a_food[-2]
a_food[5]
len(a_food)
a_food
a_food[0 : 2 : 1]
a_food[0 : 4 : 2]
a_food[0 : 2]
a_food[: 2]
a_food[: 4 : 2]
a_food[ : : 2]
a_food[ : -1 : 2]
a_food[: 10]
a_food[:]
a_food[3 : 1]
a_food[3 : 1 : -1]
a_food[-1 : : -1]
text.find(">$")
print(text[234: 238])
price_index = text.find(">$") + 2
bean_price_str = text[price_index : price_index + 4]
print(bean_price_str)
type(bean_price_str)
a_number = int('4')
print(a_number)
print(type(a_number))
float('4.2') * 2
'4.2' * 2
int('4.2') * 2
float('4.5GB')
import urllib2
import time # module of time-related functions
bean_price = 5.0
while bean_price < 6.0:
time.sleep(1) # pause execution for 1 second
page = urllib2.urlopen("http://beans-r-us.appspot.com/prices.html")
text = page.read().decode("utf8")
price_index = text.find(">$") + 2
bean_price_str = text[price_index : price_index + 4]
bean_price = float(bean_price_str)
print(bean_price)
print("The current coffee bean price is %.2f. Raise the Americano price!" % bean_price)
from __future__ import print_function
import urllib2
import time # module of time-related functions
bean_price = 5.0
while bean_price < 6.0:
time.sleep(1) # pause execution for 1 second
page = urllib2.urlopen("http://beans-r-us.appspot.com/prices.html")
text = page.read().decode("utf8")
price_index = text.find(">$") + 2
bean_price_str = text[price_index : price_index + 4]
bean_price = float(bean_price_str)
print(bean_price)
print("The current coffee bean price is", bean_price, ". Raise the Americano price!")
week_days = " Mon, Tue, Wed, Thu, Fri, Sat, Sun "
week_days.strip(" ")
week_days.strip()
week_days.split(", ")
week_days.strip(" ").split(", ")
week_days.replace(" Mon", "Mon")
week_days.upper()
week_days.strip().upper()
week_days.lower()
week_days.strip().lower()
week_days.strip().lower().split(", ")
week_days.capitalize()
week_days.strip().capitalize()
week_days.title()
week_days.strip().title()
week_days.startswith(" M")
week_days.endswith("n ")
week_days
stripped_week_days = week_days.strip()
stripped_week_days
pets = 'dog cat hedgehog pig swan fish bird'
pets.title()
pets.title()[4]
pets[8 : 16]
pets[8 : 16 : 2]
pets[15: 7 : -1]
dogs, cats = '8', '4'
print(int(dogs))
print(int(cats))
print(abs(int(dogs) - int(cats)))
'ab' in 'abc'
'cat' in 'casting'
def find_dog(word):
if 'dog' in word:
found_dog = True
else:
found_dog = False
return found_dog
find_dog('Bull dog')
find_dog('강아지')
from __future__ import print_function
import urllib2
import time # module of time-related functions
def price_setter(b_price, a_price):
bean_price = b_price
while 5.5 < bean_price < 6.0:
time.sleep(1) # pause execution for 1 second
page = urllib2.urlopen("http://beans-r-us.appspot.com/prices.html")
text = page.read().decode("utf8")
price_index = text.find(">$") + 2
bean_price_str = text[price_index : price_index + 4]
bean_price = float(bean_price_str)
print("The current coffee bean price is", bean_price, "dollars.")
if bean_price <= 5.5:
print("Lower the Americano price by", a_price, "dollars!")
else:
print("Raise the Americano price by", a_price, "dollars!")
price_setter(5.7, 0.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you actually check, the entire content of the website is stored as a single string.
Step2: The string data type
Step3: Unicode
Step4: Note
Step5: How can we extract the information we want, the coffee bean price, from the string above?
Step6: The character at a particular index can be inspected as follows.
Step7: The value at index 1, i.e. the second character
Step8: The value at index 2, i.e. the third character
Step9: And so on.
Step10: And so on.
Step11: Slicing
Step12: To extract the 'ke' part from 'kebap', do the following
Step13: That is, it extracts everything from the start of the string up to (but not including) index 2, i.e. through the second character.
Step14: The start index, end index, and step arguments can each be omitted, depending on the case.
Step15: Positive and negative indices can also be mixed.
Step16: Note
Step17: If nothing is given, as below, the entire string is extracted.
Step18: The start index must be less than or equal to the end index for the extraction to work properly.
Step19: The reason is that slicing, by default, scans from the smaller index toward the larger index.
Step20: Using the find() method
Step21: Now we know that the ">$" string we are looking for starts at index 232.
Step22: However, rather than using 234 here, the find() method
Step23: Note
Step24: So, for example, if the coffee bean price is 6 dollars or more, the coffee shop raises its Americano price,
Step25: The float() function can convert a string that looks like a floating-point number into an actual floating-point number.
Step26: Note
Step27: Note
Step28: If the string does not look like a floating-point number, the float() function also raises an error.
Step29: Code example using the coffee bean price information
Step30: Note
Step31: String-related methods
Step32: The strip() method cleans up a string by removing the specified characters from both ends.
Step33: This is the same as calling the strip() method without an argument.
Step34: The split() method splits the string on the specified substring and returns a list of strings.
Step35: Two or more methods can also be combined.
Step36: The replace() method replaces one substring with another.
Step37: The upper() method converts every character to uppercase.
Step38: The lower() method converts every character to lowercase.
Step39: The capitalize() method converts the very first character to uppercase.
Step40: The title() method converts the first character of each word to uppercase.
Step41: The startswith() method tells whether the string starts with a given string.
Step42: The endswith() method tells whether the string ends with a given string.
Step43: Immutable data types
Step44: A data type that can never be changed once it has been created, like this, is called an immutable data type.
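A tiny illustration of this (not part of the original lesson): trying to assign into a string raises a TypeError, so a new string has to be built instead.
s = "kebap"
try:
    s[0] = "K"               # in-place assignment is not allowed for str
except TypeError as e:
    print("strings are immutable: " + str(e))
s = "K" + s[1:]              # build a new string instead
print(s)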
Step45: Exercises
Step46: Exercise
Step47: Exercise
Step48: Exercise
Step49: Exercise (continuing the previous problem)
Step50: Exercise
Step51: Exercise
Step52: How can we check how many dogs and how many cats we have?
Step53: Exercise
Step54: Sample answer
Step55: Exercise
Step56: For example, the current coffee bean price is 5.7 dollars, and the real-time coffee bean price
|
11,730
|
<ASSISTANT_TASK:>
Python Code:
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten, Activation, add
from keras.layers.core import Dropout
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.models import Model, Sequential, load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras import backend as ktf
from keras.preprocessing.image import ImageDataGenerator
from lib.data_utils import get_MNIST_data
import matplotlib.pyplot as plt
import numpy as np
%matplotlib notebook
# by default, there would be 41000 training data, 1000 test data and 1000 validation data (within the training set)
data = get_MNIST_data(num_validation=4000)
# see if we get the data correctly
print('image size: ', data['X_train'].shape)
# model architecture
# [batchnorm-Conv-Conv-maxpool]x2 - [dense]x2 - [softmax]
# new lowest: 1.01 0.79 (0.0121, 0.76, 1974, True, False)
# new lowest: 1.23 0.73 (0.0044, 0.45, 1392, True, False)
simple_CNN = Sequential()
simple_CNN.add(BatchNormalization(input_shape=(28, 28, 1)))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (14,14,32)
simple_CNN.add(Dropout(0.2))
simple_CNN.add(BatchNormalization())
simple_CNN.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (7,7,64)
simple_CNN.add(Dropout(0.2))
simple_CNN.add(Flatten())
simple_CNN.add(Dense(1392, activation='relu'))
simple_CNN.add(Dropout(0.45))
simple_CNN.add(Dense(10, activation='softmax'))
# set loss and optimizer
rmsprop = RMSprop(lr=0.0044, decay=0.99)
simple_CNN.compile(loss='sparse_categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('../models/simpleCNN_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=3)
# use test data to monitor early stopping
simple_CNN.fit(data['X_train'], data['y_train'].reshape(-1,1),
batch_size=64,
epochs=200,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
callbacks=[checkpoint, earlystop],
initial_epoch=0)
# resume training
model = load_model('../models/simpleCNN_86-0.0034.h5')
# set the loss and optimizer
rmsprop = RMSprop(lr=0.0000000044)
model.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('../models/simpleCNN_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=5)
model.fit(data['X_train'], data['y_train'].reshape(-1,1),
batch_size=64,
epochs=200,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
callbacks=[checkpoint, earlystop],
initial_epoch=87)
# model architecture
# [Conv-batchnorm-relu]x4 - [residual: [Conv-batchnorm-relu]x2-Conv-batchnorm-add-relu]x6
# 4
inputs = Input(shape=(28, 28, 1))
x = Conv2D(64, (7, 7), padding='same')(inputs)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(256, (1, 1), padding='same')(x)
x = BatchNormalization()(x)
res = MaxPooling2D((2, 2))(x) # (14, 14, 64)
# repeated residual modules
for i in range(6): # 6x3 = 18
x = Conv2D(64, (1, 1), padding='same')(res)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(64, (3, 3), padding='same')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(256, (1, 1), padding='same')(x)
x = BatchNormalization()(x)
x = add([x, res])
res = Activation('relu')(x)
x = GlobalAveragePooling2D(data_format='channels_last')(res) #(,256)
predictions = Dense(10, activation='softmax')(x)
# connect the model
mini_ResNet = Model(inputs=inputs, outputs=predictions)
# set loss and optimizer
rmsprop = RMSprop(lr=0.1, decay=0.9999)
mini_ResNet.compile(loss='sparse_categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('miniResNet_{epoch:02d}-{accuracy:.2f}.h5',
monitor='accuracy',
save_best_only=True)
plateau = ReduceLROnPlateau(factor=0.1, patience=3, min_lr=0.0001)
mini_ResNet.fit(data['X_train'], data['y_train'].reshape(-1, 1),
batch_size=32, epochs=10,
callbacks=[checkpoint, plateau])
# test the model and see accuracy
score = mini_ResNet.evaluate(data['X_test'], data['y_test'].reshape(-1, 1), batch_size=32)
print(score)
# save the model: 0.903
mini_ResNet.save('mini_ResNet.h5')
# model architecture
# [Conv] - [batchnorm-Conv-Conv-add-maxpool]x2 - [dense]x2 - [softmax]
inputs = Input(shape=(28,28,1))
x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
res = BatchNormalization()(x) # (28, 28, 64)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(res)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = add([res, x])
x = MaxPooling2D((2, 2))(x)
res = BatchNormalization()(x) # (14, 14, 64)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(res)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = add([res, x])
x = MaxPooling2D((2, 2))(x)
x = GlobalAveragePooling2D(data_format='channels_last')(x)
predictions = Dense(10, activation='softmax')(x)
simple_resCNN = Model(inputs=inputs,outputs=predictions)
# set loss and optimizer
rmsprop = RMSprop(lr=0.01, decay=0.978)
simple_resCNN.compile(loss='sparse_categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('../models/simpleResCNN_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=5)
# use test data to monitor early stopping
simple_resCNN.fit(data['X_train'], data['y_train'].reshape(-1,1),
batch_size=64,
epochs=200,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
callbacks=[checkpoint, earlystop],
initial_epoch=0)
# resume training
model = load_model('../models/simpleCNN_29-0.4773.h5')
# set the loss and optimizer
rmsprop = RMSprop(lr=0.00001,decay=0.978)
model.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('../models/simpleCNN_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=5)
model.fit(data['X_train'], data['y_train'].reshape(-1,1),
batch_size=64,
epochs=200,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
callbacks=[checkpoint, earlystop],
initial_epoch=26)
# validate the structure
inputs = Input(shape=(28,28,1))
x = Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
res = BatchNormalization()(x) # (28, 28, 64)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(res)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = add([res, x])
x = MaxPooling2D((2, 2))(x)
res = BatchNormalization()(x) # (14, 14, 64)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(res)
x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)
x = add([res, x])
x = MaxPooling2D((2, 2))(x)
x = GlobalAveragePooling2D(data_format='channels_last')(x)
predictions = Dense(10, activation='softmax')(x)
simple_resCNN = Model(inputs=inputs,outputs=predictions)
# set loss and optimizer
rmsprop = RMSprop(lr=0.001, decay=0.978)
simple_resCNN.compile(loss='sparse_categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
# use test data to monitor early stopping
simple_resCNN.fit(data['X_val'], data['y_val'].reshape(-1,1),
batch_size=64,
epochs=1,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
initial_epoch=0)
model = load_model('../models/simpleResCNN.h5')
pred = np.argmax(model.predict(data['X_test']), axis=1)
wrong_idx = [i for i in range(len(pred)) if pred[i] != data['y_test'][i]]
np.random.shuffle(wrong_idx)
fig = plt.figure(figsize=(4, 5))
for i in range(1,6):
for j in range(1,3):
idx = wrong_idx.pop()
a = fig.add_subplot(j,5,i)
plt.imshow(data['X_test'][idx].reshape((28,28)))
plt.axis('off')
plt.title(pred[idx])
plt.show()
# validate the model and return the test error
def simpleCNN_model(lr=0.001, dropout=0.5, dense_dim=1024, drop_conv=True, avgpool=True):
simple_CNN = Sequential()
simple_CNN.add(BatchNormalization(input_shape=(28, 28, 1)))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (14,14,32)
if drop_conv:
simple_CNN.add(Dropout(0.2))
simple_CNN.add(BatchNormalization())
simple_CNN.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (7,7,64)
if drop_conv:
simple_CNN.add(Dropout(0.2))
if avgpool:
simple_CNN.add(GlobalAveragePooling2D())
else:
simple_CNN.add(Flatten())
simple_CNN.add(Dense(dense_dim, activation='relu'))
simple_CNN.add(Dropout(dropout))
simple_CNN.add(Dense(10, activation='softmax'))
# set loss and optimizer
rmsprop = RMSprop(lr=lr, decay=0.999)
simple_CNN.compile(loss='sparse_categorical_crossentropy', optimizer=rmsprop, metrics=['accuracy'])
# use test data to monitor early stopping
history = simple_CNN.fit(data['X_val'], data['y_val'].reshape(-1,1),
batch_size=64,
epochs=1,
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)),
initial_epoch=0,
verbose=False)
return history.history['val_loss'][0], history.history['val_acc'][0]
# validation: 4000; 1 epoch
# finetune list: initial learning rate, dropout rate, hidden unit size
best_parameters={'lr': 0.001, 'dropout': 0.5, 'dense_dim': 1024}
lowest_err = 1000
lr_range = (0.1,0.0001); dropout_range = (0.3,0.8); dense_range = (512,2048)
while True:
lr = np.random.uniform(lr_range[0], lr_range[1])
dropout = np.random.uniform(dropout_range[0], dropout_range[1])
dense_dim = int(np.random.uniform(dense_range[0], dense_range[1]))
drop_conv, avgpool = np.random.binomial(1,0.5,2)
ktf.clear_session()
test_err, test_acc = simpleCNN_model(lr, dropout, dense_dim, drop_conv, avgpool)
if test_err < lowest_err:
print('new lowest: ', round(test_err,2), round(test_acc,2),
(round(lr,4), round(dropout,2), dense_dim, bool(drop_conv), bool(avgpool)))
lowest_err = test_err
best_parameters['lr'] = lr
best_parameters['dropout'] = dropout
best_parameters['dense_dim'] = dense_dim
# set the data generator to transform the data
idg = ImageDataGenerator(width_shift_range=0.05,
fill_mode='constant')
# build the model
simple_CNN = Sequential()
simple_CNN.add(BatchNormalization(input_shape=(28, 28, 1)))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (14,14,32)
simple_CNN.add(Dropout(0.2))
simple_CNN.add(BatchNormalization())
simple_CNN.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
simple_CNN.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
simple_CNN.add(MaxPooling2D((2, 2))) # (7,7,64)
simple_CNN.add(Dropout(0.2))
simple_CNN.add(Flatten())
simple_CNN.add(Dense(1392, activation='relu'))
simple_CNN.add(Dropout(0.45))
simple_CNN.add(Dense(10, activation='softmax'))
# set loss and optimizer
rmsprop = RMSprop(lr=0.0044, decay=0.99)
simple_CNN.compile(loss='sparse_categorical_crossentropy',
optimizer=rmsprop,
metrics=['accuracy'])
# train the model using indefinite number of training data
checkpoint = ModelCheckpoint('../models/simpleCNN_aug_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=3)
simple_CNN.fit_generator(idg.flow(data['X_train'],
data['y_train'].reshape(-1, 1),
batch_size=64),
steps_per_epoch=len(data['X_train'])/64,
initial_epoch=0,
epochs=100,
callbacks=[checkpoint, earlystop],
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)))
# resume training
model = load_model('../models/simpleCNN_aug_44-0.9536.h5')
# set the loss and optimizer
rmsprop = RMSprop(lr=0.00044,decay=0.99)
model.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# train the model
checkpoint = ModelCheckpoint('../models/simpleCNN_aug_{epoch:02d}-{loss:.4f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=5)
model.fit_generator(idg.flow(data['X_train'],
data['y_train'].reshape(-1, 1),
batch_size=64),
steps_per_epoch=len(data['X_train'])/64,
initial_epoch=45,
epochs=100,
callbacks=[checkpoint, earlystop],
validation_data=(data['X_test'], data['y_test'].reshape(-1, 1)))
from lib.data_utils import create_submission
from keras.models import load_model
# for simple CNN model
model = load_model('../models/simpleCNN_86-0.0034.h5')
print('Load model successfully.')
create_submission(model, '../data/test.csv', '../submission/submission_simpleCNN_tuned_87.csv', 128)
history = simpleCNN_model()
print(history.history)
# new lowest: 1.85 0.76 (0.0009, 0.58, 1892, False, False)
# new lowest: 1.78 0.52 (0.0077, 0.34, 867, True, False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the MNIST data. Notice that we assume it's 'kaggle-DigitRecognizer/data/train.csv', and we use a helper function to read it into a dictionary.
Step2: Simple CNN model
Step3: Mini ResNet
Step4: Simple CNN with residual connections
Step5: Inspect the wrong answers
Step6: Hyperparameter finetuning
Step7: First, random search. Each run is only 1 epoch with 4000 validation data.
Step8: SimpleCNN with data augmentation
Step9: Create submissions
|
11,731
|
<ASSISTANT_TASK:>
Python Code:
dgm = gm.DGM.read('../networks/earthquake.bif')
dgm.draw() # you can move the cursor on a node to see it's CPD
nx.dag_longest_path(dgm)
nx.average_neighbor_degree(dgm)
list(dgm.immoralities) # list of all immoralities in graph
list(dgm.v_structures) # list of all v_structures in graph
changed_dgm1 = gm.DGM()
changed_dgm1.add_edges_from(dgm.edges())
changed_dgm1.add_edge('JohnCalls', 'MaryCalls')
changed_dgm1.draw()
changed_dgm1.is_I_equivalent(dgm)
changed_dgm1.is_I_map(dgm)
dgm.is_I_map(changed_dgm1)
dgm.reachable('JohnCalls', observed=[])
dgm.reachable('JohnCalls', observed=['Alarm'])
dgm.factor('JohnCalls') # get a factor assigned to variable JohnCalls
dgm.cpd('Alarm') # get a cpd of variable Alarm (i.e. normalized factor, see below)
alarm = dgm.factor('Alarm')
john_calls = dgm.factor('JohnCalls')
burglary = dgm.factor('Burglary')
print(alarm.arguments) # all variables in the network
print(alarm.scope) # variables in the factor
alarm * john_calls # note that the result is NOT a normalized CPD
(alarm * john_calls).normalize('JohnCalls') # now the result is a valid CPD: P(JohnCalls | Burglary, Earthquake, Alarm)
factor = burglary * alarm
factor
factor.marginalize('Burglary')
alarm
factor = burglary * alarm
factor
factor / burglary
f = gm.TableFactor(arguments=['a', 'b'], scope=['a', 'b'])
f.table = np.array([[0.5, 0.5], [0.8, 0.2]])
f.normalize(*f.scope, copy=False)
f
# sampling
rv = f.rvs(size=1e+6)
# Check if the sampling is correct
f1 = gm.TableFactor(['a', 'b'], ['a', 'b']).fit(rv)
f1
# conditional sampling
f.rvs(observed={'a': [0, 1, 0]})
dgm.factor('Alarm').rvs(observed={
'Earthquake': ['True', 'False', 'True'],
'Burglary': ['False', 'False', 'True']
})
dgm.draw()
dgm.rvs(size=10)
# generating data
data = dgm.rvs(size=1000000)
# copying graph
dgm1 = gm.DGM()
dgm1.add_nodes_from(dgm.nodes(), cpd=gm.TableFactor) # here we also need to specify cpd type (TableFactor)
dgm1.add_edges_from(dgm.edges())
dgm1.draw()
# fitting
dgm1.fit(data)
dgm1.draw()
dgm.draw()
factor = dgm.factor('Alarm') * dgm.factor('Burglary') * dgm.factor('Earthquake')
# generating data
data = factor.rvs(size=1000000)
new_factor = gm.TableFactor(['Alarm', 'Burglary', 'Earthquake'], ['Alarm', 'Burglary', 'Earthquake'])
# fitting
new_factor.fit(data)
factor
# creating inference object
inference = gm.SumProductInference(dgm)
# making inference queries
inference(query=['JohnCalls']) # probability of John calling
# probability of John calling in case there was a burglary
inference(query=['JohnCalls'], observed=dict(Burglary='True'))
# probability of John/Mary calling in case there was a burglary
inference(query=['JohnCalls', 'MaryCalls'], observed=dict(Burglary='True'))
# probability of Earthquake given that both John and Mary called
inference(query=['Earthquake'], observed=dict(JohnCalls='True', MaryCalls='True'))
# generating data
data = dgm.rvs(size=1000000)
learned = gm.chow_liu(data)
learned.draw()
gen = gm.ErdosRenyiDGMGen(factor_gen=gm.DirichletTableFactorGen())
gen().draw()
gen = gm.TreeDGMGen(factor_gen=gm.DirichletTableFactorGen())
gen().draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DGM is a subclass of networkx.DiGraph, so you can use any networkx functions on it.
Step2: Also, some DGM-specific queries about the graph are implemented
Step3: Checks for I-equivalence and I-maps
Step4: Find which nodes are reachable in the sense of d-separation
Step5: Factors
Step6: Operations
Step7: Factor Multiplication
Step8: Normalization
Step9: Marginalization
Step10: Factor division
Step11: Random sampling
Step12: Sampling from DGM
Step13: Fitting
Step14: Distributions in this DGM must be close to the real distributions (those in dgm).
Step15: Factors
Step16: Inference
Step17: Structure Learning
Step18: You can see that the Chow-Liu algorithm didn't find the true structure (since it is not a tree). However, the result is quite close to it.
|
11,732
|
<ASSISTANT_TASK:>
Python Code:
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2019-01-12T00:00:00'
tend = '2019-01-12T23:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
pa = planning.get_nustar_roll(tstart, 0)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
orbit1 = orbits[10].copy()
offset = [300, 195]*u.arcsec
midTime = (0.5*(orbit1[1] - orbit1[0]) + orbit1[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='../data', parallax_correction=True)
print("Orbit start: {} Orbit end: {}".format(orbit1[0].isoformat(), orbit1[1].isoformat()))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
orbit2 = orbits[11]
offset = [420, 195]*u.arcsec
midTime = (0.5*(orbit2[1] - orbit2[0]) + orbit2[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='../data', parallax_correction=True)
print("Orbit start: {} Orbit end: {}".format(orbit2[0].isoformat(), orbit2[1].isoformat()))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
# Just use the first orbit...or choose one. This may download a ton of deltat.preds, which is a known
# bug to be fixed.
# Orbit3: 1946-2048
# Per Karl, SAA time 2031-2047
# Trim mosaic to 1946-2031:
orbit = orbits[12].copy()
orbit[1] = orbit[1].replace(minute=31)
print(orbit)
planning.make_mosaic(orbit, make_regions=True, outfile='orbit3_mosaic.txt', write_output=True)
tstart = orbit[1]
tend = orbits[12][1]
offset = [480, 195]*u.arcsec
midTime = (0.5*(tend - tstart)) + tstart
sky_pos = planning.get_sky_position(midTime, offset)
print("Orbit start: {} Orbit end: {}".format(tstart, tend))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
# Just use the first orbit...or choose one. This may download a ton of deltat.preds, which is a known
# bug to be fixed.
# Orbit4: 2123-2225
# Per Karl, SAA time 2214-2230
# Trim mosaic to 2123-2214
orbit = orbits[13].copy()
orbit[1] = orbit[1].replace(minute=14)
print(orbit)
planning.make_mosaic(orbit, make_regions=True, outfile='orbit4_mosaic.txt', write_output=True)
tstart = orbit[1]
tend = orbits[13][1]
offset = [520, 195]*u.arcsec
midTime = (0.5*(tend - tstart)) + tstart
sky_pos = planning.get_sky_position(midTime, offset)
print("Orbit start: {} Orbit end: {}".format(tstart, tend))
print('Aim time: {} RA (deg): {} Dec (deg): {}'.format(midTime.isoformat(), sky_pos[0], sky_pos[1]))
print("")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
Step3: We want to know how to orient NuSTAR for the Sun.
Step4: Orbit 1
Step5: Orbit 2
Step6: This is where you actually make the Mosaic for Orbit 3
Step7: Now make the parking location for the end of Orbit 3
Step8: Mosaic for Orbit 4
Step9: Now make the parking location for the end of Orbit 4
|
11,733
|
<ASSISTANT_TASK:>
Python Code:
! wget -O GEM.tbz2 https://sourceforge.net/projects/gemlibrary/files/gem-library/Binary%20pre-release%202/GEM-binaries-Linux-x86_64-core_i3-20121106-022124.tbz2/download
! tar -xjvf GEM.tbz2
! sudo cp GEM-binaries-Linux-x86_64-core_i3-20121106-022124/gem-mapper /usr/local/bin/
! sudo cp GEM-binaries-Linux-x86_64-core_i3-20121106-022124/gem-indexer /usr/local/bin/
! mkdir ~/bin
! cp GEM-binaries-Linux-x86_64-core_i3-20121106-022124/gem-mapper ~/bin/
! cp GEM-binaries-Linux-x86_64-core_i3-20121106-022124/gem-indexer* ~/bin/
! echo 'export PATH=$PATH:~/bin/' >> ~/.bashrc
! wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh
! conda install -y scipy # scientific computing in python
! conda install -y numpy # scientific computing in python
! conda install -y matplotlib # to produce plots
! conda install -y jupyter # this notebook :)
! conda install -y -c https://conda.anaconda.org/bcbio pysam # to deal with SAM/BAM files
! conda install -y -c https://conda.anaconda.org/salilab imp # for 3D modeling
! conda install -y pip # yet another python package manager
! conda install -y -c bioconda mcl # for clustering
! wget http://sun.aei.polsl.pl/dsrc/download/2.0rc/dsrc
! chmod +x dsrc
! sudo mv dsrc /usr/local/bin
! mv dsrc ~/bin
! git clone git@github.com:3DGenomes/TADbit.git
! cd TADbit; python setup.py install
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Uncompress the archive
Step2: And copy the needed binaries to somewhere in your PATH, like
Step3: In case you do not have root access, just copy the binaries to some path and add this path to your global PATH
Step4: Conda
Step5: And run it with all the default options. The installer will create a miniconda2 folder in your home directory where all the programs that you need will be stored (including python).
Step6: DSRC FASTQ compressor
Step7: And, if you have root access
Step8: Otherwise, and as before
Step9: TADbit
|
11,734
|
<ASSISTANT_TASK:>
Python Code:
import os
from PIL import Image
def get_record_and_image(index):
record = df.iloc[index]
path = os.path.join('data', record.center)
return record, Image.open(path)
def layer_info(model):
for n, layer in enumerate(model.layers, 1):
print('Layer {:2} {:16} input shape {} output shape {}'.format(n, layer.name, layer.input_shape, layer.output_shape))
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
model = Sequential()
model.add(Convolution2D(6, 5, 5, border_mode='valid', subsample=(5, 5), input_shape=(80, 160, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(16, 5, 5, border_mode='valid', subsample=(2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(120))
model.add(Activation('relu'))
model.add(Dense(84))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('tanh'))
layer_info(model)
import numpy as np
import pandas as pd
df = pd.read_csv('data/driving_log.csv')
from tqdm import tqdm
X_train = []
y_train = []
for i in tqdm(range(len(df))):
record, image = get_record_and_image(i)
image = image.resize((image.width // 2, image.height // 2))
X_train.append(np.array(image))
image.close()
y_train.append(record['steering'])
X_min = np.min(X_train)
X_max = np.max(X_train)
X_normalized = (X_train - X_min) / (X_max - X_min) - 0.5
y_train = np.array(y_train)
import keras.optimizers
def train(model, nb_epoch=10, learning_rate=0.001):
adam = keras.optimizers.Adam(lr=learning_rate)
model.compile(loss='mse', optimizer=adam)
model.fit(X_normalized, y_train, validation_split=0.2, nb_epoch=nb_epoch, verbose=2)
model.save('model.h5')
train(model)
from random import randrange
def sample_predictions(model):
for i in range(10):
index = randrange(len(df))
X = np.expand_dims(X_normalized[index], axis=0)
y = y_train[index]
print('Actual steering angle {} model prediction {}'.format(y, model.predict(X)[0][0]))
sample_predictions(model)
from keras.layers import Dropout
model_2 = Sequential()
model_2.add(Convolution2D(6, 5, 5, border_mode='valid', subsample=(5, 5), input_shape=(80, 160, 3)))
model_2.add(Dropout(0.5))
model_2.add(Activation('relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Convolution2D(16, 5, 5, border_mode='valid', subsample=(2, 2)))
model_2.add(Dropout(0.5))
model_2.add(Activation('relu'))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Flatten())
model_2.add(Dense(120))
model_2.add(Activation('relu'))
model_2.add(Dense(84))
model_2.add(Activation('relu'))
model_2.add(Dense(1))
model_2.add(Activation('tanh'))
layer_info(model_2)
train(model_2)
sample_predictions(model_2)
model_3 = Sequential()
model_3.add(Convolution2D(6, 5, 5, border_mode='valid', subsample=(5, 5), input_shape=(80, 160, 3)))
model_3.add(Dropout(0.5))
model_3.add(Activation('relu'))
#model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Convolution2D(16, 5, 5, border_mode='valid'))
model_3.add(Dropout(0.5))
model_3.add(Activation('relu'))
#model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Flatten())
model_3.add(Dense(120))
model_3.add(Activation('relu'))
model_3.add(Dense(84))
model_3.add(Activation('relu'))
model_3.add(Dense(1))
model_3.add(Activation('tanh'))
layer_info(model_3)
train(model_3, 20)
sample_predictions(model_3)
model_4 = Sequential()
model_4.add(Convolution2D(24, 5, 5, border_mode='valid', subsample=(2, 2), input_shape=(80, 160, 3)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Flatten())
model_4.add(Dense(100))
model_4.add(Activation('relu'))
model_4.add(Dense(50))
model_4.add(Activation('relu'))
model_4.add(Dense(10))
model_4.add(Activation('relu'))
model_4.add(Dense(1))
model_4.add(Activation('tanh'))
layer_info(model_4)
train(model_4)
sample_predictions(model_4)
model_4 = Sequential()
model_4.add(Convolution2D(24, 5, 5, border_mode='valid', subsample=(2, 2), input_shape=(80, 160, 3)))
model_4.add(Activation('relu'))
model_4.add(Dropout(0.5))
model_4.add(Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Dropout(0.5))
model_4.add(Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Dropout(0.5))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Dropout(0.5))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Dropout(0.5))
model_4.add(Flatten())
model_4.add(Dense(100))
model_4.add(Activation('relu'))
model_4.add(Dense(50))
model_4.add(Activation('relu'))
model_4.add(Dense(10))
model_4.add(Activation('relu'))
model_4.add(Dense(1))
model_4.add(Activation('tanh'))
layer_info(model_4)
train(model_4)
sample_predictions(model_4)
model_4 = Sequential()
model_4.add(Convolution2D(24, 5, 5, border_mode='valid', subsample=(2, 2), input_shape=(80, 160, 3)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2)))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Convolution2D(64, 3, 3, border_mode='valid'))
model_4.add(Activation('relu'))
model_4.add(Flatten())
model_4.add(Dense(100))
model_4.add(Dropout(0.5))
model_4.add(Activation('relu'))
model_4.add(Dense(50))
model_4.add(Activation('relu'))
model_4.add(Dense(10))
model_4.add(Activation('relu'))
model_4.add(Dense(1))
layer_info(model_4)
train(model_4, 50, learning_rate=0.001)
sample_predictions(model_4)
sample_predictions(model_4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: Now I need to create the actual training data, X_train and y_train. I will just read all the images and store them as NumPy arrays to X_train. Similarly, I read the corresponding steering angles and store them to y_train.
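If memory became a concern, a generator-based alternative (not used in this project) could stream batches instead of materializing every image; batch_size=64 is an assumption and the resize mirrors the cell above.
import numpy as np
def batch_generator(df, batch_size=64):
    while True:
        indices = np.random.choice(len(df), batch_size, replace=False)
        images, angles = [], []
        for i in indices:
            record, image = get_record_and_image(i)
            image = image.resize((image.width // 2, image.height // 2))
            images.append(np.array(image))
            image.close()
            angles.append(record['steering'])
        yield np.array(images), np.array(angles)
X_batch, y_batch = next(batch_generator(df, batch_size=8))
print(X_batch.shape, y_batch.shape)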
Step4: Some preprocessing
Step5: Step 3
Step6: The validation error does not get much lower after epoch 4 or so, whereas the training error keeps falling. This indicates overtraining and poor generalization ability.
Step7: The sample predictions do not look very good. Some tweaks to the model are in order.
Step 4
Step8: The performance is even poorer now, so the model is probably not complex enough to learn the given data set. I could increase the layer dimensions directly, but there is another way
Step9: A bit better but even after 20 epochs not that much of an improvement. I begin to suspect that I need to increase the model's complexity quite a bit. At this point I will try to replicate the architecture from the NVidia paper (http
|
11,735
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import sklearn as skl
import holcrawl.shared
dataset_dir = holcrawl.shared._get_dataset_dir_path()
dataset_path = os.path.join(dataset_dir, 'movies_dataset.csv')
df = pd.read_csv(dataset_path)
df['ROI'] = (df['gross_income'] - df['budget']) / df['budget']
df['name_length'] = df['name'].map(lambda name: len(name))
len(df)
df.isnull().sum()
BASE_FEAT_TO_KEEP = [
'duration', 'budget', 'opening_month', 'opening_day', 'opening_day_of_year', 'year',
'avg_mc_critic_by_opening', 'num_mc_critic_by_opening', 'name_length', 'opening_weekend_income',
'num_imdb_user_by_opening', 'avg_imdb_user_by_opening', 'opening_weekend_screens'# 'avg_mc_user_by_opening'
]
FEAT_TO_KEEP = BASE_FEAT_TO_KEEP + [col for col in df.columns if 'genres' in col]
features = df.drop([col for col in df.columns if col not in BASE_FEAT_TO_KEEP], axis=1)
dataset = df.drop([col for col in df.columns if col not in FEAT_TO_KEEP], axis=1)
dataset = dataset.dropna(axis=0)
dataset.isnull().sum().sum()
len(dataset)
import numpy as np
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
failed = df['ROI'].ix[dataset.index] < 0
X = dataset
Y = failed
logreg = linear_model.LogisticRegression()
acc_scores = cross_val_score(logreg, X, Y, cv=5, n_jobs=1)
mean_accuracy = np.mean(acc_scores)
accuracy_std = np.std(acc_scores)
print("Accuracy is {:.2f}% ± {:.2f}%.".format(mean_accuracy*100, accuracy_std*100))
recall_scores = cross_val_score(logreg, X, Y, cv=5, n_jobs=1, scoring='recall')
mean_recall = np.mean(recall_scores)
recall_std = np.std(recall_scores)
print("Recall = {:.2f}% ± {:.2f}%.".format(mean_recall*100, recall_std*100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Feature Generation
Step2: The number of null values per column
Step3: Keeping all genre dummy variables
Step4: Dropping non-feature columns
Step5: Dropping all rows that still have null values
Step6: Now, making sure we have no null values
Step7: We end up with a dataset of size
Step8: Prediction
Step9: Predicting failed movies
Step10: Running logistic regression over 5 folds of our dataset
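As a sanity check (not part of the original analysis), the accuracy can be compared against a majority-class baseline using the same folds:
from sklearn.dummy import DummyClassifier
baseline = DummyClassifier(strategy='most_frequent')
baseline_scores = cross_val_score(baseline, X, Y, cv=5)
print("Majority-class baseline accuracy is {:.2f}% ± {:.2f}%.".format(
    np.mean(baseline_scores) * 100, np.std(baseline_scores) * 100))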
|
11,736
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
class Model(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32)])
def encode(self, x):
result = tf.strings.as_string(x)
return {
"encoded_result": result
}
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
def decode(self, x):
result = tf.strings.to_number(x)
return {
"decoded_result": result
}
model = Model()
# Save the model
SAVED_MODEL_PATH = 'content/saved_models/coding'
tf.saved_model.save(
model, SAVED_MODEL_PATH,
signatures={
'encode': model.encode.get_concrete_function(),
'decode': model.decode.get_concrete_function()
})
# Convert the saved model using TFLiteConverter
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_PATH)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
tflite_model = converter.convert()
# Print the signatures from the converted model
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signatures = interpreter.get_signature_list()
print(signatures)
# Generate a Keras model.
keras_model = tf.keras.Sequential(
[
tf.keras.layers.Dense(2, input_dim=4, activation='relu', name='x'),
tf.keras.layers.Dense(1, activation='relu', name='output'),
]
)
# Convert the keras model using TFLiteConverter.
# Keras model converter API uses the default signature automatically.
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
tflite_model = converter.convert()
# Print the signatures from the converted model
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signatures = interpreter.get_signature_list()
print(signatures)
model = Model()
# Convert the concrete functions using TFLiteConverter
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[model.encode.get_concrete_function(),
model.decode.get_concrete_function()], model)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, # enable TensorFlow Lite ops.
tf.lite.OpsSet.SELECT_TF_OPS # enable TensorFlow ops.
]
tflite_model = converter.convert()
# Print the signatures from the converted model
interpreter = tf.lite.Interpreter(model_content=tflite_model)
signatures = interpreter.get_signature_list()
print(signatures)
# Load the TFLite model in TFLite Interpreter
interpreter = tf.lite.Interpreter(model_content=tflite_model)
# Print the signatures from the converted model
signatures = interpreter.get_signature_list()
print('Signature:', signatures)
# encode and decode are callable with input as arguments.
encode = interpreter.get_signature_runner('encode')
decode = interpreter.get_signature_runner('decode')
# 'encoded' and 'decoded' are dictionaries with all outputs from the inference.
input = tf.constant([1, 2, 3], dtype=tf.float32)
print('Input:', input)
encoded = encode(x=input)
print('Encoded result:', encoded)
decoded = decode(x=encoded['encoded_result'])
print('Decoded result:', decoded)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Signatures in TensorFlow Lite
Step2: Example model
Step3: In terms of signatures, the above TensorFlow model can be summarized as follows
Step4: From Keras Model
Step5: From Concrete Functions
Step6: Run Signatures
|
11,737
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
graphlab.product_key.set_product_key("C0C2-04B4-D94B-70F6-8771-86F9-C6E1-E122")
tmp = graphlab.SArray([1., 2., 3.])
tmp_cubed = tmp.apply(lambda x: x**3)
print tmp
print tmp_cubed
ex_sframe = graphlab.SFrame()
ex_sframe['power_1'] = tmp
print ex_sframe
def polynomial_sframe(feature, degree):
# assume that degree >= 1
# initialize the SFrame:
poly_sframe = graphlab.SFrame()
# and set poly_sframe['power_1'] equal to the passed feature
poly_sframe['power_1'] = feature
# first check if degree > 1
if degree > 1:
# then loop over the remaining degrees:
# range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree
for power in range(2, degree+1):
# first we'll give the column a name:
name = 'power_' + str(power)
# then assign poly_sframe[name] to the appropriate power of feature
poly_sframe[name] = feature**power
return poly_sframe
print polynomial_sframe(tmp, 3)
sales = graphlab.SFrame('kc_house_data.gl/kc_house_data.gl')
sales = sales.sort(['sqft_living', 'price'])
poly1_data = polynomial_sframe(sales['sqft_living'], 1)
poly1_data['price'] = sales['price'] # add price to the data since it's the target
model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None)
#let's take a look at the weights before we plot
model1.get("coefficients")
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(poly1_data['power_1'],poly1_data['price'],'.',
poly1_data['power_1'], model1.predict(poly1_data),'-')
poly2_data = polynomial_sframe(sales['sqft_living'], 2)
my_features = poly2_data.column_names() # get the name of the features
poly2_data['price'] = sales['price'] # add price to the data since it's the target
model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None)
model2.get("coefficients")
plt.plot(poly2_data['power_1'],poly2_data['price'],'.',
poly2_data['power_1'], model2.predict(poly2_data),'-')
poly1_data = polynomial_sframe(sales['sqft_living'], 3)
poly1_data['price'] = sales['price'] # add price to the data since it's the target
model2 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_2'], validation_set = None)
plt.plot(poly1_data['power_2'],poly1_data['price'],'.',
poly1_data['power_2'], model2.predict(poly1_data),'-')
model3 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_3'], validation_set = None)
plt.plot(poly1_data['power_3'],poly1_data['price'],'.',
poly1_data['power_3'], model3.predict(poly1_data),'-')
poly1_data = polynomial_sframe(sales['sqft_living'], 15)
poly1_data['price'] = sales['price'] # add price to the data since it's the target
model2 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_15'], validation_set = None)
plt.plot(poly1_data['power_15'],poly1_data['price'],'.',
poly1_data['power_15'], model2.predict(poly1_data),'-')
temp_set, temp_set_plus = sales.random_split(.5,seed=0)
set_1, set_2 = temp_set.random_split(.5,seed=0)
set_3, set_4 = temp_set_plus.random_split(.5,seed=0)
poly_data = polynomial_sframe(set_1['sqft_living'], 15)
poly_data['price'] = set_1['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = ['power_15'], validation_set = None)
model.get("coefficients")
# plt.plot(poly_data['power_15'],poly_data['price'],'.',
# poly_data['power_15'], model.predict(poly_data),'-')
poly_data = polynomial_sframe(set_2['sqft_living'], 15)
poly_data['price'] = set_2['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = ['power_15'], validation_set = None)
model.get("coefficients")
plt.plot(poly_data['power_15'],poly_data['price'],'.',
poly_data['power_15'], model.predict(poly_data),'-')
poly_data = polynomial_sframe(set_3['sqft_living'], 15)
poly_data['price'] = set_3['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = ['power_15'], validation_set = None)
model.get("coefficients")
plt.plot(poly_data['power_15'],poly_data['price'],'.',
poly_data['power_15'], model.predict(poly_data),'-')
poly_data = polynomial_sframe(set_4['sqft_living'], 15)
poly_data['price'] = set_4['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = ['power_15'], validation_set = None)
model.get("coefficients")
# plt.plot(poly_data['power_15'],poly_data['price'],'.',
# poly_data['power_15'], model.predict(poly_data),'-')
training_and_validation, testing = sales.random_split(.9,seed=1)
training, validation = training_and_validation.random_split(.5,seed=1)
for i in range(1,16):
poly_data = polynomial_sframe(training['sqft_living'], i)
my_features = poly_data.column_names()
poly_data['price'] = training['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = my_features, validation_set = None, verbose = False)
validation_data = polynomial_sframe(validation['sqft_living'], i)
validation_data['price'] = validation['price']
predictions = model.predict(validation_data)
RSS = ((predictions - validation_data['price']) * (predictions - validation_data['price'])).sum()
print str(RSS)
poly_data = polynomial_sframe(testing['sqft_living'], 6)
my_features = poly_data.column_names()
poly_data['price'] = testing['price'] # add price to the data since it's the target
model = graphlab.linear_regression.create(poly_data, target = 'price', features = my_features, validation_set = None, verbose = False)
validation_data = polynomial_sframe(validation['sqft_living'], i)
validation_data['price'] = validation['price']
predictions = model.predict(validation_data)
RSS = ((predictions - validation_data['price']) * (predictions - validation_data['price'])).sum()
print str(RSS)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree.
Step2: We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself).
Step3: Polynomial_sframe function
Step4: To test your function consider the smaller tmp variable and what you would expect the outcome of the following call
Step5: Visualizing polynomial regression
Step6: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices.
Step7: Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like.
Step8: NOTE
Step9: Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price; we then ask for these to be plotted as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask for these to be plotted as a line '-'.
Step10: The resulting model looks like half a parabola. Try on your own to see what the cubic looks like
Step11: Now try a 15th degree polynomial
Step12: What do you think of the 15th degree polynomial? Do you think this is appropriate? If we were to change the data do you think you'd get pretty much the same curve? Let's take a look.
Step13: Fit a 15th degree polynomial on set_1, set_2, set_3, and set_4 using sqft_living to predict prices. Print the coefficients and make a plot of the resulting model.
Step14: Some questions you will be asked on your quiz
Step15: Next you should write a loop that does the following
Step16: Quiz Question
|
11,738
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
data_sframe['constant'] = 1 # this is how you add a constant column to an SFrame
# add the column 'constant' to the front of the features list so that we can extract it along with the others:
features = ['constant'] + features # this is how you combine two lists
# select the columns of data_SFrame given by the features list into the SFrame features_sframe (now including constant):
features_sframe = data_sframe[features]
# the following line will convert the features_SFrame into a numpy matrix:
feature_matrix = features_sframe.to_numpy()
# assign the column of data_sframe associated with the output to the SArray output_sarray
output_sarray = data_sframe[output]
# the following will convert the SArray into a numpy array by first converting it to a list
output_array = output_sarray.to_numpy()
return(feature_matrix, output_array)
def predict_output(feature_matrix, weights):
# assume feature_matrix is a numpy matrix containing the features as columns and weights is a corresponding numpy array
# create the predictions vector by using np.dot()
predictions = np.dot(feature_matrix, weights)
return(predictions)
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
# If feature_is_constant is True, derivative is twice the dot product of errors and feature
derivative = 2* np.dot(errors, feature)
# Otherwise, derivative is twice the dot product plus 2*l2_penalty*weight
if not feature_is_constant:
derivative += 2*l2_penalty*weight
return derivative
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.])
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,1], my_weights[1], 1, False)
print np.sum(errors*example_features[:,1])*2+20.
print ''
# next two lines should print the same values
print feature_derivative_ridge(errors, example_features[:,0], my_weights[0], 1, True)
print np.sum(errors)*2.
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations=100):
weights = np.array(initial_weights) # make sure it's a numpy array
#while not reached maximum number of iterations:
for it in xrange(max_iterations):
# compute the predictions based on feature_matrix and weights using your predict_output() function
predictions = predict_output(feature_matrix, weights)
# compute the errors as predictions - output
errors = predictions - output
for i in xrange(len(weights)): # loop over each weight
# Recall that feature_matrix[:,i] is the feature column associated with weights[i]
# compute the derivative for weight[i].
#(Remember: when i=0, you are computing the derivative of the constant!)
feature = feature_matrix[:,i]
weight = weights[i]
derivative = feature_derivative_ridge(errors, feature, weight, l2_penalty, i == 0)
# subtract the step size times the derivative from the current weight
weights[i] -= step_size * derivative
return weights
simple_features = ['sqft_living']
my_output = 'price'
train_data,test_data = sales.random_split(.8,seed=0)
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
l2_penalty = 0
simple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
simple_weights_0_penalty
l2_penalty = 1e11
simple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
simple_weights_high_penalty
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
def rss(predictions, output):
df = predictions - output
return np.dot(df, df)
predictions = predict_output(simple_test_feature_matrix, initial_weights)
rss(predictions, test_output)
predictions = predict_output(simple_test_feature_matrix, simple_weights_0_penalty)
rss(predictions, test_output)
predictions = predict_output(simple_test_feature_matrix, simple_weights_high_penalty)
rss(predictions, test_output)
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
l2_penalty = 0
multiple_weights_0_penalty=ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
multiple_weights_0_penalty
l2_penalty = 1e11
multiple_weights_high_penalty=ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
multiple_weights_high_penalty
predictions = predict_output(test_feature_matrix, initial_weights)
rss(predictions, test_output)
predictions = predict_output(test_feature_matrix, multiple_weights_0_penalty)
rss(predictions, test_output)
predictions = predict_output(test_feature_matrix, multiple_weights_high_penalty)
rss(predictions, test_output)
predict_output(test_feature_matrix, multiple_weights_0_penalty)[0]
print predict_output(test_feature_matrix, multiple_weights_high_penalty)[0], test_output[0]
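# --- Editor's sketch (not part of the original assignment): ridge regression also has a
# closed-form solution, w = (H^T H + l2 * I')^(-1) H^T y, where I' is the identity with a
# zero in the intercept position (the intercept is not regularized above either). Comparing
# it with the gradient-descent weights is a quick convergence check; with only 1000 small
# steps the two will agree only approximately.
def ridge_closed_form(feature_matrix, output, l2_penalty):
    penalty = l2_penalty * np.identity(feature_matrix.shape[1])
    penalty[0, 0] = 0.0  # do not regularize the constant feature
    gram = np.dot(feature_matrix.T, feature_matrix)
    return np.linalg.solve(gram + penalty, np.dot(feature_matrix.T, output))

print ridge_closed_form(feature_matrix, output, 1e11)
print multiple_weights_high_penalty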
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.
Step3: Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights
Step4: Computing the Derivative
Step5: To test your feature derivative run the following
Step6: Gradient Descent
Step7: Visualizing effect of L2 penalty
Step8: Let us split the dataset into training set and test set. Make sure to use seed=0
Step9: In this part, we will only use 'sqft_living' to predict 'price'. Use the get_numpy_data function to get a Numpy versions of your data with only this feature, for both the train_data and the test_data.
Step10: Let's set the parameters for our optimization
Step11: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step12: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step13: This code will plot the two learned models. (The blue line is for the model with no regularization and the red line is for the one with high regularization.)
Step14: Compute the RSS on the TEST data for the following three sets of weights
Step15: QUIZ QUESTIONS
Step16: We need to re-inialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
Step17: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step18: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step19: Compute the RSS on the TEST data for the following three sets of weights
Step20: Predict the house price for the 1st house in the test set using the no regularization and high regularization models. (Remember that python starts indexing from 0.) How far is the prediction from the actual price? Which weights perform best for the 1st house?
|
11,739
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'messy-consortium', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
11,740
|
<ASSISTANT_TASK:>
Python Code:
ph_sel_name = "all-ph"
data_id = "12d"
# ph_sel_name = "all-ph"
# data_id = "7d"
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
file_list
## Selection for POLIMI 2012-12-6 dataset
# file_list.pop(2)
# file_list = file_list[1:-2]
# display(file_list)
# labels = ['22d', '27d', '17d', '12d', '7d']
## Selection for P.E. 2012-12-6 dataset
# file_list.pop(1)
# file_list = file_list[:-1]
# display(file_list)
# labels = ['22d', '27d', '17d', '12d', '7d']
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
ph_sel_map = {'all-ph': Ph_sel('all'), 'AexAem': Ph_sel(Aex='Aem')}
ph_sel = ph_sel_map[ph_sel_name]
data_id, ph_sel_name
d = loader.photon_hdf5(filename=files_dict[data_id])
d.ph_times_t, d.det_t
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
from mpl_toolkits.axes_grid1 import AxesGrid
import lmfit
print('lmfit version:', lmfit.__version__)
assert d.dir_ex == 0
assert d.leakage == 0
d.burst_search(m=10, F=6, ph_sel=ph_sel)
print(d.ph_sel, d.num_bursts)
ds_sa = d.select_bursts(select_bursts.naa, th1=30)
ds_sa.num_bursts
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30
ds_saw = d.select_bursts_mask_apply([mask])
ds_sas0 = ds_sa.select_bursts(select_bursts.S, S2=0.10)
ds_sas = ds_sa.select_bursts(select_bursts.S, S2=0.15)
ds_sas2 = ds_sa.select_bursts(select_bursts.S, S2=0.20)
ds_sas3 = ds_sa.select_bursts(select_bursts.S, S2=0.25)
ds_st = d.select_bursts(select_bursts.size, add_naa=True, th1=30)
ds_sas.num_bursts
dx = ds_sas0
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas2
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
dx = ds_sas3
size = dx.na[0] + dx.nd[0]
s_hist, s_bins = np.histogram(size, bins=np.r_[-15 : 25 : 1], density=True)
s_ax = s_bins[:-1] + 0.5*(s_bins[1] - s_bins[0])
plot(s_ax, s_hist, '-o', alpha=0.5)
plt.title('(nd + na) for A-only population using different S cutoff');
dx = ds_sa
alex_jointplot(dx);
dplot(ds_sa, hist_S)
dx = ds_sa
bin_width = 0.03
bandwidth = 0.03
bins = np.r_[-0.2 : 1 : bin_width]
x_kde = np.arange(bins.min(), bins.max(), 0.0002)
## Weights
weights = None
## Histogram fit
fitter_g = mfit.MultiFitter(dx.S)
fitter_g.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_g.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_hist_orig = fitter_g.hist_pdf
S_2peaks = fitter_g.params.loc[0, 'p1_center']
dir_ex_S2p = S_2peaks/(1 - S_2peaks)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p)
## KDE
fitter_g.calc_kde(bandwidth=bandwidth)
fitter_g.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak = fitter_g.kde_max_pos[0]
dir_ex_S_kde = S_peak/(1 - S_peak)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_g, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=True)
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak*100));
## 2-Asym-Gaussian
fitter_ag = mfit.MultiFitter(dx.S)
fitter_ag.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_ag.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.1, p2_center=0.4))
#print(fitter_ag.fit_obj[0].model.fit_report())
S_2peaks_a = fitter_ag.params.loc[0, 'p1_center']
dir_ex_S2pa = S_2peaks_a/(1 - S_2peaks_a)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2pa)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_g, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks*100))
mfit.plot_mfit(fitter_ag, ax=ax[1])
ax[1].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_a*100));
dx = ds_sa.select_bursts(select_bursts.nd, th1=-100, th2=0)
fitter = bext.bursts_fitter(dx, 'S')
fitter.fit_histogram(model = mfit.factory_gaussian(center=0.1))
S_1peaks_th = fitter.params.loc[0, 'center']
dir_ex_S1p = S_1peaks_th/(1 - S_1peaks_th)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S1p)
mfit.plot_mfit(fitter)
plt.xlim(-0.1, 0.6)
dx = ds_sa
## Weights
weights = 1 - mfit.gaussian(dx.S[0], fitter_g.params.loc[0, 'p2_center'], fitter_g.params.loc[0, 'p2_sigma'])
weights[dx.S[0] >= fitter_g.params.loc[0, 'p2_center']] = 0
## Histogram fit
fitter_w1 = mfit.MultiFitter(dx.S)
fitter_w1.weights = [weights]
fitter_w1.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w1.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w1 = fitter_w1.params.loc[0, 'p1_center']
dir_ex_S2p_w1 = S_2peaks_w1/(1 - S_2peaks_w1)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w1)
## KDE
fitter_w1.calc_kde(bandwidth=bandwidth)
fitter_w1.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w1 = fitter_w1.kde_max_pos[0]
dir_ex_S_kde_w1 = S_peak_w1/(1 - S_peak_w1)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w1)
def plot_weights(x, weights, ax):
ax2 = ax.twinx()
x_sort = x.argsort()
ax2.plot(x[x_sort], weights[x_sort], color='k', lw=4, alpha=0.4)
ax2.set_ylabel('Weights');
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_w1, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
plot_weights(dx.S[0], weights, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w1*100))
mfit.plot_mfit(fitter_w1, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
plot_weights(dx.S[0], weights, ax=ax[1])
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w1*100));
## Weights
sizes = dx.nd[0] + dx.na[0] #- dir_ex_S_kde_w3*dx.naa[0]
weights = dx.naa[0] - abs(sizes)
weights[weights < 0] = 0
## Histogram
fitter_w4 = mfit.MultiFitter(dx.S)
fitter_w4.weights = [weights]
fitter_w4.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w4.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w4 = fitter_w4.params.loc[0, 'p1_center']
dir_ex_S2p_w4 = S_2peaks_w4/(1 - S_2peaks_w4)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w4)
## KDE
fitter_w4.calc_kde(bandwidth=bandwidth)
fitter_w4.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w4 = fitter_w4.kde_max_pos[0]
dir_ex_S_kde_w4 = S_peak_w4/(1 - S_peak_w4)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w4)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(fitter_w4, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
#plot_weights(dx.S[0], weights, ax=ax[0])
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w4*100))
mfit.plot_mfit(fitter_w4, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
#plot_weights(dx.S[0], weights, ax=ax[1])
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w4*100));
mask = (d.naa[0] - np.abs(d.na[0] + d.nd[0])) > 30
ds_saw = d.select_bursts_mask_apply([mask])
print(ds_saw.num_bursts)
dx = ds_saw
## Weights
weights = None
## 2-Gaussians
fitter_w5 = mfit.MultiFitter(dx.S)
fitter_w5.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w5.fit_histogram(model = mfit.factory_two_gaussians(p1_center=0.1, p2_center=0.4))
S_2peaks_w5 = fitter_w5.params.loc[0, 'p1_center']
dir_ex_S2p_w5 = S_2peaks_w5/(1 - S_2peaks_w5)
print('Fitted direct excitation (na/naa) [2-Gauss]:', dir_ex_S2p_w5)
## KDE
fitter_w5.calc_kde(bandwidth=bandwidth)
fitter_w5.find_kde_max(x_kde, xmin=0, xmax=0.15)
S_peak_w5 = fitter_w5.kde_max_pos[0]
S_2peaks_w5_fiterr = fitter_w5.fit_res[0].params['p1_center'].stderr
dir_ex_S_kde_w5 = S_peak_w5/(1 - S_peak_w5)
print('Fitted direct excitation (na/naa) [KDE]: ', dir_ex_S_kde_w5)
## 2-Asym-Gaussians
fitter_w5a = mfit.MultiFitter(dx.S)
fitter_w5a.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
fitter_w5a.fit_histogram(model = mfit.factory_two_asym_gaussians(p1_center=0.05, p2_center=0.3))
S_2peaks_w5a = fitter_w5a.params.loc[0, 'p1_center']
dir_ex_S2p_w5a = S_2peaks_w5a/(1 - S_2peaks_w5a)
#print(fitter_w5a.fit_obj[0].model.fit_report(min_correl=0.5))
print('Fitted direct excitation (na/naa) [2-Asym-Gauss]:', dir_ex_S2p_w5a)
fig, ax = plt.subplots(1, 3, figsize=(19, 4.5))
mfit.plot_mfit(fitter_w5, ax=ax[0])
mfit.plot_mfit(fitter_g, ax=ax[0], plot_model=False, plot_kde=False)
ax[0].set_title('2-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5*100))
mfit.plot_mfit(fitter_w5, ax=ax[1], plot_model=False, plot_kde=True)
mfit.plot_mfit(fitter_g, ax=ax[1], plot_model=False, plot_kde=False)
ax[1].set_title('KDE fit (S_fit = %.2f %%)' % (S_peak_w5*100));
mfit.plot_mfit(fitter_w5a, ax=ax[2])
mfit.plot_mfit(fitter_g, ax=ax[2], plot_model=False, plot_kde=False)
ax[2].set_title('2-Asym-Gaussians fit (S_fit = %.2f %%)' % (S_2peaks_w5a*100));
sample = data_id
n_bursts_aa = ds_sas.num_bursts[0]
variables = ('sample n_bursts_aa dir_ex_S1p dir_ex_S_kde dir_ex_S2p dir_ex_S2pa '
'dir_ex_S2p_w1 dir_ex_S_kde_w1 dir_ex_S_kde_w4 dir_ex_S_kde_w5 dir_ex_S2p_w5 dir_ex_S2p_w5a '
'S_2peaks_w5 S_2peaks_w5_fiterr\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-PR-raw-dir_ex_aa-fit-%s.csv' % ph_sel_name, 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
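# --- Editor's sketch (not part of the original analysis): one row is appended to the CSV per
# processed sample, so after running the notebook for each `data_id` the accumulated results
# can be loaded back for a quick cross-sample comparison. `results` is a name introduced here.
import pandas as pd
results = pd.read_csv('results/usALEX-5samples-PR-raw-dir_ex_aa-fit-%s.csv' % ph_sel_name,
                      skipinitialspace=True)
results[['sample', 'dir_ex_S_kde_w5', 'S_2peaks_w5']]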
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: Check that the folder exists
Step4: List of data files in data_dir
Step5: Data load
Step6: Laser alternation selection
Step7: We need to define some parameters
Step8: We should check if everything is OK with an alternation histogram
Step9: If the plot looks good we can apply the parameters with
Step10: Measurements infos
Step11: Or check the measurements duration
Step12: Compute background
Step13: Burst search and selection
Step14: Preliminary selection and plots
Step15: A-direct excitation fitting
Step16: Zero threshold on nd
Step17: Selection 1
Step18: Selection 2
Step19: Selection 3
Step20: Save data to file
Step21: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step22: This is just a trick to format the different variables
|
11,741
|
<ASSISTANT_TASK:>
Python Code:
ls
# the name of the file that you wish to open
specfilename = '20151111'
# the name of the x column
x = 'MCMY'
# the name of the detector (y column)
y = 'PD21'
# the name of the monitor column
monitor = 'SRcur'
# the scans that you wish to process
scans = [108, 110, 112, 114]
# the name of the output file that you are going to write
output_file_name = '-'.join([specfilename, x, y, monitor]) + '-' + '_'.join([str(scan) for scan in scans])
print('output file name is going to be: %s' % output_file_name)
interpolation_mode = 'linear'
# The number to divide the step size by
# use a value < 1 for more interpolated points
# use a value > 1 for less interpolated points
densify_interpolated_axis_factor = 1
# Some imports that are required for this notebook
import matplotlib.pyplot as plt
from lmfit.models import LorentzianModel, LinearModel
import numpy as np
import os
import pandas as pd
%matplotlib inline
class Specfile:
def __init__(self, filename):
self.filename = os.path.abspath(filename)
with open(self.filename, 'r') as f:
scan_data = f.read().split('#S')
scan_data = [section.split('\n') for section in scan_data]
self.header = scan_data.pop(0)
self.scans = {}
for scan in scan_data:
sid = int(scan[0].split()[0])
self.scans[sid] = Specscan(self, scan)
def __getitem__(self, key):
return self.scans[key]
def __len__(self):
return len(self.scans)-1
def __iter__(self):
return (self.scans[sid] for sid in sorted(self.scans.keys()))
class Specscan:
def __init__(self, specfile, raw_scan_data):
self.specfile = specfile
self.raw_scan_data = raw_scan_data
header_row = self.raw_scan_data.pop(0).split()
self.scan_id = int(header_row.pop(0))
self.scan_command = header_row.pop(0)
self.scan_args = header_row
for row in self.raw_scan_data:
if row.startswith('#L'):
self.col_names = row.split()[1:]
scan_data = [row.split() for row in self.raw_scan_data
if not row.startswith('#') if row]
self.scan_data = pd.DataFrame(data=scan_data, columns=self.col_names, dtype=float)
def __repr__(self):
return 'Specfile("%s")[%s]' % (self.specfile.filename, self.scan_id)
def __str__(self):
return str(self.scan_data)
def __len__(self):
return len(self.scan_data)
def plot(self, column_names=None, x=None):
if x is None:
x = self.scan_data.columns[0]
if column_names is None:
column_names = self.scan_data.columns
ncols = 2
nrows = int(np.ceil(len(column_names)/ncols))
try:
self.ncols
self.nrows
except AttributeError:
self.ncols = 0
self.nrows = 0
if self.ncols != ncols or self.nrows != nrows:
self.ncols, self.nrows = ncols, nrows
self.fig, self.axes = plt.subplots(nrows=nrows,
ncols=ncols,
figsize=(5*ncols, 2*nrows))
self.arts = {}
for data, ax in zip(column_names, self.axes.ravel()):
ax.cla()
self.arts[data] = ax.plot(self.scan_data[x], self.scan_data[data], label=data)
ax.legend(loc=0)
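# --- Editor's sketch (illustrative, not from the original notebook): Specfile supports
# indexing, len() and iteration, so a quick survey of every scan in the file could look like
# this. `sf` is a name introduced here.
sf = Specfile(specfilename)
for scan in sf:
    print(scan.scan_id, scan.scan_command, len(scan))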
def fit(x, y, bounds=None):
    """Fit a Lorentzian peak plus a linear background to `y` versus `x`.

    Parameters
    ----------
    x : array-like
        Independent variable (e.g. the motor position or energy axis).
    y : array-like
        Detector signal to fit.
    bounds : int, optional
        Number of points on either side of the peak to restrict the fit to.

    Returns
    -------
    fit : lmfit.model.ModelFit
        The result of fitting the data to a linear + Lorentzian model.

    Examples
    --------
    >>> result = fit(xdata, ydata)
    >>> result.plot()
    """
lorentzian = LorentzianModel()
linear = LinearModel()
    center = x[np.argmax(y)]
    peak_idx = int(np.argmax(y))
    if bounds is None:
        lower, upper = 0, len(x)
    else:
        # interpret `bounds` as a number of points on either side of the peak index,
        # so the clamping against 0 and len(x) below stays meaningful
        lower = peak_idx - bounds
        upper = peak_idx + bounds
if lower < 0:
lower = 0
if upper > len(x):
upper = len(x)
bounds = slice(lower, upper)
# print("Using bounds = {}".format(bounds))
y = y[bounds]
x = x[bounds]
# print("Using x = {}".format(x))
# print("Using y = {}".format(y))
lorentzian_params = lorentzian.guess(y, x=x, center=center)
linear_params = linear.guess(y, x=x)
lorentzian_params.update(linear_params)
model = lorentzian + linear
return model.fit(y, x=x, params=lorentzian_params)
def plotter(xy):
fig, ax = plt.subplots()
arts = {}
for sid, (xdata, ydata) in zip(scans, xy):
arts[sid] = ax.plot(xdata, ydata, '-o', label=sid)
ax.legend(loc=0)
f = Specfile(specfilename)
raw = [(
f[scan_id].scan_data[x].values,
f[scan_id].scan_data[y].values
) for scan_id in scans]
# Use the plotter helper function defined above
plotter(raw)
normalized = [(
x,
y / f[scan_id].scan_data[monitor].values
) for scan_id, (x, y) in zip(scans, raw)]
# Use the plotter helper function defined above
plotter(normalized)
fits = [fit(xdata, ydata) for (xdata, ydata) in normalized]
fig_kws = {'figsize': [15, 15]}
for sid, f in zip(scans, fits):
title_dict = {'title': 'scan_id: %s' % sid}
f.plot(numpoints=len(f.data)*10, ax_res_kws=title_dict,
ax_fit_kws=title_dict, fig_kws=fig_kws)
zeroed = [(np.array(f.userkws['x']-f.params['center'], dtype=float), f.data) for f in fits]
plotter(zeroed)
diff = np.average([np.average(np.diff(x)) for x, y in zeroed])
minval = np.min([np.min(x) for x, y in zeroed])
maxval = np.max([np.max(x) for x, y in zeroed])
diff, minval, maxval
new_axis = np.arange(minval, maxval, diff / densify_interpolated_axis_factor)
from scipy.interpolate import interp1d
interpolaters = [interp1d(x, y, kind=interpolation_mode,
bounds_error=False,
fill_value=np.nan)
for x, y in zeroed]
# Create a dict of the interpolated values so it can easily be passed to pandas
interpolated = {sid: interpolator(new_axis)
for sid, interpolator in zip(scans, interpolaters)}
df = pd.DataFrame(interpolated, index=new_axis)
df.plot(style='-o')
df.sum(axis=1).plot(style='-o')
(df.sum(axis=1) / df.count(axis=1)).plot(style='-o')
df.dropna().sum(axis=1).plot(style='-o')
df.dropna().sum(axis=1).to_csv(output_file_name, encoding='ascii', sep=',')
!cat 20151111-MCMY-PD21-SRcur-108_110_112_114
!pwd
print(output_file_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify the kind of interpolation you want to use as a string
Step2: The boring stuff
Step4: Defining required objects and functions
Step5: The exciting stuff!
Step6: Plot the raw data to make sure it looks ok
Step7: Normalize the data by the monitor value defined in the first cell
Step8: Plot the normalized data to make sure it looks ok
Step9: Fit to a linear model plus a lorentzian model
Step10: Plot the fits to make sure they look ok!
Step11: Shift the peaks to a zero energy transfer based on the center of the Lorentzian model
Step12: Plot the shifted peaks to make sure they look ok!
Step13: Compute the new axis onto which the data should be interpolated
Step14: Set up the interpolator
Step15: Plot the new interpolated values
Step16: Just take the straight sum of all data in the data frame
Step17: Ok, that looks a little weird, let's divide the sum by the number of channels at each x value
Step18: Hmm, that still looks a little strange, let's just sum the channels where they all have values
Step19: Output the data
Step20: Show that we actually output the data...
Step21: If you are working in the notebook and want to download this data file to your local computer, follow these steps
|
11,742
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import astropy.units as u
from astropy import time
from poliastro import iod
from poliastro.plotting import plot
from poliastro.bodies import Sun, Earth
from poliastro.twobody import State
from poliastro import ephem
from jplephem.spk import SPK
ephem.download_kernel("de421")
r = [-6045, -3490, 2500] * u.km
v = [-3.457, 6.618, 2.533] * u.km / u.s
ss = State.from_vectors(Earth, r, v)
with plt.style.context('pybonacci'):
plot(ss)
epoch = time.Time("2015-06-21 16:35")
r_, v_ = ephem.planet_ephem(ephem.EARTH, epoch)
r_
v_.to(u.km / u.s)
date_launch = time.Time('2011-11-26 15:02', scale='utc')
date_arrival = time.Time('2012-08-06 05:17', scale='utc')
tof = date_arrival - date_launch
r0, _ = ephem.planet_ephem(ephem.EARTH, date_launch)
r, _ = ephem.planet_ephem(ephem.MARS, date_arrival)
(v0, v), = iod.lambert(Sun.k, r0, r, tof)
v0
v
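# --- Editor's sketch (illustrative, not part of the original notebook): the magnitude of the
# departure hyperbolic excess velocity is roughly the difference between the transfer velocity
# returned by the Lambert solver and Earth's heliocentric velocity at launch. `v_earth` and
# `v_inf` are names introduced here.
_, v_earth = ephem.planet_ephem(ephem.EARTH, date_launch)
v_inf = v0 - v_earth
print(np.sqrt((v_inf ** 2).sum()).to(u.km / u.s))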
def go_to_mars(offset=500., tof_=6000.):
# Initial data
N = 50
date_launch = time.Time('2016-03-14 09:31', scale='utc') + ((offset - 500.) * u.day)
date_arrival = time.Time('2016-10-19 16:00', scale='utc') + ((offset - 500.) * u.day)
tof = tof_ * u.h
# Calculate vector of times from launch and arrival Julian days
jd_launch = date_launch.jd
jd_arrival = jd_launch + tof.to(u.day).value
jd_vec = np.linspace(jd_launch, jd_arrival, num=N)
times_vector = time.Time(jd_vec, format='jd')
rr_earth, vv_earth = ephem.planet_ephem(ephem.EARTH, times_vector)
rr_mars, vv_mars = ephem.planet_ephem(ephem.MARS, times_vector)
# Compute the transfer orbit!
r0 = rr_earth[:, 0]
rf = rr_mars[:, -1]
(va, vb), = iod.lambert(Sun.k, r0, rf, tof)
ss0_trans = State.from_vectors(Sun, r0, va, date_launch)
ssf_trans = State.from_vectors(Sun, rf, vb, date_arrival)
# Extract whole orbit of Earth, Mars and transfer (for plotting)
rr_trans = np.zeros_like(rr_earth)
rr_trans[:, 0] = r0
for ii in range(1, len(jd_vec)):
tof = (jd_vec[ii] - jd_vec[0]) * u.day
rr_trans[:, ii] = ss0_trans.propagate(tof).r
# Better compute backwards
jd_init = (date_arrival - 1 * u.year).jd
jd_vec_rest = np.linspace(jd_init, jd_launch, num=N)
times_rest = time.Time(jd_vec_rest, format='jd')
rr_earth_rest, _ = ephem.planet_ephem(ephem.EARTH, times_rest)
rr_mars_rest, _ = ephem.planet_ephem(ephem.MARS, times_rest)
# Plot figure
# To add arrows:
# https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/streamplot.py#L140
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
def plot_body(ax, r, color, size, border=False, **kwargs):
        """Plots body in axes object."""
return ax.plot(*r[:, None], marker='o', color=color, ms=size, mew=int(border), **kwargs)
# I like color
color_earth0 = '#3d4cd5'
color_earthf = '#525fd5'
color_mars0 = '#ec3941'
color_marsf = '#ec1f28'
color_sun = '#ffcc00'
color_orbit = '#888888'
color_trans = '#444444'
# Plotting orbits is easy!
ax.plot(*rr_earth.to(u.km).value, color=color_earth0)
ax.plot(*rr_mars.to(u.km).value, color=color_mars0)
ax.plot(*rr_trans.to(u.km).value, color=color_trans)
ax.plot(*rr_earth_rest.to(u.km).value, ls='--', color=color_orbit)
ax.plot(*rr_mars_rest.to(u.km).value, ls='--', color=color_orbit)
# But plotting planets feels even magical!
plot_body(ax, np.zeros(3), color_sun, 16)
plot_body(ax, r0.to(u.km).value, color_earth0, 8)
plot_body(ax, rr_earth[:, -1].to(u.km).value, color_earthf, 8)
plot_body(ax, rr_mars[:, 0].to(u.km).value, color_mars0, 8)
plot_body(ax, rf.to(u.km).value, color_marsf, 8)
# Add some text
ax.text(-0.75e8, -3.5e8, -1.5e8, "ExoMars mission:\nfrom Earth to Mars",
size=20, ha='center', va='center', bbox={"pad": 30, "lw": 0, "fc": "w"})
ax.text(r0[0].to(u.km).value * 2.4, r0[1].to(u.km).value * 0.4, r0[2].to(u.km).value * 1.25,
"Earth at launch\n({})".format(date_launch.to_datetime().strftime("%d %b")),
ha="left", va="bottom", backgroundcolor='#ffffff')
ax.text(rf[0].to(u.km).value * 1.1, rf[1].to(u.km).value * 1.1, rf[2].to(u.km).value,
"Mars at arrival\n({})".format(date_arrival.to_datetime().strftime("%d %b")),
ha="left", va="top", backgroundcolor='#ffffff')
ax.text(-1.9e8, 8e7, 1e8, "Transfer\norbit", ha="right", va="center", backgroundcolor='#ffffff')
# Tune axes
ax.set_xlim(-3e8, 3e8)
ax.set_ylim(-3e8, 3e8)
ax.set_zlim(-3e8, 3e8)
# And finally!
ax.view_init(30, 260)
plt.show()
#fig.savefig("trans_30_260.png", bbox_inches='tight')
#return fig, ax
go_to_mars()
%matplotlib inline
from ipywidgets import interactive
from IPython.display import display
w = interactive(go_to_mars, offset=(0., 1000.), tof_=(100., 12000.))
display(w)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First
Step2: Second
Step3: Third
Step5: ...and it's pure Python!
Step6: Fifth
|
11,743
|
<ASSISTANT_TASK:>
Python Code:
import sympy as sp
from sympy.interactive import printing
printing.init_printing(use_latex=True)
from sympy.stats import Bernoulli, LogNormal, density, sample, P as Prob, E as Expected, variance
k1, k2 = sp.symbols('k1 k2', real=True)
p = sp.symbols('p', nonnegative=True)
Xs = sp.symbols('X')
X = Bernoulli('X', p=p, succ=k1, fail=k2)
Expected(X)
sp.Eq(p, sp.solve(Expected(X), p)[0])
# Doesn't work - same RV?
# FairCoin = X.subs({p: sp.S.Half, k1: 1, k2: -1})
# FairCoin2 = X.subs({p: sp.S.Half, k1: -1, k2: 1})
# Works
FairCoin = Bernoulli('X1', p=sp.S.Half, succ=1, fail=-1)
FairCoin2 = Bernoulli('X2', p=sp.S.Half, succ=1, fail=-1)
sample(FairCoin + FairCoin2, size=(10,))
Prob(X.subs({p: 1/2, k1:1, k2:-1}) > 0)
# Expected value of FairCoin toss with payoff $16 and $4.
# No risk aversion -> would pay up to this amount for the bet
Payoff = X.subs({p: sp.S.Half, k1: 16, k2: 4})
Expected(Payoff)
# Expected utility of the FairCoin toss with payoff $16 and $4 for sqrt utility
# i.e. U(W) = sqrt(W)
d = sp.Dummy()
U = sp.Lambda(d, sp.sqrt(d))
Expected(U(Payoff))
# Utility of keeping $10 rather than paying $10 for bet
U(10).evalf()
# Calculate the risk premium: what would you pay not to have to take the bet?
initial_wealth = 10
bet_cost = 10
sp.solve(U(initial_wealth-d) - Expected(U(initial_wealth + Payoff - bet_cost)), d)[0].evalf()
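# --- Editor's sketch (not part of the original notebook): a quick numeric check of the
# approximation pi ≈ (sigma_x^2 / 2) * R_A derived further below, against the exact risk
# premium of 1 obtained above (sqrt utility, wealth 10, fair 16/4 bet). The names defined
# here (w0, wsym, sigma_sq_payoff, R_A_sqrt_util) are introduced for this check only.
w0 = sp.Integer(10)
wsym = sp.symbols('w', positive=True)
sigma_sq_payoff = variance(Payoff)  # variance of the fair 16/4 bet is 36
R_A_sqrt_util = (-sp.diff(U(wsym), wsym, 2) / sp.diff(U(wsym), wsym)).subs(wsym, w0)  # 1/(2*10)
(sigma_sq_payoff / 2 * R_A_sqrt_util).evalf()  # ~0.9, close to the exact value of 1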
# General form
W, c, pis = sp.symbols('W, c, pi', real=True)
# Solve for cost
sp.Eq(c, sp.solve(Expected(X - c), c)[0].collect([k1, k2]))
sp.Eq(pis, sp.solve(U(W-pis) - Expected(U(W + X - Expected(X))), pis)[0].simplify().collect(p))
x = sp.stats.rv.RandomSymbol('x')
Us = sp.symbols('U', cls=sp.Function)
sp.Eq(Us(W - pis), Expected(Us(W + x)))
lhs = sp.series(Us(W - pis), pis, n=2).removeO().simplify()
lhs
sigma_sq_x = sp.symbols('sigma_x^2', positive=True)
rhs = Expected(
sp.series(Us(W + x), x, n=3).removeO()
).collect(Us(W)).subs(Expected(x), 0).subs({Expected(x**2): sigma_sq_x})
rhs
pi = sp.solve(lhs - rhs, pis)[0]
sp.Eq(pis, pi)
Ras, Rrs = sp.symbols('R_A R_R', positive=True)
Ra = pi/(sigma_sq_x/2)
Rr = W*Ras
Rau = lambda U: Ra.subs(Us(W), U).simplify().powsimp()
sp.Eq(Ras, Ra), sp.Eq(Rrs, Rr)
g, a, b, c = sp.symbols('gamma a b c', positive=True)
U_crras, U_caras, U_qs = sp.symbols('U_{CRRA} U_{CARA} U_Q', cls=sp.Function)
U_crra = W**(1-g)/(1-g)
U_cara = a - b*sp.exp(-c*W)
U_q = W - b/2*W**2
sp.Eq(U_crras(W), U_crra)
sp.Eq(Ras, Rau(U_crra)), sp.Eq(Rrs, Rr.subs(Ras, Rau(U_crra)))
sp.Eq(U_caras(W), U_cara)
sp.Eq(Ras, Rau(U_cara)), sp.Eq(Rrs, Rr.subs(Ras, Rau(U_cara)))
sp.Eq(U_qs(W), U_q)
sp.Eq(Ras, Rau(U_q)), sp.Eq(Rrs, Rr.subs(Ras, Rau(U_q)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start by looking at a fair lottery random. A Bernoulli distribution can be used to represent a fair lottery
Step2: Playing around with random variables
Step3: Worked example and definition of Utility
Step4: Now consider a utility function of the form $U(W) = \sqrt{W}$, where $W$ is the wealth of the player.
Step5: We see that the expected utility of the bet is less than the utility of the original \$10
Step6: Calculate the risk premium
Step7: We can go back to the general form for the lottery and calculate the risk premium $\pi$ as a function of the other
Step8: Relate risk premium to utility function curvature.
Step9: We get to the form for risk premium given in the text, using SymPy to do the required manipulations.
Step10: Then we can apply these definitions to some utility functions of interest
Step11: Constant Relative Risk Aversion
Step12: Constant Absolute Risk Aversion
Step13: Quadratic Utility
|
11,744
|
<ASSISTANT_TASK:>
Python Code:
number = int(input("Enter a number: "))
if number > 0:
print("The number is positive.")
number = int(input("Enter a number: "))
if number >= 0:
print("The number is zero or positive.")
else:
print("The number is negative.")
number = int(input("Enter a number: "))
if (number > 0) and (number < 10):
print("The number is between 0 and 10.")
else:
print("The number is not between 0 and 10.")
number = int(input("Enter a number: "))
if (3 == number) or (5 == number) or (7 == number):
print("Your number is a 357 digit.")
else:
print("Your number is not a 357 digit.")
###############################################################
# A program to calculate the area of a square. If the input
# square side is 0 or negative, we will print an error message
# and quit the program
###############################################################
square_side = int(input("Enter the square side: "))
is_greater_than_zero = square_side > 0
# Quit the program if the number is not greater than zero
if not is_greater_than_zero:
print("The square side needs to be greater than zero.")
quit()
# If we didn't quit the program, than the number is okay to work with
square_area = square_side ** 2
print("Square area is: %f" % square_area)
# The names
a_name = "Adriana"
e_name = "Ernest"
i_name = "Ian"
o_name = "Olivia"
u_name = "Ursula"
# Get a string from the keyboard
instr = input("Please enter a vowel: ")
out_template = "The name starting with \"%s\" is %s."
# Print the name or throw an exception if the given string is not a vowel
if ("a" == instr) or ("A" == instr):
print(out_template % (instr, a_name))
elif ("e" == instr) or ("E" == instr):
print(out_template % (instr, e_name))
elif ("i" == instr) or ("I" == instr):
print(out_template % (instr, i_name))
elif ("o" == instr) or ("O" == instr):
print(out_template % (instr, o_name))
elif ("u" == instr) or ("U" == instr):
print(out_template % (instr, u_name))
else:
print("The entered text is not a vowel.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The keyword <i>else</i> is used together with the keyword <i>if</i> and defines the <i>else</i> branch, or the "no" branch. That is the block of code that will execute if the condition in the branching statement (the <i>if</i> statement) is not satisfied.
Step2: If we want to test whether two or more conditions are satisfied, we use Python's logical operator AND (the mathematical conjunction operation, i.e. the mathematical "and").
Step3: If we want to test whether at least one of two or more conditions is satisfied, we use Python's logical operator OR (the mathematical disjunction operation, i.e. the mathematical "or").
Step4: If we want a block of code to execute only when a certain condition is <b>not</b> satisfied, we use the logical operator NOT (the mathematical negation operation).
Step5: The keyword <i>elif</i>, short for else-if, allows us to use the branching statement in the following way
|
11,745
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
from sklearn.linear_model import LinearRegression
model = LinearRegression(normalize=True)
print(model.normalize)
print(model)
x = np.arange(10)
y = 2 * x + 1
print(x)
print(y)
plt.plot(x, y, 'o');
# The input data for sklearn is 2D: (samples == 10 x features == 1)
X = x[:, np.newaxis]
print(X)
print(y)
# fit the model on our data
model.fit(X, y)
# underscore at the end indicates a fit parameter
print(model.coef_)
print(model.intercept_)
from sklearn import neighbors, datasets
iris = datasets.load_iris()
X, y = iris.data, iris.target
# create the model
knn = neighbors.KNeighborsClassifier(n_neighbors=5)
# fit the model
knn.fit(X, y)
# What kind of iris has 3cm x 5cm sepal and 4cm x 2cm petal?
# call the "predict" method:
result = knn.predict([[3, 5, 4, 2],])
print(iris.target_names[result])
knn.predict_proba([[3, 5, 4, 2],])
from fig_code import plot_iris_knn
plot_iris_knn()
from sklearn.svm import SVC
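# --- Editor's sketch (one possible answer to the exercise this import hints at, not the
# notebook's own solution): swap the nearest-neighbour classifier for a support vector
# classifier on the same iris data and repeat the prediction.
svc = SVC(kernel='linear')
svc.fit(X, y)
print(iris.target_names[svc.predict([[3, 5, 4, 2]])])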
# Create some simple data
import numpy as np
np.random.seed(0)
X = np.random.random(size=(20, 1))
y = 3 * X.squeeze() + 2 + np.random.randn(20)
plt.plot(X.squeeze(), y, 'o');
model = LinearRegression()
model.fit(X, y)
# Plot the data and the model prediction
X_fit = np.linspace(0, 1, 100)[:, np.newaxis]
y_fit = model.predict(X_fit)
plt.plot(X.squeeze(), y, 'o')
plt.plot(X_fit.squeeze(), y_fit);
# Fit a Random Forest
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X, y)
# Plot the data and the model prediction
X_fit = np.linspace(0, 1, 100)[:, np.newaxis]
y_fit = model.predict(X_fit)
plt.plot(X.squeeze(), y, 'o')
plt.plot(X_fit.squeeze(), y_fit);
X, y = iris.data, iris.target
from sklearn.decomposition import PCA
pca = PCA(n_components=0.95)
pca.fit(X)
X_reduced = pca.transform(X)
print("Reduced dataset shape:", X_reduced.shape)
import pylab as plt
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y,
cmap='RdYlBu')
print("Meaning of the 2 components:")
for component in pca.components_:
print(" + ".join("%.3f x %s" % (value, name)
for value, name in zip(component,
iris.feature_names)))
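# --- Editor's sketch: n_components=0.95 keeps just enough components to explain 95% of the
# variance; the per-component fractions are available on the fitted estimator.
print(pca.explained_variance_ratio_)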
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=3, random_state=0) # Fixing the RNG in kmeans
k_means.fit(X)
y_pred = k_means.predict(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_pred,
cmap='RdYlBu');
from sklearn.neighbors import KNeighborsClassifier
X, y = iris.data, iris.target
clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(X, y)
y_pred = clf.predict(X)
print(np.all(y == y_pred))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y, y_pred))
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y)
clf.fit(Xtrain, ytrain)
ypred = clf.predict(Xtest)
print(confusion_matrix(ytest, ypred))
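# --- Editor's sketch: scikit-learn can also summarise per-class precision and recall directly,
# which complements the confusion matrix above.
from sklearn.metrics import classification_report
print(classification_report(ytest, ypred))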
from IPython.display import Image
Image("http://scikit-learn.org/dev/_static/ml_map.png")
from sklearn import datasets
digits = datasets.load_digits()
digits.images.shape
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(digits.images[i], cmap='binary', interpolation='nearest')
ax.text(0.05, 0.05, str(digits.target[i]),
transform=ax.transAxes, color='green')
ax.set_xticks([])
ax.set_yticks([])
# The images themselves
print(digits.images.shape)
print(digits.images[0])
# The data for use in our algorithms
print(digits.data.shape)
print(digits.data[0])
# The target label
print(digits.target)
from sklearn.manifold import Isomap
iso = Isomap(n_components=2)
data_projected = iso.fit_transform(digits.data)
data_projected.shape
plt.scatter(data_projected[:, 0], data_projected[:, 1], c=digits.target,
edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('nipy_spectral', 10));
plt.colorbar(label='digit label', ticks=range(10))
plt.clim(-0.5, 9.5)
from sklearn.model_selection import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(digits.data, digits.target,
random_state=2)
print(Xtrain.shape, Xtest.shape)
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(penalty='l2')
clf.fit(Xtrain, ytrain)
ypred = clf.predict(Xtest)
from sklearn.metrics import accuracy_score
accuracy_score(ytest, ypred)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(ytest, ypred))
plt.imshow(np.log(confusion_matrix(ytest, ypred)),
cmap='Blues', interpolation='nearest')
plt.grid(False)
plt.ylabel('true')
plt.xlabel('predicted');
fig, axes = plt.subplots(10, 10, figsize=(8, 8))
fig.subplots_adjust(hspace=0.1, wspace=0.1)
for i, ax in enumerate(axes.flat):
ax.imshow(Xtest[i].reshape(8, 8), cmap='binary')
ax.text(0.05, 0.05, str(ypred[i]),
transform=ax.transAxes,
color='green' if (ytest[i] == ypred[i]) else 'red')
ax.set_xticks([])
ax.set_yticks([])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Scikit-learn Estimator Object
Step2: Estimator parameters
Step3: Estimated Model parameters
Step4: The model found a line with a slope 2 and intercept 1, as we'd expect.
Step5: You can also do probabilistic predictions
Step6: Exercise
Step7: Regression Example
Step8: As above, we can plot a line of best fit
Step9: Scikit-learn also has some more sophisticated models, which can respond to finer features in the data
Step10: Whether either of these is a "good" fit or not depends on a number of things; we'll discuss details of how to choose a model later in the tutorial.
Step11: Clustering
Step12: Recap
Step13: A more useful way to look at the results is to view the confusion matrix, or the matrix showing the frequency of inputs and outputs
Step14: For each class, all 50 training samples are correctly identified. But this does not mean that our model is perfect! In particular, such a model generalizes extremely poorly to new data. We can simulate this by splitting our data into a training set and a testing set. Scikit-learn contains some convenient routines to do this
Step15: This paints a better picture of the true performance of our classifier
Step16: Original source on the scikit-learn website
Step17: Let's plot a few of these
Step18: Here the data is simply each pixel value within an 8x8 grid
Step19: So our data have 1797 samples in 64 dimensions.
Step20: We see here that the digits are fairly well-separated in the parameter space; this tells us that a supervised classification algorithm should perform fairly well. Let's give it a try.
Step21: Let's use a simple logistic regression which (despite its confusing name) is a classification algorithm
Step22: We can check our classification accuracy by comparing the true values of the test set to the predictions
Step23: This single number doesn't tell us where we've gone wrong
Step24: We might also take a look at some of the outputs along with their predicted labels. We'll make the bad labels red
|
11,746
|
<ASSISTANT_TASK:>
Python Code:
# NBVAL_SKIP
from openeye import oechem # OpenEye Python toolkits
import oenotebook as oenb
# Check license
print("Is your OEChem licensed? ", oechem.OEChemIsLicensed())
from openeye import oeomega # Omega toolkit
from openeye import oequacpac #Charge toolkit
from openeye import oedocking # Docking toolkit
from oeommtools import utils as oeommutils # Tools for OE/OpenMM
from simtk import unit #Unit handling for OpenMM
from simtk.openmm import app
from simtk.openmm.app import PDBFile
from openff.toolkit.typing.engines.smirnoff import *
import os
from pdbfixer import PDBFixer # for solvating
# Where will we write outputs? Directory will be created if it does not exist
datadir = 'datafiles'
# Where will we download the host file from? The below is an uncharged host
#host_source = 'https://raw.githubusercontent.com/MobleyLab/SAMPL6/master/host_guest/OctaAcidsAndGuests/OA.mol2' #octa acid
# Use file provided in this directory - already charged
host_source = 'OA.mol2'
# What SMILES string for the guest? Should be isomeric SMILES
guest_smiles = 'OC(CC1CCCC1)=O' # Use cyclopentyl acetic acid, the first SAMPL6 octa acid guest
# Another useful source of host-guest files is the benchmarksets repo, e.g. github.com/mobleylab/benchmarksets
# This notebook has also been tested on CB7 Set 1 host-cb7.mol2 with SMILES CC12CC3CC(C1)(CC(C3)(C2)[NH3+])C.
# NBVAL_SKIP
# Create empty OEMol
mol = oechem.OEMol()
# Convert SMILES
oechem.OESmilesToMol(mol, guest_smiles)
# Draw
oenb.draw_mol(mol)
# NBVAL_SKIP
# Output host and guest files
hostfile = os.path.join(datadir, 'host.mol2')
guestfile = os.path.join(datadir, 'guest.mol2')
# Create data dir if not present
if not os.path.isdir(datadir):
os.mkdir(datadir)
# Set host file name and retrieve file
if 'http' in host_source:
import urllib
urllib.request.urlretrieve(host_source, hostfile)
else:
import shutil
shutil.copy(host_source, hostfile)
# NBVAL_SKIP
# Read in host file
ifile = oechem.oemolistream(hostfile)
host = oechem.OEMol()
oechem.OEReadMolecule( ifile, host)
ifile.close()
# Prepare a receptor - Start by getting center of mass to use as a hint for where to dock
com = oechem.OEFloatArray(3)
oechem.OEGetCenterOfMass(host, com)
# Create receptor, as per https://docs.eyesopen.com/toolkits/python/dockingtk/receptor.html#creating-a-receptor
receptor = oechem.OEGraphMol()
oedocking.OEMakeReceptor(receptor, host, com[0], com[1], com[2])
# NBVAL_SKIP
#initialize omega for conformer generation
omega = oeomega.OEOmega()
omega.SetMaxConfs(100) #Generate up to 100 conformers since we'll use for docking
omega.SetIncludeInput(False)
omega.SetStrictStereo(True) #Refuse to generate conformers if stereochemistry not provided
#Initialize charge generation
chargeEngine = oequacpac.OEAM1BCCCharges()
# Initialize docking
dock = oedocking.OEDock()
dock.Initialize(receptor)
# Build OEMol from SMILES
# Generate new OEMol and parse SMILES
mol = oechem.OEMol()
oechem.OEParseSmiles( mol, guest_smiles)
# Set to use a simple neutral pH model
oequacpac.OESetNeutralpHModel(mol)
# Generate conformers with Omega; keep only best conformer
status = omega(mol)
if not status:
print("Error generating conformers for %s." % (guest_smiles))
#print(smi, name, mol.NumAtoms()) #Print debug info -- make sure we're getting protons added as we should
# Assign AM1-BCC charges
oequacpac.OEAssignCharges(mol, chargeEngine)
# Dock to host
dockedMol = oechem.OEGraphMol()
status = dock.DockMultiConformerMolecule(dockedMol, mol) #By default returns only top scoring pose
sdtag = oedocking.OEDockMethodGetName(oedocking.OEDockMethod_Chemgauss4)
oedocking.OESetSDScore(dockedMol, dock, sdtag)
dock.AnnotatePose(dockedMol)
# Write out docked pose if docking successful
if status == oedocking.OEDockingReturnCode_Success:
outmol = dockedMol
# Write out
tripos_mol2_filename = os.path.join(os.path.join(datadir, 'docked_guest.mol2'))
ofile = oechem.oemolostream( tripos_mol2_filename )
oechem.OEWriteMolecule( ofile, outmol)
ofile.close()
# Clean up residue names in mol2 files that are tleap-incompatible: replace substructure names with valid text.
infile = open( tripos_mol2_filename, 'r')
lines = infile.readlines()
infile.close()
newlines = [line.replace('<0>', 'GUEST') for line in lines]
outfile = open(tripos_mol2_filename, 'w')
outfile.writelines(newlines)
outfile.close()
else:
raise Exception("Error: Docking failed.")
# NBVAL_SKIP
# Import modules
import nglview
import mdtraj
# Load host structure ("trajectory")
traj = mdtraj.load(os.path.join(datadir, 'host.mol2'))
# Load guest structure
lig = mdtraj.load(os.path.join(tripos_mol2_filename))
# Figure out which atom indices correspond to the guest, for use in visualization
atoms_guest = [ traj.n_atoms+i for i in range(lig.n_atoms)]
# "Stack" host and guest Trajectory objects into a single object
complex = traj.stack(lig)
# Visualize
view = nglview.show_mdtraj(complex)
view.add_representation('spacefill', selection="all")
view.add_representation('spacefill', selection=atoms_guest, color='blue') #Adjust guest to show as blue for contrast
# The view command needs to be the last command issued to nglview
view
# NBVAL_SKIP
# Join OEMols into complex
complex = host.CreateCopy()
oechem.OEAddMols( complex, outmol)
print("Host+guest number of atoms %s" % complex.NumAtoms())
# Write out complex PDB file (won't really use it except as a template)
ostream = oechem.oemolostream( os.path.join(datadir, 'complex.pdb'))
oechem.OEWriteMolecule( ostream, complex)
ostream.close()
# Solvate the system using PDBFixer
# Loosely follows https://github.com/oess/openmm_orion/blob/master/ComplexPrepCubes/utils.py
fixer = PDBFixer( os.path.join(datadir, 'complex.pdb'))
# Convert between OpenEye and OpenMM Topology
omm_top, omm_pos = oeommutils.oemol_to_openmmTop(complex)
# Do it a second time to create a topology we can destroy
fixer_top, fixer_pos = oeommutils.oemol_to_openmmTop(complex)
chain_names = []
for chain in omm_top.chains():
chain_names.append(chain.id)
# Use correct topology, positions
#fixer.topology = copy.deepcopy(omm_top)
fixer.topology = fixer_top
fixer.positions = fixer_pos
# Solvate in 20 mM NaCl and water
fixer.addSolvent(padding=unit.Quantity( 1.0, unit.nanometers), ionicStrength=unit.Quantity( 20, unit.millimolar))
print("Number of atoms after applying PDBFixer: %s" % fixer.topology.getNumAtoms())
# The OpenMM topology produced by the solvation fixer has missing bond
# orders and aromaticity. So our next job is to update our existing OpenMM Topology by copying
# in just the water molecules and ions
# Atom dictionary between the the PDBfixer topology and the water_ion topology
fixer_atom_to_wat_ion_atom = {}
# Loop over new topology and copy water molecules and ions into pre-existing topology
for chain in fixer.topology.chains():
if chain.id not in chain_names:
n_chain = omm_top.addChain(chain.id)
for res in chain.residues():
n_res = omm_top.addResidue(res.name, n_chain)
for at in res.atoms():
n_at = omm_top.addAtom(at.name, at.element, n_res)
fixer_atom_to_wat_ion_atom[at] = n_at
# Copy over any bonds needed
for bond in fixer.topology.bonds():
at0 = bond[0]
at1 = bond[1]
try:
omm_top.addBond(fixer_atom_to_wat_ion_atom[at0],
fixer_atom_to_wat_ion_atom[at1], type=None, order=1)
except:
pass
# Build new position array
omm_pos = omm_pos + fixer.positions[len(omm_pos):]
# Write file of solvated system for visualization purposes
PDBFile.writeFile(omm_top, omm_pos, open(os.path.join(datadir, 'complex_solvated.pdb'), 'w'))
# NBVAL_SKIP
# Keep a list of OEMols of our components
oemols = []
# Build ions from SMILES strings
smiles = ['[Na+]', '[Cl-]']
for smi in smiles:
mol = oechem.OEMol()
oechem.OESmilesToMol(mol, smi)
# Make sure we have partial charges assigned for these (monatomic, so equal to formal charge)
for atom in mol.GetAtoms():
atom.SetPartialCharge(atom.GetFormalCharge())
oemols.append(mol)
# Build water reference molecule
mol = oechem.OEMol()
oechem.OESmilesToMol(mol, 'O')
oechem.OEAddExplicitHydrogens(mol)
oechem.OETriposAtomNames(mol)
oemols.append(mol)
# Add oemols of host and guest
oemols.append(host)
oemols.append(outmol)
# NBVAL_SKIP
# Load force fields for small molecules (plus default ions), water, and (temporarily) hydrogen bonds.
# TODO add HBonds constraint through createSystem when openforcefield#32 is implemented, alleviating need for constraints here
ff = ForceField('test_forcefields/smirnoff99Frosst.offxml',
'test_forcefields/hbonds.offxml',
'test_forcefields/tip3p.offxml')
# Set up system
# This draws to some extent on Andrea Rizzi's code at https://github.com/MobleyLab/SMIRNOFF_paper_code/blob/master/scripts/create_input_files.py
system = ff.createSystem(fixer.topology, oemols, nonbondedMethod = PME, nonbondedCutoff=1.1*unit.nanometer, ewaldErrorTolerance=1e-4) #, constraints=smirnoff.HBonds)
# TODO add HBonds constraints here when openforcefield#32 is implemented.
# Fix switching function.
# TODO remove this when openforcefield#31 is fixed
for force in system.getForces():
if isinstance(force, openmm.NonbondedForce):
force.setUseSwitchingFunction(True)
force.setSwitchingDistance(1.0*unit.nanometer)
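# Illustrative sanity check (not part of the original notebook): sum the per-particle
# charges in the NonbondedForce to confirm the parameterized, solvated system is
# (close to) net-neutral after the counterions were added.
total_charge = 0.0 * unit.elementary_charge
for force in system.getForces():
    if isinstance(force, openmm.NonbondedForce):
        for particle_index in range(force.getNumParticles()):
            charge, sigma, epsilon = force.getParticleParameters(particle_index)
            total_charge += charge
print("Total charge of parameterized system:", total_charge)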
# NBVAL_SKIP
# Even though we're just going to minimize, we still have to set up an integrator, since a Simulation needs one
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
# Prep the Simulation using the parameterized system, the integrator, and the topology
simulation = app.Simulation(fixer.topology, system, integrator)
# Copy in the positions
simulation.context.setPositions( fixer.positions)
# Get initial state and energy; print
state = simulation.context.getState(getEnergy = True)
energy = state.getPotentialEnergy() / unit.kilocalories_per_mole
print("Energy before minimization (kcal/mol): %.2g" % energy)
# Minimize, get final state and energy and print
simulation.minimizeEnergy()
state = simulation.context.getState(getEnergy=True, getPositions=True)
energy = state.getPotentialEnergy() / unit.kilocalories_per_mole
print("Energy after minimization (kcal/mol): %.2g" % energy)
newpositions = state.getPositions()
# NBVAL_SKIP
# Set up NetCDF reporter for storing trajectory; prep for Langevin dynamics
from mdtraj.reporters import NetCDFReporter
integrator = openmm.LangevinIntegrator(300*unit.kelvin, 1./unit.picosecond, 2.*unit.femtoseconds)
# Prep Simulation
simulation = app.Simulation(fixer.topology, system, integrator)
# Copy in minimized positions
simulation.context.setPositions(newpositions)
# Initialize velocities to correct temperature
simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
# Set up to write trajectory file to NetCDF file in data directory every 100 frames
netcdf_reporter = NetCDFReporter(os.path.join(datadir, 'trajectory.nc'), 100) #Store every 100 frames
# Initialize reporters, including a CSV file to store certain stats every 100 frames
simulation.reporters.append(netcdf_reporter)
simulation.reporters.append(app.StateDataReporter(os.path.join(datadir, 'data.csv'), 100, step=True, potentialEnergy=True, temperature=True, density=True))
# Run the simulation and print start info; store timing
print("Starting simulation")
start = time.time()
simulation.step(1000) #1000 steps of dynamics
end = time.time()
# Print elapsed time info, finalize trajectory file
print("Elapsed time %.2f seconds" % (end-start))
netcdf_reporter.close()
print("Done!")
# NBVAL_SKIP
# Load stored trajectory using MDTraj; the trajectory doesn't contain chemistry info so we also load a PDB
traj= mdtraj.load(os.path.join(datadir, 'trajectory.nc'), top=os.path.join(datadir, 'complex_solvated.pdb'))
#Recenter/impose periodicity to the system
anchor = traj.top.guess_anchor_molecules()[0]
imgd = traj.image_molecules(anchor_molecules=[anchor])
traj.center_coordinates()
# View the trajectory
view = nglview.show_mdtraj(traj)
# I haven't totally figured out nglview's selection language for our purposes here, so I'm just showing two residues
# which seems (in this case) to include the host and guest plus an ion (?).
view.add_licorice('1-2')
view
# NBVAL_SKIP
# Save centered trajectory for viewing elsewhere
traj.save_netcdf(os.path.join(datadir, 'trajectory_centered.nc'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuration for your run
Step2: Quickly draw your guest and make sure it's what you intended
Step3: Get host file and prep it for docking
Step4: Prep host file for docking
Step5: Generate 3D structure of our guest and dock it
Step6: Visualize in 3D to make sure we placed the guest into the binding site
Step7: Solvate complex
Step8: Apply SMIRNOFF to set up the system for simulation with OpenMM
Step9: Load our force field and parameterize the system
Step10: Minimize and (very briefly) simulate our system
Step11: Run an MD simulation of a few steps, storing a trajectory for visualization
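In addition to the 3D visualization, a quick numeric sanity check on the docked pose can be useful. The short sketch below is not part of the original notebook; it assumes the host.mol2 and docked_guest.mol2 files written earlier and simply compares the host and guest centroids with MDTraj (a reasonably docked guest should sit within a few tenths of a nanometer of the host's center).
import mdtraj
import numpy as np
host = mdtraj.load('datafiles/host.mol2')
guest = mdtraj.load('datafiles/docked_guest.mol2')
# MDTraj coordinates are stored in nanometers; take the centroid of the single frame
host_centroid = host.xyz[0].mean(axis=0)
guest_centroid = guest.xyz[0].mean(axis=0)
print("Host-guest centroid distance: %.2f nm" % np.linalg.norm(host_centroid - guest_centroid))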
|
11,747
|
<ASSISTANT_TASK:>
Python Code:
%sosdict
%sos a = 1
%sosdict
d = %sosdict
d.keys()
d = %sosdict a
d.keys()
%sosdict --keys
%sosdict --reset
%sosdict
%sosdict --keys --all
%sos a=10
%sos "a + 100 = ${{a+100}}"
%sos_options sigil='` `'
%sos "a + 100 = `1+100`"
%sos b=['file1.txt', 'file2.txt']
%sos "`b!r,`"
%sos_options sigil='${ }'
%sos name = 'Bob Kenny'
%sos "My name is ${name}"
%%sos
run:
echo "something"
%%sos --rep 10
resource = '~/resource'
ref_genome ="${resource}/hg19"
parameter: rep = 5
%sos rep
%sos a = 20
%sos b = "a**2 = `a**2`"
%sosget a b
a
b
b = 'something else'
%sosput b
%sosdict b
%sosset -v 3
%sosset
%sos_options sigil='${ }'
%sosset -v1
%%sos
resource = '~/resources'
ref_genome = '${resource}/hg19'
%sos ref_genome
%%sos --rep 3
resource = '~/resources'
ref_genome = "${resource}/hg19"
parameter: rep = 5
%sos ref_genome
%sos rep
%%sos
[1]
print(rep)
seq = range(rep)
input: for_each='seq'
python:
import time
print('sleep {} seconds.'.format(_seq))
time.sleep(_seq)
!cat .sos/default_1_0.py
%%sos
[1]
print(rep)
seq = range(rep)
input: for_each='seq'
python:
import time
print('sleep ${_seq} seconds.')
time.sleep(${_seq})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The dictionary is empty because we have not assigned anything to it. Let us run a sos statement
Step2: and you can see the sos dictionary contains one item. There are other usages of the %sosdict magic, you can get the dictionary by assigning the dictionary to a variable
Step3: If you are interested in only a subset of variables, you can list them after %sosdict
Step4: You can get the keys of the dictionary easier using
Step5: If after a while you would like to reset the dictionary and run a SoS script from fresh, you can reset the dictionary using option --reset
Step6: The SoS dictionary actually contains other items such as all the SoS actions and functions. If you would like to see all of them, use option --all. For example,
Step7: In summary, the %sosdict magic accepts
Step8: Here we use SoS string interpolation to evaluate an expression a+100 and return its string representation. The sigil is supposed to be ${ } but it has to be used as ${{ }} because iPython has its own interpolation system that tries to evaluate anything in { }. This can be annoying but you can try to change the SoS default sigil using a %sos_options magic
Step9: Note that iPython will leave { } alone if it does not understand the content inside, so you are usually ok if the variable is not recognizable by iPython
Step10: If you would like to execute multi-line SoS statements or scripts, you will need to use the magic in cell mode (with %% prefix), for example,
Step11: runs a shell script within iPython. Similarly, you can run arbitrary shell, R, perl, python code in ipython/SoS, with string interpolation. Note that ipython already has a magic called %%script that allows you to execute scripts in a cell, while SoS provides a lot more features.
Step12: Because rep is defined as a command line parameter, the command line option --rep 10 overrides its default value 5.
Step13: Magic sosset
Step14: You can run the magic %sosset without option to get a list of acceptable options.
Step15: A complete example
Step16: The script currently looks like
Step17: The statements are executed and you can check the result using
Step18: resource is not interpolated because the expression is quoted using single quotes. To use SoS string interpolation, we will need to quote the string in double quotes
Step19: Now, let us continue, copy/paste or select the next step, run sospaste and .... a big block of errors!
Step20: Here 10 is printed so rep is valid. This means the rep we set last time is available and correct, then what might be the problem? Let us see what is saved in .sos/default_1.py using a bit magic of ipython
Step21: The error seems to be obvious, we need to use ${_rep} for the value to be passed through string interpolation. Let us make some changes to the script and run
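As a small illustration of the %sosdict points above (hypothetical snippet, not from the original notes; it assumes the magic returns an ordinary Python dict, as the earlier cells suggest), the captured dictionary can be post-processed with plain Python:
d = %sosdict
# keep only simple scalar values for a quick overview of the SoS workspace
scalars = {k: v for k, v in d.items() if isinstance(v, (int, float, str))}
print(scalars)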
|
11,748
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
import csv
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt
signnames = []
with open("signnames.csv", 'r') as f:
next(f)
reader = csv.reader(f)
signnames = list(reader)
n_classes = len(signnames)
training_file = "./train.p"
testing_file = "./test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
from sklearn import cross_validation
X_train, X_test = [], []
y_train, y_test = [], test['labels']
for i, img in enumerate(train['features']):
img = cv2.resize(img,(48, 48), interpolation = cv2.INTER_CUBIC)
X_train.append(img)
y_train.append(train['labels'][i])
# Adaptive Histogram (CLAHE)
imgLab = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
l, a, b = cv2.split(imgLab)
l = clahe.apply(l)
imgLab = cv2.merge((l, a, b))
imgLab = cv2.cvtColor(imgLab, cv2.COLOR_Lab2RGB)
X_train.append(imgLab)
y_train.append(train['labels'][i])
# Rotate -15
M = cv2.getRotationMatrix2D((24, 24), -15.0, 1)
imgL = cv2.warpAffine(img, M, (48, 48))
X_train.append(imgL)
y_train.append(train['labels'][i])
# Rotate 15
M = cv2.getRotationMatrix2D((24, 24), 15.0, 1)
imgR = cv2.warpAffine(img, M, (48, 48))
X_train.append(imgR)
y_train.append(train['labels'][i])
for img in test['features']:
X_test.append(cv2.resize(img,(48, 48), interpolation = cv2.INTER_CUBIC))
X_train, X_validation, y_train, y_validation = cross_validation.train_test_split(X_train, y_train, test_size=0.2, random_state=7)
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
n_train = len(X_train)
n_test = len(X_test)
image_shape = X_train[0].shape
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
print("Number of X_train = ", len(X_train))
print("Number of X_validation = ", len(X_validation))
print("Number of y_train = ", len(y_train))
print("Number of y_validation = ", len(y_validation))
import random
# Visualizations will be shown in the notebook.
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[index], signnames[y_train[index]][1])
import tensorflow as tf
from tensorflow.contrib.layers import flatten
EPOCHS = 15
BATCH_SIZE = 128
def ConvNet(x):
mu = 0
sigma = 0.1
# Layer 1: Convolutional. Input = 48x48x3. Output = 42x42x100.
c1_W = tf.Variable(tf.truncated_normal([7, 7, 3, 100], mean=mu, stddev=sigma))
c1_b = tf.Variable(tf.zeros(100))
c1 = tf.nn.conv2d(x, c1_W, strides=[1, 1, 1, 1], padding='VALID')
c1 = tf.nn.bias_add(c1, c1_b)
c1 = tf.nn.relu(c1)
# Layer 2: Max Pooling. Input = 42x42x100. Output = 21x21x100.
s2 = tf.nn.max_pool(c1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Layer 3: Convolutional. Input = 21x21x100. Output = 18x18x150.
c3_W = tf.Variable(tf.truncated_normal([4, 4, 100, 150], mean=mu, stddev=sigma))
c3_b = tf.Variable(tf.zeros(150))
c3 = tf.nn.conv2d(s2, c3_W, strides=[1, 1, 1, 1], padding='VALID')
c3 = tf.nn.bias_add(c3, c3_b)
c3 = tf.nn.relu(c3)
# Layer 4: Max Pooling. Input = 18x18x150. Output = 9x9x150
s4 = tf.nn.max_pool(c3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Layer 5: Convolutional. Input = 9x9x150. Output = 6x6x250.
c5_W = tf.Variable(tf.truncated_normal([4, 4, 150, 250], mean=mu, stddev=sigma))
c5_b = tf.Variable(tf.zeros(250))
c5 = tf.nn.conv2d(s4, c5_W, strides=[1, 1, 1, 1], padding='VALID')
c5 = tf.nn.bias_add(c5, c5_b)
c5 = tf.nn.relu(c5)
# Layer 6: Max Pooling. Input = 6x6x250. Output = 3x3x250.
s6 = tf.nn.max_pool(c5, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Layer 6: Flatten. Input = 3x3x250. Output = 2250
s6 = flatten(s6)
# Layer 7: Fully Connected. Input = 2250. Output = 300.
fc7_W = tf.Variable(tf.truncated_normal([2250, 300], mean=mu, stddev=sigma))
fc7_b = tf.Variable(tf.zeros(300))
fc7 = tf.add(tf.matmul(s6, fc7_W), fc7_b)
fc7 = tf.nn.relu(fc7)
# Layer 8: Fully Connected. Input = 300. Output = 43.
fc8_W = tf.Variable(tf.truncated_normal([300, 43], mean=mu, stddev=sigma))
fc8_b = tf.Variable(tf.zeros(43))
fc8 = tf.add(tf.matmul(fc7, fc8_W), fc8_b)
return fc8
x = tf.placeholder(tf.float32, (None, 48, 48, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
rate = 0.001
logits = ConvNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
validation_accuracy = evaluate(X_validation, y_validation)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
try:
saver
except NameError:
saver = tf.train.Saver()
saver.save(sess, 'convnet')
print("Model saved")
with tf.Session() as sess:
loader = tf.train.import_meta_graph("convnet.meta")
loader.restore(sess, tf.train.latest_checkpoint('./'))
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
from PIL import Image
# Visualizations will be shown in the notebook.
%matplotlib inline
new_images = []
new_labels = np.array([4, 17, 26, 28, 14])
fig = plt.figure()
for i in range(1, 6):
subplot = fig.add_subplot(2,3,i)
img = cv2.imread("./dataset/{}.png".format(i))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img = cv2.resize(img,(48, 48), interpolation = cv2.INTER_CUBIC)
subplot.set_title(signnames[new_labels[i-1]][1],fontsize=8)
subplot.imshow(img)
new_images.append(img)
with tf.Session() as sess:
loader = tf.train.import_meta_graph("convnet.meta")
loader.restore(sess, tf.train.latest_checkpoint('./'))
new_pics_classes = sess.run(logits, feed_dict={x: new_images})
test_accuracy = evaluate(new_images, new_labels)
print("Test Accuracy = {:.3f}".format(test_accuracy))
top3 = sess.run(tf.nn.top_k(new_pics_classes, k=3, sorted=True))
for i in range(len(top3[0])):
labels = list(map(lambda x: signnames[x][1], top3[1][i]))
print("Image {} predicted labels: {} with probabilities: {}".format(i+1, labels, top3[0][i]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preprocess Data
Step2: Step 1
Step3: Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include
Step4: Step 2
Step5: Features and Labels
Step6: Training Pipeline
Step7: Model Evaluation
Step8: Model Training
Step9: Model Evaluation
Step10: Question 1
Step11: Question 6
|
11,749
|
<ASSISTANT_TASK:>
Python Code:
import SimpleITK as sitk
# Utility method that either downloads data from the MIDAS repository or
# if already downloaded returns the file name for reading from disk (cached data).
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, fixed
from IPython.display import clear_output
# Callback invoked by the interact ipython method for scrolling through the image stacks of
# the two images (moving and fixed).
def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):
# Create a figure with two subplots and the specified size.
plt.subplots(1,2,figsize=(10,8))
# Draw the fixed image in the first subplot.
plt.subplot(1,2,1)
plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('fixed image')
plt.axis('off')
# Draw the moving image in the second subplot.
plt.subplot(1,2,2)
plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r);
plt.title('moving image')
plt.axis('off')
plt.show()
# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending
# of an image stack of two images that occupy the same physical space.
def display_images_with_alpha(image_z, alpha, fixed, moving):
img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z]
plt.imshow(sitk.GetArrayFromImage(img),cmap=plt.cm.Greys_r);
plt.axis('off')
plt.show()
# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
global metric_values, multires_iterations
metric_values = []
multires_iterations = []
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
global metric_values, multires_iterations
del metric_values
del multires_iterations
# Close figure, we don't want to get a duplicate of the plot latter on.
plt.close()
# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
global metric_values, multires_iterations
metric_values.append(registration_method.GetMetricValue())
# Clear the output area (wait=True, to reduce flickering), and plot current data
clear_output(wait=True)
# Plot the similarity metric values
plt.plot(metric_values, 'r')
plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
plt.xlabel('Iteration Number',fontsize=12)
plt.ylabel('Metric Value',fontsize=12)
plt.show()
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
global metric_values, multires_iterations
multires_iterations.append(len(metric_values))
fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32)
moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32)
interact(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayFromImage(moving_image)));
initial_transform = sitk.CenteredTransformInitializer(fixed_image,
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
moving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue())
interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
registration_method = sitk.ImageRegistrationMethod()
# Similarity metric settings.
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
# Optimizer settings.
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Setup for the multi-resolution framework.
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
# Don't optimize in-place, we would possibly like to run this cell multiple times.
registration_method.SetInitialTransform(initial_transform, inPlace=False)
# Connect all of the observers so that we can perform plotting during registration.
registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))
final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32),
sitk.Cast(moving_image, sitk.sitkFloat32))
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue())
interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
sitk.WriteImage(moving_resampled, os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha'))
sitk.WriteTransform(final_transform, os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utility functions
Step2: Read images
Step3: Initial Alignment
Step4: Registration
Step5: Post registration analysis
Step6: Now visually inspect the results.
Step7: If we are satisfied with the results, save them to file.
|
11,750
|
<ASSISTANT_TASK:>
Python Code:
from rmgpy.data.rmg import RMGDatabase
from rmgpy import settings
from rmgpy.species import Species
from rmgpy.molecule import Molecule
from rmgpy.molecule import Group
from rmgpy.rmg.main import RMG
from rmgpy.cnn_framework.predictor import Predictor
from IPython.display import display
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import logging
logging.disable(logging.CRITICAL)
from bokeh.charts import Histogram
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
output_notebook()
host = 'mongodb://user:user@rmg.mit.edu/admin'
port = 27018
client = MongoClient(host, port)
db = getattr(client, 'sdata134k')
db.collection_names()
def get_data(db, collection_name):
collection = getattr(db, collection_name)
db_cursor = collection.find()
# collect data
print('reading data...')
db_mols = []
for db_mol in db_cursor:
db_mols.append(db_mol)
print('done')
return db_mols
model = '/home/mjliu/Code/RMG-Py/examples/cnn/evaluate/test_model'
h298_predictor = Predictor()
predictor_input = os.path.join(model,
'predictor_input.py')
h298_predictor.load_input(predictor_input)
param_path = os.path.join(model,
'saved_model',
'full_train.h5')
h298_predictor.load_parameters(param_path)
# fetch testing dataset
collection_name = 'large_linear_polycyclic_table'
db_mols = get_data(db, collection_name)
db_mols.extend(get_data(db, 'large_fused_polycyclic_table'))
print len(db_mols)
filterList = [
Group().fromAdjacencyList("""
1 R u0 p0 c0 {2,[S,D,T]} {9,[S,D,T]}
2 R u0 p0 c0 {1,[S,D,T]} {3,[S,D,T]}
3 R u0 p0 c0 {2,[S,D,T]} {4,[S,D,T]}
4 R u0 p0 c0 {3,[S,D,T]} {5,[S,D,T]}
5 R u0 p0 c0 {4,[S,D,T]} {6,[S,D,T]}
6 R u0 p0 c0 {5,[S,D,T]} {7,[S,D,T]}
7 R u0 p0 c0 {6,[S,D,T]} {8,[S,D,T]}
8 R u0 p0 c0 {7,[S,D,T]} {9,[S,D,T]}
9 R u0 p0 c0 {1,[S,D,T]} {8,[S,D,T]}
"""),
Group().fromAdjacencyList("""
1 R u0 p0 c0 {2,S} {5,S}
2 R u0 p0 c0 {1,S} {3,D}
3 R u0 p0 c0 {2,D} {4,S}
4 R u0 p0 c0 {3,S} {5,S}
5 R u0 p0 c0 {1,S} {4,S} {6,S} {9,S}
6 R u0 p0 c0 {5,S} {7,S}
7 R u0 p0 c0 {6,S} {8,D}
8 R u0 p0 c0 {7,D} {9,S}
9 R u0 p0 c0 {5,S} {8,S}
"""),
]
test_size = 0
R = 1.987 # unit: cal/mol/K
validation_test_dict = {} # key: spec.label, value: (thermo_heuristic, thermo_qm)
spec_labels = []
spec_dict = {}
H298s_qm = []
Cp298s_qm = []
H298s_cnn = []
Cp298s_cnn = []
for db_mol in db_mols:
smiles_in = str(db_mol["SMILES_input"])
spec_in = Species().fromSMILES(smiles_in)
for grp in filterList:
if spec_in.molecule[0].isSubgraphIsomorphic(grp):
break
else:
spec_labels.append(smiles_in)
# qm: just free energy but not free energy of formation
G298_qm = float(db_mol["G298"])*627.51 # unit: kcal/mol
H298_qm = float(db_mol["Hf298(kcal/mol)"]) # unit: kcal/mol
Cv298_qm = float(db_mol["Cv298"]) # unit: cal/mol/K
Cp298_qm = Cv298_qm + R # unit: cal/mol/K
H298s_qm.append(H298_qm)
# cnn
H298_cnn = h298_predictor.predict(spec_in.molecule[0]) # unit: kcal/mol
H298s_cnn.append(H298_cnn)
spec_dict[smiles_in] = spec_in
# create pandas dataframe
validation_test_df = pd.DataFrame(index=spec_labels)
validation_test_df['H298_cnn(kcal/mol)'] = pd.Series(H298s_cnn, index=validation_test_df.index)
validation_test_df['H298_qm(kcal/mol)'] = pd.Series(H298s_qm, index=validation_test_df.index)
heuristic_qm_diff = abs(validation_test_df['H298_cnn(kcal/mol)']-validation_test_df['H298_qm(kcal/mol)'])
validation_test_df['H298_cnn_qm_diff(kcal/mol)'] = pd.Series(heuristic_qm_diff, index=validation_test_df.index)
display(validation_test_df.head())
print "Validation test dataframe has {0} tricyclics.".format(len(spec_labels))
validation_test_df['H298_cnn_qm_diff(kcal/mol)'].describe()
diff20_df = validation_test_df[(validation_test_df['H298_cnn_qm_diff(kcal/mol)'] > 15)
& (validation_test_df['H298_cnn_qm_diff(kcal/mol)'] <= 500)]
len(diff20_df)
print len(diff20_df)
for smiles in diff20_df.index:
print "***********cnn = {0}************".format(diff20_df[diff20_df.index==smiles]['H298_cnn(kcal/mol)'])
print "***********qm = {0}************".format(diff20_df[diff20_df.index==smiles]['H298_qm(kcal/mol)'])
spe = spec_dict[smiles]
display(spe)
p = figure(plot_width=500, plot_height=400)
# plot_df = validation_test_df[validation_test_df['H298_cnn_qm_diff(kcal/mol)'] < 10]
plot_df = validation_test_df
# add a square renderer with a size, color, and alpha
p.circle(plot_df['H298_cnn(kcal/mol)'], plot_df['H298_qm(kcal/mol)'],
size=5, color="green", alpha=0.5)
x = np.array([-50, 200])
y = x
p.line(x=x, y=y, line_width=2, color='#636363')
p.line(x=x, y=y+10, line_width=2,line_dash="dashed", color='#bdbdbd')
p.line(x=x, y=y-10, line_width=2, line_dash="dashed", color='#bdbdbd')
p.xaxis.axis_label = "H298 CNN (kcal/mol)"
p.yaxis.axis_label = "H298 Quantum (kcal/mol)"
p.xaxis.axis_label_text_font_style = "normal"
p.yaxis.axis_label_text_font_style = "normal"
p.xaxis.axis_label_text_font_size = "16pt"
p.yaxis.axis_label_text_font_size = "16pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
show(p)
len(plot_df.index)
from bokeh.models import Range1d
hist = Histogram(validation_test_df,
values='H298_cnn_qm_diff(kcal/mol)', xlabel='H298 Prediction Error (kcal/mol)',
ylabel='Number of Testing Molecules',
bins=50,\
plot_width=500, plot_height=300)
# hist.y_range = Range1d(0, 1640)
hist.x_range = Range1d(0, 20)
show(hist)
with open('validation_test_sdata134k_2903_pyPoly_dbPoly.csv', 'w') as fout:
validation_test_df.to_csv(fout)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Validation Test
Step4: Create pandas dataframe for easy data validation
Step5: categorize error sources
Step6: Parity Plot
Step7: Histogram of abs(heuristic-qm)
|
11,751
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
#!pip install --upgrade tensorflow==2.5
import tensorflow as tf
import numpy as np
import IPython.display as display
print("TensorFlow version: ",tf.version.VERSION)
# TODO 1a
# The following functions can be used to convert a value to a type compatible
# with tf.Example.
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return # TODO: Complete the code here.
def _float_feature(value):
"""Returns a float_list from a float / double."""
return # TODO: Complete the code here.
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return # TODO: Complete the code here.
print(_bytes_feature(b'test_string'))
print(_bytes_feature(u'test_bytes'.encode('utf-8')))
print(_float_feature(np.exp(1)))
print(_int64_feature(True))
print(_int64_feature(1))
feature = _float_feature(np.exp(1))
# TODO 1b
# TODO: Complete the code here
# The number of observations in the dataset.
n_observations = int(1e4)
# Boolean feature, encoded as False or True.
feature0 = np.random.choice([False, True], n_observations)
# Integer feature, random from 0 to 4.
feature1 = np.random.randint(0, 5, n_observations)
# String feature
strings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])
feature2 = strings[feature1]
# Float feature, from a standard normal distribution
feature3 = np.random.randn(n_observations)
def serialize_example(feature0, feature1, feature2, feature3):
"""Creates a tf.Example message ready to be written to a file."""
# Create a dictionary mapping the feature name to the tf.Example-compatible
# data type.
feature = {
'feature0': _int64_feature(feature0),
'feature1': _int64_feature(feature1),
'feature2': _bytes_feature(feature2),
'feature3': _float_feature(feature3),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
# This is an example observation from the dataset.
example_observation = []
serialized_example = serialize_example(False, 4, b'goat', 0.9876)
serialized_example
# TODO 1c
example_proto = # TODO: Complete the code here
example_proto
tf.data.Dataset.from_tensor_slices(feature1)
features_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))
features_dataset
# Use `take(1)` to only pull one example from the dataset.
for f0,f1,f2,f3 in features_dataset.take(1):
print(f0)
print(f1)
print(f2)
print(f3)
# TODO 2a
# TODO: Your code goes here
tf_serialize_example(f0,f1,f2,f3)
# TODO 2b
serialized_features_dataset = #TODO : Complete the code here.
serialized_features_dataset
def generator():
for features in features_dataset:
yield serialize_example(*features)
serialized_features_dataset = tf.data.Dataset.from_generator(
generator, output_types=tf.string, output_shapes=())
serialized_features_dataset
filename = 'test.tfrecord'
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(serialized_features_dataset)
# TODO 2c
# TODO: Your code goes here
for raw_record in raw_dataset.take(10):
print(repr(raw_record))
# Create a description of the features.
feature_description = {
'feature0': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'feature2': tf.io.FixedLenFeature([], tf.string, default_value=''),
'feature3': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
}
def _parse_function(example_proto):
# Parse the input `tf.Example` proto using the dictionary above.
return tf.io.parse_single_example(example_proto, feature_description)
parsed_dataset = raw_dataset.map(_parse_function)
parsed_dataset
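# Illustrative alternative (not part of the original tutorial flow): rather than parsing one
# example at a time, a whole batch can be parsed in a single call with tf.io.parse_example:
#     parsed_batches = raw_dataset.batch(32).map(
#         lambda protos: tf.io.parse_example(protos, feature_description))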
for parsed_record in parsed_dataset.take(10):
print(repr(parsed_record))
# Write the `tf.Example` observations to the file.
with tf.io.TFRecordWriter(filename) as writer:
for i in range(n_observations):
example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])
writer.write(example)
!du -sh {filename}
filenames = [filename]
raw_dataset = tf.data.TFRecordDataset(filenames)
raw_dataset
for raw_record in raw_dataset.take(1):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
print(example)
cat_in_snow = tf.keras.utils.get_file('320px-Felis_catus-cat_on_snow.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg')
williamsburg_bridge = tf.keras.utils.get_file('194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg')
display.display(display.Image(filename=cat_in_snow))
display.display(display.HTML('Image cc-by: <a "href=https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg">Von.grzanka</a>'))
display.display(display.Image(filename=williamsburg_bridge))
display.display(display.HTML('<a "href=https://commons.wikimedia.org/wiki/File:New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg">From Wikimedia</a>'))
image_labels = {
cat_in_snow : 0,
williamsburg_bridge : 1,
}
# This is an example, just using the cat image.
image_string = open(cat_in_snow, 'rb').read()
label = image_labels[cat_in_snow]
# Create a dictionary with features that may be relevant.
def image_example(image_string, label):
image_shape = tf.image.decode_jpeg(image_string).shape
feature = {
'height': _int64_feature(image_shape[0]),
'width': _int64_feature(image_shape[1]),
'depth': _int64_feature(image_shape[2]),
'label': _int64_feature(label),
'image_raw': _bytes_feature(image_string),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
for line in str(image_example(image_string, label)).split('\n')[:15]:
print(line)
print('...')
# Write the raw image files to `images.tfrecords`.
# First, process the two images into `tf.Example` messages.
# Then, write to a `.tfrecords` file.
record_file = 'images.tfrecords'
with tf.io.TFRecordWriter(record_file) as writer:
for filename, label in image_labels.items():
image_string = open(filename, 'rb').read()
tf_example = image_example(image_string, label)
writer.write(tf_example.SerializeToString())
!du -sh {record_file}
raw_image_dataset = tf.data.TFRecordDataset('images.tfrecords')
# Create a dictionary describing the features.
image_feature_description = {
'height': tf.io.FixedLenFeature([], tf.int64),
'width': tf.io.FixedLenFeature([], tf.int64),
'depth': tf.io.FixedLenFeature([], tf.int64),
'label': tf.io.FixedLenFeature([], tf.int64),
'image_raw': tf.io.FixedLenFeature([], tf.string),
}
def _parse_image_function(example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_single_example(example_proto, image_feature_description)
parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
parsed_image_dataset
for image_features in parsed_image_dataset:
image_raw = image_features['image_raw'].numpy()
display.display(display.Image(data=image_raw))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Please ignore any incompatibility warnings and errors.
Step5: Note
Step6: Lab Task #1b
Step7: Creating a tf.Example message
Step9: Each of these features can be coerced into a tf.Example-compatible type using one of _bytes_feature, _float_feature, _int64_feature. You can then create a tf.Example message from these encoded features
Step10: For example, suppose you have a single observation from the dataset, [False, 4, bytes('goat'), 0.9876]. You can create and print the tf.Example message for this observation using create_message(). Each single observation will be written as a Features message as per the above. Note that the tf.Example message is just a wrapper around the Features message
Step11: Lab Task #1c
Step12: TFRecords format details
Step13: Applied to a tuple of arrays, it returns a dataset of tuples
Step14: Use the tf.data.Dataset.map method to apply a function to each element of a Dataset.
Step15: Lab Task 2b
Step16: And write them to a TFRecord file
Step17: Reading a TFRecord file
Step18: At this point the dataset contains serialized tf.train.Example messages. When iterated over it returns these as scalar string tensors.
Step19: These tensors can be parsed using the function below. Note that the feature_description is necessary here because datasets use graph-execution, and need this description to build their shape and type signature
Step20: Alternatively, use tf.parse example to parse the whole batch at once. Apply this function to each item in the dataset using the tf.data.Dataset.map method
Step21: Use eager execution to display the observations in the dataset. There are 10,000 observations in this dataset, but you will only display the first 10. The data is displayed as a dictionary of features. Each item is a tf.Tensor, and the numpy element of this tensor displays the value of the feature
Step22: Here, the tf.parse_example function unpacks the tf.Example fields into standard tensors.
Step23: Reading a TFRecord file
Step24: Walkthrough
Step25: Write the TFRecord file
Step26: Notice that all of the features are now stored in the tf.Example message. Next, functionalize the code above and write the example messages to a file named images.tfrecords
Step27: Read the TFRecord file
Step28: Recover the images from the TFRecord file
|
11,752
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import geopandas
from shapely.geometry import Polygon
capitals = geopandas.read_file(geopandas.datasets.get_path("naturalearth_cities"))
world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
# Create a subset of the world data that is just the South American continent
south_america = world[world["continent"] == "South America"]
# Create a custom polygon
polygon = Polygon([(0, 0), (0, 90), (180, 90), (180, 0), (0, 0)])
poly_gdf = geopandas.GeoDataFrame([1], geometry=[polygon], crs=world.crs)
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 8))
world.plot(ax=ax1)
poly_gdf.boundary.plot(ax=ax1, color="red")
south_america.boundary.plot(ax=ax2, color="green")
capitals.plot(ax=ax2, color="purple")
ax1.set_title("All Unclipped World Data", fontsize=20)
ax2.set_title("All Unclipped Capital Data", fontsize=20)
ax1.set_axis_off()
ax2.set_axis_off()
plt.show()
world_clipped = world.clip(polygon)
# Plot the clipped data
# The plot below shows the results of the clip function applied to the world
# sphinx_gallery_thumbnail_number = 2
fig, ax = plt.subplots(figsize=(12, 8))
world_clipped.plot(ax=ax, color="purple")
world.boundary.plot(ax=ax)
poly_gdf.boundary.plot(ax=ax, color="red")
ax.set_title("World Clipped", fontsize=20)
ax.set_axis_off()
plt.show()
capitals_clipped = capitals.clip(south_america)
# Plot the clipped data
# The plot below shows the results of the clip function applied to the capital cities
fig, ax = plt.subplots(figsize=(12, 8))
capitals_clipped.plot(ax=ax, color="purple")
south_america.boundary.plot(ax=ax, color="green")
ax.set_title("Capitals Clipped", fontsize=20)
ax.set_axis_off()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get or Create Example Data
Step2: Plot the Unclipped Data
Step3: Clip the Data
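One related clip option worth knowing (illustrative, not used in the example above): keep_geom_type can drop the lower-dimensional slivers that sometimes appear when polygon boundaries are cut exactly at the mask edge.
# keep only geometries of the same type as the input (drops stray points/lines created by the cut)
world_clipped_clean = world.clip(polygon, keep_geom_type=True)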
|
11,753
|
<ASSISTANT_TASK:>
Python Code:
# importing packages for wrangling tasks
import pandas as pd
import numpy as np
import re
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
from geopy.distance import great_circle
# create a function to quickly tabulate a dataframe column
def tab(dfcol):
t = pd.crosstab(index=dfcol, columns="count")
print t
print t/t.sum()
pd.set_option('display.max_columns', None)
# load the individual .csv files as pandas dataframes
dpath = "C:\Users\mworley\Dropbox\capstone\data"
pstats = pd.read_csv(dpath + r'\external\player_data\player_stats.csv')
ros = pd.read_csv(dpath + r'\external\rosters\rosters.csv')
espn = pd.read_csv(dpath + r'\external\espn\espn_tabs.csv')
# merge the two sportsreference files, player stats (pstats) and positions (ros)
sr_players = pd.merge(pstats, ros, how='inner',
on=['Player', 'Team', 'Season'])
# load the team identifying data
teams_seasons = pd.read_csv(dpath + r'\interim\teams_seasons.csv')
team_match = pd.read_csv(dpath + r'\interim\team_match.csv')
team_info = pd.merge(teams_seasons, team_match,
how='inner', on='team_id')
# merge team identifier data with player dataframes
sr_players = pd.merge(sr_players, team_info, how='inner',
left_on=['Team', 'Season'],
right_on=['srname', 'season'])
espn_players = pd.merge(espn, team_info, how='inner',
left_on=['espn_id', 'Season'],
right_on=['espn_id', 'season'])
# keep only columns I need to match players
sr_players = sr_players.loc[:, ['Player', 'srname', 'Season', 'espn_id']]
sr_players.drop_duplicates(inplace=True)
espn_players = espn_players.loc[:, ['Player', 'srname', 'Season', 'espn_id']]
espn_players.drop_duplicates(inplace=True)
# keep only years after 2001 in sportsreference file to match with espn player data
sr_players = sr_players[sr_players['Season'] > 2001]
mrg_players = pd.merge(sr_players, espn_players,
how='outer',
on=['Player', 'srname' , 'Season', 'espn_id'],
indicator=True)
tab(mrg_players['_merge'])
nomatch = mrg_players[mrg_players['_merge'] != "both"].copy()
nomatch.sort_values(['srname', 'Season'], inplace=True)
nomatch.head(10)
process.extractOne('Mo Williams', ['Maurice Williams', 'Deji Ibetayo'])
process.extractOne('Mo Williams', ['Maurice Williams', 'John Williams'])
# create dataframe of non-matched player names, separately for each source
nomatch_sr = nomatch[nomatch._merge == "left_only"].copy()
nomatch_sr.drop('_merge', inplace=True, axis=1)
nomatch_espn = nomatch[nomatch._merge == "right_only"].copy()
nomatch_espn.drop('_merge', inplace=True, axis=1)
# group by team and season, create dictionary of non-matched espn names to use in the function
e = nomatch_espn.groupby(['srname','Season'])['Player']
espn_dict = dict(list(e))
# write the function to selectively match using the player's team and season
plist = []
def match_name_team(row):
try:
p = row['Player']
t = row['srname']
s = row['Season']
l = espn_dict.get((t, s))
n, scr = process.extractOne(p, l)
list = (p, t, s, n, scr)
plist.append(list)
except:
pass
# apply the function to the nonmatched sportsreference player dataframe
nomatch_sr.apply(match_name_team, axis=1)
df = pd.DataFrame(plist, columns=('Player', 'srname', 'Season',
'Player_espn', 'score'))
df.sort_values('score', ascending=False).head(10)
# inspect low-scoring matches
df[df['score'] < 65].sort_values('score', ascending=False)
# everything above 50 looks right, how many names are below 50?
len(df[df.score < 50])
# drop matches scoring below 50
df_c50 = df.loc[df.score > 50].copy()
# combine the perfect matches and fuzzy matches into one dataframe
df_c50.drop('score', inplace=True, axis=1)
match = mrg_players[mrg_players['_merge'] == "both"].copy()
match.drop(['espn_id', '_merge'], inplace=True, axis=1)
match['Player_espn'] = match.Player
player_match = match.append(df_c50, ignore_index=True)
# re-create sportsreference and espn player dataframes
sr_players = pd.merge(pstats, ros, how='inner',
on=['Player', 'Team', 'Season'])
espn = pd.read_csv(dpath + r'\external\espn\espn_tabs.csv')
# merge team identifier data with player dataframes
sr_players = pd.merge(sr_players, team_info, how='inner',
left_on=['Team', 'Season'],
right_on=['srname', 'season'])
espn_players = pd.merge(espn, team_info, how='inner',
left_on=['espn_id', 'Season'],
right_on=['espn_id', 'season'])
# keep only years after 2001 in sportsreference file to match with espn player data
sr_players = sr_players[sr_players['Season'] > 2001]
# merge sportsreference file with the player name matches
mrg1 = pd.merge(sr_players, player_match, how='inner',
on=['Player', 'srname', 'Season'])
mrg2 = pd.merge(mrg1, espn_players, how='outer',
left_on=['Player_espn', 'srname', 'Season'],
right_on=['Player', 'srname', 'season'],
indicator=True)
tab(mrg2._merge)
player_match = pd.read_csv(dpath + r'\interim\player_match.csv')
players = pd.merge(pstats, ros, how='inner',
on=['Player', 'Team', 'Season'])
players = pd.merge(players, player_match, how='outer',
left_on=['Player', 'Season', 'Team'],
right_on=['Player', 'Season', 'srname'])
players.drop('srname', inplace=True, axis=1)
ncols = ['Player_espn', 'GPes', 'MINes', 'PPGes', 'RPGes', 'APGes', 'SPGes',
'BPGes', 'TPGes', 'FGPCTes', 'FTPCTes', '3PTPCTes', 'Teames',
'espn_id', 'Season']
espn.columns = ncols
players = pd.merge(players, espn, how='outer',
left_on=['Player_espn', 'Season'],
right_on=['Player_espn', 'Season'])
players = players.dropna(subset = ['Team'])
players = pd.merge(players, team_info, how='inner',
left_on=['Team', 'Season'],
right_on=['srname', 'season'],
indicator=True)
mask = (players.team_id == 1246) & (players.Season == 2012)
players[mask]
players.loc[mask, ['Player', 'Height']]
# change series from object data type to string
players['Height'] = players['Height'].astype(str)
# create a function that converts string height to total inches
def inches(height):
try:
f = int(height.split('-')[0])
i = int(height.split('-')[1])
return f * 12 + i
except:
return np.nan
players['Heightnum'] = players['Height'].apply(inches)
players.loc[mask, ['Player', 'Height', 'Heightnum']]
def team_minutes(group):
s = group.name
# minutes played data only available after 2001
if s[1] > 2001:
group['tmins'] = group['MINes'].sum()
return group
else:
return group
players = players.groupby(['team_id', 'Season'], as_index=False).apply(team_minutes)
players['pminpct'] = players.MINes / players.tmins
players['phgtmins'] = players.pminpct * players.Heightnum
players.loc[mask, ['Player', 'Heightnum', 'pminpct', 'phgtmins']]
flrmins = players.groupby(['team_id', 'Season'])['phgtmins'].sum().reset_index()
flrmins.sort_values(['Season', 'phgtmins'], ascending=False).head(5)
def get_starters(group):
s = group.name
if s[1] > 2001:
group.sort_values('MINes', ascending=False, inplace=True)
group['starter'] = 'no'
i = group.columns.get_loc('starter')
group.iloc[0:5, i] = 'yes'
return group
else:
return group
players = players.groupby(['team_id', 'Season'], as_index=False).apply(get_starters)
mask = (players.team_id == 1246) & (players.Season == 2012)
players.loc[mask, ['Player', 'MINes', 'starter', 'PTS']]
benscr = players.groupby(['team_id', 'Season', 'starter'])['PTS']
benscr = benscr.sum().unstack('starter').reset_index()
benscr['ptspct_bn'] = benscr.no / (benscr.no + benscr.yes)
benscr['ptspct_st'] = 1 - benscr.ptspct_bn
benscr.drop(['no', 'yes'] , inplace=True, axis=1)
benscr[(benscr.team_id==1246) & (benscr.Season == 2012)]
players.loc[mask, ['Player', 'Class']]
tab(players.Class)
players.Class.fillna('', inplace=True)
players.Class = map(str.upper, players.Class)
expdict = {'SR': '3', 'JR': '2', 'GR': '3',
'SO': '1', 'FR': '0', 'MISSING': ""}
players["exp"] = players.Class.map(expdict)
players["exp"] = pd.to_numeric(players.exp, errors='coerce')
players.loc[mask, ['Player', 'Class', 'exp']]
strtexp = players.groupby(['team_id', 'Season', 'starter'])
strtexp = strtexp['exp'].mean().unstack('starter').reset_index()
strtexp.drop('no' , inplace=True, axis=1)
strtexp.rename(columns={"yes": "strtexp"}, inplace=True)
strtexp.head()
roster_feat = pd.merge(benscr, strtexp, how='outer',
on=['team_id', 'Season'])
roster_feat.head()
# read in kaggle team id file
dpath = r'C:\Users\mworley\Dropbox\capstone\data'
teams = pd.read_csv(dpath + r'\raw\Teams.csv')
#C:\Users\mworley\Dropbox\capstone\data\raw\Teams.csv
print(len(teams))
teams.head(5)
tm_names = pd.read_csv(dpath + r'\raw\TeamSpellings.csv')
tm_names[tm_names.team_id == 1453]
'''
# get names of teams from sports reference
url = "http://www.sports-reference.com/cbb/schools/"
req = requests.get(url)
soup = BeautifulSoup(req.content, 'html.parser')
links = []
for link in soup.find_all('a'):
links.append(str(link.get('href')))
tlinks = links[31:508]
srteams = map(lambda x: x.split('/')[-2], tlinks)
srteams = pd.DataFrame(srteams)
srteams.columns = ['srname']
#srteams.to_csv(dpath + r'\interim\srnames.csv', index=False)
'''
srteams = pd.read_csv(dpath + r'\interim\srteams.csv')
print len(srteams)
print srteams.head()
# adjust kaggle mixed team names to optimally match sports reference tables
srnames = tm_names.copy()
filldash = lambda x: re.sub(r' ', '-', x)
srnames['name_spelling'] = srnames['name_spelling'].apply(filldash)
srnames.rename(columns={"name_spelling": "srname"}, inplace=True)
srnames.drop_duplicates(inplace=True)
srnames.head(6)
merge_sr = pd.merge(srnames, srteams, how='outer',
on='srname',
indicator=True)
tab(merge_sr._merge)
print float(len(merge_sr[merge_sr._merge=='both'])) / 364
match = merge_sr[merge_sr._merge == 'both'].copy()
match.drop('_merge', axis=1, inplace=True)
# get a dataframe of the mixed names limited to non-matched teams
nomatch = pd.merge(srnames, match, how='outer',
on=['team_id'], indicator=True)
nomatch = nomatch[nomatch._merge=='left_only']
teams = merge_sr.loc[merge_sr._merge == 'right_only', 'srname']
nomatch.head(len(nomatch))
# create a function to fuzzy match the nonmatched names
def match_srname(name):
new_name, score = process.extractOne(name, teams)
return new_name, score
# run function on kaggle srnames
names, scores = zip(*nomatch['srname_x'].apply(match_srname))
nomatch['name'], nomatch['score'] = names, scores
nomatch.sort_values(['team_id', 'score'], ascending=False, inplace=True)
nomatch.head(len(nomatch))
nomatch.drop_duplicates(['team_id'], inplace=True)
nomatch = nomatch[nomatch.team_id != 1445]
nomatch.drop(['srname_x', 'srname_y', '_merge', 'score'], axis=1, inplace=True)
nomatch.rename(columns={'name': 'srname'}, inplace=True)
team_match = pd.concat([match, nomatch])
len(team_match)
# import the 3 data files
tgames = pd.read_csv(dpath + r'\interim\tourney_games.csv')
gameloc = pd.read_csv(dpath + r'\raw\TourneyGeog.csv')
teamloc = pd.read_csv(dpath + r'\raw\TeamGeog.csv')
# some operations on the dataframes to enable the merge
tgames.columns = map(str.lower, tgames.columns)
gameloc.drop('daynum', axis=1, inplace=True)
# replace baton rouge longitude which was discovered to be an error
gameloc.loc[gameloc.host == 'baton_rouge', ['lng']] = -91.19
gameloc.rename(columns={'wteam': 'w_team_id', 'lteam': 'l_team_id'},
inplace=True)
tgames = pd.merge(tgames, gameloc, how='inner',
on=['season', 'w_team_id', 'l_team_id'])
tgames[['w_team_id', 'l_team_id', 'host', 'lat', 'lng']].head(5)
tgames.rename(columns={'lat': 'glat', 'lng': 'glng'}, inplace=True)
tgames = pd.merge(tgames, teamloc, how='inner',
left_on='w_team_id', right_on='team_id')
tgames.rename(columns={'lat': 'wlat', 'lng': 'wlng'}, inplace=True)
tgames = pd.merge(tgames, teamloc, how='inner',
left_on='l_team_id', right_on='team_id')
tgames.rename(columns={'lat': 'llat', 'lng': 'llng'}, inplace=True)
tgames.iloc[0:5, -9:]
tgames['gloc'] = list(zip(tgames.glat, tgames.glng))
tgames['wloc'] = list(zip(tgames.wlat, tgames.wlng))
tgames['lloc'] = list(zip(tgames.llat, tgames.llng))
tgames.iloc[0:5, -3:]
xl = []
yl = []
for i in range(len(tgames)):
x = int(great_circle(tgames['gloc'][i], tgames['wloc'][i]).miles)
y = int(great_circle(tgames['gloc'][i], tgames['lloc'][i]).miles)
xl.append(x)
yl.append(y)
tgames['w_dist'] = pd.Series(xl).values
tgames['l_dist'] = pd.Series(yl).values
tgames.ix[0:5, ['season', 'w_team_id', 'l_team_id', 'w_dist', 'l_dist']]
# read in data files
dpath = "C:\Users\mworley\Dropbox\capstone\data"
tgames = pd.read_csv(dpath + r'\raw\TourneyCompactResults.csv')
seeds = pd.read_csv(dpath + r'\raw\TourneySeeds.csv')
# add team seeds to tourney games data frame
seeds['Season/Team'] = [(seas, team) for seas,team in zip(seeds.Season, seeds.Team)]
seeds = seeds.set_index('Season/Team').drop(['Season', 'Team'],axis=1).squeeze().to_dict()
tgames['Wteam_seed'] = [seeds[(year,team)] for year,team in zip(tgames.Season,tgames.Wteam)]
tgames['Lteam_seed'] = [seeds[(year,team)] for year,team in zip(tgames.Season,tgames.Lteam)]
tgames['Wteam_seed'] = tgames['Wteam_seed'].str.replace(r'\D+', '').astype('int')
tgames['Lteam_seed'] = tgames['Lteam_seed'].str.replace(r'\D+', '').astype('int')
tgames.columns = map(str.lower, tgames.columns)
tgames['seedif'] = abs(tgames.wteam_seed - tgames.lteam_seed)
# label each matchup as a potential upset (1) or not (0)
def upset_pot(data):
if data.seedif > 3:
return 1
else:
return 0
# label each matchup as an upset (1) or not (0)
def upset_label(data):
x = data.seedif
if (data.wteam_seed > data.lteam_seed) & (x > 3):
return 1
else:
return 0
# identify potential upsets
# defined as games with seed difference greater than 3
tgames['upsetpot'] = tgames.apply(upset_pot, axis=1)
tgames['upset'] = tgames.apply(upset_label, axis=1)
tgames[['wteam_seed', 'lteam_seed', 'seedif', 'upsetpot', 'upset']].head()
tab(tgames.upsetpot)
tab(tgames.upset)
tab(tgames[tgames.upsetpot==1].upset)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fuzzy matching player names
Step2: After loading the separate sportsreference and espn files, I add some common team identifiers to each dataframe (created in a separate process) to make it easier to match the players.
Step3: To match the players in the two different files, I first used a merge with an outer join and an indicator to examine the matches and non-matches after the merge.
Step4: Here I used the tab function I created earlier to inspect the proportion of names that were perfect matches and linked from this merge.
Step5: This merge left around 8% of names unmatched. I needed to separate the nonmatched names, determine why they didn't match, and figure out a way to link them together.
Step6: The same players are present in both files, but they have variations in the name due to punctuation, spelling, or use of nicknames.
Step7: To match all the names, my first strategy was to use the function out-of-the-box and simply pass it all the nonmatched names, but this didn't work well. Here's an example of why:
Step8: If the function can find a name that is more syntactically similar, it will extract it and miss the correct match. For my purposes, the function might work better if it had fewer options, perhaps a more restrictive list specific to that player (a minimal sketch of this restricted-candidate approach appears after this step list).
Step9: After running the function, I can inspect the matched names to assess the quality of the matches. Here are the names of players with the highest scores, all those below 65.
Step10: These are names with minor differences in spelling or punctuation that are all matched well by the function.
Step11: Below 50 looks like a reasonable cutoff, below which pairs may not actually be the same players. Inspecting a handful of these revealed several players who existed in one file but not the other because they were on the official roster but never acquired any playing time. So they mostly appear to be true nonmatches.
Step12: The result is a table containing each player's name in the sportsreference source and the espn source, even when different names are used.
Step13: Above I can see that 99.4% of the player data is now linked after this merge.
Step14: Here are a few sample rows of these data, taken from the 2011-2012 Kentucky Wildcats.
Step15: These data represent season-long averages for individual players in several game-play statistics tracked in basketball, such as minutes played, points, rebounds, and assists. Because my goal is to use this data to predict outcomes of NCAA tournament games, I need to transform these data into team-level features. Below are some of the obstacles in this task and the solutions I created.
Step16: Now height is numeric and ready for conversion to some team-level feature.
Step17: I'm also interested in looking at how scoring is distributed across different groupings of players, such as by starters vs bench players, or guards vs. forwards.
Step18: Now I can use the starter column to compute some interesting team-level features, such as the percentage of points on the team scored by the bench and the starters.
Step19: Another valuable piece of information in the player data is "Class" which describes the player's year in school from first-year players ("freshmen") to fourth-year players ("seniors"). Teams comprised of more experienced players may have an advantage.
Step20: Similar to the height data, class is encoded in a string format and I need to convert it to numeric before using the data for calculations. There's also some inconsistency in the labels used that needs to be cleaned up.
Step21: The steps above create a numeric experience column ('exp') that describes each player's number of years of college basketball experience.
Step22: Now I can compute some interesting team-level features, such as the average experience level for the starting players
Step23: After computing all of the features I'm interested in, I merge them together to create the processed team-level file with features computed from the roster-level data.
Step24: Matching team names
Step25: The kaggle team file has 364 teams, each with a unique numeric id code and name.
Step26: In many cases the external data I obtained used unique names that did not match these alternative spellings. For example, UC-Berkeley, California, and Cal Golden Bears were different source identifiers for the same team. To resolve this problem, I needed to create a table linking the Kaggle numeric identifier to the unique identifier in each additional data source.
Step27: The sportsreference source has 477 team names. The kaggle file is smaller (364) as it only includes teams who played in at least one NCAA tournament game since 1985. I only need to match the 364 Kaggle team names as this data will be used to predict outcomes of NCAA tournament games.
Step28: The result is a file I can use to attempt a merge with the sportsreference source team names, as the school names are formatted similarly with all lower-case characters and hyphenated word gaps.
Step29: The merge matches nearly all of the teams (361/364). Before continuing I save the matches as a dataframe.
Step30: To isolate the kaggle team ids that did not match, I re-merge the team ids with the match file, and then keep the 'left only' rows from the join. I also save the 'right-only' team names for fuzzy string matching later.
Step31: To match the remaining names, I use the series of sportsreference names in a fuzzy string matching function.
Step32: The only remaining non-match (winston-salem-state) was discovered to be an error as it has never qualified for division 1 basketball. I remove it and keep the best match for the other 2 unique team ids, and add to the teams who matched in the merge.
Step33: Computation of team travel distance
Step34: After importing the first objective is to integrate the game location with the game results.
Step35: This merge produces a file with the two teams, host city, and venue location for all tournament games.
Step36: To compute the distance for each team, I merge in the location for each team. I use two separate merges, one for the winning team and one for the losing team.
Step37: To compute the distance I need each latitude and longitude together in a single tuple.
Step38: Then I use the great_circle function from the geopy.distance package to compute the distance between game location and team location for winning team and losing team.
Step39: Identifying and labeling upsets
Step40: Two conditions need to be met for a game to be coded as an upset. Condition 1 is that the game involves opponents with an absolute seed difference greater than 3. Condition 2 is that the team with the higher numeric seed wins the game.
Step41: 65% of NCAA tournament games qualified as having "upset potential".
Step42: Only 14% of all tournament games resulted in an upset.
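A minimal sketch of the restricted-candidate matching described in Steps 7-8, using the same fuzzywuzzy process.extractOne call as the team-name code above; the player names and the candidate list here are made up purely for illustration:
from fuzzywuzzy import process
# Hypothetical candidate list restricted to a single roster (same team and season),
# so a syntactically closer name from another team cannot "steal" the match.
espn_candidates = ['Michael Kidd-Gilchrist', 'Marquis Teague', 'Doron Lamb']
# The sportsreference spelling we are trying to link
query = 'Mike Kidd-Gilchrist'
best_match, score = process.extractOne(query, espn_candidates)
print(best_match, score)  # expect 'Michael Kidd-Gilchrist' with a high score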
|
11,754
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
dataset = aip.TabularDataset.create(
display_name="Bank Marketing" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
dag = aip.AutoMLTabularTrainingJob(
display_name="bank_" + TIMESTAMP,
optimization_prediction_type="classification",
optimization_objective="minimize-log-loss",
)
print(dag)
model = dag.run(
dataset=dataset,
model_display_name="bank_" + TIMESTAMP,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
disable_early_stopping=False,
target_column=label_column,
)
# Get model resource ID
models = aip.Model.list(filter="display_name=bank_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
! gsutil cat $IMPORT_FILE | head -n 1 > tmp.csv
! gsutil cat $IMPORT_FILE | tail -n 10 >> tmp.csv
! cut -d, -f1-16 tmp.csv > batch.csv
gcs_input_uri = BUCKET_NAME + "/test.csv"
! gsutil cp batch.csv $gcs_input_uri
batch_predict_job = model.batch_predict(
job_display_name="bank_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="csv",
generate_explanation=True,
sync=False,
)
print(batch_predict_job)
batch_predict_job.wait()
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
explanation_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("explanation"):
explanation_results.append(blob.name)
tags = list()
for explanation_result in explanation_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{explanation_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Tutorial
Step13: Quick peek at your data
Step14: Create the Dataset
Step15: Create and run training pipeline
Step16: Run the training pipeline
Step17: Review model evaluation scores
Step18: Send a batch prediction request
Step19: Make the batch explanation request
Step20: Wait for completion of batch prediction job
Step21: Get the explanations
Step22: Cleaning up
|
11,755
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
from os.path import join
import glob
import numpy as np
from joblib import Parallel, delayed
import sys
import json
cwd = os.getcwd()
data_path = join(cwd, '..', 'Data storage')
idx = pd.IndexSlice
file_date = '2018-03-06'
# Load the "autoreload" extension
%load_ext autoreload
# always reload modules marked with "%aimport"
%autoreload 1
# add the 'src' directory as one where we can import modules
src_dir = join(os.getcwd(), os.pardir, 'src')
sys.path.append(src_dir)
%aimport Analysis.index
from Analysis.index import facility_emission_gen, group_facility_data
%aimport Analysis.index
from Analysis.index import facility_co2, adjust_epa_emissions, group_fuel_cats
%aimport Analysis.index
from Analysis.index import extra_emissions_gen, add_datetime, add_quarter
%aimport util.utils
from util.utils import rename_cols, add_facility_location
states = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE",
"FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS",
"KY", "LA", "ME", "MD", "MA", "MI", "MN", "MS",
"MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY",
"NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
path = join(data_path, 'Final emission factors.csv')
ef = pd.read_csv(path, index_col=0)
facility_path = join(data_path, 'Derived data',
'Facility gen fuels and CO2 {}.csv'.format(file_date))
facility_df = pd.read_csv(facility_path)
facility_df['state'] = facility_df.geography.str[-2:]
rename_cols(facility_df)
epa_path = join(data_path, 'Derived data',
'Monthly EPA emissions {}.csv'.format(file_date))
epa_df = pd.read_csv(epa_path)
rename_cols(epa_df)
facility_locations = pd.read_csv(join(data_path, 'Facility labels',
'Facility locations.csv'))
# Add state labels to the EPA facilities
epa_df = add_facility_location(epa_df, facility_locations, labels=['state'])
fuel_cat_folder = join(data_path, 'Fuel categories')
state_cats_path = join(fuel_cat_folder, 'State_facility.json')
with open(state_cats_path, 'r') as f:
state_fuel_cat = json.load(f)
custom_cats_path = join(fuel_cat_folder, 'Custom_results.json')
with open(custom_cats_path, 'r') as f:
custom_fuel_cat = json.load(f)
path = join(data_path, 'Derived data',
'EIA state-level gen fuel CO2 {}.csv'.format(file_date))
eia_totals = pd.read_csv(path, parse_dates=['datetime'])
rename_cols(eia_totals)
eia_totals['state'] = eia_totals.geography.str[-2:]
# Remove fuel categories that are duplicated with other categories
eia_totals = eia_totals.loc[~eia_totals.type.isin(['SPV', 'AOR', 'TSN'])]
index_list = []
gen_list = []
for state in states:
eia_fac_state = facility_df.loc[facility_df.state == state].copy()
eia_totals_state = eia_totals.loc[eia_totals.state == state].copy()
epa_state = epa_df.loc[epa_df.state == state].copy()
co2, gen_fuels_state = facility_emission_gen(eia_facility=eia_fac_state,
epa=epa_state,
state_fuel_cat=state_fuel_cat,
custom_fuel_cat=custom_fuel_cat,
export_state_cats=True,
print_status=False)
extra_co2, extra_gen = extra_emissions_gen(gen_fuels_state,
eia_totals_state, ef)
# Combine facility and extra co2, name the series
co2_monthly = co2.groupby(['year', 'month']).sum()
total_co2 = (co2_monthly.loc[:, 'final co2 (kg)']
+ extra_co2.loc[:, 'elec fuel co2 (kg)']
.groupby(['year', 'month']).sum())
total_co2.name = 'final co2 (kg)'
# Total gen, and the co2 intensity
total_gen = (eia_totals_state
.groupby(['year', 'month'])['generation (mwh)'].sum())
state_index = pd.concat([total_co2, total_gen], axis=1)
state_index['index (g/kwh)'] = (state_index['final co2 (kg)']
/ state_index['generation (mwh)'])
state_index['state'] = state
state_index.set_index('state', append=True, inplace=True)
# Generation by fuel category
gen_category = group_fuel_cats(eia_totals_state, custom_fuel_cat,
fuel_col='type', new_col='fuel category')
keep_cols = ['fuel category', 'generation (mwh)', 'total fuel (mmbtu)',
'elec fuel (mmbtu)', 'all fuel co2 (kg)',
'elec fuel co2 (kg)', 'year', 'month']
gen_category = gen_category[keep_cols]
gen_category['state'] = state
gen_category.set_index(['year', 'month', 'state'], inplace=True)
# Add each df to the list
index_list.append(state_index)
gen_list.append(gen_category)
# Combine lists of dataframes
state_index_all = pd.concat(index_list)
add_quarter(state_index_all)
gen_category_all = pd.concat(gen_list)
add_quarter(gen_category_all)
# output state results to file
index_fn = 'Monthly index states {}.csv'.format(file_date)
gen_fn = 'Monthly generation states {}.csv'.format(file_date)
state_index_all.to_csv(join(data_path, 'final state data', index_fn))
gen_category_all.to_csv(join(data_path, 'final state data', gen_fn))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: EIA facility data and EPA monthly emissions
Step3: JSON files with fuel categories
Step4: EIA total monthly gen and fuel consumption
Step5: Calculate state-level monthly CO₂ intensity and generation by fuel category
|
11,756
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32,(None,real_dim),name='input_real')
inputs_z = tf.placeholder(tf.float32,(None,z_dim),name='input_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('generator',reuse=reuse): # finish this
# Hidden layer
h1 = tf.contrib.layers.fully_connected(z,n_units,activation_fn=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.contrib.layers.fully_connected(h1, out_dim, activation_fn=None)
out = tf.tanh(logits)
return out,logits
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
with tf.variable_scope('discriminator',reuse=reuse): # finish this
# Hidden layer
h1 = tf.contrib.layers.fully_connected(x,n_units,activation_fn=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1,h1)
logits = tf.contrib.layers.fully_connected(h1,1,activation_fn=None)
out = tf.sigmoid(logits)
return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model,g_logits = generator(input_z, input_size)
# g_model is the generator output
# Discriminator network here
d_model_real, d_logits_real = discriminator(input_real)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True)
# Calculate losses
# (standard cross-entropy GAN losses, with one-sided label smoothing on the real labels)
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses (written out as equations after this step list)
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
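The losses implemented in the network-building cell follow the standard GAN cross-entropy formulation with one-sided label smoothing (the smooth = 0.1 hyperparameter); written out with $s$ as the smoothing factor:
$$\mathcal{L}_D = \mathbb{E}_x\big[-(1-s)\log D(x) - s\log(1-D(x))\big] + \mathbb{E}_z\big[-\log(1 - D(G(z)))\big]$$
$$\mathcal{L}_G = \mathbb{E}_z\big[-\log D(G(z))\big]$$
The generator uses the non-saturating objective (real labels on fake logits) rather than minimizing $\log(1-D(G(z)))$ directly, which gives stronger gradients early in training.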
|
11,757
|
<ASSISTANT_TASK:>
Python Code:
reset_start_time(O.just)
stream = O.just({'answer': rand()})
disposable = subs(stream)
sleep(0.5)
disposable = subs(stream) # same answer
# all stream ops work, its a real stream:
disposable = subs(stream.map(lambda x: x.get('answer', 0) * 2))
print('There is a little API difference to RxJS, see Remarks:\n')
rst(O.start)
def f():
log('function called')
return rand()
stream = O.start(func=f)
d = subs(stream)
d = subs(stream)
header("Exceptions are handled correctly (an observable should never except):")
def breaking_f():
return 1 / 0
stream = O.start(func=breaking_f)
d = subs(stream)
d = subs(stream)
# startasync: only in python3 and possibly here(?) http://www.tornadoweb.org/en/stable/concurrent.html#tornado.concurrent.Future
#stream = O.start_async(f)
#d = subs(stream)
rst(O.from_iterable)
def f():
log('function called')
return rand()
# aliases: O.from_, O.from_list
# 1.: From a tuple:
stream = O.from_iterable((1,2,rand()))
d = subs(stream)
# d = subs(stream) # same result
# 2. from a generator
gen = (rand() for j in range(3))
stream = O.from_iterable(gen)
d = subs(stream)
rst(O.from_callback)
# in my words: In the on_next of the subscriber you'll have the original arguments,
# potentially objects, e.g. user original http requests.
# i.e. you could merge those with the result stream of a backend call to
# a webservice or db and send the request.response back to the user then.
def g(f, a, b):
f(a, b)
log('called f')
stream = O.from_callback(lambda a, b, f: g(f, a, b))('fu', 'bar')
d = subs(stream.delay(200))
# d = subs(stream.delay(200)) # does NOT work
rst()
# start a stream of 0, 1, 2, .. after 200 ms, with a delay of 100 ms:
stream = O.timer(200, 100).time_interval()\
.map(lambda x: 'val:%s dt:%s' % (x.value, x.interval))\
.take(3)
d = subs(stream, name='observer1')
# intermix directly with another one
d = subs(stream, name='observer2')
rst(O.repeat)
# repeat is over *values*, not function calls. Use generate or create for function calls!
subs(O.repeat({'rand': time.time()}, 3))
header('do while:')
l = []
def condition(x):
l.append(1)
return True if len(l) < 2 else False
stream = O.just(42).do_while(condition)
d = subs(stream)
rx = O.create
rst(rx)
def f(obs):
# this function is called for every observer
obs.on_next(rand())
obs.on_next(rand())
obs.on_completed()
def cleanup():
log('cleaning up...')
return cleanup
stream = O.create(f).delay(200) # the delay causes the cleanup called before the subs gets the vals
d = subs(stream)
d = subs(stream)
sleep(0.5)
rst(title='Exceptions are handled nicely')
l = []
def excepting_f(obs):
for i in range(3):
l.append(1)
obs.on_next('%s %s (observer hash: %s)' % (i, 1. / (3 - len(l)), hash(obs) ))
obs.on_completed()
stream = O.create(excepting_f)
d = subs(stream)
d = subs(stream)
rst(title='Feature or Bug?')
print('(where are the first two values?)')
l = []
def excepting_f(obs):
for i in range(3):
l.append(1)
obs.on_next('%s %s (observer hash: %s)' % (i, 1. / (3 - len(l)), hash(obs) ))
obs.on_completed()
stream = O.create(excepting_f).delay(100)
d = subs(stream)
d = subs(stream)
# I think its an (amazing) feature, preventing to process functions results of later(!) failing functions
rx = O.generate
rst(rx)
# The basic form of generate takes four parameters:
# - the first item to emit
# - a function to test an item to determine whether to emit it (true) or terminate the Observable (false)
# - a function to generate the next item to test and emit based on the value of the previous item
# - a function to transform items before emitting them
def generator_based_on_previous(x): return x + 1.1
def doubler(x): return 2 * x
d = subs(rx(0, lambda x: x < 4, generator_based_on_previous, doubler))
rx = O.generate_with_relative_time
rst(rx)
stream = rx(1, lambda x: x < 4, lambda x: x + 1, lambda x: x, lambda t: 100)
d = subs(stream)
rst(O.defer)
# plural! (unique per subscription)
streams = O.defer(lambda: O.just(rand()))
d = subs(streams)
d = subs(streams) # gets other values - created by subscription!
# evaluating a condition at subscription time in order to decide which of two streams to take.
rst(O.if_then)
cond = True
def should_run():
return cond
streams = O.if_then(should_run, O.return_value(43), O.return_value(56))
d = subs(streams)
log('condition will now evaluate falsy:')
cond = False
streams = O.if_then(should_run, O.return_value(43), O.return_value(rand()))
d = subs(streams)
d = subs(streams)
rst(O.range)
d = subs(O.range(0, 3))
rst(O.interval)
d = subs(O.interval(100).time_interval()\
.map(lambda x, v: '%(interval)s %(value)s' \
% ItemGetter(x)).take(3))
rst(O.empty)
d = subs(O.empty())
rst(O.never)
d = subs(O.never())
rst(O.throw)
d = subs(O.throw(ZeroDivisionError))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ..that was returned from a function called at subscribe-time
Step2: ..that was returned from an Action, Callable, Runnable, or something of that sort, called at subscribe-time
Step3: ...after a specified delay
Step4: ...that emits a sequence of items repeatedly
Step6: ...from scratch, with custom logic and cleanup (calling a function again and again)
Step7: ...for each observer that subscribes OR according to a condition at subscription time
Step8: ...that emits a sequence of integers
Step9: ...at particular intervals of time
Step10: ...after a specified delay (see timer)
Step11: ...that does nothing at all
Step12: ...that excepts
|
11,758
|
<ASSISTANT_TASK:>
Python Code:
!git clone https://github.com/tensorflow/models
from __future__ import print_function
from IPython import display
checkpoint_name = 'mobilenet_v2_1.0_224' #@param
url = 'https://storage.googleapis.com/mobilenet_v2/checkpoints/' + checkpoint_name + '.tgz'
print('Downloading from ', url)
!wget {url}
print('Unpacking')
!tar -xvf {checkpoint_name}.tgz
checkpoint = checkpoint_name + '.ckpt'
display.clear_output()
print('Successfully downloaded checkpoint from ', url,
'. It is available as', checkpoint)
!wget https://upload.wikimedia.org/wikipedia/commons/f/fe/Giant_Panda_in_Beijing_Zoo_1.JPG -O panda.jpg
# setup path
import sys
sys.path.append('/content/models/research/slim')
import tensorflow as tf
from nets.mobilenet import mobilenet_v2
tf.reset_default_graph()
# For simplicity we just decode jpeg inside tensorflow.
# But one can provide any input obviously.
file_input = tf.placeholder(tf.string, ())
image = tf.image.decode_jpeg(tf.read_file(file_input))
images = tf.expand_dims(image, 0)
images = tf.cast(images, tf.float32) / 128. - 1
images.set_shape((None, None, None, 3))
images = tf.image.resize_images(images, (224, 224))
# Note: arg_scope is optional for inference.
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope(is_training=False)):
logits, endpoints = mobilenet_v2.mobilenet(images)
# Restore using exponential moving average since it produces (1.5-2%) higher
# accuracy
ema = tf.train.ExponentialMovingAverage(0.999)
vars = ema.variables_to_restore()
saver = tf.train.Saver(vars)
from IPython import display
import pylab
from datasets import imagenet
import PIL
display.display(display.Image('panda.jpg'))
with tf.Session() as sess:
saver.restore(sess, checkpoint)
x = endpoints['Predictions'].eval(feed_dict={file_input: 'panda.jpg'})
label_map = imagenet.create_readable_names_for_imagenet_labels()
print("Top 1 prediction: ", x.argmax(),label_map[x.argmax()], x.max())
import numpy as np
img = np.array(PIL.Image.open('panda.jpg').resize((224, 224))).astype(np.float) / 128 - 1
gd = tf.GraphDef.FromString(open(checkpoint_name + '_frozen.pb', 'rb').read())
inp, predictions = tf.import_graph_def(gd, return_elements = ['input:0', 'MobilenetV2/Predictions/Reshape_1:0'])
with tf.Session(graph=inp.graph):
x = predictions.eval(feed_dict={inp: img.reshape(1, 224,224, 3)})
label_map = imagenet.create_readable_names_for_imagenet_labels()
print("Top 1 Prediction: ", x.argmax(),label_map[x.argmax()], x.max())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Checkpoint based inference
Step2: Frozen inference
|
11,759
|
<ASSISTANT_TASK:>
Python Code:
import pymrio
mrio = pymrio.load_test()
mrio.get_sectors()
mrio.get_regions()
mrio.get_Y_categories()
mrio.get_extensions()
list(mrio.get_extensions())
mrio.rename_regions({"reg1": "REGION A", "reg2": "REGION B"})
mrio.get_regions()
mrio.rename_sectors({"mining": "dwarf business"})
mrio.rename_Y_categories({"Final consumption expenditure by households": "fin_house"})
mrio.get_sectors()
mrio.get_Y_categories()
mrio_class = pymrio.get_classification(mrio_name="test")
mrio_class.sectors
conv_dict = mrio_class.get_sector_dict(mrio_class.sectors.TestMrioName, mrio_class.sectors.TestMrioCode)
conv_dict
mrio = pymrio.load_test()
mrio.rename_sectors(conv_dict)
mrio.get_sectors()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can use several functions to get a quick overview over the MRIO system
Step2: A list of available satellite accounts can be obtained by
Step3: this returns a generator over all extensions, to just get the names which can be used to loop over the extensions. To just get the names
Step4: All the names returned are literally just names and can easily be renamed.
Step5: Renaming sectors or Y categories works the same way
Step6: Some MRIOs come with a selections of names which can be used for renaming.
Step7: The classification data contains different names and aggregation levels for the sectors and final demand categories. The easiest way to explore the classification data is by using the autocomplete functionality. Depending on the editor you are using, this might work by typing mrio_class.sectors. and pressing Tab or Ctrl-Space. The same works for mrio_class.finaldemand.
Step8: This can be used to generate dictionaries for renaming the sectors of the MRIO, eg with
Step9: This can then be used for renaming the sectors
|
11,760
|
<ASSISTANT_TASK:>
Python Code:
def in_unit_circle(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
@numba.vectorize('int64(float64, float64)',target='cpu')
def in_unit_circle_serial(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
@numba.vectorize('int64(float64, float64)',target='parallel')
def in_unit_circle_multicore(x, y):
if x**2 + y**2 < 1:
return 1
else:
return 0
n = int(1e7)
xs, ys = np.random.random((2, n))
%%time
4 * np.sum(in_unit_circle(x, y) for x, y in zip(xs, ys))/n
%%time
4 * np.sum(in_unit_circle_serial(xs, ys))/n
%%time
4 * np.sum(in_unit_circle_multicore(xs, ys))/n
def plot_one(data, name):
xs, ys = data.T
plt.scatter(xs, ys, s=1, edgecolor=None)
plt.savefig('%s.png' % name)
return name
data = np.random.random((10, 10000, 2))
%%time
for i, M in enumerate(data):
plot_one(M, i)
%%time
args = [(x, i) for i, x in enumerate(data)]
with mp.Pool() as pool:
pool.starmap(plot_one, args)
%%time
args = [(x, i) for i, x in enumerate(data)]
with mp.Pool() as pool:
results = pool.starmap_async(plot_one, args)
%%time
Parallel(n_jobs=-1)(delayed(plot_one)(x, i) for i, x in enumerate(data))
pass
def f(x):
import time
time.sleep(np.random.randint(0, 5))
return x
%%time
with mp.Pool(processes=4) as pool:
result = pool.map(f, range(10))
result
%%time
pool = mp.Pool(processes=4)
result = pool.map_async(f, range(10))
if result.ready() and result.successful():
    print(result.get())
else:
    result.wait()        # block until the work has finished
    print(result.get())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multi-core processing
Step2: Single core
Step3: Threads
Step4: Parallel comprehensions with joblib
Step5: Blocking and non-blocking calls
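As one more illustration of non-blocking calls (a sketch not taken from the original notebook), apply_async also accepts a callback that the parent process runs as each result arrives:
import multiprocessing as mp

results = []

def collect(value):
    # runs in the parent process as each task finishes
    results.append(value)

def square(x):
    return x * x

with mp.Pool(processes=4) as pool:
    for i in range(10):
        pool.apply_async(square, (i,), callback=collect)
    pool.close()   # no more tasks will be submitted
    pool.join()    # block until all tasks (and callbacks) are done

print(sorted(results))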
|
11,761
|
<ASSISTANT_TASK:>
Python Code:
# Basemap Mosaic (v1 API)
mosaicsSeries = 'global_quarterly_2017q1_mosaic'
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'
# Planet tile server url
mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys["PLANET_API_KEY"]
# Map Settings
# Define colors
colors = {'blue': "#009da5"}
# Define initial map center lat/long
center = [45.5231, -122.6765]
# Define initial map zoom level
zoom = 11
# Set Map Tiles URL
planetMapTiles = TileLayer(url= mosaicsTilesURL)
# Create the map
m = Map(
center=center,
zoom=zoom,
default_tiles = planetMapTiles # Uncomment to use Planet.com basemap
)
# Define the draw tool type options
polygon = {'shapeOptions': {'color': colors['blue']}}
rectangle = {'shapeOptions': {'color': colors['blue']}}
# Create the draw controls
# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293
dc = DrawControl(
polygon = polygon,
rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
AOIs = {}
# Register the draw controls handler
def handle_draw(self, action, geo_json):
# Increment the action counter
global actionCount
actionCount += 1
# Remove the `style` property from the GeoJSON
geo_json['properties'] = {}
# Convert geo_json output to a string and prettify (indent & replace ' with ")
geojsonStr = json.dumps(geo_json, indent=2).replace("'", '"')
AOIs[actionCount] = json.loads(geojsonStr)
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m
areaAOI = AOIs[1]["geometry"]
pathAOI = AOIs[2]["geometry"]
aoi_file ="portland.geojson"
with open(aoi_file,"w") as f:
f.write(json.dumps(areaAOI))
# build a query using the AOI and
# a cloud_cover filter that excludes 'cloud free' scenes
old = datetime.datetime(year=2017,month=1,day=1)
query = filters.and_filter(
filters.geom_filter(areaAOI),
filters.range_filter('cloud_cover', lt=5),
filters.date_range('acquired', gt=old)
)
# build a request for only PlanetScope imagery
request = filters.build_search_request(
query, item_types=['PSScene3Band']
)
# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
scenes = []
planet_map = {}
for item in result.items_iter(limit=500):
planet_map[item['id']]=item
props = item['properties']
props["id"] = item['id']
props["geometry"] = item["geometry"]
props["thumbnail"] = item["_links"]["thumbnail"]
scenes.append(props)
scenes = pd.DataFrame(data=scenes)
display(scenes)
print len(scenes)
# now let's clean up the datetime stuff
# make a shapely shape from our aoi
portland = shape(areaAOI)
footprints = []
overlaps = []
# go through the geometry from our api call, convert to a shape and calculate overlap area.
# also save the shape for safe keeping
for footprint in scenes["geometry"].tolist():
s = shape(footprint)
footprints.append(s)
overlap = 100.0*(portland.intersection(s).area / portland.area)
overlaps.append(overlap)
# take our lists and add them back to our dataframe
scenes['overlap'] = pd.Series(overlaps, index=scenes.index)
scenes['footprint'] = pd.Series(footprints, index=scenes.index)
# now make sure pandas knows about our date/time columns.
scenes["acquired"] = pd.to_datetime(scenes["acquired"])
scenes["published"] = pd.to_datetime(scenes["published"])
scenes["updated"] = pd.to_datetime(scenes["updated"])
scenes.head()
# Now let's get it down to just good, recent, clear scenes
clear = scenes['cloud_cover']<0.4
good = scenes['quality_category']=="standard"
recent = scenes["acquired"] > datetime.date(year=2017,month=1,day=1)
partial_coverage = scenes["overlap"] > 10
good_scenes = scenes[(good&clear&recent&partial_coverage)]
print good_scenes
# first create a list of colors
colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff","#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"]
# grab our scenes from the geometry/footprint geojson
# Chane this number as needed
footprints = good_scenes[0:10]["geometry"].tolist()
# for each footprint/color combo
for footprint,color in zip(footprints,colors):
# create the leaflet object
feat = {'geometry':footprint,"properties":{
'style':{'color': color,'fillColor': color,'fillOpacity': 0.2,'weight': 1}},
'type':u"Feature"}
# convert to geojson
gjson = GeoJSON(data=feat)
# add it our map
m.add_layer(gjson)
# now we will draw our original AOI on top
feat = {'geometry':areaAOI,"properties":{
'style':{'color': "#FFFFFF",'fillColor': "#FFFFFF",'fillOpacity': 0.5,'weight': 1}},
'type':u"Feature"}
gjson = GeoJSON(data=feat)
m.add_layer(gjson)
m
def get_products(client, scene_id, asset_type='PSScene3Band'):
"""Ask the client to return the available products for a
given scene and asset type. Returns a list of product
strings."""
out = client.get_assets_by_id(asset_type,scene_id)
temp = out.get()
return temp.keys()
def activate_product(client, scene_id, asset_type="PSScene3Band",product="analytic"):
"""Activate a product given a scene, an asset type, and a product.
On success return the return value of the API call and an activation object."""
temp = client.get_assets_by_id(asset_type,scene_id)
products = temp.get()
if( product in products.keys() ):
return client.activate(products[product]),products[product]
else:
return None
def download_and_save(client,product):
"""Given a client and a product activation object download the asset.
This will save the tiff file in the local directory and return its
file name."""
out = client.download(product)
fp = out.get_body()
fp.write()
return fp.name
def scenes_are_active(scene_list):
"""Check if all of the resources in a given list of
scene activation objects are ready for downloading."""
for scene in scene_list:
if scene["status"] != "active":
print "{} is not ready.".format(scene)
return False
return True
def load_image4(filename):
"""Return a 4D (r, g, b, nir) numpy array with the data in the specified TIFF filename."""
path = os.path.abspath(os.path.join('./', filename))
if os.path.exists(path):
with rasterio.open(path) as src:
b, g, r, nir = src.read()
return np.dstack([r, g, b, nir])
def load_image3(filename):
"""Return a 3D (r, g, b) numpy array with the data in the specified TIFF filename."""
path = os.path.abspath(os.path.join('./', filename))
if os.path.exists(path):
with rasterio.open(path) as src:
b,g,r,mask = src.read()
return np.dstack([b, g, r])
def get_mask(filename):
"""Return a 1D mask numpy array with the data in the specified TIFF filename."""
path = os.path.abspath(os.path.join('./', filename))
if os.path.exists(path):
with rasterio.open(path) as src:
b,g,r,mask = src.read()
return np.dstack([mask])
def rgbir_to_rgb(img_4band):
"""Convert an RGBIR image to RGB."""
return img_4band[:,:,:3]
to_get = good_scenes["id"][0:10].tolist()
to_get = sorted(to_get)
activated = []
# for each scene to get
for scene in to_get:
# get the product
product_types = get_products(client,scene)
for p in product_types:
# if there is a visual product
if p == "visual": # p == "basic_analytic_dn"
print "Activating {0} for scene {1}".format(p,scene)
# activate the product
_,product = activate_product(client,scene,product=p)
activated.append(product)
tiff_files = []
asset_type = "_3B_Visual"
# check if our scenes have been activated
if scenes_are_active(activated):
for to_download,name in zip(activated,to_get):
# create the product name
name = name + asset_type + ".tif"
# if the product exists locally
if( os.path.isfile(name) ):
# do nothing
print "We have scene {0} already, skipping...".format(name)
tiff_files.append(name)
elif to_download["status"] == "active":
# otherwise download the product
print "Downloading {0}....".format(name)
fname = download_and_save(client,to_download)
tiff_files.append(fname)
print "Download done."
else:
print "Could not download, still activating"
else:
print "Scenes aren't ready yet"
print tiff_files
subprocess.call(["rm","portland_mosaic.tif"])
commands = ["gdalwarp", # t
"-t_srs","EPSG:3857",
"-cutline",aoi_file,
"-crop_to_cutline",
"-tap",
"-tr", "3", "3"
"-overwrite"]
output_mosaic = "portland_mosaic.tif"
for tiff in tiff_files:
commands.append(tiff)
commands.append(output_mosaic)
print " ".join(commands)
subprocess.call(commands)
merged = load_image3(output_mosaic)
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")
def crop_to_area(scene,x_c,y_c,w,h):
tlx = x_c-(w/2)
tly = y_c-(h/2)
brx = x_c+(w/2)
bry = y_c+(h/2)
return scene[tly:bry,tlx:brx,:]
plt.figure(0,figsize=(3,4))
plt.imshow(crop_to_area(merged,3000,3000,640,480))
plt.title("merged")
# Liberally borrowed from this example
# https://gis.stackexchange.com/questions/57834/how-to-get-raster-corner-coordinates-using-python-gdal-bindings
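# Note: the GetExtent/ReprojectCoords helpers and gdal.Open below assume the GDAL/OGR
# Python bindings (e.g. `from osgeo import gdal, osr`) were imported earlier in the notebook.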
def GetExtent(gt,cols,rows):
"""Get the list of corners in our output image in the format
[[x,y],[x,y],[x,y]]."""
ext=[]
# for the corners of the image
xarr=[0,cols]
yarr=[0,rows]
for px in xarr:
for py in yarr:
# apply the geo coordiante transform
# using the affine transform we got from GDAL
x=gt[0]+(px*gt[1])+(py*gt[2])
y=gt[3]+(px*gt[4])+(py*gt[5])
ext.append([x,y])
yarr.reverse()
return ext
def ReprojectCoords(coords,src_srs,tgt_srs):
trans_coords=[]
# create a transform object from the source and target ref system
transform = osr.CoordinateTransformation( src_srs, tgt_srs)
for x,y in coords:
# transform the points
x,y,z = transform.TransformPoint(x,y)
# add it to the list.
trans_coords.append([x,y])
return trans_coords
# TLDR: pixels => UTM coordiantes => Lat Long
raster=output_mosaic
# Load the GDAL File
ds=gdal.Open(raster)
# get the geotransform
gt=ds.GetGeoTransform()
# get the width and height of our image
cols = ds.RasterXSize
rows = ds.RasterYSize
# Generate the coordinates of our image in utm
ext=GetExtent(gt,cols,rows)
# get the spatial referencec object
src_srs=osr.SpatialReference()
# get the data that will allow us to move from UTM to Lat Lon.
src_srs.ImportFromWkt(ds.GetProjection())
tgt_srs = src_srs.CloneGeogCS()
extents = ReprojectCoords(ext,src_srs,tgt_srs)
print extents
def poor_mans_lat_lon_2_pix(lon,lat,w,h,extents):
# split up our lat and longs
lats = [e[1] for e in extents]
lons = [e[0] for e in extents]
# calculate our scene extents max and min
lat_max = np.max(lats)
lat_min = np.min(lats)
lon_max = np.max(lons)
lon_min = np.min(lons)
# calculate the difference between our start point
# and our minimum
lat_diff = lat-lat_min
lon_diff = lon-lon_min
# create the linearization
lat_r = float(h)/(lat_max-lat_min)
lon_r = float(w)/(lon_max-lon_min)
# generate the results.
return int(lat_r*lat_diff),int(lon_r*lon_diff)
def draw_point(x,y,img,t=40):
h,w,d = img.shape
y = h-y
img[(y-t):(y+t),(x-t):(x+t),:] = [255,0,0]
h,w,c = merged.shape
waypoints = [poor_mans_lat_lon_2_pix(point[0],point[1],w,h,extents) for point in pathAOI["coordinates"]]
print waypoints
merged = load_image3(output_mosaic)
[draw_point(pt[1],pt[0],merged) for pt in waypoints]
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")
def interpolate_waypoints(waypoints,velocity=10.0):
retVal = []
last_pt = waypoints[0]
# for each point in our waypoints except the first
for next_pt in waypoints[1:]:
# calculate distance between the points
distance = np.sqrt((last_pt[0]-next_pt[0])**2+(last_pt[1]-next_pt[1])**2)
# use our velocity to calculate the number steps.
steps = np.ceil(distance/velocity)
# linearly space points between the two points on our line
xs = np.array(np.linspace(last_pt[0],next_pt[0],steps),dtype='int64')
ys = np.array(np.linspace(last_pt[1],next_pt[1],steps),dtype='int64')
# zip the points together
retVal += zip(xs,ys)
# move to the next point
last_pt = next_pt
return retVal
def build_scenes(src,waypoints,window=[640,480],path="./movie/"):
count = 0
# Use opencv to change the color space of our image.
src = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)
# define half our sampling window.
w2 = window[0]/2
h2 = window[1]/2
# for our source image get the width and height
h,w,d = src.shape
for pt in waypoints:
# for each point crop the area out.
# the y value of our scene is upside down.
temp = crop_to_area(src,pt[1],h-pt[0],window[0],window[1])
# If we happen to hit the border of the scene, just skip
if temp.shape[0]*temp.shape[1]== 0:
# if we have an issue, just keep plugging along
continue
# Resample the image a bit, this just makes things look nice.
temp = cv2.resize(temp, (int(window[0]*0.75), int(window[1]*.75)))
# create a file name
fname = os.path.abspath(path+"img{num:06d}.png".format(num=count))
# Save it
cv2.imwrite(fname,temp)
count += 1
# load the image
merged = load_image3(output_mosaic)
# interpolate the waypoints
interp = interpolate_waypoints(waypoints)
# draw them on our scene
[draw_point(pt[1],pt[0],merged) for pt in interp]
# display the scene
plt.figure(0,figsize=(18,18))
plt.imshow(merged)
plt.title("merged")
os.system("rm ./movie/*.png")
merged = load_image3(output_mosaic)
build_scenes(merged,interp)
# avconv -framerate 30 -f image2 -i ./movie/img%06d.png -b 65536k out.mpg
os.system("rm ./movie/*.png")
framerate = 30
output = "out.mpg"
command = ["avconv","-framerate", str(framerate), "-f", "image2", "-i", "./movie/img%06d.png", "-b", "65536k", output]
os.system(" ".join(command))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Query the API
Step2: Just like before we clean up our data and distill it down to just the scenes we want.
Step3: To make sure we are good we'll visually inspect the scenes in our slippy map.
Step12: This is from the previous notebook. We are just activating and downloading scenes.
Step13: Perform the actual activation ... go get coffee
Step14: Download the scenes
Step15: Now, just like before, we will mosaic those scenes.
Step16: Let's take a look at what we got
Step17: Now we are going to write a quick crop function.
Step19: Now to figure out how our lat/long values map to pixels.
Step20: Here we'll call the functions we wrote.
Step21: Now we'll do a bit of a hack.
Step22: Let's check our work
Step23: Now things get interesting....
Step24: Before we generate our video frames, let's check our work
Step25: Now let's re-load the image and run the scene maker.
Step26: Finally, let's make a movie.
|
11,762
|
<ASSISTANT_TASK:>
Python Code:
#If you haven't already, make sure you install the `dfcx-scrapi` library
!pip install dfcx-scrapi
from dfcx_scrapi.core.intents import Intents
from dfcx_scrapi.tools.dataframe_functions import DataframeFunctions
creds_path = '<YOUR_CREDS_PATH_HERE>'
agent_id = '<YOUR_AGENT_ID_HERE>'
google_sheet_name = 'My Google Sheet Name'
google_sheet_tab_write = 'Write To My Tab Name'
# Instantiate Classes
intents = Intents(creds_path)
dffx = DataframeFunctions(creds_path)
# Transform Intents into a DataFrame
df = intents.bulk_intent_to_df(agent_id)
# Push DataFrame to Google Sheets
dffx.dataframe_to_sheets(google_sheet_name, google_sheet_tab_write, df)
print('Total # of Intents = {}'.format(df.intent.nunique()))
print('Total # of Training Phrases = {}'.format(df.tp.nunique()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: User Inputs
Step3: CX to Sheets
|
11,763
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Define your function
def softmax(x):
    # Exponentiate and normalize so the entries sum to 1
    # (for a 2-D input this normalizes each column, i.e. along axis 0)
    e = np.exp(x)
    return e / np.sum(e, axis=0)
# Test it out on an array
test=[1,3,2]
print(softmax(test))
# The result should be [0.09003057 0.66524096 0.24472847]
import numpy as np
# Define your function
def softmax(x):
    # Exponentiate and normalize so the entries sum to 1
    # (for a 2-D input this normalizes each column, i.e. along axis 0)
    e = np.exp(x)
    return e / np.sum(e, axis=0)
# Test it out on an array
test=np.ones((3,4))
test[0,:]=2.
print(test)
print(softmax(test))
# The result should be [[ 2. 2. 2. 2.]
# [ 1. 1. 1. 1.]
# [ 1. 1. 1. 1.]]
# [[ 0.57611688 0.57611688 0.57611688 0.57611688]
# [ 0.21194156 0.21194156 0.21194156 0.21194156]
# [ 0.21194156 0.21194156 0.21194156 0.21194156]]
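# An optional refinement (not part of the original exercise): subtracting the
# per-column max before exponentiating avoids overflow for large inputs and
# leaves the result mathematically unchanged.
def softmax_stable(x):
    z = x - np.max(x, axis=0, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=0, keepdims=True)
print(softmax_stable(test))  # same values as above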
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 5
|
11,764
|
<ASSISTANT_TASK:>
Python Code:
import networkx as nx
import matplotlib.pyplot as plt
%matplotlib inline
G = nx.Graph() # create an empty graph
G.add_node('Luke') # add one node
G.add_nodes_from(['Leia', 'Han']) # add multiple nodes
G.add_edge('Luke', 'Leia') # add one edge
G.add_edges_from([('Luke', 'Han'), ('Leia', 'Han'), ('Leia', 'Lando')]) # add multiple edges
G.remove_edge('Leia', 'Lando') # remove one edge
G.remove_node('Lando') # remove one node
nx.draw(G, with_labels=True, node_size=3000) # draw network
G = nx.DiGraph() # create an empty directed graph
G.add_edges_from([('Luke', 'Han'), ('Luke', 'Leia'), ('Leia', 'Han')])
nx.draw(G, with_labels=True, node_size=3000)
G['Luke']
G['Luke']['Leia']
G.node['Luke']['Force'] = 1
G.node['Leia']['Force'] = 1
G.node['Han']['Force'] = 0
G.nodes(data=True)
G['Luke']['Han']['weight'] = 10
G['Luke']['Leia']['weight'] = 10
G['Leia']['Han']['weight'] = 10
G.edges(data=True)
G = nx.Graph()
G.add_edges_from([('Luke', 'Han'), ('Luke', 'Leia'), ('Leia', 'Han')])
G.add_node('Lando')
list(nx.connected_components(G)) # generate a sorted list of connected components, largest first
nx.draw(G, with_labels=True, node_size=3000)
G = nx.Graph()
e = [('A', 'B'), ('B', 'C'), ('A', 'C'), ('C', 'D')]
G.add_edges_from(e)
print(nx.dijkstra_path(G, 'A', 'D'))
nx.draw(G, with_labels=True)
G = nx.complete_graph(10) # return a graph where all nodes are connected to all edges
fig, ax = plt.subplots(1, 3, figsize=(15,4))
nx.draw_random(G, ax=ax[0])
ax[0].set_title('random')
nx.draw_circular(G, ax=ax[1])
ax[1].set_title('circular')
nx.draw_spectral(G, ax=ax[2])
ax[2].set_title('spectral');
r = 2 # branching factor of the tree
h = 4 # height of the tree
btree = nx.balanced_tree(r,h) # return the perfectly balanced r-tree of height h
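# Note: nx.nx_pydot.graphviz_layout requires the pydot package plus a local Graphviz
# installation providing the 'neato' and 'dot' layout programs used below.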
pos_neato = nx.nx_pydot.graphviz_layout(btree, prog='neato') # set initial positions for nodes using neato layout
pos_dot = nx.nx_pydot.graphviz_layout(btree, prog='dot') # set initial positions for nodes using dot layout
fig, ax = plt.subplots(1, 2, figsize=(15,4))
nx.draw(btree, pos_neato, ax=ax[0])
ax[0].set_title('neato')
nx.draw(btree, pos_dot, ax=ax[1])
ax[1].set_title('dot');
import os
import json
import numpy as np
import collections
# Choose chemical mechanism
model_name = 'MCM_C1C5' # or 'CRI_C1C5
# Load reaction descriptions
eqs_json_path = '../data/'
with open(os.path.join(eqs_json_path, model_name+'.json'), 'r') as f:
eqs = json.load(f)
eqs[0:3]
# Convert equations's info to nodes, edges and edge labels
nodes = []
edges = []
edge_labels = collections.OrderedDict()
major_reactants = ['CL', 'H2', 'HO2', 'NO', 'NO2', 'NO3', 'OH', 'SO2', 'SO3']
for eq in eqs:
if len(eq['reac']) == 1:
# photolysis
if 'J' in eq['coef'] and len(eq['prod']) == 1:
edge = (eq['reac'][0], eq['prod'][0])
edges.append(edge)
edge_labels[edge] = 'hv'
elif 'J' in eq['coef'] and len(eq['prod']) != 1:
for prod in eq['prod']:
edge = (eq['reac'][0], prod)
edges.append(edge)
edge_labels[edge] = 'hv'
# thermal decomposition
elif 'J' not in eq['coef'] and len(eq['prod']) == 1:
edge = (eq['reac'][0], eq['prod'][0])
edges.append(edge)
edge_labels[edge] = ''
elif 'J' not in eq['coef'] and len(eq['prod']) != 1:
for prod in eq['prod']:
edge = (eq['reac'][0], prod)
edges.append(edge)
edge_labels[edge] = ''
# bimolecular
else: # len(eq['reac']) == 2:
reac1, reac2 = eq['reac']
if reac1 in major_reactants and reac2 not in major_reactants:
reac1, reac2 = reac2, reac1 # for consistency in edge labels (if possible, always use a major reactant as an edge label)
if len(eq['prod']) == 1:
for reac in eq['reac']:
edge = (reac, eq['prod'][0])
edges.append(edge)
edge_labels[edge] = reac2
elif len(eq['prod']) != 1:
for reac in eq['reac']:
for prod in eq['prod']:
edge = (reac, prod)
edges.append(edge)
edge_labels[edge] = reac2
# make sure that all reactants and products have a node
for reac in eq['reac']:
if reac not in nodes:
nodes.append(reac)
for prod in eq['prod']:
if prod not in nodes:
nodes.append(prod)
# Create network layout
scheme = nx.MultiDiGraph()
scheme.add_edges_from(edges)
scheme.add_nodes_from(nodes)
pos = nx.nx_pydot.graphviz_layout(scheme)
# Choose x, y limits for the graph
if model_name.split('_')[0] == 'MCM':
pass
if model_name.split('_')[0] == 'CRI':
x1, x2 = -400, 400
y1, y2 = -400, 400
# Draw network
fig, ax = plt.subplots(figsize=(15,15))
nx.draw_networkx_nodes(scheme, pos, ax=ax, node_color='#669999')
nx.draw_networkx_edges(scheme, pos, ax=ax, edge_color='k', width=0.1, arrows=True)
# Add node and edge labels, but it makes the graph too busy
# nx.draw_networkx_labels(scheme, pos, {k: k for k in nodes}, ax=ax, font_size=9)
# nx.draw_networkx_edge_labels(scheme, pos, edge_labels=edge_labels)
_ = ax.axis('off')
ax.collections[0].set_edgecolor('k') # change color of the nodes's outline
if model_name.split('_')[0] == 'CRI':
ax.set_xlim(x1, x2)
ax.set_ylim(y1, y2)
d = nx.degree_centrality(scheme) # calculate degree of centrality
{k: d[k] for k in sorted(d.keys())[:10]} # show the first 10 entries of a dictionary with degrees of centrality
max([val for key, val in d.items()]) # max degree of centrality in the whole network
# Sort a dictionary with degrees of centrality by value and print the first 11 key, value pairs
sorted_degree_centrality = [(k, d[k]) for k in sorted(d, key=d.get, reverse=True)]
for n, i in enumerate(sorted_degree_centrality[0:11]):
print(n+1, i)
list(scheme.predecessors('O3'))
list(scheme.successors('O3'))
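# A small follow-up (an addition, not in the original notebook): because scheme is a
# directed multigraph, the in-degree counts reactions producing O3 and the out-degree
# counts reactions consuming it.
print(scheme.in_degree('O3'), scheme.out_degree('O3'))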
# HTML(html)  # requires IPython's HTML display helper and an `html` string, neither of which is defined in this cell
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are two major components in a network - nodes (or vertices) and edges connecting nodes. They are not specified as networkx objects, leaving you to use something meaningful as nodes and edges. Nodes can be anything
Step2: Depending on the nature of your task, you may decide to use different types of graphs. There are
Step3: Data structure
Step4: To add an attribute to nodes or edges
Step5: Analysing network
Step6: Or find the shortest path between point A and point D using Dijkstra's algorithm
Step7: Drawing network
Step8: dot draws hierarchical layouts of directed graphs while neato creates spring model layouts (http
Step9: Task 1
Step10: So, unsurprisingly, the hydroxyl radical (OH) has the maximum number of interactions in the network (actually in both the MCM and the CRI). Ozone is in 11th place in the MCM, but only 40th in the CRI.
Step11: Task 3. Find species that are produced directly in ozone destroying reactions.
Step12: We were able to find the last two answers only because at the beginning we used MultiDiGraph(), which is a directed graph.
|
11,765
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import logging
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from ptm import AuthorTopicModel
from ptm.utils import convert_cnt_to_list, get_top_words
logger = logging.getLogger('AuthorTopicModel')
logger.propagate=False
%matplotlib inline
doc_ids = pickle.load(open('../data/cora/doc_ids.pkl', 'rb'))
doc_cnt = pickle.load(open('../data/cora/doc_cnt.pkl', 'rb'))
doc_author = pickle.load(open('../data/cora/doc_authorid.pkl', 'rb'))
author_name = pickle.load(open('../data/cora/authorid_authorname.pkl', 'rb'))
voca = pickle.load(open('../data/cora/voca.pkl', 'rb'))
corpus = convert_cnt_to_list(doc_ids, doc_cnt)
n_doc = len(corpus)
n_topic = 10
n_author = len(author_name)
n_voca = len(voca)
max_iter = 50
model = AuthorTopicModel(n_doc, n_voca, n_topic, n_author)
model.fit(corpus, doc_author, max_iter=max_iter)
for k in range(n_topic):
top_words = get_top_words(model.TW, voca, k, 10)
print('topic ', k , ','.join(top_words))
author_id = 7
fig = plt.figure(figsize=(12,6))
plt.bar(range(n_topic), model.AT[author_id]/np.sum(model.AT[author_id]))
plt.title(author_name[author_id])
plt.xticks(np.arange(n_topic)+0.5, ['\n'.join(get_top_words(model.TW, voca, k, 10)) for k in range(n_topic)])
plt.show()
author_id = 32
fig = plt.figure(figsize=(12,6))
plt.bar(range(n_topic), model.AT[author_id]/np.sum(model.AT[author_id]))
plt.title(author_name[author_id])
plt.xticks(np.arange(n_topic)+0.5, ['\n'.join(get_top_words(model.TW, voca, k, 10)) for k in range(n_topic)])
plt.show()
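# A possible follow-up (this assumes model.AT is an (n_author, n_topic) count matrix,
# which matches how it is indexed above): find the author whose topic profile is most
# similar to author 7 by cosine similarity.
theta = model.AT / model.AT.sum(axis=1, keepdims=True)
sims = theta.dot(theta[7]) / (np.linalg.norm(theta, axis=1) * np.linalg.norm(theta[7]))
sims[7] = -1.0  # exclude the author itself
print(author_name[int(np.argmax(sims))])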
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load CORA dataset
Step2: Fit author-topic model
Step3: Print top 10 words for each topic
Step4: Plot topic distribution of random author
|
11,766
|
<ASSISTANT_TASK:>
Python Code:
# show plots in this notebook
%matplotlib inline
import os
# import corpkit
from corpkit import interrogator, editor, plotter, conc
# some wordlists we'll use later
from dictionaries.process_types import processes
from dictionaries.wordlists import wordlists
from dictionaries.roles import roles
# for editing/combining results:
import pandas as pd
pd.options.display.max_rows = 30
pd.options.display.max_columns = 6
nyt = 'data/NYT-parsed'
wsj = 'data/WSJ-parsed'
wap = 'data/WAP-parsed'
cht = 'data/CHT-parsed'
ust = 'data/UST-parsed'
tbt = 'data/TBT-parsed'
all_corpora = [nyt, wsj, wap, cht, ust, tbt]
from corpkit import load_result
allwords = load_result('6_allwords_newest')
riskwords = load_result('6_riskwords_newest')
riskclasses = load_result('6_riskclasses_newest')
risktags = load_result('6_risktags_newest')
n_risk = load_result('6_n_risk')
risk_of = load_result('6_risk_of')
riskword = r'(?i)\brisk'
# noun in NP in PP headed by of in NP headed by nominal risk
risk_of_query = r'/NN.?/ >># (NP > (PP <<# /(?i)of/ > (NP <<# (/NN.?/ < /(?i).?\brisk.?/))))'
risk_of = interrogator(corpora, 'words', risk_of_query, lemmatise = True,
num_proc = 3, quicksave = '6_risk_of')
risk_of['CHT'].results[:10]
n_risk_query = r'/NN.?/ !># NP > (NP <<# (/NN.?/ < /(?i).?\brisk.?/))'
n_risk = interrogator(corpora, 'words', n_risk_query, lemmatise = True,
num_proc = 3, quicksave = '6_n_risk')
n_risk['WAP'].results[:10]
def collapsed(interrogation):
"""collapse distinction between newspapers"""
import pandas as pd
interroformat = False
if type(interrogation.values()[0]) == pd.core.frame.DataFrame:
dat = interrogation.values()[0]
else:
dat = interrogation.values()[0].results
interroformat = True
order = list(dat.columns)
for i in interrogation.values()[1:]:
if interroformat:
dat = dat.add(i.results, fill_value = 0)
else:
dat = dat.add(i, fill_value = 0)
return dat[order]
def entry_across_corpora(result_dict, entry_name, regex = False):
"""get a single entry as a dataframe"""
import pandas as pd
import re
res = []
# for each corpus name and data
for k, v in sorted(result_dict.items()):
# grab the process result for each paper
if not regex:
try:
column = v.results[entry_name]
except:
continue
else:
column = v.results[[c for c in list(v.results.columns) if re.search(entry_name, c)]].iloc[:,0]
# rename it to the corpus name
column.name = k
# append to a list
res.append(column)
# concatenate and return
return pd.concat(res, axis = 1)
def get_totals(interrogation):
"""helper function: get totals from dict of interrogations"""
lst = []
# for each interrogation name and data
for k, v in interrogation.items():
# get the totals
tot = v.totals
# name the totals with the newspaper
tot.name = k.upper()
# add to a list
lst.append(tot)
# turn the list into a dataframe
return pd.concat(lst, axis = 1)
rel_coll = editor(collapsed(risk_of), '%', 'self', print_info = False)
inc_rel_coll = editor(collapsed(risk_of), '%', 'self', sort_by = 'increase', print_info = False)
dec_rel_coll = editor(collapsed(risk_of), '%', 'self', sort_by = 'decrease', print_info = False)
plotter('Risk of \emph{noun}', rel_coll.results)
plotter('Risk of \emph{noun}, increasing', inc_rel_coll.results)
plotter('Risk of \emph{noun}, decreasing', dec_rel_coll.results)
attack = entry_across_corpora(risk_of, 'attack')
attack[:10]
rel_attack = editor(attack, '%', get_totals(risk_of), print_info = False)
plotter('Risk of attack', rel_attack.results, subplots = True, layout = (2,3), figsize = (9,6), save = 'rattack')
r_of_attack = r'NP <<# (/NN.?/ < /(?i)\battack/) > (PP <<# /(?i)of/ > (NP <<# (/NN.?/ < /(?i).?\brisk.?/)))'
lines = conc(os.path.join(wap, '2004'), 't', r_of_attack, print_output = False, random = True)
lines[['l', 'm', 'r']]
rel_coll = editor(collapsed(n_risk), '%', 'self', print_info = False)
plotter('\emph{Noun} risk', rel_coll.results)
riskedthing = ['life', 'everything', 'money', 'career', 'health', 'lot',
'reputation', 'capital', 'future', 'job', 'safety', 'credit', 'currency',
'possibility', 'anything', 'return', 'neck', 'nothing']
riskthings = {}
for name, data in risk_of.items():
df = data.results.add(n_risk[name].results, fill_value = 0)
df.ix['total'] = df.sum()
df = df.T.sort('total', ascending = False).T
df = df.drop('total', axis = 0)
riskthings[name] = df
#collapsed(riskthings)
rel_coll = editor(collapsed(riskthings), '%', 'self')
plotter('Risk of \emph{noun}\slash \emph{noun} risk', rel_coll.results)
cats = {'Health': ['cancer', 'health', 'attack', 'disease', 'death', 'infection', 'heart', 'stroke', 'injury',
'smoking', 'clot', 'complication', 'heart-attack', 'diabetes', 'fracture', 'drug', 'bleeding',
'suicide', 'contagion', 'mortality', 'illness', 'aid', 'breast', 'miscarriage', 'dementia',
'radiation', 'surgery', 'life', 'depression', 'osteoporosis', 'pregnancy', 'breast-cancer',
'birth', 'disorder', 'leukemia'],
'Finance': ['credit', 'inflation', 'default', 'market', 'currency', 'investment', 'interest-rate', 'recession',
'business', 'deflation', 'price', 'return', 'litigation', 'rate', 'cost', 'loan', 'counterparty',
'trading', 'portfolio', 'stock', 'liability', 'prepayment', 'fund', 'bond', 'growth', 'asset',
'decline', 'liquidity', 'lending', 'volatility', 'bank', 'fraud', 'insurance', 'mortgage', 'rollover',
'company', 'accident', 'debt', 'interest', 'bankruptcy', 'slowdown', 'expense',
'collapse', 'foreign-exchange', 'deal', 'downturn', 'transmission',
'foreclosure', 'exchange-rate', 'exchange', 'bubble', 'derivative',
'stock-market', 'devaluation', 'capital', 'pressure', 'downgrade',
'fluctuation', 'investor', 'crash', 'taxpayer', 'economy', 'investing', 'dollar',
'tax', 'national-security', 'equity', 'trade', 'transaction', 'money', 'career',
'executive', 'industry', 'run', 'insolvency', 'underwriting', 'claim', 'settlement']}
uncategorised = [u'downside', 'security', 'loss', 'benefit', 'safety', 'failure', 'problem', 'reward', 'event',
'war', 'flight', 'damage', 'effect', 'course', 'exposure', 'fire', 'country', 'policy', 'control',
'execution', 'u.s', 'increase', 'crisis', 'terrorism', 'defect', 'harm', 'uncertainty',
'lifetime', 'strategy', 'product', 'change', 'recurrence', 'conflict', 'lawsuit', 'contamination',
'government', 'development', 'disruption', 'move', 'opportunity', 'action', 'violence', 'error',
'mr', 'theft', 'use', 'type', 'inventory', 'treatment', 'explosion', 'proliferation', 'headline',
'behavior', 'project', 'material', 'risk', 'china', 'delay', 'weakness', 'abuse', 'earthquake',
'backlash', 'flooding', 'correction', 'approach', 'integration', 'strike', 'reaction', 'collision',
'spill', 'program', 'casualty', 'shock', 'system', 'hurricane', 'flood', 'challenge', 'instability',
'longevity', 'activity', 'catastrophe', 'overheating', 'issue', 'disaster', 'cut', 'property',
'meltdown']
#get_totals(risk_of)
comp_n_risk = editor(n_risk, merge_entries = cats, just_entries = cats.keys())
# categorise the 'risk of NOUN' results with the same topic lists for use below
comp_risk_of = editor(risk_of, merge_entries = cats, just_entries = cats.keys())
rel_tot = editor(collapsed(comp_risk_of), '%', get_totals(risk_of),
use_df2_totals = True, sort_by = 'total')
plotter('Health and financial risks', rel_tot.results, style = 'seaborn-notebook')
rel_comp_risk_of = editor(risk_of, '%', risk_of, use_df2_totals = True, \
merge_entries = cats, just_entries = cats.keys(), print_info = False)
for name, data in rel_comp_risk_of.items():
plotter('Health and financial risk in the %s' % name, data.results,
style = 'seaborn-notebook', figsize = (6, 3), legend_pos = 'upper right')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's begin.
Step5: As in our last investigation, we can define a few helper functions to collapse distinctions between newspapers, years and entries
Step6: With these interrogations and functions, we have everything we need to do some visualisation.
Step7: What jumps out here, of course, is the risk of attack, which rises quickly in popularity at the turn of the millennium. Let's look at this result across newspapers
Step8: ... but, it's not what you think. While it's perhaps only natural that we assume that this construction occurs in the context of terrorism, closer inspection shows us something different
Step9: Risk patterns with health topics much more than we seem to assume it does. In fact, if we do two searches, for risk of heart attack and risk of terror attack, we can measure the difference
Step10: The important thing to note is that these two categories have overlap, but are not grammatically interchangeable. You can say "the risk of cancer" or "the cancer risk", but you can't say "the risk of health".
Step11: Let's combine the two results (knowing full well that they aren't perfect equivalents)
Step12: We can define some fairly unambiguous wordlists, too
Step13: Breaking this down by publication, we can expect to see more financially oriented newspapers focussing more on financial risks
|
11,767
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths corretly below
repo_path = os.path.realpath('../..')
import pynoddy.history
import numpy as np
%matplotlib inline
import pynoddy.output
reload(pynoddy.output)
output_name = "feature_out"
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x',
colorbar = True, title="",
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!
f_set1 = open("../../sandbox/jack/features_lowres-5 with class ID.csv").readlines()
f_set1[0]
# initialise classification results array
cf1 = np.empty_like(nout.block)
# iterate through results and append
for f in f_set1[1:]:
fl = f.rstrip().split(",")
cf1[int(fl[0]),int(fl[1]),int(fl[2])] = int(fl[6])
f_set1[2:6]
nout.plot_section('x', data = cf1,
colorbar = True, title="", layer_labels = range(5),
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd')
# compare to original model:
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
nout.plot_section('x', ax = ax1,
colorbar = False, title="",
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!
nout.plot_section('x', data = cf1,ax = ax2,
colorbar = False, title="",
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd')
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
im1 = ax1.imshow(nout.block[15,:,:].transpose(),
interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')
plt.colorbar(im1)
im2 = ax2.imshow(cf1[15,:,:].transpose(),
interpolation = 'none',
cmap = 'YlOrRd', origin = 'lower left')
print np.unique(nout.block)
print np.unique(cf1)
# define id mapping from cluster results to original:
# id_mapping = {2:1, 3:2, 4:5, 5:3, 1:4}
# remapping for result 4:
# id_mapping = {4:5, 3:4, 1:3, 5:2, 2:1}
# remapping for result 5:
id_mapping = {2:5, 1:4, 3:3, 5:2, 4:1}
def re_map(id_val):
return id_mapping[id_val]
re_map_vect = np.vectorize(re_map)
cf1_remap = re_map_vect(cf1)
# compare to original model:
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
nout.plot_section('x', ax = ax1,
colorbar = False, title="",
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd') # note: YlOrRd colourmap should be suitable for colorblindness!
nout.plot_section('x', data = cf1_remap, ax = ax2,
colorbar = False, title="",
savefig = False, fig_filename = "ex01_faults_combined.eps",
cmap = 'YlOrRd')
feature_diff = (nout.block != cf1_remap)
nout.plot_section('x', data = feature_diff,
colorbar = False, title="Difference between real and matched model",
cmap = 'YlOrRd')
# Calculate the misclassification:
np.sum(feature_diff) / float(nout.n_total)
# Export misclassification to VTK:
misclass = feature_diff.astype('int')
nout.export_to_vtk(vtk_filename = "misclass", data=misclass)
def calc_misclassification(nout, filename):
"""Calculate misclassification for classification results data stored in file
**Arguments**:
- *nout* = NoddyOutput: original model (Noddy object)
- *filename* = filename (with path): file with classification results
"""
f_set1 = open(filename).readlines()
# initialise classification results array
cf1 = np.empty_like(nout.block)
# iterate through results and append
for f in f_set1[1:]:
fl = f.rstrip().split(",")
cf1[int(fl[0]),int(fl[1]),int(fl[2])] = int(fl[6])
# remap ids
cf1_remap = re_map_vect(cf1)
# determine differences in class ids:
feature_diff = (nout.block != cf1_remap)
# Calculate the misclassification:
misclass = np.sum(feature_diff) / float(nout.n_total)
return misclass
filename = r"../../sandbox/jack/features_lowres-4 with class ID.csv"
calc_misclassification(nout, filename)
# f_set1 = open("../../sandbox/jack/features_lowres-6 with class ID and Prob.csv").readlines()
f_set1 = open("../../sandbox/jack/features_lowres-8 with Prob (weak Beta).csv").readlines()
f_set1[0]
# initialise classification results array
cf1 = np.empty_like(nout.block)
# Initialise probability array
probs = np.empty((5, cf1.shape[0], cf1.shape[1], cf1.shape[2]))
# iterate through results and append
for f in f_set1[1:]:
fl = f.rstrip().split(",")
i,j,k = int(fl[0]),int(fl[1]),int(fl[2])
# cf1[i,j,k] = int(fl[6])
for i2 in range(5):
probs[i2,i,j,k] = float(fl[i2+6])
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
im1 = ax1.imshow(nout.block[15,:,:].transpose(),
interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')
im2 = ax2.imshow(probs[4,15,:,:].transpose(),
interpolation = 'none',
cmap = 'YlOrRd', origin = 'lower left')
plt.colorbar(im2)
# Note: map now ids from original model to probability fields in results:
prob_mapping = {4:0, 5:1, 3:2, 1:3, 2:4}
# Check membership for each class in original model
for i in range(1,6):
tmp = np.ones_like(nout.block) * (nout.block==i)
# test if voxels have non-zero probability by checking conjunction with zero-prob voxels
prob_zero = probs[prob_mapping[i],:,:,:] == 0
misidentified = np.sum(tmp * prob_zero)
print i, misidentified
prob_zero = probs[prob_mapping[1],:,:,:] == 0
f_set1 = open("../../sandbox/jack/features_lowres-7 with 151 realizations.csv").readlines()
# Initialise results array
all_results = np.empty((152, cf1.shape[0], cf1.shape[1], cf1.shape[2]))
# iterate through results and append
for f in f_set1[1:]:
fl = f.rstrip().split(",")
i,j,k = int(fl[0]),int(fl[1]),int(fl[2])
# cf1[i,j,k] = int(fl[6])
for i2 in range(152):
try:
all_results[i2,i,j,k] = float(fl[i2+5])
except IndexError:
print i2, i, j, k
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
im1 = ax1.imshow(nout.block[15,:,:].transpose(),
interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')
plt.colorbar(im1)
im2 = ax2.imshow(all_results[5,15,:,:].transpose(),
interpolation = 'none',
cmap = 'YlOrRd', origin = 'lower left')
# mapping from results to original:
id_mapping = {2:5, 1:4, 3:3, 5:2, 4:1}
def re_map(id_val):
return id_mapping[id_val]
re_map_vect = np.vectorize(re_map)
# Apply remapping to all but first result (seems to be original feature)
all_results_remap = re_map_vect(all_results[1:,:,:,:])
fig = plt.figure(figsize = (12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
im1 = ax1.imshow(nout.block[30,:,:].transpose(),
interpolation = 'none', cmap = 'YlOrRd', origin = 'lower left')
# plt.colorbar(im1)
im2 = ax2.imshow(all_results_remap[85,30,:,:].transpose(),
interpolation = 'none',
cmap = 'YlOrRd', origin = 'lower left')
all_misclass = np.empty(151)
for i in range(151):
# determine differences in class ids:
feature_diff = (nout.block != all_results_remap[i,:,:,:])
# Calculate the misclassification:
all_misclass[i] = np.sum(feature_diff) / float(nout.n_total)
plt.plot(all_misclass)
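# A brief numeric summary of the misclassification across the 151 realizations
# (an added check, not part of the original notebook):
print "mean: %.3f, std: %.3f, min: %.3f, max: %.3f" % (
    all_misclass.mean(), all_misclass.std(), all_misclass.min(), all_misclass.max())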
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load original model
Step2: Load sample classification results
Step3: Results of the classification do not necessarily contain the same ids as the units in the initial model. This seems to be the case here, as well. Re-sort
Step4: Now remap results and compare again
Step6: Combined analysis in a single function
Step7: Determine validity of uncertainty estimate
Step8: We now need to perform the remapping similar to before, but now for the probability fields
Step9: Determination of misclassification statistics
Step10: First, we again need to check the assignment of the units/ class ids
Step11: We can now determine the misclassification for all results
|
11,768
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#print(plt.style.available)
%%latex
Entropy formula
\begin{align}
H(X) = -\sum_{x}{p(x) * log_2\,{p(x)}}
\end{align}
def entropy(p_x):
h_sum = float()
for item in p_x:
h_sum += item * math.log(item,2)
return -h_sum
%%time
x_r = np.arange(0.001, 0.999, 0.01)
print("Lenght of x samples: %i" % len(x_r))
# starting from 0.001 because log_2(0) not defined
# eg: math.log(0,2) will yield "ValueError: math domain error"
#y_r = []
#for x in x_r:
# args = [x, 1.0-x]
# y_r.append(entropy(args))
# <=>
y_r = list(map(lambda x: entropy([x, 1.0-x]), x_r))
plt.plot(x_r, y_r, c='b')
plt.xlabel('Probability - p(x)') #Pr(X=1) ?
plt.ylabel('Entropy - H(x) - bits') #p(x)
plot_margin = 0.01
x0, x1, y0, y1 = plt.axis()
plt.axis((x0 - plot_margin,
x1 + plot_margin,
y0 - plot_margin,
y1 + plot_margin))
plt.show()
%%time
N = 30
N1 = 16
N2 = 14
x1 = 0.9*np.random.rand(N1)
y1 = 0.9*np.random.rand(N1)
#print(np.shape(x1))
x2 = 0.9*np.random.rand(N2)
y2 = 0.9*np.random.rand(N2)
#print(np.shape(x2))
plt.scatter(x1, y1, s=90, marker='o', c='g')
plt.scatter(x2, y2, s=90, marker='^', c='m')
plt.show()
# The only thing valuable of the above are how many members each class has
# in this case, fairly equal
#p_x = [16/30, 14/30]
p_x = [N1/N, N2/N]
print("Entropy result: %s" % entropy(p_x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utils functions
Step2: Entropy vs probability
Step3: What can be seen in this graph is that we get the most bits of information when our sets are relatively equally divided (equal probability of encountering either of the classes).
|
11,769
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install dfcx-scrapi
from dfcx_scrapi.core.intents import Intents
creds_path = '<YOUR_CREDS_PATH_HERE>'
agent_id = '<YOUR_AGENT_ID_HERE>'
intent_subset = ['confirmation.yes','confirmation.no']
intents = Intents(creds_path=creds_path, agent_id=agent_id)
if intent_subset:
all_intents = intents.bulk_intent_to_df(intent_subset=intent_subset)
else:
all_intents = intents.bulk_intent_to_df()
all_intents.head(10)
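# Optional follow-up (an assumption, not shown in the original notebook): persist the
# extracted intents and training phrases locally for offline review.
all_intents.to_csv('intents_and_training_phrases.csv', index=False)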
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Imports
Step3: User Inputs
Step4: Extract Intents and Training Phrases
Step5: View Results Sample
|
11,770
|
<ASSISTANT_TASK:>
Python Code:
import pyspark
from pyspark import SparkContext
import urllib
from pyspark.mllib.regression import LabeledPoint
from numpy import array
from pyspark.mllib.tree import RandomForest, RandomForestModel
from pyspark.sql import SQLContext
from time import time
# Custom imports
import MySQLConnection
sqlContext = SQLContext(sc)
# Get username and password from file in this format: {"user":"yourusername","password":"yourpassword"}
connectionProperties = MySQLConnection.getDBConnectionProps('/home/erik/mysql_credentials.txt')
# Get training data from the database...biosensor database and SensorTrainingReadings table
data = sqlContext.read.jdbc("jdbc:mysql://localhost/biosensor", "SensorTrainingReadings", properties=connectionProperties).selectExpr("deviceID","metricTypeID","uomID","positionID","actualPitch")
print "Train data size is {}".format(data.count())
# Split data into training and test dataasets
(trainingDataTable, testDataTable) = data.randomSplit([0.9, 0.1])
trainingDataTable.show()
testDataTable.show()
# The model requires labeldPoints which is a row with label and a vector of features.
def featurize(t):
return LabeledPoint(t.positionID, [t.actualPitch])
trainingData = trainingDataTable.map(featurize)
# Train the classifier/Build the model
startTime = time()
#Random Forest Model
model = RandomForest.trainClassifier(
trainingData,
numClasses=3,
categoricalFeaturesInfo={},
numTrees=6,
featureSubsetStrategy="auto",
impurity='gini',
maxDepth=4,
maxBins=32
)
elapsedTime = time() - startTime
print "Classifier trained in {} seconds".format(round(elapsedTime,3))
# Save the madel for use in evaluating readings
model.save(sc,"models/IoTBackBraceRandomForest.model")
# Evaluate model on test instances and compute test error
testData = testDataTable.map(featurize)
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testErr = labelsAndPredictions.filter(lambda (v, p): v != p).count() / float(testData.count())
print('Test Error = ' + str(testErr))
print('Random Forest Classifcation Model:')
print(model.toDebugString())
loadedModel = RandomForestModel.load(sc, "models/IoTBackBraceRandomForest.model")
for i in range(-50,50):
prediction = loadedModel.predict([i])
positions = {
0 : "upright",
1 : "back bent",
2 : "stooped"
}
print str(i) + " => " + str(positions[prediction])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the training data and creating the RDD
Step2: Split training data into training set and test set
Step3: Create an RDD of LabeledPoints
Step4: Training the model
Step5: Evaluating the accuracy of the model
Step6: Another handy feature is that you can view the model logic tree by using the "toDebugString()" method
Step7: Using the model for analysis of raw data
Step8: The example below passes a value to the model from a range of -50 degrees (stooped) to +10 degrees (standing).
|
11,771
|
<ASSISTANT_TASK:>
Python Code:
import networkx as nx
from networkx.algorithms import bipartite
# Initialize the city/person bipartite graph.
B = nx.Graph()
cities = ['Beijing', "Xi'an", 'Vancouver', 'San Francisco', 'Austin', 'Boston'] # populate a list of cities
people = ['Eric', 'Nan'] # populate a list of people's names
B.add_nodes_from(cities, bipartite='cities')
B.add_nodes_from(people, bipartite='people')
edges = [("Eric", "Vancouver"), ("Nan", "Xi'an"), ("Eric", "San Francisco"), ("Nan", 'Boston'), ("Eric", 'Boston'), ("Nan", 'Beijing')] # populate a list of 2-tuples, which are the edges. Each 2-tuple should join one city with one person.
B.add_edges_from(edges)
# Betweenness Centrality
bipartite.betweenness_centrality(B, cities)
# Degree Centrality
bipartite.degree_centrality(B, cities)
bipartite.projected_graph(B, people).edges()
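# A possible extension (not in the original): keep the number of shared cities as an
# edge weight when projecting onto the people nodes.
bipartite.weighted_projected_graph(B, people).edges(data=True)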
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explore the graph by going through the following algorithms
Step2: Think about it...
|
11,772
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from prosail_functions import *
plot_config()
def hspot ( h ):
retval = []
wv = np.arange(400, 2501)
for theta_v in np.arange ( -80,80, 5):
if theta_v < 0:
raa = -180
t = -theta_v
else:
raa = 0
t = theta_v
r = call_prosail ( 1.5, 45, 4, 0.1, 0.0143, 0.007, 4, 45, 4,
1, h, 30, t, raa )[wv==865]
retval.append ( r )
return np.arange ( -80,80, 5), np.array(retval).squeeze()
plt.figure(figsize=(12,12))
x,y = hspot(0.01)
plt.plot(x,y,'-', label="0.01")
x,y = hspot(0.1)
plt.plot(x,y,'-', label="0.1")
x,y = hspot(0.5)
plt.plot(x,y,'-', label="0.5")
x,y = hspot(1)
plt.plot(x,y,'-', label="1")
plt.legend(loc='best')
pretty_axes()
w,s = prosail_sensitivity_ssa(x0=np.array([ 2.5 , 40. , 5. , 0. , 0.011, 0.005, 0.5 ,
45. , 3. , 0.5 , 0.01 ]))
plt.xlim ( 650, 890)
w,s = prosail_sensitivity_ssa(x0=np.array([ 2.5 , 40. , 5. , 0. , 0.011, 0.005, 8 ,
45. , 3. , 0.5 , 0.01 ]))
plt.xlim ( 650, 890)
w,s = prosail_sensitivity_ssa
x,r,n = canopy_vi_expt( nuisance=["lai", "cab", "n", "cw"])
plt.figure(figsize=(12,12))
x, y, p0, p1 = mtci_experiment(nuisance=["lai","cw"])
x, y, p0, p1 = mtci_experiment
x, y, p0, p1 = mtci_experiment
from IPython.core.display import HTML
def css_styling():
styles = open("custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A trip to RED/NIR space
Step2: Exploring the MTCI (MERIS Terrestrial Chlorophyll Index)
|
11,773
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import os
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
data_root = '../data' # Change me to store data elsewhere
pickle_file = os.path.join(data_root, 'notMNIST.pickle')
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
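# Tiny sanity check of the accuracy helper (an added illustration, not in the original):
# two predictions, one of them correct -> 50%.
print(accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]),
               np.array([[1.0, 0.0], [1.0, 0.0]])))  # 50.0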
with tf.Session(graph=graph) as session:
summary_writer = tf.summary.FileWriter('../logs', graph=graph)
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.global_variables_initializer().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
merged_summary = tf.summary.merge_all()
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
relu_size = 1024
input_size = image_size * image_size
num_steps = 3001
batch_size = 128
graph = tf.Graph()
with graph.as_default():
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, input_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
def create_relu_model(x, weights, biases):
# Layer 1: W1 * X + B1 -> ReLu
layer_1 = tf.matmul(x, weights['n1']) + biases['n1']
layer_1 = tf.nn.relu(layer_1)
# Output layer: W2 * X + B2 -> Output
out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
return out_layer
# Simple ReLu model
weights = {
'n1': tf.Variable(tf.truncated_normal([input_size, relu_size])),
'out': tf.Variable(tf.truncated_normal([relu_size, num_labels])),
}
biases = {
'n1': tf.Variable(tf.zeros([relu_size])),
'out': tf.Variable(tf.zeros([num_labels])),
}
relu_model = create_relu_model(tf_train_dataset, weights, biases)
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
labels=tf_train_labels, logits=relu_model))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(relu_model)
valid_prediction = tf.nn.softmax(
create_relu_model(tf_valid_dataset, weights, biases))
test_prediction = tf.nn.softmax(
create_relu_model(tf_test_dataset, weights, biases))
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(
predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First reload the data we generated in 1_notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: We're first going to train a multinomial logistic regression using simple gradient descent.
Step4: Let's run this computation and iterate
Step5: Let's now switch to stochastic gradient descent training instead, which is much faster.
Step6: Let's run it
Step7: Problem
|
11,774
|
<ASSISTANT_TASK:>
Python Code:
# import requirements
import pandas as pd
import nltk
import gensim
import spacy
# read subset of data from csv file into pandas dataframe
df = pd.read_csv('1_100.csv')
# for now, chosing one article to illustrate preprocessing
article = df['full_text'][939]
article[:500]
article[:500].decode('utf-8').lower()
from nltk.tokenize import WhitespaceTokenizer
ws_tokenizer = WhitespaceTokenizer()
# tokenize example document
nyt_ws_tokens = ws_tokenizer.tokenize(article.decode('utf-8').lower())
print nyt_ws_tokens[:75]
from nltk.tokenize import RegexpTokenizer
re_tokenizer = RegexpTokenizer(r'\w+')
nyt_re_tokens = re_tokenizer.tokenize(article.decode('utf-8').lower())
print nyt_re_tokens[:100]
from nltk.corpus import stopwords
# print the first 5 standard English stop words
stop_list = [w for w in stopwords.words('english')]
print stop_list[:5]
# print the type of the elements in the stop words list
print type(stop_list[0])
cleaned_tokens = []
stop_words = set(stopwords.words('english'))
for token in nyt_re_tokens:
if token not in stop_words:
cleaned_tokens.append(token)
print 'Number of tokens before removing stop words: %d' % len(nyt_re_tokens)
print 'Number of tokens after removing stop words: %d' % len(cleaned_tokens)
print cleaned_tokens[:50]
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.lancaster import LancasterStemmer
porter = PorterStemmer()
snowball = SnowballStemmer('english')
lancaster = LancasterStemmer()
print 'Porter Stem of "explanation": %s' % porter.stem('explanation')
print 'Porter2 (Snowball) Stem of "explanation": %s' %snowball.stem('explanation')
print 'Lancaster Stem of "explanation": %s' %lancaster.stem('explanation')
from nltk.stem.wordnet import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
print lemmatizer.lemmatize('explanation')
stemmed_tokens = []
lemmatized_tokens = []
for token in cleaned_tokens:
stemmed_tokens.append(snowball.stem(token))  # using the Snowball stemmer defined above
lemmatized_tokens.append(lemmatizer.lemmatize(token))
print stemmed_tokens[:50]
print lemmatized_tokens[:50]
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
stemmed_article = ' '.join(wd for wd in stemmed_tokens)
article_vect = vectorizer.fit_transform([stemmed_article])
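# A quick look at the most frequent stems in the vectorised article
# (an added illustration, not part of the original notebook):
freqs = zip(vectorizer.get_feature_names(), article_vect.toarray()[0])
print sorted(freqs, key=lambda x: -x[1])[:10]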
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>Data</h2>
Step2: Let's take a peek at the raw text of this article to see what we are dealing with!
Step3: <h2>Preprocessing Text</h2>
Step4: <h3>1. Tokenization</h3>
Step5: Example
Step6: <h3>2. Stop Words</h3>
Step7: Let's remove the stop words and compare to our original list of tokens from our regular expression tokenizer.
Step8: You can see that by removing stop words, we now have less than half the number of tokens as our original list. Taking a peek at the cleaned tokens, we can see that a lot of the information that makes the sentence read like something a human would expect has been lost but the key nouns, verbs, adjectives, and adverbs remain.
Step9: You may notice from looking at this sample, however, that a potentially meaningful word has been removed
Step10: While <b><em>stemming</em></b> is a heuristic process that selectively removes the end of words, <b><em>lemmatization</em></b> is a more sophisticated process that takes into account variables such as part-of-speech, meaning, and context within a document or neighboring sentences.</p>
Step11: <p>In this example, lemmatization retains a bit more information than stemming. Within stemming, the Lancaster method is more aggressive than Porter and Snowball. Remember that this step allows us to reduce words to a common base form so that we can reduce our feature space and perform counting of occurrences. It will depend on your data and your application as to how much information you need to retain. </p>
Step12: Let's take a look at a sample of our stemmed tokens
Step13: In contrast, here are the same tokens in their lemmatized form
Step14: <h3>4. Vectorization </h3>
|
11,775
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from quantopian.pipeline.classifiers.morningstar import Sector
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.research import run_pipeline
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.factors import CustomFactor
# Custom Factor 1 : Price to Trailing 12 Month Sales
class Price_to_TTM_Sales(CustomFactor):
inputs = [morningstar.valuation_ratios.ps_ratio]
window_length = 1
def compute(self, today, assets, out, ps):
out[:] = ps[-1]
# create pipeline
temp_pipe_1 = Pipeline()
# add our factor
temp_pipe_1.add(Price_to_TTM_Sales(), 'Price / TTM Sales')
# add sector classifier
temp_pipe_1.add(Sector(), 'Sector')
# get data
temp_res_1 = run_pipeline(temp_pipe_1, '2015-06-06', '2015-06-06')
# show first 15 rows
temp_res_1.head(15)
# Separate sectors into two data frames
retail_df = temp_res_1[temp_res_1['Sector'] == 102]['Price / TTM Sales']
tech_df = temp_res_1[temp_res_1['Sector'] == 311]['Price / TTM Sales']
# get quartiles and print results
print 'Retail Quartiles: \n' + str(retail_df.quantile([0.25, 0.5, 0.75])) + '\n'
print 'Tech Quartiles: \n' + str(tech_df.quantile([0.25, 0.5, 0.75]))
# Custom Factor 2 : Price to Trailing 12 Month Earnings
class Price_to_TTM_Earnings(CustomFactor):
inputs = [morningstar.valuation_ratios.pe_ratio]
window_length = 1
def compute(self, today, assets, out, pe):
out[:] = pe[-1]
temp_pipe_2 = Pipeline()
temp_pipe_2.add(Price_to_TTM_Earnings(), 'Price / TTM Earnings')
temp_pipe_2.add(Sector(), 'Sector')
temp_res_2 = run_pipeline(temp_pipe_2, '2015-06-06', '2015-06-06')
# clean extreme data points
earnings_frame = temp_res_2[temp_res_2['Price / TTM Earnings'] < 100]
# create boxplot by sector
earnings_frame.boxplot(column='Price / TTM Earnings', by='Sector');
# Custom Factor 3 : Price to Trailing 12 Month Cash Flows
class Price_to_TTM_Cashflows(CustomFactor):
inputs = [morningstar.valuation_ratios.pcf_ratio]
window_length = 1
def compute(self, today, assets, out, pcf):
out[:] = pcf[-1]
temp_pipe_3 = Pipeline()
temp_pipe_3.add(Price_to_TTM_Cashflows(), 'Price / TTM Cashflows')
temp_pipe_3.add(Sector(), 'Sector')
temp_res_3 = run_pipeline(temp_pipe_3, '2015-06-06', '2015-06-06')
# clean extreme data points
cashflows_frame = temp_res_3[temp_res_3['Price / TTM Cashflows'] < 100]
# create boxplot by sector
cashflows_frame.boxplot(column='Price / TTM Cashflows', by='Sector');
# clean data, necessary as using mean and standard deviation
retail_df = retail_df[retail_df < 10]
tech_df = tech_df[tech_df < 10]
# summary stats necessary for calculation
retail_mean = retail_df.mean()
retail_std = retail_df.std()
tech_mean = tech_df.mean()
tech_std = tech_df.std()
# standardize the data
retail_standard = (retail_df - retail_mean) / retail_std
tech_standard = (tech_df - tech_mean) / tech_std
# create a grid of plots
fig, axes = plt.subplots(nrows=2, ncols=2)
# name each set of axes
ax_retail, ax_tech, ax_retail_st, ax_tech_st = axes.flat
# number of bins for histograms
bins = 50
# retail
ax_retail.hist(retail_df, bins=bins)
ax_retail.axvline(retail_mean, color='blue')
ax_retail.axvline(retail_mean - retail_std, color='blue')
ax_retail.axvline(retail_mean + retail_std, color='blue')
ax_retail.set_xlabel('Price / Sales')
ax_retail.set_ylabel('Frequency')
ax_retail.set_title('Retail')
# tech
ax_tech.hist(tech_df, bins=bins, stacked=True)
ax_tech.axvline(tech_mean, color='green')
ax_tech.axvline(tech_mean - tech_std, color='green')
ax_tech.axvline(tech_mean + tech_std, color='green')
ax_tech.set_xlabel('Price / Sales')
ax_tech.set_ylabel('Frequency')
ax_tech.set_title('Technology')
# retail standardized
ax_retail_st.hist(retail_standard, bins=bins)
ax_retail_st.axvline(0, color='blue')
ax_retail_st.axvline(-1, color='blue')
ax_retail_st.axvline(1, color='blue')
ax_retail_st.set_xlabel('Standard Deviations')
ax_retail_st.set_ylabel('Frequency')
ax_retail_st.set_title('Retail Standard')
# tech standardized
ax_tech_st.hist(tech_standard, bins=bins, stacked=True)
ax_tech_st.axvline(0, color='green')
ax_tech_st.axvline(-1, color='green')
ax_tech_st.axvline(1, color='green')
ax_tech_st.set_xlabel('Standard Deviations')
ax_tech_st.set_ylabel('Frequency')
ax_tech_st.set_title('Technology Standard')
# prevent text overlap
fig.tight_layout()
# This factor creates the synthetic S&P500
class SPY_proxy(CustomFactor):
inputs = [morningstar.valuation.market_cap]
window_length = 1
def compute(self, today, assets, out, mc):
out[:] = mc[-1]
# this function returns a pipeline that downloads all data necessary for the algo
def Data_Pull():
# create the piepline for the data pull
Data_Pipe = Pipeline()
# create sector partitions
sector = Sector()
# create SPY proxy
Data_Pipe.add(SPY_proxy(), 'SPY Proxy')
# Price / TTM Sales grouped by Industry
sales_grouped = Price_to_TTM_Sales().zscore(groupby=sector)
# Price / TTM Earnings grouped by Industry
earnings_grouped = Price_to_TTM_Earnings().zscore(groupby=sector)
# Price / TTM Cashflows grouped by Industry
cashflows_grouped = Price_to_TTM_Cashflows().zscore(groupby=sector)
# add Price / TTM Sales to Pipeline
Data_Pipe.add(sales_grouped, 'Price / TTM Sales')
# add Price / TTM Earnings to Pipeline
Data_Pipe.add(earnings_grouped, 'Price / TTM Earnings')
# add Price / TTM Cashflows to Pipeline
Data_Pipe.add(cashflows_grouped, 'Price / TTM Cashflows')
return Data_Pipe
results = run_pipeline(Data_Pull(), '2015-06-06', '2015-06-06')
results.head(20)
# limit effect of outliers
def filter_fn(x):
if x <= -10:
x = -10.0
elif x >= 10:
x = 10.0
return x
# standardize using mean and sd of S&P500
def standard_frame_compute(df):
# basic clean of dataset to remove infinite values
df = df.replace([np.inf, -np.inf], np.nan)
df = df.dropna()
# need standardization params from synthetic S&P500
df_SPY = df.sort(columns='SPY Proxy', ascending=False)
# create separate dataframe for SPY
# to store standardization values
df_SPY = df_SPY.head(500)
# get dataframes into numpy array
df_SPY = df_SPY.as_matrix()
# store index values
index = df.index.values
df = df.as_matrix()
df_standard = np.empty(df.shape[0])
for col_SPY, col_full in zip(df_SPY.T, df.T):
# summary stats for S&P500
mu = np.mean(col_SPY)
sigma = np.std(col_SPY)
col_standard = np.array(((col_full - mu) / sigma))
# create vectorized function (lambda equivalent)
fltr = np.vectorize(filter_fn)
col_standard = (fltr(col_standard))
# make range between -10 and 10
col_standard = (col_standard / df.shape[1])
# attach calculated values as new row in df_standard
df_standard = np.vstack((df_standard, col_standard))
# get rid of first entry (empty scores)
df_standard = np.delete(df_standard,0,0)
return (df_standard, index)
# Sum up and sort data
def composite_score(df, index):
# sum up transformed data
df_composite = df.sum(axis=0)
# put into a pandas dataframe and connect numbers
# to equities via reindexing
df_composite = pd.Series(data=df_composite,index=index)
# sort ascending - change from previous notebook
df_composite.sort(ascending=True)
return df_composite
# compute the standardized values
results_standard, index = standard_frame_compute(results)
# aggregate the scores
ranked_scores = composite_score(results_standard, index)
ranked_scores
# create the histogram
ranked_scores.hist(bins=50)
# make scores into list for ease of manipulation
ranked_scores_list = ranked_scores.tolist()
# add labels to axes
plt.xlabel('Standardized Scores')
plt.ylabel('Quantity in Basket')
# show long bucket
plt.axvline(x=ranked_scores_list[25], linewidth=1, color='r')
# show short bucket
plt.axvline(x=ranked_scores_list[-6], linewidth=1, color='r');
# create Pipeline for sectors
sector_pipe = Pipeline()
sector_pipe.add(Sector(), 'Sector')
sectors = run_pipeline(sector_pipe, '2015-06-06', '2015-06-06')
# connect ranked scores with their sectors
scores_sectors = pd.concat([ranked_scores, sectors], axis=1, join='inner')
# name the columns
scores_sectors.columns=['Score', 'Sector']
# sort ranked scores ascending
scores_sectors.sort('Score', inplace=True)
# show long bucket
scores_sectors.head(26)
SECTOR_NAMES = {
101: 'Basic Materials',
102: 'Consumer Cyclical',
103: 'Financial Services',
104: 'Real Estate',
205: 'Consumer Defensive',
206: 'Healthcare',
207: 'Utilities',
308: 'Communication Services',
309: 'Energy',
310: 'Industrials',
311: 'Technology' ,
}
# create and populate the buckets
long_bucket = pd.Series()
short_bucket = pd.Series()
for key in SECTOR_NAMES:
long_bucket = long_bucket.append(scores_sectors[scores_sectors['Sector'] == key]['Score'].head(13))
short_bucket = short_bucket.append(scores_sectors[scores_sectors['Sector'] == key]['Score'].tail(3))
print 'LONG BUCKET\n' + str(long_bucket) + '\n'
print 'SHORT BUCKET\n' + str(short_bucket)
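# Added check (an illustration, not in the original notebook): how many names each
# sector contributes to the long bucket, to confirm the sector-balanced construction.
print scores_sectors.loc[long_bucket.index]['Sector'].value_counts()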
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Relative Value
Step2: Before performing analysis on this data, let us look at the Sector column. This is an example of a Pipeline Classifier. Whereas a CustomFactor outputs numerical information, a Classifier outputs categorical information. For example, Sector can take on one of a certain number of discrete integers (11 as of writing), each of which corresponds to the industry of the company that has issued the equity. These are listed fully in https
Step3: Indeed, the median Price to Sales Ratio for tech companies is almost double that of retail companies, which agrees with our narrative explanation. From this data, it is clear that comparisons of Price to Sales Ratios are only really valid when they are conducted within a single industry. This is because a good Price to Sales Ratio for one industry might be terrible for another.
Step4: From this graph it is easy to see that the makeup of Price to Earnings Ratios for different sectors differs substantially. Therefore, an industry-relative metric will be a more discerning value than a universe-wide one.
Step5: The box and whisker diagram here shows that, much like the Price to Sales Ratio, a good Price to Cash Flows Ratio differs from industry to industry. Therefore, this, too, will benefit from an industry-relative calculation.
Step6: As can be seen above, standardization allows comparison between two datasets. While the companies in the technology sector have larger and more spread out Price to Sales Ratios than those in the retail sector, standardization allows an analyst to compare the relative extremity of values between datasets. Now we can compare values across sectors.
Step7: Now, we will use the same metric to combine these values as in the last notebook. The one large difference between these methodologies is the ranking of the final scores. For each of the ratios here, a low value is considered to be an attractive investment. Therefore we rank the values in ascending order so that the "best" stocks are still at the head of the data series and the "worst" are in the tail. This ranking occurs in the composite_score function.
Step8: NB
Step9: Although it is clear that the long and short baskets are removed from the larger concentration of equities, it is also clear that the histogram is positively skewed, leaving a long tail to the right and a short tail to the left. However, closer inspection reveals a more worrying problem than just excess skewness.
Step10: Each of these securities has sector code 205 and therefore is involved in the Consumer Defensive sector (a subset of the retail space). Therefore, even though we tried to fight this through industry-relative metrics, it seems that the immense skew of these values has made them ineffectual.
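A quick way to verify that concentration, using the objects already built in the code above (a sketch; scores_sectors and SECTOR_NAMES are the variables defined earlier in this notebook), is to count how many of the 26 long-bucket names fall into each sector:
long_by_sector = scores_sectors.head(26)['Sector'].map(SECTOR_NAMES).value_counts()
print long_by_sector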
|
11,776
|
<ASSISTANT_TASK:>
Python Code:
import pickle
def separate_sentences(filename):
checks = ['. ', '; ', '? ', '! ']
for sentences in open(filename, 'r'):
sentences = sentences.strip()
sep_flag = False
sep_index = 0
check_sign = ''
for i in checks:
if i in sentences:
check_sign = i
sep_index = sentences.find(i)
sep_flag = sentences[sep_index+2].isupper()
continue
if sep_flag:
splited_sentences = sentences.split(check_sign)
for s in splited_sentences:
if s[-1] not in (check_sign[0]):
print(s+check_sign[0]+'\n')
else:
print(s)
else:
print(sentences)
separate_sentences('nlp.txt')
l_sentences = []
def separate_sentences(filename):
checks = ['. ', '; ', '? ', '! ']
for sentences in open(filename, 'r'):
sentences = sentences.strip()
sep_flag = False
sep_index = 0
check_sign = ''
for i in checks:
if i in sentences:
check_sign = i
sep_index = sentences.find(i)
sep_flag = sentences[sep_index+2].isupper()
continue
if sep_flag:
splited_sentences = sentences.split(check_sign)
for s in splited_sentences:
if s[-1] not in (check_sign[0]):
l_sentences.append(s+check_sign[0]+'\n')
else:
l_sentences.append(s)
else:
l_sentences.append(sentences)
separate_sentences('nlp.txt')
with open('nlp.pickle', 'wb') as f:
pickle.dump("".join(l_sentences), f)
with open('nlp.pickle', 'rb') as f:
sentences = pickle.load(f)
words = sentences.split(' ')
with open('nlp_words.pickle', 'wb') as f:
pickle.dump(words, f)
with open('nlp_words.txt', 'w') as f:
for word in words:
f.write(word.rstrip()+'\n')
from nltk.stem import PorterStemmer
porter = PorterStemmer()
with open('nlp_words.pickle', 'rb') as f:
words = pickle.load(f)
stem_words = []
for word in words:
stem = porter.stem(word)
stemmed = word[len(stem):]
stem_words.append(stem+'\t'+stemmed+'\n')
with open('nlp_stems.pickle', 'wb') as f:
pickle.dump(stem_words, f)
with open('nlp_stems.txt', 'w') as f:
for stem in stem_words:
f.write("".join(stem))
%%bash
cd stanford-corenlp-full-2015-04-20/
sh corenlp.sh -file ../nlp.txt
cp nlp.txt.xml ../
import xml.etree.ElementTree as etree
tree = etree.parse('nlp.txt.xml')
root = tree.getroot()
temp = []
for tokens in root.iter('token'):
temp.append(tokens.find('word').text+'\t'+\
tokens.find('lemma').text+'\t'+\
tokens.find('POS').text+'\n')
with open('nlp_tag.pickle', 'wb') as f:
pickle.dump("".join(temp), f)
with open('nlp_tag.txt', 'w') as f:
f.write("".join(temp))
with open('nlp_tag.pickle', 'rb') as f:
tokens = pickle.load(f)
for token in tokens.split('\n'):
splited_token = token.split('\t')
if splited_token[-1] == 'NNP':
print(splited_token[0])
import xml.etree.ElementTree as etree
tree = etree.parse('nlp.txt.xml')
root = tree.getroot()
temp = []
for mentions in root.iter('mention'):
if mentions.attrib != {}:
representative = mentions.find('text').text
else:
temp.append(mentions.find('text').text+'->'+representative)
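# The loop above collects the "mention -> representative" strings in `temp` but never
# displays them; a minimal (assumed) way to inspect the first few resolved references:
for pair in temp[:10]:
    print(pair)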
import xml.etree.ElementTree as etree
import pygraphviz as pgv
tree = etree.parse('nlp.txt.xml')
root = tree.getroot()
collapse = []
collapse_list = []
for collapses in root.iter('dependencies'):
if collapses.attrib['type'] in ('collapsed-dependencies'):
for dep in collapses:
collapse.append((dep.find('governor').text, dep.find('dependent').text))
collapse_list.append(collapse)
collapse = []
graph_list = []
for i, sentence in enumerate(collapse_list):
g = pgv.AGraph(overlap='false')
for node in sentence:
if (node[0] in ('ROOT')):
g.add_node(node[1])
else:
g.add_edge(node, spline='true')
g.layout()
g.draw('./Untitled Folder/'+(str(i+1))+'.png')
del(g)
import pygraphviz as pgv
from collections import defaultdict
def choice_svo(xml_name):
tag_collapsed = "<dependencies type=\"collapsed-dependencies\">"
tag_collapsed_end = "</dependencies>"
collapsed_flag = False;
tag_governor = "</governor>"
tag_dependent = "</dependent>"
tag_dep = "<dep type=\""
dep_type_flag = False
dep_type = ''
dep_dict = defaultdict(lambda: defaultdict(list) )
sentence_id = 0; sentence_num = ''
for line in open(xml_name):
if tag_collapsed in line:
sentence_id += 1
sentence_num = str(sentence_id) + "\t"
collapsed_flag = True
dep_dict = defaultdict(lambda: defaultdict(list) )
if collapsed_flag:
if tag_collapsed_end in line:
make_svo(dep_dict)
collapsed_flag = False
if tag_dep in line:
dep_type = line.replace(tag_dep, '').strip().split("\"")[0]
if dep_type == "dobj" or dep_type == "nsubj":
dep_type_flag = True
else:
dep_type_flag = False
if dep_type_flag:
if tag_governor in line:
governor = line.replace(tag_governor, '').strip().split(">")[1]
elif tag_dependent in line:
dependent = line.replace(tag_dependent, '').strip().split(">")[1]
dep_dict[dep_type][governor].append(dependent)
def make_svo(dep_dict):
predicate_list = list()
for governor_word in dep_dict["nsubj"]:
if governor_word in dep_dict["dobj"]:
predicate_list.append(governor_word)
subject_list = list()
object_list = list()
for predicate in predicate_list:
print("\t".join((",".join(dep_dict["nsubj"][predicate]), \
predicate, ",".join(dep_dict["dobj"][predicate]) )))
if __name__ == "__main__":
xml_name = "nlp.txt.xml"
choice_svo(xml_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 50. Sentence segmentation
Step2: 51. Splitting the text into words
Step3: 52. Stemming
Step4: 53. Tokenization
Step5: 54. Part-of-speech tagging
Step6: 55. Named entity extraction
Step7: 56. Coreference resolution
Step8: 57. Dependency parsing
Step9: 58. Tuple extraction
|
11,777
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 3
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
### START CODE HERE ### (≈ 3 lines of code)
m_train = len(train_set_x_orig)
m_test = len(test_set_x_orig)
num_px = np.shape(train_set_x_orig[0])[0]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
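# (255 is the maximum value of a pixel channel, so dividing rescales every feature to [0, 1])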
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Compute the sigmoid of z
    Arguments:
    z -- A scalar or numpy array of any size.
    Return:
    s -- sigmoid(z)
    """
### START CODE HERE ### (≈ 1 line of code)
s = 1 / (1 + np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)
    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros(shape=(dim, 1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b
    Tips:
    - Write your code step by step for the propagation. np.log(), np.dot()
    """
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T, X)+b) # compute activation
cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))# compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1 / m) * np.dot(X, (A - Y).T)
db = (1 / m) * np.sum(A - Y)
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
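# For reference, the formulas implemented by propagate() above:
#   cost J = -(1/m) * sum_i [ y_i*log(a_i) + (1 - y_i)*log(1 - a_i) ]
#   dJ/dw  = (1/m) * X (A - Y)^T
#   dJ/db  = (1/m) * sum_i (a_i - y_i)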
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    This function optimizes w and b by running a gradient descent algorithm
    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps
    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
    Tips:
    You basically need to write down two steps and iterate through them:
    1) Calculate the cost and the gradient for the current parameters. Use propagate().
    2) Update the parameters using gradient descent rule for w and b.
    """
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate * dw
b = b - learning_rate * db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training examples
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T,X)+ b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the function you've implemented previously
    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations
    Returns:
    d -- dictionary containing information about the model.
    """
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros((X_train.shape[0]))
# Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 - Overview of the Problem set
Step2: We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Step3: Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
Step4: Expected Output for m_train, m_test and num_px
Step5: Expected Output
Step7: <font color='blue'>
Step9: Expected Output
Step11: Expected Output
Step13: Expected Output
Step14: Expected Output
Step16: Expected Output
Step17: Run the following cell to train your model.
Step18: Expected Output
Step19: Let's also plot the cost function and the gradients.
Step20: Interpretation
Step21: Interpretation
|
11,778
|
<ASSISTANT_TASK:>
Python Code:
#!pip install -I "phoebe>=2.4,<2.5"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')
print(b['exptime'])
b['exptime'] = 1, 'hr'
print(b['fti_method'])
b['fti_method'] = 'oversample'
print(b['fti_oversample'])
b.run_compute(fti_method='none', irrad_method='none', model='fti_off')
b.run_compute(fti_method='oversample', irrad_method='none', model='fit_on')
afig, mplfig = b.plot(show=True, legend=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle.
Step2: Relevant Parameters
Step3: Let's set the exposure time to 1 hr to make the convolution obvious in our 1-day default binary.
Step4: An 'fti_method' parameter exists for each set of compute options and each lc dataset. By default this is set to 'none' - meaning that the exposure times are ignored during b.run_compute().
Step5: Once we set fti_method to be 'oversample', the corresponding 'fti_oversample' parameter(s) become visible. This option defines how many different time-points PHOEBE should sample over the width of the exposure time and then average to return a single flux point. By default this is set to 5.
Step6: Influence on Light Curves
Step7: The phase-smearing (convolution) caused by the exposure time is most evident in areas of the light curve with sharp derivatives, where the flux changes significantly over the course of the single exposure. Here we can see that the 1-hr exposure time significantly changes the observed shapes of ingress and egress as well as the observed depth of the eclipse.
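To build intuition for what the 'oversample' option does, here is a tiny standalone sketch (plain numpy, not the PHOEBE API; the function and variable names below are invented for illustration): the model is evaluated at several offsets spanning each exposure and the fluxes are averaged, which washes out any feature that is sharp compared to the exposure time.
import numpy as np
def smear(flux_func, times, exptime, oversample=5):
    # evaluate the model at `oversample` points across each exposure and average them
    offsets = np.linspace(-exptime / 2., exptime / 2., oversample)
    return np.mean([flux_func(times + off) for off in offsets], axis=0)
# toy light curve with a narrow eclipse at phase 0.5
toy = lambda t: 1.0 - 0.3 * np.exp(-(((t % 1.0) - 0.5) / 0.01) ** 2)
smeared = smear(toy, np.linspace(0, 1, 101), exptime=1. / 24.)  # a 1 hr exposure, in days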
|
11,779
|
<ASSISTANT_TASK:>
Python Code:
def list_primes(n):
# TODO: Implement me
pass
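# A minimal trial-division sketch (one possible answer, not necessarily the intended
# solution for this challenge) that satisfies the unit test below:
def list_primes(n):
    primes = []
    for candidate in range(2, n + 1):
        # candidate is prime if no previously found prime divides it
        if all(candidate % p for p in primes):
            primes.append(candidate)
    return primes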
# %load test_list_primes.py
from nose.tools import assert_equal
class Test_list_primes(object):
def test_list_primes(self):
assert_equal(list_primes(1), [])
assert_equal(list_primes(2), [2])
assert_equal(list_primes(7), [2, 3, 5, 7])
assert_equal(list_primes(9), list_primes(7))
print('Success: test_list_primes')
def main():
test = Test_list_primes()
test.test_list_primes()
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
11,780
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Load local version of yabox
import sys
sys.path.insert(0, '../')
from yabox import DE, PDE
import numpy as np
# Imports required for 3d animations
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation, rcParams
from IPython.display import HTML
# There is a bug in matplotlib that prevents the use of ffmpeg and avconv!
# https://github.com/matplotlib/matplotlib/pull/8743
avail_writers = matplotlib.animation.writers.list()
# Functions to generate 3d animations
def display_animation(anim):
plt.close(anim._fig)
return HTML(anim.to_html5_video())
def generate_video(problem, algorithm, figsize=(12, 8), frames=100, interval=100):
# Try to use tqdm to show progress
use_tqdm = True
try:
from tqdm.auto import tqdm
except:
use_tqdm = False
figure = plt.figure(figsize=figsize)
ax = Axes3D(figure)
problem.plot3d(ax3d=ax)
minz = min(ax.get_zlim())
it = algorithm.geniterator()
if use_tqdm:
it = iter(tqdm(it, total=frames))
def animate(i):
ax.clear()
ax.autoscale(enable=False)
problem.plot3d(ax3d=ax)
status = next(it)
population = status.population
P = algorithm.denormalize(population)
fitness = status.fitness
idx = status.best_idx
PT = P.T
# Individuals
ax.scatter(PT[0], PT[1], fitness, s=30, c='#930039', marker='o', depthshade=False, zorder=999)
# Shadow projections
ax.scatter(PT[0], PT[1], np.full_like(PT[0], minz), alpha=0.5, s=50, c='black', marker='o',
edgecolors='none', depthshade=False, zorder=999)
anim = animation.FuncAnimation(figure, animate, frames=frames, interval=interval, blit=False)
return anim
from yabox.problems import Ackley, Levy
problem = Ackley()
algorithm = DE(problem, problem.bounds)
anim = generate_video(problem, algorithm, figsize=(12, 8), frames=10, interval=100)
display_animation(anim)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Main functions for plotting and generating the animations
Step2: Usage example
|
11,781
|
<ASSISTANT_TASK:>
Python Code:
# importamos la libreria
import tensorflow as tf
# importamos librerías adicionales
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
%matplotlib inline
# Creación de Constantes
# El valor que retorna el constructor es el valor de la constante.
# creamos constantes a=2 y b=3
a = tf.constant(2)
b = tf.constant(3)
# creamos matrices de 3x3
matriz1 = tf.constant([[1, 3, 2],
[1, 0, 0],
[1, 2, 2]])
matriz2 = tf.constant([[1, 0, 5],
[7, 5, 0],
[2, 1, 1]])
# Realizamos algunos cálculos con estas constantes
suma = tf.add(a, b)
mult = tf.mul(a, b)
cubo_a = a**3
# suma de matrices
suma_mat = tf.add(matriz1, matriz2)
# producto de matrices
mult_mat = tf.matmul(matriz1, matriz2)
# Todo en TensorFlow ocurre dentro de una Sesión
# creamos la sesion y realizamos algunas operaciones con las constantes
# y lanzamos la sesión
with tf.Session() as sess:
print("Suma de las constantes: {}".format(sess.run(suma)))
print("Multiplicación de las constantes: {}".format(sess.run(mult)))
print("Constante elevada al cubo: {}".format(sess.run(cubo_a)))
print("Suma de matrices: \n{}".format(sess.run(suma_mat)))
print("Producto de matrices: \n{}".format(sess.run(mult_mat)))
# Creamos una variable y la inicializamos con 0
estado = tf.Variable(0, name="contador")
# Creamos la op que le va a sumar uno a la Variable `estado`.
uno = tf.constant(1)
nuevo_valor = tf.add(estado, uno)
actualizar = tf.assign(estado, nuevo_valor)
# Las Variables deben ser inicializadas por la operación `init` luego de
# lanzar el grafo. Debemos agregar la op `init` a nuestro grafo.
init = tf.initialize_all_variables()
# Lanzamos la sesion y ejecutamos las operaciones
with tf.Session() as sess:
# Ejecutamos la op `init`
sess.run(init)
# imprimir el valor de la Variable estado.
print(sess.run(estado))
# ejecutamos la op que va a actualizar a `estado`.
for _ in range(3):
sess.run(actualizar)
print(sess.run(estado))
# Ejemplo variables simbólicas en los grafos
# El valor que devuelve el constructor representa la salida de la
# variable (la entrada de la variable se define en la sesion)
# Creamos un contenedor del tipo float. Un tensor de 4x4.
x = tf.placeholder(tf.float32, shape=(4, 4))
y = tf.matmul(x, x)
with tf.Session() as sess:
# print(sess.run(y)) # ERROR: va a fallar porque no alimentamos a x.
rand_array = np.random.rand(4, 4)
print(sess.run(y, feed_dict={x: rand_array})) # ahora esta correcto.
# Neurona con TensorFlow
# Defino las entradas
entradas = tf.placeholder("float", name='Entradas')
datos = np.array([[0, 0]
,[1, 0]
,[0, 1]
,[1, 1]])
# Defino las salidas
uno = lambda: tf.constant(1.0)
cero = lambda: tf.constant(0.0)
with tf.name_scope('Pesos'):
# Definiendo pesos y sesgo
pesos = tf.placeholder("float", name='Pesos')
sesgo = tf.placeholder("float", name='Sesgo')
with tf.name_scope('Activacion'):
# Función de activación
activacion = tf.reduce_sum(tf.add(tf.matmul(entradas, pesos), sesgo))
with tf.name_scope('Neurona'):
# Defino la neurona
def neurona():
return tf.case([(tf.less(activacion, 0.0), cero)], default=uno)
# Salida
a = neurona()
# path de logs
logs_path = '/tmp/tensorflow_logs/neurona'
# Lanzar la Sesion
with tf.Session() as sess:
# para armar el grafo
summary_writer = tf.train.SummaryWriter(logs_path,
graph=sess.graph)
# para armar tabla de verdad
x_1 = []
x_2 = []
out = []
act = []
for i in range(len(datos)):
t = datos[i].reshape(1, 2)
salida, activ = sess.run([a, activacion], feed_dict={entradas: t,
pesos:np.array([[1.],[1.]]),
sesgo: -1.5})
# armar tabla de verdad en DataFrame
x_1.append(t[0][0])
x_2.append(t[0][1])
out.append(salida)
act.append(activ)
tabla_info = np.array([x_1, x_2, act, out]).transpose()
tabla = pd.DataFrame(tabla_info,
columns=['x1', 'x2', 'f(x)', 'x1 AND x2'])
tabla
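# For reference, the same threshold neuron written in plain numpy -- a sketch that is
# equivalent to the TensorFlow graph above with weights [1, 1] and bias -1.5:
import numpy as np
def neurona_np(x, w=np.array([1., 1.]), b=-1.5):
    return 1.0 if np.dot(x, w) + b >= 0 else 0.0
print([neurona_np(x) for x in ([0, 0], [1, 0], [0, 1], [1, 1])])  # [0.0, 0.0, 0.0, 1.0]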
# Neurona OR, solo cambiamos el valor del sesgo
with tf.Session() as sess:
# para armar el grafo
summary_writer = tf.train.SummaryWriter(logs_path,
graph=sess.graph)
# para armar tabla de verdad
x_1 = []
x_2 = []
out = []
act = []
for i in range(len(datos)):
t = datos[i].reshape(1, 2)
salida, activ = sess.run([a, activacion], feed_dict={entradas: t,
pesos:np.array([[1.],[1.]]),
sesgo: -0.5}) # sesgo ahora -0.5
# armar tabla de verdad en DataFrame
x_1.append(t[0][0])
x_2.append(t[0][1])
out.append(salida)
act.append(activ)
tabla_info = np.array([x_1, x_2, act, out]).transpose()
tabla = pd.DataFrame(tabla_info,
columns=['x1', 'x2', 'f(x)', 'x1 OR x2'])
tabla
# Red Neuronal XNOR con TensorFlow
# Defino las entradas
entradas = tf.placeholder("float", name='Entradas')
datos = np.array([[0, 0]
,[1, 0]
,[0, 1]
,[1, 1]])
# Defino las salidas
uno = lambda: tf.constant(1.0)
cero = lambda: tf.constant(0.0)
with tf.name_scope('Pesos'):
# Definiendo pesos y sesgo
pesos = {
'a1': tf.constant([[-1.0], [-1.0]], name='peso_a1'),
'a2': tf.constant([[1.0], [1.0]], name='peso_a2'),
'a3': tf.constant([[1.0], [1.0]], name='peso_a3')
}
sesgo = {
'a1': tf.constant(0.5, name='sesgo_a1'),
'a2': tf.constant(-1.5, name='sesgo_a2'),
'a3': tf.constant(-0.5, name='sesgo_a3')
}
with tf.name_scope('Red_neuronal'):
# Defino las capas
def capa1(entradas, pesos, sesgo):
# activacion a1
a1 = tf.reduce_sum(tf.add(tf.matmul(entradas, pesos['a1']), sesgo['a1']))
a1 = tf.case([(tf.less(a1, 0.0), cero)], default=uno)
# activacion a2
a2 = tf.reduce_sum(tf.add(tf.matmul(entradas, pesos['a2']), sesgo['a2']))
a2 = tf.case([(tf.less(a2, 0.0), cero)], default=uno)
return a1, a2
def capa2(entradas, pesos, sesgo):
# activacion a3
a3 = tf.reduce_sum(tf.add(tf.matmul(entradas, pesos['a3']), sesgo['a3']))
a3 = tf.case([(tf.less(a3, 0.0), cero)], default=uno)
return a3
# path de logs
logs_path = '/tmp/tensorflow_logs/redXNOR'
# Sesion red neuronal XNOR
with tf.Session() as sess:
# para armar el grafo
summary_writer = tf.train.SummaryWriter(logs_path,
graph=sess.graph)
# para armar tabla de verdad
x_1 = []
x_2 = []
out = []
for i in range(len(datos)):
t = datos[i].reshape(1, 2)
# obtenos resultados 1ra capa
a1, a2 = sess.run(capa1(entradas, pesos, sesgo), feed_dict={entradas: t})
# pasamos resultados a la 2da capa
ent_a3 = np.array([[a1, a2]])
salida = sess.run(capa2(ent_a3, pesos, sesgo))
# armar tabla de verdad en DataFrame
x_1.append(t[0][0])
x_2.append(t[0][1])
out.append(salida)
tabla_info = np.array([x_1, x_2, out]).transpose()
tabla = pd.DataFrame(tabla_info,
columns=['x1', 'x2', 'x1 XNOR x2'])
tabla
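# Plain-Python sketch of the same two-layer XNOR network, using the identical weights
# and biases defined above (a1: [-1, -1] + 0.5; a2: [1, 1] - 1.5; a3: [1, 1] - 0.5):
step = lambda z: 1.0 if z >= 0 else 0.0
def xnor_np(x1, x2):
    a1 = step(-1.0 * x1 - 1.0 * x2 + 0.5)  # fires only for (0, 0)
    a2 = step(1.0 * x1 + 1.0 * x2 - 1.5)   # fires only for (1, 1)
    return step(a1 + a2 - 0.5)             # OR of the two hidden units
print([xnor_np(x1, x2) for x1, x2 in [(0, 0), (1, 0), (0, 1), (1, 1)]])  # [1.0, 0.0, 0.0, 1.0]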
# importando el dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# forma del dataset 55000 imagenes
mnist.train.images.shape
# cada imagen es un array de 28x28 con cada pixel
# definido como escala de grises.
digito1 = mnist.train.images[0].reshape((28, 28))
# visualizando el primer digito
plt.imshow(digito1, cmap = cm.Greys)
plt.show()
# valor correcto
mnist.train.labels[0].nonzero()[0][0]
# visualizando imagenes de 5 en 5
def visualizar_imagenes(dataset, cant_img):
img_linea = 5
lineas = int(cant_img / img_linea)
imagenes = []
for i in range(lineas):
datos = []
for img in dataset[img_linea* i:img_linea* (i+1)]:
datos.append(img.reshape((28,28)))
imgs = np.hstack(datos)
imagenes.append(imgs)
data = np.vstack(imagenes)
plt.imshow(data, cmap = cm.Greys )
plt.show()
# visualizando los primeros 30 dígitos
plt.figure(figsize=(8, 8))
visualizar_imagenes(mnist.train.images, 30)
# Parametros
tasa_aprendizaje = 0.001
epocas = 15
lote = 100
display_step = 1
logs_path = "/tmp/tensorflow_logs/perceptron"
# Parametros de la red
n_oculta_1 = 256 # 1ra capa de atributos
n_oculta_2 = 256 # 2ra capa de atributos
n_entradas = 784 # datos de MNIST(forma img: 28*28)
n_clases = 10 # Total de clases a clasificar (0-9 digitos)
# input para los grafos
x = tf.placeholder("float", [None, n_entradas], name='DatosEntrada')
y = tf.placeholder("float", [None, n_clases], name='Clases')
# Creamos el modelo
def perceptron_multicapa(x, pesos, sesgo):
# Función de activación de la capa escondida
capa_1 = tf.add(tf.matmul(x, pesos['h1']), sesgo['b1'])
# activacion relu
capa_1 = tf.nn.relu(capa_1)
# Función de activación de la capa escondida
capa_2 = tf.add(tf.matmul(capa_1, pesos['h2']), sesgo['b2'])
# activación relu
capa_2 = tf.nn.relu(capa_2)
# Salida con activación lineal
salida = tf.matmul(capa_2, pesos['out']) + sesgo['out']
return salida
# Definimos los pesos y sesgo de cada capa.
pesos = {
'h1': tf.Variable(tf.random_normal([n_entradas, n_oculta_1])),
'h2': tf.Variable(tf.random_normal([n_oculta_1, n_oculta_2])),
'out': tf.Variable(tf.random_normal([n_oculta_2, n_clases]))
}
sesgo = {
'b1': tf.Variable(tf.random_normal([n_oculta_1])),
'b2': tf.Variable(tf.random_normal([n_oculta_2])),
'out': tf.Variable(tf.random_normal([n_clases]))
}
with tf.name_scope('Modelo'):
# Construimos el modelo
pred = perceptron_multicapa(x, pesos, sesgo)
with tf.name_scope('Costo'):
# Definimos la funcion de costo
costo = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
with tf.name_scope('optimizador'):
# Algoritmo de optimización
optimizar = tf.train.AdamOptimizer(
learning_rate=tasa_aprendizaje).minimize(costo)
with tf.name_scope('Precision'):
# Evaluar el modelo
pred_correcta = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calcular la precisión
Precision = tf.reduce_mean(tf.cast(pred_correcta, "float"))
# Inicializamos todas las variables
init = tf.initialize_all_variables()
# Crear sumarización para controlar el costo
tf.scalar_summary("Costo", costo)
# Crear sumarización para controlar la precisión
tf.scalar_summary("Precision", Precision)
# Juntar los resumenes en una sola operación
merged_summary_op = tf.merge_all_summaries()
# Lanzamos la sesión
with tf.Session() as sess:
sess.run(init)
# op to write logs to Tensorboard
summary_writer = tf.train.SummaryWriter(
logs_path, graph=tf.get_default_graph())
# Entrenamiento
for epoca in range(epocas):
avg_cost = 0.
lote_total = int(mnist.train.num_examples/lote)
for i in range(lote_total):
lote_x, lote_y = mnist.train.next_batch(lote)
# Optimización por backprop y funcion de costo
_, c, summary = sess.run([optimizar, costo, merged_summary_op],
feed_dict={x: lote_x, y: lote_y})
# escribir logs en cada iteracion
summary_writer.add_summary(summary, epoca * lote_total + i)
# perdida promedio
avg_cost += c / lote_total
# imprimir información de entrenamiento
if epoca % display_step == 0:
print("Iteración: {0: 04d} costo = {1:.9f}".format(epoca+1,
avg_cost))
print("Optimización Terminada!\n")
print("Precisión: {0:.2f}".format(Precision.eval({x: mnist.test.images,
y: mnist.test.labels})))
print("Ejecutar el comando:\n",
"--> tensorboard --logdir=/tmp/tensorflow_logs ",
"\nLuego abir https://0.0.0.0:6006/ en el navegador")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Constants
Step2: Sessions
Step3: Sessions must be closed to release their resources, so it is good practice to wrap the Session in a "with" block that closes it automatically when the block finishes executing.
Step4: Symbolic variables (placeholders)
Step5: Now that we have a general idea of the mechanics behind how TensorFlow works and how to create operations inside its graphs, let's see whether we can implement simple neuron models with the help of this library.
Step6: Here we can see the input data $x_1$ and $x_2$, the result of the activation function, and the final decision the neuron makes based on that result. As the truth table shows, the neuron tells us that $x_1$ AND $x_2$ is true only when both inputs are true, which is correct.
Step7: As we can see, by simply changing the bias weight we turn our AND neuron into an OR neuron. As the truth table shows, the only case in which $x_1$ OR $x_2$ is false is when both inputs are false.
Step8: As we can see, the neural network gives the correct result for the XNOR logic function: it is true only if both values are true or both are false.
Step9: Exploring the MNIST dataset
Step10: Building the multilayer perceptron
|
11,782
|
<ASSISTANT_TASK:>
Python Code:
import agate
lunches2013 = agate.Table.from_csv('frl13.csv')
cleanLunches2013 = lunches2013.where(lambda row: row['FREEREDUCED13'] is not None)
print(cleanLunches2013)
print(len(cleanLunches2013.rows))
lunches2014 = agate.Table.from_csv('frl14.csv')
cleanLunches2014 = lunches2014.where(lambda row: row['FREEREDUCED14'] is not None)
print(cleanLunches2014)
print(len(cleanLunches2014.rows))
lunches2015 = agate.Table.from_csv('frl15.csv')
cleanLunches2015 = lunches2015.where(lambda row: row['FREEREDUCED15'] is not None)
print(cleanLunches2015)
print(len(cleanLunches2015.rows))
lunches1314 = cleanLunches2013.join(cleanLunches2014, 'CODISTSCH13', 'CODISTSCH14', inner=True )
print(lunches1314)
print(len(lunches1314.rows))
lunches = lunches1314.join(cleanLunches2015, 'CODISTSCH13', 'CODISTSCH15', inner=True )
print(lunches)
print(len(lunches.rows))
change = lunches.compute([
('freelunch_change', agate.PercentChange('FREEREDUCED13', 'FREEREDUCED15')),
('enrollment_change', agate.PercentChange('ENROLLED13', 'ENROLLED15'))
])
from decimal import Decimal
def round_freelunch_change(row):
return row['freelunch_change'].quantize(Decimal('0.1'))
def round_enrollment_change(row):
return row['enrollment_change'].quantize(Decimal('0.1'))
rounded_change = change.compute([
('pc_freelunch', agate.Formula(agate.Number(), round_freelunch_change)),
('pc_enrollment', agate.Formula(agate.Number(), round_enrollment_change)),
])
sorted_change = rounded_change.order_by('pc_freelunch', reverse=True)
for_printing = sorted_change.select(['SCHOOLNAME13', 'pc_freelunch', 'pc_enrollment'])
for_printing.print_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pull in the 2013 lunch information csv, clean out any empty values, print table columns and lengths
Step2: Pull in the 2014 lunch information csv, clean out any empty values, print table columns and lengths
Step3: Pull in the 2015 lunch information csv, clean out any empty values, print table columns and lengths
Step4: Join the cleanLunches2013 & 2014 on district codes
Step5: View the new joined table
Step6: Final join between joined lunches1314 and lunches2015
Step7: View the final joined table
Step8: Compute the percent changes for both free/reduced-lunch enrollment and total enrollment, for comparison
Step9: Round those percent changes to one decimal place
Step10: Sort to find the largest free/reduced-lunch percent changes
Step11: Only need certain columns to print out
Step12: Print out the final table
|
11,783
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import matplotlib.pyplot as plt
import os
data = np.loadtxt(fname = 'data.txt', delimiter = ',')
X, y = data[:,:5], data[:,5]
print("Features sample: {}".format(X[1]))
print("Result: {}".format(y[1]))
m = X.shape[0] #number of samples
#training
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print("Train shape: {}".format(X_train.shape))
print("Test shape: {}".format(X_test.shape))
clf = RidgeCV(alphas = [0.1, 1.0, 10.0], normalize=True)
clf.fit(X_train, y_train)
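# RidgeCV fits a ridge regression for each candidate alpha and keeps the one with the
# best cross-validation score (by default an efficient leave-one-out scheme); the
# chosen value is exposed below as clf.alpha_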
#predict
prediction = clf.predict(X_test);
print("Expected is: {}".format(y_test[0]))
print("Prediction is: {}".format(prediction[0]))
print("Score: {}".format(clf.score(X_test, y_test)))
print("Alpha: {}".format(clf.alpha_))
#plotting all data
plt.figure(1)
real, = plt.plot(np.arange(m), y, 'b-', label='real')
predicted, = plt.plot(np.arange(m), clf.predict(X), 'r-', label='predicted')
plt.ylabel('Stock')
plt.xlabel('Time')
plt.legend([real, predicted], ['Real', 'Predicted'])
plt.show()
#plotting only test
mtest = X_test.shape[0]
real, = plt.plot(np.arange(mtest), y_test, 'b-', label='real')
test, = plt.plot(np.arange(mtest), clf.predict(X_test), 'g-', label='test')
plt.ylabel('Stock')
plt.xlabel('Time')
plt.legend([real, test], ['Real', 'Test'])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ridge as Linear Regressor
|
11,784
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import statsmodels.api as sm
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog, prepend=False)
print(spector_data.exog.head())
print(spector_data.endog.head())
lpm_mod = sm.OLS(spector_data.endog, spector_data.exog)
lpm_res = lpm_mod.fit()
print("Parameters: ", lpm_res.params[:-1])
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
logit_res = logit_mod.fit(disp=0)
print("Parameters: ", logit_res.params)
margeff = logit_res.get_margeff()
print(margeff.summary())
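# Note: get_margeff() reports, by default, the marginal effects averaged over the
# sample, i.e. the mean change in P(y=1) for a one-unit change in each regressor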
print(logit_res.summary())
probit_mod = sm.Probit(spector_data.endog, spector_data.exog)
probit_res = probit_mod.fit()
probit_margeff = probit_res.get_margeff()
print("Parameters: ", probit_res.params)
print("Marginal effects: ")
print(probit_margeff.summary())
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog)
print(anes_data.exog.head())
print(anes_data.endog.head())
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
print(mlogit_res.params)
rand_data = sm.datasets.randhie.load()
rand_exog = rand_data.exog
rand_exog = sm.add_constant(rand_exog, prepend=False)
poisson_mod = sm.Poisson(rand_data.endog, rand_exog)
poisson_res = poisson_mod.fit(method="newton")
print(poisson_res.summary())
mod_nbin = sm.NegativeBinomial(rand_data.endog, rand_exog)
res_nbin = mod_nbin.fit(disp=False)
print(res_nbin.summary())
mlogit_res = mlogit_mod.fit(method="bfgs", maxiter=250)
print(mlogit_res.summary())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Inspect the data
Step3: Linear Probability Model (OLS)
Step4: Logit Model
Step5: Marginal Effects
Step6: As in all the discrete data models presented below, we can print a nice summary of results
Step7: Probit Model
Step8: Multinomial Logit
Step9: Inspect the data
Step10: Fit MNL model
Step11: Poisson
Step12: Fit Poisson model
Step13: Negative Binomial
Step14: Alternative solvers
|
11,785
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-1', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
11,786
|
<ASSISTANT_TASK:>
Python Code:
from threeML import *
import matplotlib.pyplot as plt
%matplotlib inline
from threeML.minimizer.tutorial_material import *
# This returns a JointLikelihood object with a simple likelihood function,
# and the corresponding Model instance. These objects are what you will have
# in a typical 3ML analysis. The Model contains one point source, named "test",
# with a spectrum called "simple"
jl, model = get_joint_likelihood_object_simple_likelihood()
# Let's look at the likelihood function, which in this illustrative example
# has a very simple shape
_ = plot_likelihood_function(jl)
model.test.spectrum.main.shape.mu = 1.0
# The minuit minimizer is the default, so no setup is necessary
# quiet = True means that no result will be printed
res = jl.fit(quiet=True)
# This plots the path that Minuit has traveled looking for the minimum
# Arrows connect the different points, starting from 1.0 and going
# to 40, the minimum
fig = plot_minimizer_path(jl)
model.test.spectrum.main.shape.mu = 80.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
model.test.spectrum.main.shape.mu = 20.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
jl, model = get_joint_likelihood_object_complex_likelihood()
_ = plot_likelihood_function(jl)
model.test.spectrum.main.shape.mu = 1.0
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
model.test.spectrum.main.shape.mu = 70
res = jl.fit(quiet=True)
fig = plot_minimizer_path(jl)
# Create an instance of the GRID minimizer
grid_minimizer = GlobalMinimization("grid")
# Create an instance of a local minimizer, which will be used by GRID
local_minimizer = LocalMinimization("minuit")
# Define a grid for mu as 10 steps between 1 and 80
my_grid = {model.test.spectrum.main.shape.mu: np.linspace(1, 80, 10)}
# Setup the global minimization
# NOTE: the "callbacks" option is useless in a normal 3ML analysis, it is
# here only to keep track of the evolution for the plot
grid_minimizer.setup(second_minimization=local_minimizer, grid = my_grid,
callbacks=[get_callback(jl)])
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(grid_minimizer)
jl.fit()
fig = plot_minimizer_path(jl)
# Reset the parameter to a value different from the best fit found
# by previous algorithms
jl, model = get_joint_likelihood_object_complex_likelihood()
model.test.spectrum.main.shape.mu = 2.5
# Create an instance of the PAGMO minimizer
pagmo_minimizer = GlobalMinimization("pagmo")
# Select one of the many algorithms provided by pagmo
# (see https://esa.github.io/pagmo2/docs/algorithm_list.html
# for a list).
# In this case we use the Artificial Bee Colony algorithm
# (see here for a description: https://link.springer.com/article/10.1007/s10898-007-9149-x)
import pygmo
my_algorithm = pygmo.algorithm(pygmo.bee_colony(gen=20))
# Create an instance of a local minimizer
local_minimizer = LocalMinimization("minuit")
# Setup the global minimization
pagmo_minimizer.setup(second_minimization = local_minimizer, algorithm=my_algorithm,
islands=10, population_size=10, evolution_cycles=1)
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(pagmo_minimizer)
jl.fit()
# NOTE: given the inner working of pygmo, it is not possible
# to plot the evolution
# Reset the parameter to a value different from the best fit found
# by previous algorithms
jl, model = get_joint_likelihood_object_complex_likelihood()
model.test.spectrum.main.shape.mu = 5.0
# Create an instance of the PAGMO minimizer
multinest_minimizer = GlobalMinimization("multinest")
# Create an instance of a local minimizer
local_minimizer = LocalMinimization("minuit")
# Setup the global minimization
multinest_minimizer.setup(second_minimization = local_minimizer, live_points=100)
# Set the minimizer for the JointLikelihood object
jl.set_minimizer(multinest_minimizer)
jl.fit()
# Plots the point traversed by Multinest
fig = plot_minimizer_path(jl, points=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's get a JointLikelihood object like the one we would have in a normal 3ML analysis. We use a custom function, prepared for this tutorial, which gives a JointLikelihood object having a very simple model with one free parameter ($\mu$), and with a likelihood having a very simple shape
Step2: Now let's set up the Minuit minimizer and minimize the -log(L), starting from $\mu = 1$
Step3: Now let's do the same, but starting from $\mu=80$
Step4: and from $\mu=20$
Step5: It is clear that, depending on the starting point, minuit makes different steps trying to reach the minimum. In this last case, at one point Minuit overshoots the minimum jumping all the way from $\sim 30$ to $\sim 80$, then realizes the mistake and goes back.
Step6: This likelihood function has 3 minima
Step7: Minuit has found the local minimum, not the global one. Now we start from 80
Step8: Now we found the global minimum. This is a simple example to show that the solution found by a local minimizer can depend on the starting point, and might not be the global minimum. In practice, one can rarely be guaranteed that the likelihood function has only one minimum. This is especially true in many dimensions and in cases of data with poor statistics.
Step9: The GRID minimizer has found the global minimum.
Step10: Multinest minimizer
|
11,787
|
<ASSISTANT_TASK:>
Python Code:
import os
PROJECT = 'your-project-id' # REPLACE WITH YOUR PROJECT ID
REGION = 'us-central1' # REPLACE WITH YOUR REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
## create GCS buckets
exists=$(gsutil ls -d | grep -w gs://${PROJECT}_input/)
if [ -n "$exists" ]; then
echo "Skipping the creation of input bucket."
else
echo "Creating input bucket."
gsutil mb -l ${REGION} gs://${PROJECT}_input
echo "Loading sample data for later"
gsutil cp resources/usa_names.csv gs://${PROJECT}_input
fi
exists=$(gsutil ls -d | grep -w gs://${PROJECT}_output/)
if [ -n "$exists" ]; then
echo "Skipping the creation of output bucket."
else
echo "Creating output bucket."
gsutil mb -l ${REGION} gs://${PROJECT}_output
fi
%%writefile simple_load_dag.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
A simple Airflow DAG that is triggered externally by a Cloud Function when a
file lands in a GCS bucket.
Once triggered the DAG performs the following steps:
1. Triggers a Google Cloud Dataflow job with the input file information received
from the Cloud Function trigger.
2. Upon completion of the Dataflow job, the input file is moved to a
gs://<target-bucket>/<success|failure>/YYYY-MM-DD/ location based on the
status of the previous step.
import datetime
import logging
import os
from airflow import configuration
from airflow import models
from airflow.contrib.hooks import gcs_hook
from airflow.contrib.operators import dataflow_operator
from airflow.operators import python_operator
from airflow.utils.trigger_rule import TriggerRule
# We set the start_date of the DAG to the previous date. This will
# make the DAG immediately available for scheduling.
YESTERDAY = datetime.datetime.combine(
datetime.datetime.today() - datetime.timedelta(1),
datetime.datetime.min.time())
# We define some variables that we will use in the DAG tasks.
SUCCESS_TAG = 'success'
FAILURE_TAG = 'failure'
# An Airflow variable called gcp_completion_bucket is required.
# This variable will contain the name of the bucket to move the processed
# file to.
# We only ingest files with the exact name usa_names.csv; adjust the
# pattern below (wildcards are allowed) if your input files are named differently.
INPUT_BUCKET_CSV = 'gs://'+models.Variable.get('gcp_input_location')+'/usa_names.csv'
# TODO: Populate the models.Variable.get() with the actual variable name for your output bucket
COMPLETION_BUCKET = 'gs://'+models.Variable.get('gcp_completion_bucket')
DS_TAG = '{{ ds }}'
DATAFLOW_FILE = os.path.join(
configuration.get('core', 'dags_folder'), 'dataflow', 'process_delimited.py')
# The following additional Airflow variables should be set:
# gcp_project: Google Cloud Platform project id.
# gcp_temp_location: Google Cloud Storage location to use for Dataflow temp location.
DEFAULT_DAG_ARGS = {
'start_date': YESTERDAY,
'retries': 2,
# TODO: Populate the models.Variable.get() with the variable name for your GCP Project
'project_id': models.Variable.get('gcp_project'),
'dataflow_default_options': {
'project': models.Variable.get('gcp_project'),
# TODO: Populate the models.Variable.get() with the variable name for temp location
'temp_location': 'gs://'+models.Variable.get('gcp_temp_location'),
'runner': 'DataflowRunner'
}
}
def move_to_completion_bucket(target_bucket, target_infix, **kwargs):
A utility method to move an object to a target location in GCS.
# Here we establish a connection hook to GoogleCloudStorage.
# Google Cloud Composer automatically provides a google_cloud_storage_default
# connection id that is used by this hook.
conn = gcs_hook.GoogleCloudStorageHook()
# The external trigger (Google Cloud Function) that initiates this DAG
# passes a dag_run.conf dictionary with attributes of the GCS object that
# fired the trigger. For simplicity, this lab instead hardcodes the
# bucket and object name of the file that was ingested.
source_bucket = models.Variable.get('gcp_input_location')
source_object = 'usa_names.csv'
completion_ds = kwargs['ds']
target_object = os.path.join(target_infix, completion_ds, source_object)
logging.info('Copying %s to %s',
os.path.join(source_bucket, source_object),
os.path.join(target_bucket, target_object))
conn.copy(source_bucket, source_object, target_bucket, target_object)
logging.info('Deleting %s',
os.path.join(source_bucket, source_object))
conn.delete(source_bucket, source_object)
# Setting schedule_interval to None as this DAG is externally trigger by a Cloud Function.
# The following Airflow variables should be set for this DAG to function:
# bq_output_table: BigQuery table that should be used as the target for
# Dataflow in <dataset>.<tablename> format.
# e.g. lake.usa_names
# input_field_names: Comma separated field names for the delimited input file.
# e.g. state,gender,year,name,number,created_date
# TODO: Name the DAG id GcsToBigQueryTriggered
with models.DAG(dag_id='GcsToBigQueryTriggered',
description='A DAG triggered by an external Cloud Function',
schedule_interval=None, default_args=DEFAULT_DAG_ARGS) as dag:
# Args required for the Dataflow job.
job_args = {
'input': INPUT_BUCKET_CSV,
# TODO: Populate the models.Variable.get() with the variable name for BQ table
'output': models.Variable.get('bq_output_table'),
# TODO: Populate the models.Variable.get() with the variable name for input field names
'fields': models.Variable.get('input_field_names'),
'load_dt': DS_TAG
}
# Main Dataflow task that will process and load the input delimited file.
# TODO: Specify the type of operator we need to call to invoke DataFlow
dataflow_task = dataflow_operator.DataFlowPythonOperator(
task_id="process-delimited-and-push",
py_file=DATAFLOW_FILE,
options=job_args)
# Here we create two conditional tasks, one of which will be executed
# based on whether the dataflow_task was a success or a failure.
success_move_task = python_operator.PythonOperator(task_id='success-move-to-completion',
python_callable=move_to_completion_bucket,
# A success_tag is used to move
# the input file to a success
# prefixed folder.
op_args=[models.Variable.get('gcp_completion_bucket'), SUCCESS_TAG],
provide_context=True,
trigger_rule=TriggerRule.ALL_SUCCESS)
failure_move_task = python_operator.PythonOperator(task_id='failure-move-to-completion',
python_callable=move_to_completion_bucket,
# A failure_tag is used to move
# the input file to a failure
# prefixed folder.
op_args=[models.Variable.get('gcp_completion_bucket'), FAILURE_TAG],
provide_context=True,
trigger_rule=TriggerRule.ALL_FAILED)
# The success_move_task and failure_move_task are both downstream from the
# dataflow_task.
dataflow_task >> success_move_task
dataflow_task >> failure_move_task
## Run this to display which key value pairs to input
import pandas as pd
pd.DataFrame([
('gcp_project', PROJECT),
('gcp_input_location', PROJECT + '_input'),
('gcp_temp_location', PROJECT + '_output/tmp'),
('gcp_completion_bucket', PROJECT + '_output'),
('input_field_names', 'state,gender,year,name,number,created_date'),
('bq_output_table', 'ml_pipeline.ingest_table')
], columns = ['Key', 'Value'])
%%bash
gcloud composer environments run ENVIRONMENT_NAME \
--location ${REGION} variables -- \
--set gcp_project ${PROJECT}
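%%bash
# Hedged continuation (assumption: the remaining key/value pairs match the
# table above). Replace ENVIRONMENT_NAME with your Composer environment name.
for pair in \
  "gcp_input_location ${PROJECT}_input" \
  "gcp_temp_location ${PROJECT}_output/tmp" \
  "gcp_completion_bucket ${PROJECT}_output" \
  "input_field_names state,gender,year,name,number,created_date" \
  "bq_output_table ml_pipeline.ingest_table"
do
  gcloud composer environments run ENVIRONMENT_NAME \
    --location ${REGION} variables -- --set $pair
done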
AIRFLOW_BUCKET = 'us-central1-composer-21587538-bucket' # REPLACE WITH AIRFLOW BUCKET NAME
os.environ['AIRFLOW_BUCKET'] = AIRFLOW_BUCKET
%%bash
gsutil cp simple_load_dag.py gs://${AIRFLOW_BUCKET}/dags # overwrite DAG file if it exists
gsutil cp -r dataflow/process_delimited.py gs://${AIRFLOW_BUCKET}/dags/dataflow/ # copy Dataflow job to be ran
import google.auth
import google.auth.transport.requests
import requests
import six.moves.urllib.parse
# Authenticate with Google Cloud.
# See: https://cloud.google.com/docs/authentication/getting-started
credentials, _ = google.auth.default(
scopes=['https://www.googleapis.com/auth/cloud-platform'])
authed_session = google.auth.transport.requests.AuthorizedSession(
credentials)
project_id = 'your-project-id'
location = 'us-central1'
composer_environment = 'composer'
environment_url = (
'https://composer.googleapis.com/v1beta1/projects/{}/locations/{}'
'/environments/{}').format(project_id, location, composer_environment)
composer_response = authed_session.request('GET', environment_url)
environment_data = composer_response.json()
airflow_uri = environment_data['config']['airflowUri']
# The Composer environment response does not include the IAP client ID.
# Make a second, unauthenticated HTTP request to the web server to get the
# redirect URI.
redirect_response = requests.get(airflow_uri, allow_redirects=False)
redirect_location = redirect_response.headers['location']
# Extract the client_id query parameter from the redirect.
parsed = six.moves.urllib.parse.urlparse(redirect_location)
query_string = six.moves.urllib.parse.parse_qs(parsed.query)
print(query_string['client_id'][0])
'use strict';
const fetch = require('node-fetch');
const FormData = require('form-data');
/**
* Triggered from a message on a Cloud Storage bucket.
*
* IAP authorization based on:
* https://stackoverflow.com/questions/45787676/how-to-authenticate-google-cloud-functions-for-access-to-secure-app-engine-endpo
* and
* https://cloud.google.com/iap/docs/authentication-howto
*
* @param {!Object} data The Cloud Functions event data.
* @returns {Promise}
*/
exports.triggerDag = async data => {
// Fill in your Composer environment information here.
// The project that holds your function
const PROJECT_ID = 'your-project-id';
// Navigate to your webserver's login page and get this from the URL
const CLIENT_ID = 'your-iap-client-id';
// This should be part of your webserver's URL:
// {tenant-project-id}.appspot.com
const WEBSERVER_ID = 'your-tenant-project-id';
// The name of the DAG you wish to trigger
const DAG_NAME = 'GcsToBigQueryTriggered';
// Other constants
const WEBSERVER_URL = `https://${WEBSERVER_ID}.appspot.com/api/experimental/dags/${DAG_NAME}/dag_runs`;
const USER_AGENT = 'gcf-event-trigger';
const BODY = {conf: JSON.stringify(data)};
// Make the request
try {
const iap = await authorizeIap(CLIENT_ID, PROJECT_ID, USER_AGENT);
return makeIapPostRequest(
WEBSERVER_URL,
BODY,
iap.idToken,
USER_AGENT
);
} catch (err) {
throw new Error(err);
}
};
/**
* @param {string} clientId The client id associated with the Composer webserver application.
* @param {string} projectId The id for the project containing the Cloud Function.
* @param {string} userAgent The user agent string which will be provided with the webserver request.
*/
const authorizeIap = async (clientId, projectId, userAgent) => {
const SERVICE_ACCOUNT = `${projectId}@appspot.gserviceaccount.com`;
const JWT_HEADER = Buffer.from(
JSON.stringify({alg: 'RS256', typ: 'JWT'})
).toString('base64');
let jwt = '';
let jwtClaimset = '';
// Obtain an Oauth2 access token for the appspot service account
const res = await fetch(
`http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/${SERVICE_ACCOUNT}/token`,
{
headers: {'User-Agent': userAgent, 'Metadata-Flavor': 'Google'},
}
);
const tokenResponse = await res.json();
if (tokenResponse.error) {
return Promise.reject(tokenResponse.error);
}
const accessToken = tokenResponse.access_token;
const iat = Math.floor(new Date().getTime() / 1000);
const claims = {
iss: SERVICE_ACCOUNT,
aud: 'https://www.googleapis.com/oauth2/v4/token',
iat: iat,
exp: iat + 60,
target_audience: clientId,
};
jwtClaimset = Buffer.from(JSON.stringify(claims)).toString('base64');
const toSign = [JWT_HEADER, jwtClaimset].join('.');
const blob = await fetch(
`https://iam.googleapis.com/v1/projects/${projectId}/serviceAccounts/${SERVICE_ACCOUNT}:signBlob`,
{
method: 'POST',
body: JSON.stringify({
bytesToSign: Buffer.from(toSign).toString('base64'),
}),
headers: {
'User-Agent': userAgent,
Authorization: `Bearer ${accessToken}`,
},
}
);
const blobJson = await blob.json();
if (blobJson.error) {
return Promise.reject(blobJson.error);
}
// Request service account signature on header and claimset
const jwtSignature = blobJson.signature;
jwt = [JWT_HEADER, jwtClaimset, jwtSignature].join('.');
const form = new FormData();
form.append('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer');
form.append('assertion', jwt);
const token = await fetch('https://www.googleapis.com/oauth2/v4/token', {
method: 'POST',
body: form,
});
const tokenJson = await token.json();
if (tokenJson.error) {
return Promise.reject(tokenJson.error);
}
return {
jwt: jwt,
idToken: tokenJson.id_token,
};
};
/**
* @param {string} url The url that the post request targets.
* @param {string} body The body of the post request.
* @param {string} idToken Bearer token used to authorize the iap request.
* @param {string} userAgent The user agent to identify the requester.
*/
const makeIapPostRequest = async (url, body, idToken, userAgent) => {
const res = await fetch(url, {
method: 'POST',
headers: {
'User-Agent': userAgent,
Authorization: `Bearer ${idToken}`,
},
body: JSON.stringify(body),
});
if (!res.ok) {
const err = await res.text();
throw new Error(err);
}
};
{
"name": "nodejs-docs-samples-functions-composer-storage-trigger",
"version": "0.0.1",
"dependencies": {
"form-data": "^2.3.2",
"node-fetch": "^2.2.0"
},
"engines": {
"node": ">=8.0.0"
},
"private": true,
"license": "Apache-2.0",
"author": "Google Inc.",
"repository": {
"type": "git",
"url": "https://github.com/GoogleCloudPlatform/nodejs-docs-samples.git"
},
"devDependencies": {
"@google-cloud/nodejs-repo-tools": "^3.3.0",
"mocha": "^6.0.0",
"proxyquire": "^2.1.0",
"sinon": "^7.2.7"
},
"scripts": {
"test": "mocha test/*.test.js --timeout=20000"
}
}
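# Hedged deployment sketch (standard Cloud Functions workflow; the runtime
# and bucket name here are assumptions -- adjust them to your setup):
# gcloud functions deploy triggerDag \
#   --runtime nodejs8 \
#   --trigger-resource ${PROJECT}_input \
#   --trigger-event google.storage.object.finalize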
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Cloud Storage buckets
Step4: Create BigQuery Destination Dataset and Table
Step5: Viewing environment information
Step6: Option 1
Step7: Copy your Airflow bucket name
Step8: Copy your Airflow files to your Airflow bucket
Step9: Navigating Using the Airflow UI
Step10: Create the Cloud Function
Step11: populate package.json
|
11,788
|
<ASSISTANT_TASK:>
Python Code:
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
# set the random number generator
np.random.seed(7)
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
def run_simulation(system, update_func):
Simulate a queueing system.
system: System object
update_func: function object
x = 0
results = TimeSeries()
results[0] = x
for t in linrange(0, system.duration):
x = update_func(x, t, system)
results[t+1] = x
return results
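# Minimal sketch (not the official solution) of an update function with the
# signature described in the task: a customer arrives with probability
# system.lam and, if the store is not empty, one finishes checking out with
# probability system.mu. Assumes modsim's flip(p).
def example_update_func(x, t, system):
    if flip(system.lam):
        x += 1  # a customer arrives
    if x > 0 and flip(system.mu):
        x -= 1  # a customer finishes checking out
    return x
# e.g. run_simulation(System(lam=1/8, mu=1/5, duration=600), example_update_func)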
# Solution goes here
def compute_metrics(results, system):
Compute average number of customers and wait time.
results: TimeSeries of queue lengths
system: System object
returns: L, W
L = results.mean()
W = L / system.lam
return L, W
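# Sketch of the parameter sweep described in the task (assumes modsim's
# SweepSeries): run the simulation for each arrival rate and record W.
def example_sweep(lam_array, mu, update_func, duration=600):
    sweep = SweepSeries()
    for lam in lam_array:
        system = System(lam=lam, mu=mu, duration=duration)
        results = run_simulation(system, update_func)
        L, W = compute_metrics(results, system)
        sweep[lam] = W
    return sweep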
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# W_avg = sweep.mean()
def plot_W(lam_array, mu):
Plot the theoretical mean wait time.
lam_array: array of values for `lam`
mu: probability of finishing a checkout
W = 1 / (mu - lam_array)
plot(lam_array, W, 'g-')
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One queue or two?
Step2: Test this function by creating a System object with lam=1/8 and mu=1/5.
Step3: Write an update function that takes as parameters x, which is the total number of customers in the store, including the one checking out; t, which is the number of minutes that have elapsed in the simulation, and system, which is a System object.
Step4: Test your function by calling it with x=1, t=0, and the System object you created. If you run it a few times, you should see different results.
Step6: Now we can run the simulation. Here's a version of run_simulation that creates a TimeSeries with the total number of customers in the store, including the one checking out.
Step7: Call run_simulation with your update function and plot the results.
Step9: After the simulation, we can compute L, which is the average number of customers in the system, and W, which is the average time customers spend in the store. L and W are related by Little's Law, $W = L / \lambda$
Step10: Call compute_metrics with the results from your simulation.
Step11: Parameter sweep
Step12: Write a function that takes an array of values for lam, a single value for mu, and an update function.
Step13: Call your function to generate a SweepSeries, and plot it.
Step14: If we imagine that this range of values represents arrival rates on different days, we can use the average value of W, for a range of values of lam, to compare different queueing strategies.
Step16: Analysis
Step17: Use this function to plot the theoretical results, then plot your simulation results again on the same graph. How do they compare?
Step18: Multiple servers
Step19: Use this update function to simulate the system, plot the results, and print the metrics.
Step20: Since we have two checkout counters now, we can consider values for $\lambda$ that exceed $\mu$.
Step21: Use your sweep function to simulate the two server, one queue scenario with a range of values for lam.
Step22: Multiple queues
Step23: Write a version of run_simulation that works with this update function.
Step24: Test your functions by running a simulation with a single value of lam.
Step25: Sweep a range of values for lam, plot the results, and print the average wait time across all values of lam.
|
11,789
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import scipy.signal
from IPython.display import Image
import matplotlib.image as mpimg
# This section uses the optional slimscat package (https://krosenfeld.github.io/slimscat/current/)
try:
import slimscat
doExample = True
except ImportError:
print('Warning: could not import slimscat, will not run example')
doExample = False
Image(filename='figures/corrugatedPhaseScreen.png', width=400)
Image(filename='figures/scattering_table.png', width=600)
Image(filename='figures/scattering_imaging.png', width=600)
if doExample:
# construct gaussian source
nx,ny = 220,256
yy,xx = np.meshgrid(np.arange(ny)-ny/2,np.arange(nx)-nx/2,indexing='ij')
isrc = np.exp(-0.5*(xx**2 + yy**2) / (0.07*np.min([nx,ny]))**2)
slimscat.generate_screen(screenfile='gaussian_screen.bin')
# scatter source
iss = slimscat.run_slimscat(isrc,1.,screenfile='gaussian_screen.bin')
if doExample:
plt.figure(figsize=(15,6))
plt.subplot(121)
plt.imshow(isrc,vmin=isrc.min(),vmax=isrc.max())
plt.title('source')
plt.colorbar()
plt.subplot(122)
plt.imshow(iss,vmin=isrc.min(),vmax=isrc.max())
plt.title('scattered')
plt.colorbar()
if doExample:
# generate scattering file
slimscat.generate_screen(wavelength=0.87e-6,dx=0.2,ips=4,screenfile='gargantua_screen.bin')
# load RGB image with rough scale appropriate for Sgr A*
model = mpimg.imread('figures/gargantua.jpg')
dx = 50/300.
nx,ny,nchan = model.shape
# scatter each rgb channel separately (using same screen)
r = slimscat.run_slimscat(model[:,:,0],dx,screenfile='gargantua_screen.bin')
g = slimscat.run_slimscat(model[:,:,1],dx,screenfile='gargantua_screen.bin')
b = slimscat.run_slimscat(model[:,:,2],dx,screenfile='gargantua_screen.bin')
if doExample:
N=5
t = 1 - np.abs(np.linspace(-1, 1, N))
kernel = t.reshape(N, 1) * t.reshape(1, N)
kernel /= kernel.sum()
rs = scipy.signal.convolve2d(r/255., kernel, mode='same')
gs = scipy.signal.convolve2d(g/255., kernel, mode='same')
bs = scipy.signal.convolve2d(b/255., kernel, mode='same')
if doExample:
rgb_uint8 = (np.dstack((rs,gs,bs)) * 255.).astype(np.uint8)
plt.figure(figsize=(14,20))
plt.subplot(211)
plt.imshow(model[:,:,:])
plt.title('source')
plt.subplot(212)
plt.imshow(rgb_uint8)
plt.title('scattered')
Image(filename='figures/iono.png', width=500)
Image(filename='figures/Atmosphere_with_Ionosphere.png', width=400)
Image(filename='figures/iono_regimes.png', width=400)
Image(filename='figures/total_precipitable_water_201008.jpg', width=600)
Image(filename='figures/ALMAatmo.jpg', width=600)
Image(filename='figures/carilli_holdaway1999.png', width=600)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step2: 7.7 Propagation effects <a id='instrum
Step3: Figure 7.7.1
Step4: Figure 7.7.2
Step5: Figure 7.7.3
Step6: Following the scattering, we need to smooth the image a little to better illustrate the result
Step7: Make the RGB cube and you're done
Step8: Figure 7.7.4
Step9: Figure 7.7.5
Step10: Figure 7.7.5
Step11: Figure 7.7.6
Step12: Figure 7.7.7
Step13: Figure 7.7.8
|
11,790
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Preprocess the data (these are NumPy arrays)
x_train = x_train.reshape(60000, 784).astype("float32") / 255
x_test = x_test.reshape(10000, 784).astype("float32") / 255
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# Reserve 10,000 samples for validation
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
model.compile(
optimizer=keras.optimizers.RMSprop(), # Optimizer
# Loss function to minimize
loss=keras.losses.SparseCategoricalCrossentropy(),
# List of metrics to monitor
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
print("Fit model on training data")
history = model.fit(
x_train,
y_train,
batch_size=64,
epochs=2,
# We pass some validation for
# monitoring validation loss and metrics
# at the end of each epoch
validation_data=(x_val, y_val),
)
history.history
# Evaluate the model on the test data using `evaluate`
print("Evaluate on test data")
results = model.evaluate(x_test, y_test, batch_size=128)
print("test loss, test acc:", results)
# Generate predictions (probabilities -- the output of the last layer)
# on new data using `predict`
print("Generate predictions for 3 samples")
predictions = model.predict(x_test[:3])
print("predictions shape:", predictions.shape)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
def get_uncompiled_model():
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, activation="softmax", name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
def get_compiled_model():
model = get_uncompiled_model()
model.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
return model
def custom_mean_squared_error(y_true, y_pred):
return tf.math.reduce_mean(tf.square(y_true - y_pred))
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=custom_mean_squared_error)
# We need to one-hot encode the labels to use MSE
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
class CustomMSE(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = tf.math.reduce_mean(tf.square(y_true - y_pred))
reg = tf.math.reduce_mean(tf.square(0.5 - y_pred))
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = tf.one_hot(y_train, depth=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
class CategoricalTruePositives(keras.metrics.Metric):
def __init__(self, name="categorical_true_positives", **kwargs):
super(CategoricalTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name="ctp", initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
y_pred = tf.reshape(tf.argmax(y_pred, axis=1), shape=(-1, 1))
values = tf.cast(y_true, "int32") == tf.cast(y_pred, "int32")
values = tf.cast(values, "float32")
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, "float32")
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
def reset_state(self):
# The state of the metric will be reset at the start of each epoch.
self.true_positives.assign(0.0)
model = get_uncompiled_model()
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[CategoricalTruePositives()],
)
model.fit(x_train, y_train, batch_size=64, epochs=3)
class ActivityRegularizationLayer(layers.Layer):
def call(self, inputs):
self.add_loss(tf.reduce_sum(inputs) * 0.1)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert activity regularization as a layer
x = ActivityRegularizationLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
# The displayed loss will be much higher than before
# due to the regularization component.
model.fit(x_train, y_train, batch_size=64, epochs=1)
class MetricLoggingLayer(layers.Layer):
def call(self, inputs):
# The `aggregation` argument defines
# how to aggregate the per-batch values
# over each epoch:
# in this case we simply average them.
self.add_metric(
keras.backend.std(inputs), name="std_of_activation", aggregation="mean"
)
return inputs # Pass-through layer.
inputs = keras.Input(shape=(784,), name="digits")
x = layers.Dense(64, activation="relu", name="dense_1")(inputs)
# Insert std logging as a layer.
x = MetricLoggingLayer()(x)
x = layers.Dense(64, activation="relu", name="dense_2")(x)
outputs = layers.Dense(10, name="predictions")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu", name="dense_1")(inputs)
x2 = layers.Dense(64, activation="relu", name="dense_2")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
model.add_loss(tf.reduce_sum(x1) * 0.1)
model.add_metric(keras.backend.std(x1), name="std_of_activation", aggregation="mean")
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
)
model.fit(x_train, y_train, batch_size=64, epochs=1)
class LogisticEndpoint(keras.layers.Layer):
def __init__(self, name=None):
super(LogisticEndpoint, self).__init__(name=name)
self.loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
self.accuracy_fn = keras.metrics.BinaryAccuracy()
def call(self, targets, logits, sample_weights=None):
# Compute the training-time loss value and add it
# to the layer using `self.add_loss()`.
loss = self.loss_fn(targets, logits, sample_weights)
self.add_loss(loss)
# Log accuracy as a metric and add it
# to the layer using `self.add_metric()`.
acc = self.accuracy_fn(targets, logits, sample_weights)
self.add_metric(acc, name="accuracy")
# Return the inference-time prediction tensor (for `.predict()`).
return tf.nn.softmax(logits)
import numpy as np
inputs = keras.Input(shape=(3,), name="inputs")
targets = keras.Input(shape=(10,), name="targets")
logits = keras.layers.Dense(10)(inputs)
predictions = LogisticEndpoint(name="predictions")(logits, targets)
model = keras.Model(inputs=[inputs, targets], outputs=predictions)
model.compile(optimizer="adam") # No loss argument!
data = {
"inputs": np.random.random((3, 3)),
"targets": np.random.random((3, 10)),
}
model.fit(data)
model = get_compiled_model()
model.fit(x_train, y_train, batch_size=64, validation_split=0.2, epochs=1)
model = get_compiled_model()
# First, let's create a training Dataset instance.
# For the sake of our example, we'll use the same MNIST data as before.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Now we get a test dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(64)
# Since the dataset already takes care of batching,
# we don't pass a `batch_size` argument.
model.fit(train_dataset, epochs=3)
# You can also evaluate or predict on a dataset.
print("Evaluate")
result = model.evaluate(test_dataset)
dict(zip(model.metrics_names, result))
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Only use the 100 batches per epoch (that's 64 * 100 samples)
model.fit(train_dataset, epochs=3, steps_per_epoch=100)
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(train_dataset, epochs=1, validation_data=val_dataset)
model = get_compiled_model()
# Prepare the training dataset
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
# Prepare the validation dataset
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
model.fit(
train_dataset,
epochs=1,
# Only run validation using the first 10 batches of the dataset
# using the `validation_steps` argument
validation_data=val_dataset,
validation_steps=10,
)
import numpy as np
class_weight = {
0: 1.0,
1: 1.0,
2: 1.0,
3: 1.0,
4: 1.0,
# Set weight "2" for class "5",
# making this class 2x more important
5: 2.0,
6: 1.0,
7: 1.0,
8: 1.0,
9: 1.0,
}
print("Fit with class weight")
model = get_compiled_model()
model.fit(x_train, y_train, class_weight=class_weight, batch_size=64, epochs=1)
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
print("Fit with sample weight")
model = get_compiled_model()
model.fit(x_train, y_train, sample_weight=sample_weight, batch_size=64, epochs=1)
sample_weight = np.ones(shape=(len(y_train),))
sample_weight[y_train == 5] = 2.0
# Create a Dataset that includes sample weights
# (3rd element in the return tuple).
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train, sample_weight))
# Shuffle and slice the dataset.
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model = get_compiled_model()
model.fit(train_dataset, epochs=1)
image_input = keras.Input(shape=(32, 32, 3), name="img_input")
timeseries_input = keras.Input(shape=(None, 10), name="ts_input")
x1 = layers.Conv2D(3, 3)(image_input)
x1 = layers.GlobalMaxPooling2D()(x1)
x2 = layers.Conv1D(3, 3)(timeseries_input)
x2 = layers.GlobalMaxPooling1D()(x2)
x = layers.concatenate([x1, x2])
score_output = layers.Dense(1, name="score_output")(x)
class_output = layers.Dense(5, name="class_output")(x)
model = keras.Model(
inputs=[image_input, timeseries_input], outputs=[score_output, class_output]
)
keras.utils.plot_model(model, "multi_input_and_output_model.png", show_shapes=True)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
metrics=[
[
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
[keras.metrics.CategoricalAccuracy()],
],
)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={
"score_output": keras.losses.MeanSquaredError(),
"class_output": keras.losses.CategoricalCrossentropy(),
},
metrics={
"score_output": [
keras.metrics.MeanAbsolutePercentageError(),
keras.metrics.MeanAbsoluteError(),
],
"class_output": [keras.metrics.CategoricalAccuracy()],
},
loss_weights={"score_output": 2.0, "class_output": 1.0},
)
# List loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[None, keras.losses.CategoricalCrossentropy()],
)
# Or dict loss version
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss={"class_output": keras.losses.CategoricalCrossentropy()},
)
model.compile(
optimizer=keras.optimizers.RMSprop(1e-3),
loss=[keras.losses.MeanSquaredError(), keras.losses.CategoricalCrossentropy()],
)
# Generate dummy NumPy data
img_data = np.random.random_sample(size=(100, 32, 32, 3))
ts_data = np.random.random_sample(size=(100, 20, 10))
score_targets = np.random.random_sample(size=(100, 1))
class_targets = np.random.random_sample(size=(100, 5))
# Fit on lists
model.fit([img_data, ts_data], [score_targets, class_targets], batch_size=32, epochs=1)
# Alternatively, fit on dicts
model.fit(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
batch_size=32,
epochs=1,
)
train_dataset = tf.data.Dataset.from_tensor_slices(
(
{"img_input": img_data, "ts_input": ts_data},
{"score_output": score_targets, "class_output": class_targets},
)
)
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
model.fit(train_dataset, epochs=1)
model = get_compiled_model()
callbacks = [
keras.callbacks.EarlyStopping(
# Stop training when `val_loss` is no longer improving
monitor="val_loss",
# "no longer improving" being defined as "no better than 1e-2 less"
min_delta=1e-2,
# "no longer improving" being further defined as "for at least 2 epochs"
patience=2,
verbose=1,
)
]
model.fit(
x_train,
y_train,
epochs=20,
batch_size=64,
callbacks=callbacks,
validation_split=0.2,
)
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs):
self.per_batch_losses = []
def on_batch_end(self, batch, logs):
self.per_batch_losses.append(logs.get("loss"))
model = get_compiled_model()
callbacks = [
keras.callbacks.ModelCheckpoint(
# Path where to save the model
# The two parameters below mean that we will overwrite
# the current checkpoint if and only if
# the `val_loss` score has improved.
# The saved model name will include the current epoch.
filepath="mymodel_{epoch}",
save_best_only=True, # Only save a model if `val_loss` has improved.
monitor="val_loss",
verbose=1,
)
]
model.fit(
x_train, y_train, epochs=2, batch_size=64, callbacks=callbacks, validation_split=0.2
)
import os
# Prepare a directory to store all the checkpoints.
checkpoint_dir = "./ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def make_or_restore_model():
# Either restore the latest model, or create a fresh one
# if there is no checkpoint available.
checkpoints = [checkpoint_dir + "/" + name for name in os.listdir(checkpoint_dir)]
if checkpoints:
latest_checkpoint = max(checkpoints, key=os.path.getctime)
print("Restoring from", latest_checkpoint)
return keras.models.load_model(latest_checkpoint)
print("Creating a new model")
return get_compiled_model()
model = make_or_restore_model()
callbacks = [
# This callback saves a SavedModel every 100 batches.
# We include the training loss in the saved model name.
keras.callbacks.ModelCheckpoint(
filepath=checkpoint_dir + "/ckpt-loss={loss:.2f}", save_freq=100
)
]
model.fit(x_train, y_train, epochs=1, callbacks=callbacks)
initial_learning_rate = 0.1
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
optimizer = keras.optimizers.RMSprop(learning_rate=lr_schedule)
keras.callbacks.TensorBoard(
log_dir="/full_path_to_your_logs",
histogram_freq=0, # How often to log histogram visualizations
embeddings_freq=0, # How often to log embedding visualizations
update_freq="epoch",
) # How often to write logs (default: once per epoch)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training and evaluation with the built-in methods
Step2: Introduction
Step3: Here's what the typical end-to-end workflow looks like, consisting of
Step4: We specify the training configuration (optimizer, loss, metrics)
Step5: We call fit(), which will train the model by slicing the data into "batches" of size
Step6: The returned history object holds a record of the loss values and metric values
Step7: We evaluate the model on the test data via evaluate()
Step8: Now, let's review each piece of this workflow in detail.
Step9: The metrics argument should be a list -- your model can have any number of metrics.
Step10: For later reuse, let's put our model definition and compile step in functions; we will
Step11: Many built-in optimizers, losses, and metrics are available
Step12: If you need a loss function that takes in parameters beside y_true and y_pred, you
Step13: Custom metrics
Step14: Handling losses and metrics that don't fit the standard signature
Step15: You can do the same for logging metric values, using add_metric()
Step16: In the Functional API,
Step17: Note that when you pass losses via add_loss(), it becomes possible to call
Step18: You can use it in a model with two inputs (input data & targets), compiled without a
Step19: For more information about training multi-input models, see the section Passing data
Step20: Training & evaluation from tf.data Datasets
Step21: Note that the Dataset is reset at the end of each epoch, so it can be reused for the
Step22: Using a validation dataset
Step23: At the end of each epoch, the model will iterate over the validation dataset and
Step24: Note that the validation dataset will be reset after each use (so that you will always
Step25: Sample weights
Step26: Here's a matching Dataset example
Step27: Passing data to multi-input, multi-output models
Step28: Let's plot this model, so you can clearly see what we're doing here (note that the
Step29: At compilation time, we can specify different losses to different outputs, by passing
Step30: If we only passed a single loss function to the model, the same loss function would be
Step31: Since we gave names to our output layers, we could also specify per-output losses and
Step32: We recommend the use of explicit names and dicts if you have more than 2 outputs.
Step33: You could also choose not to compute a loss for certain outputs, if these outputs are
Step34: Passing data to a multi-input or multi-output model in fit() works in a similar way as
Step35: Here's the Dataset use case
Step36: Using callbacks
Step37: Many built-in callbacks are available
Step38: Checkpointing models
Step39: The ModelCheckpoint callback can be used to implement fault-tolerance
Step40: You call also write your own callback for saving and restoring models.
Step41: Several built-in schedules are available
|
11,791
|
<ASSISTANT_TASK:>
Python Code:
from scipy.fftpack import fft, fftshift
import numpy as np
from math import gcd, ceil, floor
import sys
sys.path.append('../software/models/')
from dftModel import dftAnal, dftSynth
from scipy.signal import get_window
import matplotlib.pyplot as plt
# E3 - 1.1: Complete the function minimize_energy_spread_dft()
def minimize_energy_spread_dft(x, fs, f1, f2):
From a signal with two sinusoids, compute a magnitude spectrum that has only two non-zero values.
Args:
x (np.array): input signal
fs (float): sampling frequency in Hz
f1 (float): frequency of first sinusoid component in Hz
f2 (float): frequency of second sinusoid component in Hz
Returns:
np.array: positive half of magnitude spectrum (in dB)
### Your code here
# E3 - 1.2: Compute and plot the two input signals proposed above, call the function minimize_energy_spread_dft(),
# and plot the output magnitude spectra
### Your code here
# E3 - 2.1: Complete the function test_real_even()
def test_real_even(x):
check if x is real and even using the symmetry properties of its DFT.
Args:
x (np.array): input signal of length M (M is odd)
Returns:
tuple including:
isRealEven (boolean): True if input x is real and even, and False otherwise
dftbuffer (np.array): M point zero phase windowed version of x
X (np.array): M point DFT of dftbuffer
### Your code here
# E3 - 2.2: Plot the input signal proposed above (window signal), call the function test_real_even(),
# and plot its output spectrum (real and imaginary)
### Your code here
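# Standalone illustration (not the graded solution): for a real, even signal
# arranged in a zero-phase buffer, the DFT is purely real.
demo = np.array([1.0, 2.0, 3.0, 2.0, 1.0])    # even around its centre sample
dftbuffer = np.roll(demo, -(len(demo) // 2))  # zero-phase arrangement
print(np.allclose(np.imag(fft(dftbuffer)), 0))  # True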
# E3 - 3.1: Complete the function suppress_freq_dft_model()
def suppress_freq_dft_model(x, fs, N):
Args:
x (np.array): input signal of length M (odd size)
fs (float): sampling frequency (Hz)
N (int): FFT size
Returns:
np.array: output signal with filtering (N samples long)
M = len(x)
w = get_window('hamming', M)
outputScaleFactor = sum(w)
### Your code here
# E3 - 3.2: Compute the input signals proposed above and plot their magnitude spectra (x-axis in Hz),
# call the function suppress_freq_dft_model(), and plot the magnitude spectra of the output signals
### Your code here
# E3 - 4.1: Complete the function zp_fft_size_expt()
def zp_fft_size_expt(x, window_size=[256, 512, 256], FFT_size=[256, 512, 512]):
compute magnitude spectra of x with different window sizes and FFT sizes.
Args:
x (np.array): input signal (512 samples long)
Returns:
list with magnitude spectra (np.array)
### Your code here
# E3 - 4.2: Compute, plot, and play the input signal proposed above, call the function zp_fft_size_expt(), and plot
# the outputs
### Your code here
# E3 - 4.3: Explain the results of Part 4. If we were to estimate the frequency of the sinusoid using its DFT,
# a first principles approach is to choose the frequency value of the bin corresponding to the maximum in the
# DFT magnitude spectrum. If you were to take this approach, which of the magnitude spectra will give you a
# better estimate of the frequency of the sinusoid? Comment and discuss.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 3
Step2: Test cases for minimize_energy_spread_dft()
Step4: Part 2 - Symmetry properties of the DFT
Step5: Test cases for test_real_even()
Step7: Part 3 - Suppressing frequency components using DFT model
Step8: Test case for the function suppress_freq_dft_model()
Step10: Part 4 - Window-size, FFT-size, and zero-padding
Step12: Test cases for the function zp_fft_size_expt()
|
11,792
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
# Create linearly inseparable data
X, _ = make_circles(n_samples=1000, random_state=1, noise=0.1, factor=0.1)
# Apply kernal PCA with radius basis function (RBF) kernel
kpca = KernelPCA(kernel="rbf", gamma=15, n_components=1)
X_kpca = kpca.fit_transform(X)
print('Original number of features:', X.shape[1])
print('Reduced number of features:', X_kpca.shape[1])
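# Optional visual check (illustrative, not part of the original recipe):
# colour the 1-D kernel-PCA projection by the true circle labels to see that
# the two circles become linearly separable.
import matplotlib.pyplot as plt
_, labels = make_circles(n_samples=1000, random_state=1, noise=0.1, factor=0.1)
plt.scatter(X_kpca[:, 0], labels, c=labels)
plt.xlabel('First kernel principal component')
plt.show()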
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Linearly Inseparable Data
Step2: Conduct Kernel PCA
Step3: View Results
|
11,793
|
<ASSISTANT_TASK:>
Python Code:
import array
import binascii
s = b'this is an array'
a = array.array('b', s)
print('As byte string', s)
print('As array ', a)
print('As hex', binascii.hexlify(a))
import array
import pprint
a = array.array('i', range(3))
print('initialize\n', a)
a.extend(range(3))
print('Extend\n',a)
print('Slice \n', a[2:5])
print('Iterartor\n')
print(list(enumerate(a)))
import array
import binascii
import tempfile
a = array.array('i', range(5))
print('A1:', a)
# Write the array of numbers to a temporary file
output = tempfile.NamedTemporaryFile()
a.tofile(output.file) # must pass an *actual* file
output.flush()
# Read the raw data
with open(output.name, 'rb') as input:
raw_data = input.read()
print('Raw Contents:', binascii.hexlify(raw_data))
# Read the data into an array
input.seek(0)
a2 = array.array('i')
a2.fromfile(input, len(a))
print('A2:', a2)
import array
import binascii
def to_hex(a):
chars_per_item = a.itemsize * 2 # 2 hex digits
hex_version = binascii.hexlify(a)
num_chunks = len(hex_version) // chars_per_item
for i in range(num_chunks):
start = i * chars_per_item
end = start + chars_per_item
yield hex_version[start:end]
start = int('0x12345678', 16)
end = start + 5
a1 = array.array('i', range(start, end))
a2 = array.array('i', range(start, end))
a2.byteswap()
fmt = '{:>12} {:>12} {:>12} {:>12}'
print(fmt.format('A1 hex', 'A1', 'A2 hex', 'A2'))
print(fmt.format('-' * 12, '-' * 12, '-' * 12, '-' * 12))
fmt = '{!r:>12} {:12} {!r:>12} {:12}'
for values in zip(to_hex(a1), a1, to_hex(a2), a2):
print(fmt.format(*values))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Manuipulating Arrays
Step2: Arrays and Files
Step3: Alternative Byte Ordering
|
11,794
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.metrics import normalized_mutual_info_score
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage, fcluster
from sklearn.datasets import make_blobs  # samples_generator module was removed in newer scikit-learn
import numpy as np
X, y = make_blobs(n_samples=90, centers=4, n_features=3, random_state=4)
plt.scatter(X[:, 0], X[:, 1])
plt.show()
Z = linkage(X, method="single", metric="euclidean")
print(Z.shape)
Z
plt.figure(figsize=(25, 10))
plt.title("Hierarchical Clustering Dendrogram")
plt.xlabel("Samples indexes")
plt.ylabel("distance")
dendrogram(Z, leaf_rotation=90., leaf_font_size=8. )
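# Illustrative addition: mark the distance threshold (t=2) at which fcluster
# cuts the tree below.
plt.axhline(y=2, color='grey', linestyle='--')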
plt.show()
cluster = fcluster(Z, 2, criterion="distance")
cluster
plt.figure(figsize=(10, 8))
plt.scatter(X[:, 0], X[:, 1], c=cluster, cmap="Accent")
plt.savefig("clusters.png")
plt.show()
normalized_mutual_info_score(y, cluster)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating Sample data
Step2: Performing Hierarchical clustering
Step3: Plotting dendrogram
Step4: Retrive the clusters
Step5: Plotting Clusters
Step6: Evaluating clusters
|
11,795
|
<ASSISTANT_TASK:>
Python Code:
arthur = "king"
lancelot = -23
robin = 1.99
bedevere = True
arthur = "king"
type(arthur)
lancelot = -23
type(lancelot)
robin = 1.99
type(robin)
bedevere = True
type(bedevere)
galahad = 1
galahad = 57
galahad
patsy = 2
patsy = "Clip clop"
type(patsy)
zoot = float(5)
zoot = 5.0
zoot
type(zoot)
zoot = "5"
type(zoot)
zoot = str(5)
zoot
type(zoot)
arthur = "king"
galahad = "5"
print(arthur + galahad)
robin = 4
bedevere = 5.0
print(robin * bedevere)
int(20.7)
import sys
print(sys.maxsize)  # Python 3 removed sys.maxint; sys.maxsize is the closest equivalent
print(sys.float_info)
cold = True
if cold:
print("Wear a coat.")
x = 1
y = 2
y != x
name1 = "Mary1"
name2 = "Mary"
name1 < name2
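# Illustrative extra (not in the original lesson): relational results can be
# combined with the logical operators and, or, not.
x = 1
y = 2
print(x < y and y < 3)  # True: both comparisons hold
print(x > y or y == 2)  # True: the second comparison holds
print(not x == 1)       # False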
temperature = 10
if (temperature < 12):
print("Wear a coat.")
temperature = 39.5
if temperature < 40.0:
print("A little cold, isn't it?")
print("Wear a coat")
else:
print("Nice weather we're having")
print("Don't wear a coat")
print("Done the if-statement")
salary = 40000
years_on_the_job = 2
if salary >= 30000:
if years_on_the_job >= 2:
print("You qualify for the loan")
else:
print("You must have been on your current job for at least two years to qualify.")
else:
print("You must earn at least $30,000 per year to qualify.")
score = 45
if score >= 90:
print("Your grade is A")
elif score >= 80:
print("Your grade is B")
elif score >= 70:
print("Your grade is C")
else:
print ("Restart. Try again.")
print("Done with if-statement")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How does Python know the data type?
Step2: Reassigning Variables
Step3: Variables can also change types after initial assignment
Step4: Value can also be forced to take on a data type
Step5: Ranges of Data Types
Step6: Decision Structures
Step7: Boolean Expressions and Relational Operators
Step8: Comparing Strings
Step9: Putting Together Concepts
Step10: Alternate Paths
Step11: Nested Decision Structure
Step12: Multiple Nested Decision Structures
|
11,796
|
<ASSISTANT_TASK:>
Python Code:
import re
import pandas as pd
import urllib.request
frequency = {}
document_text = urllib.request.urlopen \
('http://www.textfiles.com/etext/FICTION/bronte-jane-178.txt') \
.read().decode('utf-8')
text_string = document_text.lower()
match_pattern = re.findall(r'\b[a-z]{3,15}\b', text_string)
for word in match_pattern:
count = frequency.get(word,0)
frequency[word] = count + 1
frequency_list = frequency.keys()
d = []
for word in frequency_list:
d.append({'word': word, 'Frequency': frequency[word]})
df = pd.DataFrame(d)
df1 = df.sort_values(by="Frequency", ascending=False)
# the next line displays the first number of rows you select
df1.head(10)
df2 = df1.query('word not in \
("the","and","it","was","for","but","that") \
')
df2.head(10)
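# Illustrative extension (hypothetical stop-word list): filtering with
# isin() scales better than hand-writing a query string.
stopwords = {"the", "and", "it", "was", "for", "but", "that",
             "you", "her", "she", "his", "had", "with", "not"}
df3 = df1[~df1.word.isin(stopwords)]
df3.head(10)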
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Word frequency list
Step2: Filtering the results
|
11,797
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy
x = numpy.linspace(0, 1)
y1 = numpy.sin(numpy.pi * x) + 0.1 * numpy.random.rand(50)
y2 = numpy.cos(3.0 * numpy.pi * x) + 0.2 * numpy.random.rand(50)
from matplotlib import pyplot
pyplot.plot(x, y1)
pyplot.show()
pyplot.plot(x, y1)
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('A single line plot')
pyplot.show()
pyplot.plot(x, y1, linestyle='--', color='black', linewidth=3)
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title(r'A single line plot, roughly $\sin(\pi x)$')
pyplot.show()
pyplot.plot(x, y1, label=r'$y_1$')
pyplot.plot(x, y2, label=r'$y_2$')
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title('Two line plots')
pyplot.legend(loc='lower left')
pyplot.show()
fig, axes = pyplot.subplots(nrows=1, ncols=2, figsize=(10,6))
axis1 = axes[0]
axis1.plot(x, y1)
axis1.set_xlabel(r'$x$')
axis1.set_ylabel(r'$y_1$')
axis2 = axes[1]
axis2.plot(x, y2)
axis2.set_xlabel(r'$x$')
axis2.set_ylabel(r'$y_2$')
fig.tight_layout()
pyplot.show()
data = []
for nx in range(2,5):
for ny in range(2,5):
data.append(numpy.sin(nx * numpy.pi * x) + numpy.cos(ny * numpy.pi * x))
fig, axes = pyplot.subplots(nrows=3, ncols=3, figsize=(10,10))
for nrow in range(3):
for ncol in range(3):
ndata = ncol + 3 * nrow
axes[nrow, ncol].plot(x, data[ndata])
axes[nrow, ncol].set_xlabel(r'$x$')
axes[nrow, ncol].set_ylabel(r'$\sin({} \pi x) + \cos({} \pi x)$'.format(nrow+2, ncol+2))
fig.tight_layout()
pyplot.show()
import pandas
iris = pandas.read_csv('https://raw.githubusercontent.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv')
iris.columns
iris.dtypes
# print first 5 records
iris.head()
iris['PetalLength'].plot.hist()
pyplot.show()
iris.describe()
print(iris['SepalLength'].min())
print(iris['PetalLength'].std())
print(iris['PetalWidth'].count())
grouped_iris = iris.groupby('Name')
grouped_iris.describe()
grouped_iris['PetalLength'].mean()
iris[iris.PetalLength > 5].head()
iris[(iris.Name == 'Iris-setosa') & (iris.PetalWidth < 0.3)].head()
dfs = {'indie': pandas.read_csv('spotify_data/indie.csv'), 'pop': pandas.read_csv('spotify_data/pop.csv'),
'country': pandas.read_csv('spotify_data/country.csv'), 'metal': pandas.read_csv('spotify_data/metal.csv'),
'house': pandas.read_csv('spotify_data/house.csv'), 'rap': pandas.read_csv('spotify_data/rap.csv')}
# add genre field to each dataframe
for name, df in dfs.items():
df['genre'] = name
# combine into single dataframe
data = pandas.concat(dfs.values())
data
data.index.is_unique
data.iloc[46]
data.set_index('id', inplace=True)
data.index.is_unique
data = data[~data.index.duplicated(keep='first')]
data.index.is_unique
data.to_csv('spotify_data/combined_data.csv')
data[['duration_ms', 'explicit', 'popularity', 'acousticness', 'danceability', 'energy', 'instrumentalness',
'liveness', 'loudness', 'speechiness', 'tempo', 'valence', 'genre']].groupby('genre').mean()
data.sort_values('valence')[['album', 'artists', 'name', 'genre', 'valence']].groupby('genre').head(1)
colours = ['red', 'blue', 'green', 'orange', 'pink', 'purple']
ax = data[data.genre == 'country'].plot.scatter('danceability', 'energy', c=colours[0], label='country', figsize=(10,10))
data[data.genre == 'metal'].plot.scatter('danceability', 'energy', c=colours[1], marker='x', label='metal', ax=ax)
data[data.genre == 'house'].plot.scatter('danceability', 'energy', c=colours[2], marker='+', label='house', ax=ax)
survey = pandas.read_excel('https://github.com/IanHawke/msc-or-week0/blob/master/excel_data/surveys.xlsx?raw=true')
survey.head()
len(survey) - survey.count()
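# Note (added for clarity): the conversion below raises a ValueError while
# NaN entries are present, since NaN cannot be represented as an integer.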
survey.weight.astype('int')
survey.dropna()
survey.dropna(subset=['weight'])
nan_zeros = survey.copy() # make a copy so we don't overwrite original dataframe
nan_zeros.weight.fillna(0, inplace=True)
nan_zeros.head()
print(survey.weight.mean(), nan_zeros.weight.mean())
nan_mean = survey.copy()
nan_mean.weight.fillna(survey.weight.mean(), inplace=True)
print(survey.weight.mean(), nan_mean.weight.mean())
nan_mean.head()
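# Illustrative alternative (assumption, not from the original notebook): the
# median is more robust to outliers than the mean when imputing weights.
nan_median = survey.copy()
nan_median.weight.fillna(survey.weight.median(), inplace=True)
nan_median.head()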
dice = pandas.read_excel('https://github.com/IanHawke/msc-or-week0/blob/master/excel_data/dice-roll-example.xlsx?raw=true', skiprows=5)
print(dice.columns)
dice = dice[['# 1', '# 2', '# 3']]
print(dice.columns)
dice.describe()
for name, df in iris.groupby('Name'):
# create a new figure
pyplot.figure()
# plot histogram of sepalwidth
df['SepalWidth'].plot.hist()
# add title
pyplot.title(name)
# create a new axis
fig, axis = pyplot.subplots()
# create a dictionary of colours
colours = {'indie': 'red', 'pop': 'blue',
'country': 'green', 'metal': 'black',
'house': 'orange', 'rap': 'pink'}
# create a dictionary of markers
markers = {'indie': '+', 'pop': 'x',
'country': 'o', 'metal': 'd',
'house': 's', 'rap': '*'}
for name, df in data.groupby('genre'):
df.plot.scatter('acousticness', 'liveness', label=name, s=30, color=colours[name], marker=markers[name],
ax=axis, figsize=(10,8), fontsize=16)
# set limits of x and y axes so that they are between 0 and 1
axis.set_xlim([0,1.0])
axis.set_ylim([0,1.0])
# set the font size of the axis labels
axis.xaxis.label.set_fontsize(16)
axis.yaxis.label.set_fontsize(16)
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above command is only needed if you are plotting in a Jupyter notebook.
Step2: And then produce a line plot
Step3: We can add labels and titles
Step4: We can change the plotting style, and use LaTeX style notation where needed
Step5: We can plot two lines at once, and add a legend, which we can position
Step6: We would probably prefer to use subplots. At this point we have to leave the simple interface, and start building the plot using its individual components, figures and axes, which are objects to manipulate
Step7: The axes variable contains all of the separate axes that you may want. This makes it easy to construct many subplots using a loop
Step8: Matplotlib will allow you to generate and place axes pretty much wherever you like, to use logarithmic scales, to do different types of plot, and so on. Check the examples and gallery for details.
Step9: Let's get some information about the file we just read in. First, let's see what data fields our dataset has
Step10: Now let's see what datatype (i.e. integer, boolean, string, float,...) the data in each field is
Step11: Finally, let's try printing the first few records in our dataframe
Step12: Note that pandas can read Excel files (using pandas.read_excel), and takes as arguments either the URL (as here) or the filename on the local machine.
Step13: We can see the underlying library is Matplotlib, but pandas makes it far easier to plot large data sets.
Step14: We can also extract specific metrics
Step15: However, we often wish to calculate statistics for a subset of our data. For this, we can use pandas' groups. Let's group our data by Name and try running describe again. We see that pandas has now calculated statistics for each type of iris separately.
Step16: We can select subsets of our data using criteria. For example, we can select all records with PetalLength greater than 5
Step17: We can also combine criteria like so
Step18: Data across multiple files
Step19: To compare the data from these different datasets, it will help if we first combine them into a single dataframe. Before we do this, we'll add an extra field to each of our dataframes describing the musical genre so that we do not lose this information when the dataframes are combined.
Step20: This has given us a fairly sizeable dataframe with 513 rows and 32 columns. However, if you look closely at the index column you'll notice something dodgy has happened: combining our dataframes has meant that the index field is no longer unique (multiple records share the same index).
Step21: This is not good. Looking at the printout of the dataframe above, we see that the last record is LOYALTY. by Kendrick Lamar and has index 46. However, if we try to access the record with index 46, we instead get Rebellion (Lies) by Arcade Fire.
Step22: We can remedy this by reindexing. Looking at the fields available, it looks like the tracks' id would be a good choice for a unique index.
Step23: Unfortunately, there are still duplicates where the same track appears in multiple playlists. Let's remove these duplicates, keeping only the first instance.
Step24: Success! Before we do anything else, let's write our single combined dataset to file.
Step25: Now onto some analysis. Let's first look at some statistics for each of our genres.
Step26: From this alone we can get a lot of information
Step27: We can visualise our data by plotting the various characteristics against each other. In the plot below, we compare the energy and danceability of country, metal and house music. The data from the three different genres separates into three pretty distinct clusters.
Step28: More information about pandas can be found in the documentation, or in tutorials or in standard books.
Step29: In the weight column, instead of a number as we may expect, we see the values are 'NaN' or 'Not a Number'. If you open the original spreadsheet, you'll see that the original weight data is missing for these records. The count function returns the number of non-NaN entries per column, so if we subtract that from the length of the survey, we can see how many NaN entries there are per column
Step30: We need to work out a sensible way to deal with this missing data, since if we try to do any analysis on the dataset in its current state, Python may throw a ValueError. For example, let's try converting the data in the weight column to an integer
Step31: There are several different ways we can deal with NaNs - which we choose depends on the individual dataset.
Step32: We may just wish to discard records with NaNs in a particular column (e.g. if we wish to deal with NaNs in other columns in a different way). We can discard all the records with NaNs in the weight column like so
Step33: It may be more appropriate to replace all missing data with a certain value. For example, let's set all missing weights to 0
Step34: For our dataset, this is not the best choice as it will change the mean of our data
Step35: A better solution here is to fill all NaN values with the mean weight value
Step36: Exercises
Step37: In the solution below for the music genre exercise, we've included a few extra steps in order to format the plot and make it more readable (e.g. changing the axis limits, increasing the figure size and fontsize).
|
11,798
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv("./2017-german-election-overall.csv")
type(df)
df
df.info()
df.describe()
df.copy()
df.shape
list(df.columns)
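# rename the dotted column name and shorten 'area_names'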
df.rename(columns = {'registered.voters':'registered_voters', 'area_names':'area'}, inplace = True)
df
df.drop(['invalid_second_votes', 'valid_second_votes'], axis=1, inplace = True)
df
df.drop([7,9,12], axis=0, inplace = True)
df
df.head(8)
df.tail(8)
df.head()
df.tail()
df[0:11]
df[:11]
df.head(11)
type(df['registered_voters'])
df['registered_voters']
df['registered_voters'].describe()
df.registered_voters.describe()
type(df[['registered_voters', 'total_votes']])
df[['registered_voters', 'total_votes']].mean()
df[['registered_voters', 'total_votes']].mean()[1]
df[['registered_voters', 'total_votes']].corr()
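# numeric_only=True skips the non-numeric columns; recent pandas versions raise a TypeError without it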
df.mean(numeric_only=True)
df.corr(numeric_only=True)
pd.isnull(df)
df.isnull()
df['state'].isnull()
pd.isnull(df['state'])
df['state'].unique()
df.state.unique()
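# a boolean mask: True for every row whose state is Berlin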
mask = df['state'] == 'Berlin'
mask
mask = df.state == 'Berlin'
mask
df[mask]
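# conditions combined with | (or) must each be wrapped in parentheses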
mask = (df['state'] == 'Berlin') | (df['state'] == 'Bayern')
df[mask]
df[mask][['area', 'registered_voters']]
df[mask][['area', 'registered_voters']].index
df.iloc[7]
df.iloc[7]['area']
df.iloc[7:12]
df.iloc[7:12]['area']
df.iloc[[7, 8, 11, 13]]
df.iloc[[7, 8, 11, 13]]['area']
df.loc[5]
df.loc[[5,8,10]]
df.loc[5, 'state']
df.loc[[5,10,11], 'state']
df.loc[[5,10,11], 'state'] = 'unknown'
df.loc[[5,10,11], ['area', 'state']]
df.loc[df['state'] == 'Berlin']
df.at[11, 'area']
df.at[11, 'area'] = 'unknown'
df.sort_values('total_votes', ascending = False)
df.sort_values(['state', 'area'], ascending = True)
df.groupby('state')['registered_voters'].sum()
df.groupby(['state', 'area'])['registered_voters'].sum()
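# derive a new column from two existing ones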
df['difference'] = df['valid_first_votes'] - df['invalid_first_votes']
df
df['registered_voters'].apply(lambda x: float(x+1))
df[['registered_voters', 'total_votes']].applymap(lambda x: 'votes='+str(x))
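# iterrows() yields (index, row) pairs, where each row is a Series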
for (index, record) in df.iterrows():
print(str(index) + ' ' + record['state'])
df.to_csv('./output.csv', index = False)
df.registered_voters.plot(label="Registered voters", legend=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading a csv file with Pandas
Step2: Getting information about the data frame
Step3: Getting a copy of a data frame
Step4: The shape and columns attributes
Step5: Renaming the columns
Step6: Removing columns
Step7: Removing rows by index
Step8: Getting the first/last rows
Step9: Selecting rows by position (slicing)
Step10: Selecting a column
Step11: Alternatively, the dot notation can be used
Step12: Selecting columns
Step13: Note
Step14: Checking for null values
Step15: The expressions
Step16: The unique() method
Step17: Selecting the rows that satisfy a given condition
Step18: The expression
Step19: Getting the indices of the rows that satisfy a given condition
Step20: Locating rows with iloc[]
Step21: The expression
Step22: The expression
Step23: Using loc[]
Step24: accessing multiple rows by their indices: df.loc[[index1, index2, ...]]
Step25: accessing a single value of the data frame: df.loc[index, column_name]
Step26: accessing multiple values of the data frame: df.loc[[index1, index2, ...], column_name]
Step27: accessing multiple values of the data frame: df.loc[[index1, index2, ...], [column_name1, column_name2, ...]]
Step28: accessing the rows that satisfy a given condition: df.loc[mask]
Step29: Getting a value by index with at[]
Step30: Sorting values
Step31: Grouping values
Step32: Adding a column
Step33: Applying a function to a Series object
Step34: Applying a function to a DataFrame object
Step35: How to iterate over the records of a data frame
Step36: Writing a data frame to a file in csv format
Step37: Calling matplotlib from Pandas
|
11,799
|
<ASSISTANT_TASK:>
Python Code:
import arcpy as ARCPY
import arcgisscripting as ARC
import SSDataObject as SSDO
import SSUtilities as UTILS
import WeightsUtilities as WU
import numpy as NUM
import scipy as SCIPY
import pysal as PYSAL
import os as OS
import pandas as PANDAS
inputFC = r'../data/CA_Polygons.shp'
fullFC = OS.path.abspath(inputFC)
fullPath, fcName = OS.path.split(fullFC)
ssdo = SSDO.SSDataObject(inputFC)
uniqueIDField = "MYID"
fieldNames = ['GROWTH', 'LOGPCR69', 'PERCNOHS', 'POP1969']
ssdo.obtainData(uniqueIDField, fieldNames)
df = ssdo.getDataFrame()
print(df.head())
import pysal2ArcUtils as PYSAL_UTILS
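# load the queen-contiguity spatial weights built in ArcGIS (.swm format)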
swmFile = OS.path.join(fullPath, "queen.swm")
W = PYSAL_UTILS.PAT_W(ssdo, swmFile)
w = W.w
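# also load the 8-nearest-neighbour kernel weights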
kernelSWMFile = OS.path.join(fullPath, "knn8.swm")
KW = PYSAL_UTILS.PAT_W(ssdo, kernelSWMFile)
kw = KW.w
import AutoModel as AUTO
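# run the automatic model search, starting from OLS; pValue presumably sets the significance threshold for the spatial diagnostics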
auto = AUTO.AutoSpace_PySAL(ssdo, "GROWTH", ['LOGPCR69', 'PERCNOHS', 'POP1969'],
W, KW, pValue = 0.1, useCombo = True)
ARCPY.env.overwriteOutput = True
outputFC = r'../data/pysal_automodel.shp'
auto.createOutput(outputFC)
print(auto.olsModel.summary)
print(auto.finalModel.summary)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example
Step2: Use the PySAL-ArcGIS Utilities to Read in Spatial Weights Files
Step3: Run the Auto Model Class and Export Your Data to an Output Feature Class
Step4: Compare OLS and Spatial Lag Results
|