markdown | code | path | repo_name | license
|---|---|---|---|---|
The blocking signature of the following case is fairly distinctive: an upward gap occurred only two days before the trade:
|
print('Cluster with the highest failure probability: {0}'.format(ump_jump.cprs.lrs.argmax()))
# Extract the cluster with the highest gap-failure probability
max_failed_cluster_orders = ump_jump.nts[ump_jump.cprs.lrs.argmax()]
# Show the cluster with the highest failure probability, as in Table 11-6
max_failed_cluster_orders
ml.show_orders_hist(max_failed_cluster_orders, feature_columns=['buy_diff_up_days', 'buy_jump_up_power',
                                                                'buy_diff_down_days', 'buy_jump_down_power'])
print('Cluster mean jump_up_power: {0:.2f}, mean days since upward gap: {1:.2f}'.format(
    max_failed_cluster_orders.buy_jump_up_power.mean(), max_failed_cluster_orders.buy_diff_up_days.mean()))
print('Cluster mean jump_down_power: {0:.2f}, mean days since downward gap: {1:.2f}'.format(
    max_failed_cluster_orders.buy_jump_down_power.mean(), max_failed_cluster_orders.buy_diff_down_days.mean()))
print('Training set mean jump_up_power: {0:.2f}, mean days since upward gap: {1:.2f}'.format(
    orders_pd_train.buy_jump_up_power.mean(), orders_pd_train.buy_diff_up_days.mean()))
print('Training set mean jump_down_power: {0:.2f}, mean days since downward gap: {1:.2f}'.format(
    orders_pd_train.buy_jump_down_power.mean(), orders_pd_train.buy_diff_down_days.mean()))
|
ipython/第十一章-量化系统——机器学习•ABU.ipynb
|
bbfamily/abu
|
gpl-3.0
|
11.2.4 Price main umpire
Please read this section alongside the ABU quantitative system documentation: Section 16, UMP main umpire trade decisions
|
from abupy import AbuUmpMainPrice
ump_price = AbuUmpMainPrice.ump_main_clf_dump(orders_pd_train, save_order=False)
ump_price.fiter.df.head()
print('Cluster with the highest failure probability: {0}'.format(ump_price.cprs.lrs.argmax()))
# Extract the cluster with the highest price-failure probability
max_failed_cluster_orders = ump_price.nts[ump_price.cprs.lrs.argmax()]
# As in Table 11-8
max_failed_cluster_orders
|
ipython/第十一章-量化系统——机器学习•ABU.ipynb
|
bbfamily/abu
|
gpl-3.0
|
11.2.5 Wave (volatility) main umpire
Please read this section alongside the ABU quantitative system documentation: Section 16, UMP main umpire trade decisions
|
from abupy import AbuUmpMainWave
ump_wave = AbuUmpMainWave.ump_main_clf_dump(orders_pd_train, save_order=False)
ump_wave.fiter.df.head()
print('Cluster with the highest failure probability: {0}'.format(ump_wave.cprs.lrs.argmax()))
# Extract the cluster with the highest wave-feature failure probability
max_failed_cluster_orders = ump_wave.nts[ump_wave.cprs.lrs.argmax()]
# As in Table 11-10
max_failed_cluster_orders
ml.show_orders_hist(max_failed_cluster_orders, feature_columns=['buy_wave_score1', 'buy_wave_score3'])
print('Cluster mean wave_score1: {0:.2f}'.format(
    max_failed_cluster_orders.buy_wave_score1.mean()))
print('Cluster mean wave_score3: {0:.2f}'.format(
    max_failed_cluster_orders.buy_wave_score3.mean()))
ml.show_orders_hist(orders_pd_train, feature_columns=['buy_wave_score1', 'buy_wave_score3'])
print('Training set mean wave_score1: {0:.2f}'.format(
    orders_pd_train.buy_wave_score1.mean()))
print('Training set mean wave_score3: {0:.2f}'.format(
    orders_pd_train.buy_wave_score3.mean()))
|
ipython/第十一章-量化系统——机器学习•ABU.ipynb
|
bbfamily/abu
|
gpl-3.0
|
11.2.6 Verifying that the main umpires are competent
Please read this section alongside the ABU quantitative system documentation: Section 21, A-share UMP decisions
|
# Select only the trades that have a result: order_has_result
order_has_result = abu_result_tuple_test.orders_pd[abu_result_tuple_test.orders_pd.result != 0]
ump_wave.best_hit_cnt_info(ump_wave.llps)
from abupy import AbuUmpMainDeg, AbuUmpMainJump, AbuUmpMainPrice, AbuUmpMainWave
# Assumed imports (brought in earlier in the original notebook): progress helper and joblib parallelism
from abupy import AbuMulPidProgress
from joblib import Parallel, delayed
ump_deg = AbuUmpMainDeg(predict=True)
ump_jump = AbuUmpMainJump(predict=True)
ump_price = AbuUmpMainPrice(predict=True)
ump_wave = AbuUmpMainWave(predict=True)
def apply_ml_features_ump(order, predicter, progress, need_hit_cnt):
    if not isinstance(order.ml_features, dict):
        import ast
        # With older pandas versions the dict comes back from the DataFrame as a str
        ml_features = ast.literal_eval(order.ml_features)
    else:
        ml_features = order.ml_features
    progress.show()
    # Pass the buy-time features of the order to the UMP main umpire, letting each umpire decide whether to block the trade
    return predicter.predict_kwargs(need_hit_cnt=need_hit_cnt, **ml_features)
def parallel_func(ump, ump_name):
    with AbuMulPidProgress(len(order_has_result), '{} complete'.format(ump_name)) as progress:
        # Start a multi-process progress bar and apply over order_has_result
        ump_result = order_has_result.apply(apply_ml_features_ump, axis=1, args=(ump, progress, 2,))
    return ump_name, ump_result
# Run the four main umpires in parallel, one process per umpire making blocking decisions
parallel = Parallel(
    n_jobs=4, verbose=0, pre_dispatch='2*n_jobs')
out = parallel(delayed(parallel_func)(ump, ump_name)
               for ump, ump_name in zip([ump_deg, ump_jump, ump_price, ump_wave],
                                        ['ump_deg', 'ump_jump', 'ump_price', 'ump_wave']))
# Gather the blocking decisions made by the umpires in each process
for sub_out in out:
    order_has_result[sub_out[0]] = sub_out[1]
block_pd = order_has_result.filter(regex='^ump_*')
# Sum the decisions of all the main umpires
block_pd['sum_bk'] = block_pd.sum(axis=1)
block_pd['result'] = order_has_result['result']
# Any trade that received at least one blocking vote is intercepted
block_pd = block_pd[block_pd.sum_bk > 0]
print('Overall blocking accuracy of the four umpires: {:.2f}%'.format(block_pd[block_pd.result == -1].result.count() / block_pd.result.count() * 100))
block_pd.tail()
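# sub_ump_show is not defined in this excerpt; a minimal sketch of what it likely does,
# inferred from how it is used below (per-umpire blocking accuracy and blocked-trade count):
def sub_ump_show(ump_name):
    sub_block = order_has_result[order_has_result[ump_name] > 0]
    accuracy = sub_block[sub_block.result == -1].result.count() / sub_block.result.count() * 100
    return accuracy, sub_block.result.count()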
print('Degree umpire blocking accuracy: {:.2f}%, trades blocked: {}'.format(*sub_ump_show('ump_deg')))
print('Jump umpire blocking accuracy: {:.2f}%, trades blocked: {}'.format(*sub_ump_show('ump_jump')))
print('Wave umpire blocking accuracy: {:.2f}%, trades blocked: {}'.format(*sub_ump_show('ump_wave')))
print('Price umpire blocking accuracy: {:.2f}%, trades blocked: {}'.format(*sub_ump_show('ump_price')))
|
ipython/第十一章-量化系统——机器学习•ABU.ipynb
|
bbfamily/abu
|
gpl-3.0
|
<hr> Turning on tooltips
Many plot types let you specify tooltips with the labels argument and the tooltips=True setting. First, turn on the setting for a simple scatter plot, and try clicking a point -- you should see its x and y values appear above.
|
x = random.rand(10)
y = random.rand(10)
lgn.scatter(x, y, size=10, tooltips=True)
|
misc/tooltips.ipynb
|
RaoUmer/lightning-example-notebooks
|
mit
|
Now let's try adding explicit text labels. We'll make labels based on random group assignments.
|
x = random.rand(10)
y = random.rand(10)
g = (random.rand(10) * 5).astype('int')
lgn.scatter(x, y, size=10, labels=['group ' + str(i) for i in g], tooltips=True, group=g)
|
misc/tooltips.ipynb
|
RaoUmer/lightning-example-notebooks
|
mit
|
<hr> Labeling graph vertices
A common use case for tooltips is in labeling graphs. Here we'll make a simple force network and label the vertices based on a group assignment.
|
mat = random.rand(25,25)
mat[mat<0.8] = 0
group = (random.rand(25) * 5).astype('int')
labels = ['vertex ' + str(g) for g in group]
lgn.force(mat, labels=labels, group=group)
|
misc/tooltips.ipynb
|
RaoUmer/lightning-example-notebooks
|
mit
|
2) What's the current wind speed? How much warmer does it feel than it actually is?
|
print('The current wind speed is', data['currently']['windSpeed'], 'miles per hour.')
print('It feels', round(data['currently']['apparentTemperature'] - data['currently']['temperature'], 2), 'degrees Fahrenheit warmer than it actually is.')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
3) The first daily forecast is the forecast for today. For the place you decided on up above, how much of the moon is currently visible?
|
# #temp. Answer: dict
# print(type(data['daily']))
# #temp. Answer: ['summary', 'data', 'icon']
# print(data['daily'].keys())
# #temp. Answer: list
# print(type(data['daily']['data']))
# #temp. It's a list of dictionaries
# #this time means Wed, 08 Jun 2016 05:00:00 GMT, which is currently today
# print(data['daily']['data'][0])
# #this time means Thu, 09 Jun 2016 05:00:00 GMT
# print(data['daily']['data'][1])
# #temp. Answer: 8
# print(len(data['daily']['data']))
# #temp. Answer: ['windSpeed', 'time', 'sunsetTime', 'precipIntensityMaxTime', 'apparentTemperatureMax', 'windBearing',
# #'temperatureMinTime', 'precipIntensityMax', 'precipProbability', 'sunriseTime', 'temperatureMin',
# #'apparentTemperatureMaxTime', 'precipIntensity', 'apparentTemperatureMinTime', 'temperatureMax', 'dewPoint',
# #'temperatureMaxTime', 'icon', 'moonPhase', 'precipType', 'visibility', 'cloudCover', 'pressure',
# #'apparentTemperatureMin', 'ozone', 'humidity', 'summary']
# print(data['daily']['data'][0].keys())
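# moonPhase runs from 0 (new moon) through 0.5 (full moon) and back to 1 (the next new moon),
# so the visible fraction is 1 - |1 - 2*phase|, as computed below.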
today_moon = data['daily']['data'][0]['moonPhase']
print(100 * (1 - abs(1 - (today_moon * 2))), 'percent of the moon is visible today.')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
4) What's the difference between the high and low temperatures for today?
|
print('The difference between today\'s high and low temperatures is', round(data['daily']['data'][0]['temperatureMax'] - data['daily']['data'][0]['temperatureMin'], 2), 'degrees Fahrenheit.')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
5) Loop through the daily forecast, printing out the next week's worth of predictions. I'd like to know the high temperature for each day, and whether it's hot, warm, or cold, based on what temperatures you think are hot, warm or cold.
|
daily_forecast = data['daily']['data']
print('Starting with today\'s, the forecasts for the next week are for highs of:')
for day in daily_forecast:
    if 85 <= day['temperatureMax']:
        warmth = 'hot'
    elif 70 <= day['temperatureMax'] < 85:
        warmth = 'warm'
    else:
        warmth = 'cold'
    print(day['temperatureMax'], 'degrees Fahrenheit, a pretty', warmth, 'day.')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
6) What's the weather looking like for the rest of today in Miami, Florida? I'd like to know the temperature for every hour, and if it's going to have cloud cover of more than 0.5 say "{temperature} and cloudy" instead of just the temperature.
|
fl_url = 'https://api.forecast.io/forecast/' + apikey + '/' + coordinates['Miami']
fl_response = requests.get(fl_url)
fl_data = fl_response.json()
# #temp. Answer: dict
# print(type(fl_data['hourly']))
# #temp. Answer: ['summary', 'data', 'icon']
# print(fl_data['hourly'].keys())
# #temp. Answer: list
# print(type(fl_data['hourly']['data']))
# #temp. Answer: 49
# print(len(fl_data['hourly']['data']))
# #temp. It's a list of dictionaries
# #the top of this hour
# print(fl_data['hourly']['data'][0])
# #the top of next hour
# print(fl_data['hourly']['data'][1])
# #temp. Answer: ['precipType', 'time', 'apparentTemperature', 'windSpeed', 'icon', 'summary', 'precipProbability',
# #'visibility', 'cloudCover', 'pressure', 'windBearing', 'ozone', 'humidity', 'precipIntensity', 'temperature',
# #'dewPoint']
# print(fl_data['hourly']['data'][0].keys())
# # how many hours are left in the day in EDT: (24 - ((time % 86400)/3600 - 4))
# times = [1465423200, 1465426800]
# for time in times:
# print (24 - ((time % 86400)/3600 - 4))
hourly_data = fl_data['hourly']['data']
hours_left = range(int(24 - ((hourly_data[0]['time'] % 86400)/3600 - 4)))
print('Starting with this hour, the hourly forecasts for the rest of the day are for:')
for hour in hours_left:
    if hourly_data[hour]['cloudCover'] > .5:
        print(hourly_data[hour]['temperature'], 'degrees Fahrenheit and cloudy')
    else:
        print(hourly_data[hour]['temperature'], 'degrees Fahrenheit')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
7) What was the temperature in Central Park on Christmas Day, 1980? How about 1990? 2000?
Tip: You'll need to use UNIX time, which is the number of seconds since January 1, 1970. Google can help you convert a normal date!
Tip: You'll want to use Forecast.io's "time machine" API at https://developer.forecast.io/docs/v2
|
decades = range(3)
for decade in decades:
    cp_url = 'https://api.forecast.io/forecast/' + apikey + '/' + coordinates['Central Park'] + ',' + str(10 * decade + 1980) + '-12-25T12:00:00'
    cp_response = requests.get(cp_url)
    cp_data = cp_response.json()
    print('On Christmas Day in', str(1980 + decade * 10) + ', the high in Central Park was', cp_data['daily']['data'][0]['temperatureMax'], 'degrees Fahrenheit.')
|
06/homework-6-schuetz_graded.ipynb
|
raschuetz/foundations-homework
|
mit
|
Slicing our Corpus
To generate a time-variant network, we must first slice our Corpus temporally. Many research questions about social networks like coauthor networks involve how nodes recruit new neighbors. To look at this in the context of our dataset, we'll want to keep old nodes and edges around even if they don't show up in more recent slices. So we'll do a simple time-period slice (a 3-year window, matching the code below) with cumulative=True.
Note: In early versions of Tethne, slice() performed indexing on the spot, and stored the results in memory. As of v0.7.x, slice() returns a generator over slices. slice() also no longer operates on non-date fields, since this functionality is already provided by index().
|
MyCorpus.slice(window_size=3, cumulative=True)
|
.ipynb_checkpoints/4. Time-variant networks-checkpoint.ipynb
|
diging/tethne-notebooks
|
gpl-3.0
|
The following code builds a collection of co-authorship networks, using a 5-year time window that advances in 2-year steps.
|
MyGraphCollection = GraphCollection(MyCorpus, coauthors, slice_kwargs={'window_size': 5, 'step_size': 2})
|
.ipynb_checkpoints/4. Time-variant networks-checkpoint.ipynb
|
diging/tethne-notebooks
|
gpl-3.0
|
Analyzing time-variant networks
The GraphCollection makes it easy to apply algorithms from NetworkX across the whole time-variant network (i.e. to all graphs in the GraphCollection).
The method analyze applies an algorithm to all of the graphs in the GraphCollection.
|
dc = MyGraphCollection.analyze('degree_centrality')
dc[1986].items()[20:30]
bcentrality = MyGraphCollection.analyze('betweenness_centrality')
|
.ipynb_checkpoints/4. Time-variant networks-checkpoint.ipynb
|
diging/tethne-notebooks
|
gpl-3.0
|
Some algorithms, like "degree_centrality" and "betweenness_centrality" return a value for each node in each graph. In that case, the nodes in each graph are updated with those values.
|
MyGraphCollection[2008].nodes(data=True)[15:17] # Shows the attributes for two of the nodes in the 2008 graph.
|
.ipynb_checkpoints/4. Time-variant networks-checkpoint.ipynb
|
diging/tethne-notebooks
|
gpl-3.0
|
The method plot_attr_distribution can help to visualize the results of an algorithm across the graphs in the GraphCollection. In the example below, attr='degree_centrality' selects the degree_centrality attribute, etype='node' indicates that the attribute belongs to nodes (not edges), and stat=mean specifies that the Python function mean should be applied to the collection of values in each graph.
We can use node_history to look at how the attribute of a particular node changes across graphs. In the example below, the specified node appears first in 2008, and its centrality increases through 2011.
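For instance, based on the description above (a sketch; mean is assumed to be numpy's), the call would look like:

from numpy import mean
MyGraphCollection.plot_attr_distribution(attr='degree_centrality', etype='node', stat=mean)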
|
node_id = MyGraphCollection.node_lookup[(u'WARWICK', u'SI')]
warwick_centrality = MyGraphCollection.node_history(node_id, 'degree_centrality')
warwick_centrality.items()[:20] # First 20 years.
plt.plot(warwick_centrality.keys(), warwick_centrality.values(), 'ro')
plt.ylabel('Degree Centrality')
plt.show()
|
.ipynb_checkpoints/4. Time-variant networks-checkpoint.ipynb
|
diging/tethne-notebooks
|
gpl-3.0
|
Algebraic Equations
Write a function that computes the general solution to the quadratic equation $a x^2 + b x + c = 0$.
|
def quadratic():
    return ???
quadratic()
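One possible solution, a sketch assuming SymPy (the library this tutorial covers) with symbolic coefficients a, b, c:

from sympy import symbols, solve
x, a, b, c = symbols('x a b c')
def quadratic():
    # General solution of a*x**2 + b*x + c = 0
    return solve(a*x**2 + b*x + c, x)
quadratic()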
|
tutorial_exercises/Advanced-Solvers.ipynb
|
leosartaj/scipy-2016-tutorial
|
bsd-3-clause
|
Write a function that computes the general solution to the cubic $x^3 + ax^2 + bx + c$.
|
def cubic():
    return ???
cubic()
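A matching sketch for the cubic, again assuming SymPy:

from sympy import symbols, solve
x, a, b, c = symbols('x a b c')
def cubic():
    # General solution of x**3 + a*x**2 + b*x + c = 0
    return solve(x**3 + a*x**2 + b*x + c, x)
cubic()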
|
tutorial_exercises/Advanced-Solvers.ipynb
|
leosartaj/scipy-2016-tutorial
|
bsd-3-clause
|
The API needs a file named APIKEY containing your API key in the working folder. We initialize the datahub and dataset objects.
|
dh = datahub.datahub(server='api.planetos.com',version='v1')
ds = dataset.dataset('ncep_cfsv2', dh, debug=False)
ds.vars=variables.variables(ds.variables(), {'reftimes':ds.reftimes,'timesteps':ds.timesteps},ds)
|
api-examples/CFSv2_usage_example.ipynb
|
planet-os/notebooks
|
mit
|
For the automatic location selection to work, add your custom location to the API_client.python.lib.predef_locations file.
|
for locat in ['Võru']:
    ds.vars.Convective_Precipitation_Rate_surface.get_values(count=1000, location=locat, reftime='2018-04-20T18:00:00',
                                                             reftime_end='2018-05-02T18:00:00')
    ds.vars.Maximum_temperature_height_above_ground.get_values(count=1000, location=locat, reftime='2018-04-20T18:00:00',
                                                               reftime_end='2018-05-02T18:00:00')
## uncomment the following line to see the full pandas table
## ds.vars.Convective_Precipitation_Rate_surface.values['Võru']
|
api-examples/CFSv2_usage_example.ipynb
|
planet-os/notebooks
|
mit
|
Here we clean up the table a bit and create a time-based index.
|
ddd = ds.vars.Convective_Precipitation_Rate_surface.values['Võru'][['reftime','time','Convective_Precipitation_Rate_surface']]
dd_test=ddd.set_index('time')
|
api-examples/CFSv2_usage_example.ipynb
|
planet-os/notebooks
|
mit
|
Next, we resample the data to 1-month totals.
|
reft_unique = ds.vars.Convective_Precipitation_Rate_surface.values['Võru']['reftime'].unique()
nf = []
for reft in reft_unique:
    abc = dd_test[dd_test.reftime==reft].resample('M').sum()
    abc['Convective_Precipitation_Rate_surface'+'_'+reft.astype(str)] = \
        abc['Convective_Precipitation_Rate_surface']*6*3600
    del abc['Convective_Precipitation_Rate_surface']
    nf.append(abc)
nf2 = pd.concat(nf, axis=1)
# uncomment to see the full pandas table
nf2
|
api-examples/CFSv2_usage_example.ipynb
|
planet-os/notebooks
|
mit
|
Finally, we visualize the monthly precipitation for each forecast in a single plot.
|
fig=plt.figure(figsize=(10,8))
nf2.transpose().boxplot()
plt.ylabel('Monthly precipitation mm')
fig.autofmt_xdate()
plt.show()
|
api-examples/CFSv2_usage_example.ipynb
|
planet-os/notebooks
|
mit
|
Notice that there is no close anywhere in that piece of code; nevertheless, on leaving the block enclosed by the with, the file ends up closed, regardless of whether the block exited successfully or with an exception
Pickles
Pickles are a way to save complex data structures and recover them easily, without having to convert them to text and then parse them back:
Example 1: Saving one element at a time
|
import pickle # Import the required library
# Create the file object
with open('ejemplo.pkl', 'wb') as archivo:
    pkl = pickle.Pickler(archivo) # Create the access point to the data from the file
    lista1 = [1, 2, 3]
    lista2 = [4, 5]
    diccionario = {'campo1': 1, 'campo2': 'dos'}
    pkl.dump(lista1) # Save lista1, [1, 2, 3]
    pkl.dump(None) # Save the value None
    pkl.dump(lista2)
    pkl.dump('Hola mundo')
    pkl.dump(diccionario)
    pkl.dump(1)
|
.ipynb_checkpoints/Clase 06 - Archivos binarios, Apareo de archivos-checkpoint.ipynb
|
gsorianob/fiuba-python
|
apache-2.0
|
To read from a pickle file we cannot use the readline method that the for construct relies on, so the only option is to always attempt a read and stop once an exception of type EOFError is raised.
|
with open('ejemplo.pkl', 'rb') as archivo:
    seguir_leyendo = True
    while seguir_leyendo:
        try:
            data = pickle.load(archivo) # Read one element from the file
        except EOFError:
            seguir_leyendo = False
        else:
            print '### This line is not from the file ###'
            print data
|
.ipynb_checkpoints/Clase 06 - Archivos binarios, Apareo de archivos-checkpoint.ipynb
|
gsorianob/fiuba-python
|
apache-2.0
|
Example 2: Saving a list of elements
Just as we can save one element at a time, we can also save a single list containing all the elements we have in memory:
|
lista = [ # Build the list we want to save
    {'usuario': 'usuario1', 'puntaje': 5},
    {'usuario': 'usuario2', 'puntaje': 3},
    {'usuario': 'usuario3', 'puntaje': 1},
]
# Save the list to the file
with open('ejemplo_2.pkl', 'wb') as archivo:
    pkl = pickle.Pickler(archivo)
    pkl.dump(lista)
# Read it back from the file
with open('ejemplo_2.pkl', 'rb') as archivo:
    data = pickle.load(archivo)
    print data # and show its contents
|
.ipynb_checkpoints/Clase 06 - Archivos binarios, Apareo de archivos-checkpoint.ipynb
|
gsorianob/fiuba-python
|
apache-2.0
|
First, notice that the FullAdder is a subclass of Circuit. All Magma circuits are classes in Python.

Second, the attribute IO defines the interface to the circuit. IO is a list of alternating keys and values. The key is the name of the argument, and the value is the type. In this circuit, all the inputs and outputs have Magma type Bit. We also qualify each type as an input or an output using the functions In and Out.

Third, we provide a function definition. definition must be a class method, and this is indicated with the decorator @classmethod. The purpose of the definition function is to create the actual full adder circuit. The arguments are passed to definition as the object io. This object has fields for each argument in the interface. The body of definition calls our previously defined Python function fulladder.

Note that when we call the Python function fulladder inside definition, it is passed Magma values, not standard Python values. When we tested fulladder above, we called it with ints. When we call it inside definition, the values passed to the Python fulladder function are Magma values of type Bit. The Python bitwise operators are overloaded to compute logical functions of the Magma values (this corresponds to constructing the circuits that compute the logical functions and, or, and xor, and wiring inputs to outputs).

fulladder returns two values. These values are assigned to the Python variables O and COUT. Remember that assigning to a Python variable sets the variable to refer to the object. Magma values are Python objects, so assigning an object to a variable creates a reference to that Magma value. In order to complete the definition of the circuit, O and COUT need to be wired to the outputs in the interface. The Python <= operator is overloaded to perform wiring.
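The circuit class itself is not shown in this excerpt; here is a minimal sketch of a FullAdder matching the description above (assuming the fulladder helper defined earlier in the notebook and Magma's classic IO/definition style):

import magma as m
from magma import Bit, In, Out

class FullAdder(m.Circuit):
    IO = ["a", In(Bit), "b", In(Bit), "cin", In(Bit),
          "O", Out(Bit), "COUT", Out(Bit)]

    @classmethod
    def definition(io):
        # fulladder here receives Magma Bit values, as described above
        O, COUT = fulladder(io.a, io.b, io.cin)
        io.O <= O        # the overloaded <= operator wires O to the output
        io.COUT <= COUT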
Next we simulate the circuit and compare the results to the Python function fulladder.
|
from magma.simulator import PythonSimulator
fulladder_magma = PythonSimulator(FullAdder)
assert fulladder_magma(1, 0, 0) == fulladder(1, 0, 0), "Failed"
assert fulladder_magma(0, 1, 0) == fulladder(0, 1, 0), "Failed"
assert fulladder_magma(1, 1, 0) == fulladder(1, 1, 0), "Failed"
assert fulladder_magma(1, 0, 1) == fulladder(1, 0, 1), "Failed"
assert fulladder_magma(1, 1, 1) == fulladder(1, 1, 1), "Failed"
print("Success!")
|
notebooks/tutorial/coreir/coreir-tutorial/full_adder.ipynb
|
phanrahan/magmathon
|
mit
|
Here is another way to test the circuit.
We define a set of test vectors and plot them in python.
|
from magma.waveform import waveform
test_vectors_raw = [
[0, 0, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 1],
[1, 1, 0, 0, 1],
[1, 1, 1, 1, 1]
]
waveform(test_vectors_raw, ["a", "b", "cin", "sum", "cout"])
|
notebooks/tutorial/coreir/coreir-tutorial/full_adder.ipynb
|
phanrahan/magmathon
|
mit
|
We can also use the simulator to generate a set of test vectors.
|
from fault.test_vectors import generate_simulator_test_vectors
from bit_vector import BitVector
test_vectors = [
[BitVector(x) for x in test_vector]
for test_vector in test_vectors_raw
]
tests = generate_simulator_test_vectors(FullAdder, flatten=False)
|
notebooks/tutorial/coreir/coreir-tutorial/full_adder.ipynb
|
phanrahan/magmathon
|
mit
|
Finally, compare the simulated test vectors to the expected values.
|
print( "Success" if tests == test_vectors else "Failure" )
|
notebooks/tutorial/coreir/coreir-tutorial/full_adder.ipynb
|
phanrahan/magmathon
|
mit
|
The last step is to generate CoreIR and Verilog for the full adder circuit.
|
m.compile("build/FullAdder", FullAdder, output="coreir")
%cat build/FullAdder.json
m.compile("build/FullAdder", FullAdder, output="coreir-verilog")
%cat build/FullAdder.v
|
notebooks/tutorial/coreir/coreir-tutorial/full_adder.ipynb
|
phanrahan/magmathon
|
mit
|
Fast Fourier Transform snippets
Documentation
Numpy implementation: http://docs.scipy.org/doc/numpy/reference/routines.fft.html
Scipy implementation: http://docs.scipy.org/doc/scipy/reference/fftpack.html
Import directives
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
|
python_numpy_fourier_transform_en.ipynb
|
jdhp-docs/python-notebooks
|
mit
|
Make data
|
pattern = np.zeros((4, 4))
pattern[1:3,1:3] = 1
pattern
signal = np.tile(pattern, (2, 2))
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
ax.imshow(signal, interpolation='nearest', cmap=cm.gray)
|
python_numpy_fourier_transform_en.ipynb
|
jdhp-docs/python-notebooks
|
mit
|
Fourier transform with Numpy
Do the Fourier transform
|
transformed_signal = np.fft.fft2(signal)
#transformed_signal
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
ax.imshow(abs(transformed_signal), interpolation='nearest', cmap=cm.gray)
|
python_numpy_fourier_transform_en.ipynb
|
jdhp-docs/python-notebooks
|
mit
|
Filter
|
max_value = np.max(abs(transformed_signal))
filtered_transformed_signal = transformed_signal * (abs(transformed_signal) > max_value*0.5)
#filtered_transformed_signal[6, 6] = 0
#filtered_transformed_signal[2, 2] = 0
#filtered_transformed_signal[2, 6] = 0
#filtered_transformed_signal[6, 2] = 0
#filtered_transformed_signal[1, 6] = 0
#filtered_transformed_signal[6, 1] = 0
#filtered_transformed_signal[1, 2] = 0
#filtered_transformed_signal[2, 1] = 0
#filtered_transformed_signal
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
ax.imshow(abs(filtered_transformed_signal), interpolation='nearest', cmap=cm.gray)
|
python_numpy_fourier_transform_en.ipynb
|
jdhp-docs/python-notebooks
|
mit
|
Do the inverse transform
|
filtered_signal = np.fft.ifft2(filtered_transformed_signal)
#filtered_signal
fig = plt.figure(figsize=(16.0, 10.0))
ax = fig.add_subplot(111)
ax.imshow(abs(filtered_signal), interpolation='nearest', cmap=cm.gray)
#shifted_filtered_signal = np.fft.ifftshift(transformed_signal)
#shifted_filtered_signal
#shifted_transformed_signal = np.fft.fftshift(transformed_signal)
#shifted_transformed_signal
|
python_numpy_fourier_transform_en.ipynb
|
jdhp-docs/python-notebooks
|
mit
|
Isotherm display
To generate a quick plot of an isotherm, call the plot() function. The parameters to this function are the same as pygaps.plot_iso.
|
isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41')
ax = isotherm.plot()
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
Isotherm plotting and comparison
For more complex plots of multiple isotherms, the pygaps.plot_iso function is provided. Several examples of isotherm plotting are presented here:
A logarithmic isotherm graph comparing the adsorption branch of two isotherms up to 1 bar (x_range=(None, 1)).
The isotherms are measured on the same material and batch, but at different temperatures,
so we want this information to be visible in the legend (lgd_keys=[...]).
We also want the loading to be displayed in cm3 STP (loading_unit="cm3(STP)") and to select the colours manually (color=[...]).
|
import pygaps.graphing as pgg
ax = pgg.plot_iso(
isotherms_isosteric,
branch = 'ads',
logx = True,
x_range=(None,1),
lgd_keys=['temperature'],
loading_unit='cm3(STP)',
color=['b', 'r', 'g']
)
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
A black and white (color=False) full scale graph of both adsorption and desorption branches of an
isotherm (branch = 'all'), saving it to the local directory for a publication (save_path=path). The result file is found here. We also display the isotherm points using X markers (marker=['x']) and set the figure title (fig_title='Novel Behaviour').
|
import pygaps.graphing as pgg
from pathlib import Path
path = Path.cwd() / 'novel.png'
isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41')
ax = pgg.plot_iso(
isotherm,
branch = 'all',
color=False,
save_path=path,
marker=['x'],
)
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
A graph which plots both the loading and enthalpy as a function of pressure on the left
and the enthalpy as a function of loading on the right, for a microcalorimetry experiment.
To do this, we separately generate the axes and pass them to the plot_iso function (ax=ax1).
We want the legend to appear inside the graph (lgd_pos='inner') and to limit the range of enthalpy
displayed to 40 kJ (either y2_range or y1_range, depending on where it is displayed).
Finally, we want to manually control the size of the pressure and enthalpy markers (y1_line_style=dict(markersize=0)).
|
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
pgg.plot_iso(
isotherms_calorimetry[1],
ax=ax1,
x_data='pressure',
y1_data='loading',
y2_data='enthalpy',
lgd_pos='lower right',
y2_range=(0,40),
y1_line_style=dict(markersize=0),
y2_line_style=dict(markersize=3),
)
pgg.plot_iso(
isotherms_calorimetry[1],
ax=ax2,
x_data='loading',
y1_data='enthalpy',
y1_range=(0,40),
lgd_pos='best',
marker=['^'],
y1_line_style=dict(linewidth=0)
)
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
A comparison graph of all the nitrogen isotherms, with both branches shown but without adding the desorption branch to the label (branch='all-nol'). We want each isotherm to use a different marker (marker=len(isotherms)) and to not display the desorption branch component of the legend (only lgd_keys=['material']).
|
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all-nol',
lgd_keys=['material'],
marker=len(isotherms_n2_77k)
)
ax.set_title("Regular isotherms colour")
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
A black and white version of the same graph (color=False), but with absolute pressure in bar.
|
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all',
color=False,
lgd_keys=['material'],
pressure_mode='absolute',
pressure_unit='bar',
)
ax.set_title("Black and white")
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
Only some ranges selected for display from all the isotherms (x_range=(0.2, 0.6) and y1_range=(3, 10)).
|
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all',
x_range=(0.2, 0.6),
y1_range=(3, 10),
lgd_keys=['material']
)
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
The isosteric pressure isotherms, in relative pressure mode and loading in cm3(STP). No markers
are displayed (marker=False).
|
ax = pgg.plot_iso(
isotherms_isosteric,
branch='ads',
pressure_mode='relative',
loading_unit='cm3(STP)',
lgd_keys=['adsorbate', 'temperature'],
marker=False
)
ax.set_title("Different pressure mode or units")
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
Only desorption branch of some isotherms (branch='des'), displaying the user who recorded the isotherms
in the graph legend.
|
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='des',
lgd_keys=['material', 'user'],
lgd_pos='out bottom',
)
ax.set_title("Only desorption branch")
|
docs/examples/plotting.ipynb
|
pauliacomi/pyGAPS
|
mit
|
plot_images() is used to plot several images in the same figure. It supports many configurations and has many options available to customize the resulting output. The function returns a list of matplotlib axes, which can be used to further customize the figure. Some examples are given below.
Default usage
A common usage for plot_images() is to view the different slices of a multidimensional image (a hyperimage):
|
import scipy.ndimage
image = hs.signals.Signal2D(np.random.random((2, 3, 512, 512)))
for i in range(2):
    for j in range(3):
        image.data[i,j,:] = scipy.misc.ascent()*(i+0.5+j)
axes = image.axes_manager
axes[2].name = "x"
axes[3].name = "y"
axes[2].units = "nm"
axes[3].units = "nm"
image.metadata.General.title = 'multi-dimensional Ascent'
hs.plot.plot_images(image, tight_layout=True)
|
hyperspy/tests/drawing/test_plot_image.ipynb
|
vidartf/hyperspy
|
gpl-3.0
|
Specified labels
By default, plot_images() will attempt to auto-label the images based on the Signal titles. The labels (and title) can be customized with the label and suptitle arguments. In this example, the axes labels and ticks are also disabled with axes_decor:
|
import scipy.ndimage
image = hs.signals.Signal2D(np.random.random((2, 3, 512, 512)))
for i in range(2):
    for j in range(3):
        image.data[i,j,:] = scipy.misc.ascent()*(i+0.5+j)
axes = image.axes_manager
axes[2].name = "x"
axes[3].name = "y"
axes[2].units = "nm"
axes[3].units = "nm"
image.metadata.General.title = 'multi-dimensional Ascent'
hs.plot.plot_images(image, suptitle='Custom figure title',
label=['Signal2D 1', 'Signal2D 2', 'Signal2D 3', 'Signal2D 4', 'Signal2D 5', 'Signal2D 6'],
axes_decor=None, tight_layout=True)
|
hyperspy/tests/drawing/test_plot_image.ipynb
|
vidartf/hyperspy
|
gpl-3.0
|
List of images
plot_images() can also be used to easily plot a list of Images, comparing different Signals, including RGB images. This example also demonstrates how to wrap labels using labelwrap (for preventing overlap) and using a single colorbar for all the Images, as opposed to multiple individual ones:
|
import scipy.ndimage
# load red channel of raccoon as an image
image0 = hs.signals.Signal2D(scipy.misc.face()[:,:,0])
image0.metadata.General.title = 'Rocky Raccoon - R'
axes0 = image0.axes_manager
axes0[0].name = "x"
axes0[1].name = "y"
axes0[0].units = "mm"
axes0[1].units = "mm"
# load ascent into a 2x3 hyperimage
image1 = hs.signals.Signal2D(np.random.random((2, 3, 512, 512)))
image1.metadata.General.title = 'multi-dimensional Ascent'
for i in range(2):
    for j in range(3):
        image1.data[i,j,:] = scipy.misc.ascent()*(i+0.5+j)
axes1 = image1.axes_manager
axes1[2].name = "x"
axes1[3].name = "y"
axes1[2].units = "nm"
axes1[3].units = "nm"
# load green channel of raccoon as an image
image2 = hs.signals.Signal2D(scipy.misc.face()[:,:,1])
image2.metadata.General.title = 'Rocky Raccoon - G'
axes2 = image2.axes_manager
axes2[0].name = "x"
axes2[1].name = "y"
axes2[0].units = "mm"
axes2[1].units = "mm"
# load rgb image
rgb = hs.signals.Signal1D(scipy.misc.face())
rgb.change_dtype("rgb8")
rgb.metadata.General.title = 'RGB'
axesRGB = rgb.axes_manager
axesRGB[0].name = "x"
axesRGB[1].name = "y"
axesRGB[0].units = "nm"
axesRGB[1].units = "nm"
hs.plot.plot_images([image0, image1, image2, rgb], tight_layout=True,
                    colorbar='single',
                    labelwrap=20)
|
hyperspy/tests/drawing/test_plot_image.ipynb
|
vidartf/hyperspy
|
gpl-3.0
|
Real-world use
Another example for this function is plotting EDS line intensities. Using a spectrum image with EDS data, one can use the following commands to get a representative figure of the line intensities. This example also demonstrates changing the colormap (with cmap), adding scalebars to the plots (with scalebar), and changing the padding between the images. The padding is specified as a dictionary, which is used to call matplotlib.figure.Figure.subplots_adjust() (see documentation).
Note, this padding can also be changed interactively by clicking on the subplots_adjust button (<img src="plot_images_subplots.png" style="display:inline-block;vertical-align:bottom">) in the GUI (button may be different when using different graphical backends).
The sample and the data used are described in
P. Burdet, et al., Acta Materialia, 61, p. 3090-3098 (2013) (see http://infoscience.epfl.ch/record/185861/).
Further information is available in the Hyperspy EDS tutorial:
http://nbviewer.ipython.org/github/hyperspy/hyperspy-demos/blob/master/electron_microscopy/EDS/Hyperpsy_EDS_TEM_tutorial_CAM_2015.ipynb
|
from urllib.request import urlretrieve
url = 'http://cook.msm.cam.ac.uk//~hyperspy//EDS_tutorial//'
urlretrieve(url + 'core_shell.hdf5', 'core_shell.hdf5')
si_EDS = hs.load("core_shell.hdf5")
im = si_EDS.get_lines_intensity()
hs.plot.plot_images(
im, tight_layout=True, cmap='RdYlBu_r', axes_decor='off',
colorbar='single', scalebar='all',
scalebar_color='black', suptitle_fontsize=16,
padding={'top':0.8, 'bottom':0.10, 'left':0.05,
'right':0.85, 'wspace':0.20, 'hspace':0.10})
# cleanup
!rm core_shell.hdf5
|
hyperspy/tests/drawing/test_plot_image.ipynb
|
vidartf/hyperspy
|
gpl-3.0
|
Transform Data
Dataset values are all of type "object", so we convert them to numeric types.
LabelEncoder replaces each string with an incrementing integer.
|
df = pd.DataFrame(dataset_part.data)
df.head(1)
df = df.apply(pd.to_numeric, errors='ignore')
# Example from http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html
'''
le = preprocessing.LabelEncoder()
le.fit(list(names))
# le.classes_ # Shows all labels.
print(le.transform([b'icmpeco_iSF', b'icmpecr_iSF', b'icmpred_iSF']) )
print(le.inverse_transform([0, 0, 1, 2]))
'''
# https://datascience.stackexchange.com/questions/16728/could-not-convert-string-to-float-error-on-kddcup99-dataset
for column in df.columns:
    if df[column].dtype == object:
        le = preprocessing.LabelEncoder()
        df[column] = le.fit_transform(df[column])
df.head(1) # All strings removed.
|
2018-06-24-sklearn-KDDCUP99.ipynb
|
whiterd/Tutorial-Notebooks
|
mit
|
Preprocessing Data
|
X = df.values
le = preprocessing.LabelEncoder()
y = le.fit_transform(dataset_part.target)
y_dict = dict(zip(le.transform(le.classes_), le.classes_)) # Saved for later lookup of label names.
# Test options and evaluation metric
N_SPLITS = 7
SCORING = 'accuracy'
# Split-out validation dataset
test_size=0.33
SEED = 42
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=test_size, random_state=SEED)
|
2018-06-24-sklearn-KDDCUP99.ipynb
|
whiterd/Tutorial-Notebooks
|
mit
|
Train Model
|
# Algorithms
models = [
#('LR', LogisticRegression()),
('LDA', LinearDiscriminantAnalysis()),
#('KNN', KNeighborsClassifier()),
#('KMN', KMeans()),
#('CART', DecisionTreeClassifier()),
#('NB', GaussianNB()),
]
# evaluate each model in turn
results = []
names = []
print('{:8}{:^8}{:^8}'.format('Model','mean','std'))
print('-' * 23)
for name, model in models:
    kfold = KFold(n_splits=N_SPLITS, random_state=SEED)
    # %time (rather than %timeit) keeps the assignment in the namespace while still reporting wall time
    %time cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=SCORING)
    results.append(cv_results)
    names.append(name)
    print('{:8}{:^8.2%}{:^8.2%}'.format(name, cv_results.mean(), cv_results.std()))
    print(*cv_results)
previous_results = '''
LR: 98.87% (0.10%)
LDA: 99.49% (0.05%)
KNN: 99.84% (0.01%) <-- slow
CART: 99.94% (0.00%)
NB: 93.96% (0.96%)
SVM: <-- very slow
'''
# Compare Algorithms
fig = plt.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
plt.boxplot(results)
ax.set_xticklabels(names)
plt.show()
|
2018-06-24-sklearn-KDDCUP99.ipynb
|
whiterd/Tutorial-Notebooks
|
mit
|
Use model to make predictions:
|
# Note: `neigh` (a fitted KNeighborsClassifier) is assumed from an earlier run; KNN is commented
# out of the model list above. To reproduce: neigh = KNeighborsClassifier().fit(X_train, y_train)
test = [0, 1, 22, 9, 181, 5450, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 8, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 9, 9, 1.0, 0.0, 0.11, 0.0, 0.0, 0.0, 0.0, 0.0]
print(neigh.predict([test]))
print(neigh.predict_proba([test])) # Probability estimates based on the classes of the nearest neighbors.
|
2018-06-24-sklearn-KDDCUP99.ipynb
|
whiterd/Tutorial-Notebooks
|
mit
|
Sources
[1] - KDD Cup 99 dataset
[2] - M. Tavallaee, E. Bagheri, W. Lu, and A. Ghorbani, “A Detailed Analysis of the KDD CUP 99 Data Set,” Submitted to Second IEEE Symposium on Computational Intelligence for Security and Defense Applications (CISDA), 2009. link
Other Resources
PySpark solution to the KDDCup99
link
Logs
Public PCAP files for PCAP-based evaluation of network-based intrusion detection system (NIDS) evaluation.
The Cyber Research Center - DataSets - ITOC CDX (2009)
Labelled datasets
UNB ISCX (2012-) datasets contain a range of "sophisticated" intrusion attacks, botnets and DoS attacks.
CSIC 2010 HTTP Dataset in CSV format (for Weka Analysis) dataset is from a web penetration testing testbed for anomaly detection training.
Attack Challenge - ECML/PKDD Workshop (2007) dataset contains web penetration testing data.
NSL-KDD Data Set (2007) intended to replace the DARPA KDDCup99 dataset for IDS.
gureKddcup data base (2008) intended to replace the DARPA KDDCup99 dataset for IDS.
CTU-13 dataset - pcap files (Stratosphere IPS).
Where to go from here
Seeking more labelled datasets and determining the potential for other non-labelled datasets.
|
print('{:10}{:10}{:10}'.format('Model','mean','std'))
print('LDA: 99.49% (0.05%)')
print('{:8}{:^8}{:^8}'.format('Model','mean','std'))
print('-' * 23)
print('{:8}{:^8.2%}{:^8.2%}'.format('LDA', .9949, .0005))
|
2018-06-24-sklearn-KDDCUP99.ipynb
|
whiterd/Tutorial-Notebooks
|
mit
|
Reading a CSV file with Pandas
A CSV file consists of records whose fields are separated by ','; the first record is the header record, which gives the name of each field of the records that follow, which contain the data.
The read_csv() function reads a CSV file:
df = pd.read_csv(csv_file_name)
df is the reference to an object of type DataFrame
As an example, let's read the file 2017-german-election-overall.csv
Pandas automatically recognizes the input file as a file in CSV format and detects the data types it contains.
Each column of the table contains homogeneous data.
Pandas associates a progressive index with each record, which plays the role of primary key.
|
df = pd.read_csv("./2017-german-election-overall.csv")
type(df)
df
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Getting information about the data frame
general information about the data frame
df.info()
general statistics about the data frame
df.describe()
|
df.info()
df.describe()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Getting a copy of a data frame
df.copy()
|
df.copy()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The shape and columns attributes
shape, a tuple containing the number of rows and the number of columns of the data frame
columns, an Index object containing the column names of the data frame
|
df.shape
list(df.columns)
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Renaming columns
df.rename(columns = name_dict, inplace = True|False)
name_dict, a dictionary mapping a name to a new name
|
df.rename(columns = {'registered.voters':'registered_voters', 'area_names':'area'}, inplace = True)
df
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Removing columns
df.drop(column_list, axis = 1, inplace = True|False)
|
df.drop(['invalid_second_votes', 'valid_second_votes'], axis=1, inplace = True)
df
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Removing rows by index
df.drop(index_list, axis = 0, inplace = True|False)
|
df.drop([7,9,12], axis=0, inplace = True)
df
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Getting the first/last rows
df.head(n)
df.tail(n)
|
df.head(8)
df.tail(8)
df.head()
df.tail()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Selecting rows by position (slicing)
df[start_pos:end_pos]
|
df[0:11]
df[:11]
df.head(11)
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Selecting a column
The expression:
df[column_name]
returns the column named column_name as a Series object, on which methods such as max(), min(), count(), var(), std(), mean() etc., or describe(), can be applied.
|
type(df['registered_voters'])
df['registered_voters']
df['registered_voters'].describe()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Alternatively, dot notation can be used:
df.column_name
|
df.registered_voters.describe()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Selecting multiple columns
The expression:
df[column_list]
returns a data frame with the columns specified in column_list, on which methods such as max(), min(), count(), var(), std(), mean(), corr() etc. can be applied.
|
type(df[['registered_voters', 'total_votes']])
df[['registered_voters', 'total_votes']].mean()
df[['registered_voters', 'total_votes']].mean()[1]
df[['registered_voters', 'total_votes']].corr()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
NB: these methods can also be invoked on the whole dataset.
|
df.mean()
df.corr()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Checking for null values
The expressions:
pd.isnull(df)
df.isnull()
return a data frame of boolean values.
|
pd.isnull(df)
df.isnull()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The expressions:
pd.isnull(series_obj)
series_obj.isnull()
return a Series of boolean values.
|
df['state'].isnull()
pd.isnull(df['state'])
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The unique() method
The unique() method of Series objects returns the array of distinct values present in the invoking object.
|
df['state'].unique()
df.state.unique()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Selecting the rows that satisfy a given condition
The equivalent statements:
mask = df[column_name] cfr_op value
mask = df.column_name cfr_op value
where cfr_op is a comparison operator, assign to the variable mask a Series object of boolean values, in which the i-th boolean is True if the value of column column_name in the i-th row satisfies the comparison expression.
|
mask = df['state'] == 'Berlin'
mask
mask = df.state == 'Berlin'
mask
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The expression:
df[mask]
returns a data frame with only the rows corresponding to a True value in mask.
|
df[mask]
mask = (df['state'] == 'Berlin') | (df['state'] == 'Bayern')
df[mask]
df[mask][['area', 'registered_voters']]
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Getting the indexes of the rows that satisfy a given condition
df[mask].index
|
df[mask][['area', 'registered_voters']].index
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Locating rows with iloc[]
The expression:
df.iloc[pos_index]
returns the row at index position pos_index as an object of type Series.
|
df.iloc[7]
df.iloc[7]['area']
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The expression:
df.iloc[start_pos_index:end_pos_index]
returns, as an object of type DataFrame, all the rows from index position start_pos_index up to index position end_pos_index-1.
|
df.iloc[7:12]
df.iloc[7:12]['area']
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
The expression:
df.iloc[pos_index_list]
returns, as an object of type DataFrame, the rows at the index positions listed in pos_index_list.
|
df.iloc[[7, 8, 11, 13]]
df.iloc[[7, 8, 11, 13]]['area']
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Using loc[]
accessing a row by its index: df.loc[index]
|
df.loc[5]
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
accessing multiple rows by their indexes: df.loc[[index1, index2, ...]]
|
df.loc[[5,8,10]]
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
accessing a single value of the data frame: df.loc[index, column_name]
|
df.loc[5, 'state']
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
accessing multiple values of the data frame: df.loc[[index1, index2, ...], column_name]
|
df.loc[[5,10,11], 'state']
df.loc[[5,10,11], 'state'] = 'unknown'
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
accessing multiple values of the data frame: df.loc[[index1, index2, ...], [column_name1, column_name2, ...]]
|
df.loc[[5,10,11], ['area', 'state']]
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
accessing the rows that satisfy a given condition: df.loc[mask]
|
df.loc[df['state'] == 'Berlin']
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Getting a value by index with at[]
df.at[index, column_name]
|
df.at[11, 'area']
df.at[11, 'area'] = 'unknown'
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Sorting values
Sorting by the values of one column:
df.sort_values(column_name, ascending = True|False, inplace = True|False)
Sorting by the values of multiple columns:
df.sort_values(column_list, ascending = True|False, inplace = True|False)
|
df.sort_values('total_votes', ascending = False)
df.sort_values(['state', 'area'], ascending = True)
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Grouping values
The expressions:
df.groupby(column_name)
df.groupby(column_list)
return a DataFrameGroupBy object.
|
df.groupby('state')['registered_voters'].sum()
df.groupby(['state', 'area'])['registered_voters'].sum()
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Adding a column
df[new_column] = new_series_obj
|
df['difference'] = df['valid_first_votes'] - df['invalid_first_votes']
df
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Applying a function to a Series object
The expression:
series_obj.apply(fun)
applies the function fun to all the values in series_obj and returns another object of type Series.
|
df['registered_voters'].apply(lambda x: float(x+1))
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Applying a function to a DataFrame object
The expression:
df.applymap(fun)
applies the function fun to all the values in df and returns another object of type DataFrame.
|
df[['registered_voters', 'total_votes']].applymap(lambda x: 'votes='+str(x))
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
How to iterate over the records of a data frame
for (index, record) in df.iterrows():
    do_something
|
for (index, record) in df.iterrows():
    print(str(index) + ' ' + record['state'])
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Writing a data frame to a file in CSV format
df.to_csv(file_name, index=False|True)
|
df.to_csv('./output.csv', index = False)
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Calling matplotlib from Pandas
|
df.registered_voters.plot(label="Registered voters", legend=True)
|
laboratorio/lezione11-04nov21/lezione6-pandas.ipynb
|
bioinformatica-corso/lezioni
|
cc0-1.0
|
Kindly ignore the deprecation warnings and incompatibility errors.
Restart the kernel before proceeding further (On the Notebook menu, select Kernel > Restart Kernel > Restart).
Start by importing the necessary libraries for this lab.
|
# Importing necessary modules/libraries such as numpy, pandas and datetime.
import datetime
import os
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import aiplatform
from matplotlib import pyplot as plt
from tensorflow import feature_column as fc
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Input, Dense, DenseFeatures, concatenate
from tensorflow.keras.models import Sequential
print(tf.__version__)
# Set the backend of matplotlib to the 'inline' backend: the output of plotting commands is displayed inline within the frontend,
# directly below the code cell that produced it, and the resulting plots are also stored in the notebook document.
%matplotlib inline
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Use tf.data to read the CSV files
We wrote these functions for reading data from the csv files above in the previous notebook. For this lab we will also include some additional engineered features in our model. In particular, we will compute the difference in latitude and longitude, as well as the Euclidean distance between the pick-up and drop-off locations. We can accomplish this by adding these new features to the features dictionary with the function add_engineered_features below.
Note that we include a call to this function when collecting our features dict and labels in the features_and_labels function below as well.
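For reference, a sketch of what add_engineered_features computes, per the description above (the actual implementation lives in the previous notebook and is not shown in this excerpt):

def add_engineered_features(features):
    # Differences in latitude and longitude between pick-up and drop-off
    features['latdiff'] = features['pickup_latitude'] - features['dropoff_latitude']
    features['londiff'] = features['pickup_longitude'] - features['dropoff_longitude']
    # Euclidean distance between pick-up and drop-off (in degrees)
    features['euclidean'] = tf.sqrt(features['latdiff']**2 + features['londiff']**2)
    return features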
|
# Selecting specific CSV_COLUMNS, LABEL_COLUMN, DEFAULTS, UNWANTED_COLS.
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key'
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
UNWANTED_COLS = ['pickup_datetime', 'key']
# Create an input function reading a file using the Dataset API
def features_and_labels(row_data):
    label = row_data.pop(LABEL_COLUMN)
    features = row_data
    for unwanted_col in UNWANTED_COLS:
        features.pop(unwanted_col)
    return features, label
# Reading CSV files into a dataset.
def create_dataset(pattern, batch_size=1, mode='eval'):
    dataset = tf.data.experimental.make_csv_dataset(
        pattern, batch_size, CSV_COLUMNS, DEFAULTS)
    dataset = dataset.map(features_and_labels)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=1000).repeat()
    # Prefetching lets the pipeline prepare the next batch while the current one is consumed
    dataset = dataset.prefetch(1)
    return dataset
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Then, we'll define our custom RMSE evaluation metric and build our wide and deep model.
|
# Here, tf.reduce_mean computes the mean of elements across dimensions of a tensor,
# tf.sqrt computes the element-wise square root, and tf.square squares x element-wise.
def rmse(y_true, y_pred):
    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
# TODO 3
# Note: `inputs`, `deep_columns` and `wide_columns` are assumed to be defined earlier in the notebook.
def build_model(dnn_hidden_units):
    # Create the deep part of model
    deep = DenseFeatures(deep_columns, name='deep_inputs')(inputs)
    for num_nodes in dnn_hidden_units:
        deep = Dense(num_nodes, activation='relu')(deep)
    # Create the wide part of model
    wide = DenseFeatures(wide_columns, name='wide_inputs')(inputs)
    # Combine deep and wide parts of the model
    combined = concatenate(inputs=[deep, wide], name='combined')
    # Map the combined outputs into a single prediction value
    output = Dense(units=1, activation=None, name='prediction')(combined)
    # Finalize the model
    model = Model(inputs=list(inputs.values()), outputs=output)
    # Compile the keras model
    model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
    return model
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Next, we can call the build_model to create the model. Here we'll have two hidden layers, each with 10 neurons, for the deep part of our model. We can also use plot_model to see a diagram of the model we've created.
|
HIDDEN_UNITS = [10,10]
# Calling the build model
model = build_model(dnn_hidden_units=HIDDEN_UNITS)
# tf.keras.utils.plot_model draws a diagram of the model that we have created.
tf.keras.utils.plot_model(model, show_shapes=False, rankdir='LR')
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Next, we'll set up our training variables, create our datasets for training and validation, and train our model.
(We refer you to the blog post ML Design Pattern #3: Virtual Epochs for further details on why we express the training in terms of NUM_TRAIN_EXAMPLES and NUM_EVALS, and why, in this training code, the number of epochs is really equal to the number of evaluations we perform.)
|
BATCH_SIZE = 1000
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset will repeat, wrap around
NUM_EVALS = 50 # how many times to evaluate
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample
trainds = create_dataset(
pattern='../data/taxi-train*',
batch_size=BATCH_SIZE,
mode='train')
evalds = create_dataset(
pattern='../data/taxi-valid*',
batch_size=BATCH_SIZE,
mode='eval').take(NUM_EVAL_EXAMPLES//1000)
%%time
# Here, %%time prints the wall time for the entire cell
steps_per_epoch = NUM_TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS)
OUTDIR = "./taxi_trained"
shutil.rmtree(path=OUTDIR, ignore_errors=True) # start fresh each time
history = model.fit(x=trainds,
steps_per_epoch=steps_per_epoch,
epochs=NUM_EVALS,
validation_data=evalds,
callbacks=[TensorBoard(OUTDIR)])
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Just as before, we can examine the history to see how the RMSE changes through training on the train set and validation set.
|
RMSE_COLS = ['rmse', 'val_rmse']
# Pandas DataFrame is two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns).
pd.DataFrame(history.history)[RMSE_COLS].plot()
|
courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/4_keras_functional_api.ipynb
|
GoogleCloudPlatform/training-data-analyst
|
apache-2.0
|
Write here the value of the selected wavelength (which appears above)
wavelength $\lambda_0$ = nm
We calculate the smallest thickness of the monolayer; write its numerical value here by editing this text
espesor1 = nm
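As a worked check, the smallest thickness follows from the quarter-wave condition $d = \lambda_0 / (4 n_c)$. A sketch, taking $\lambda_0$ = 550 nm purely as an illustrative choice (use your own selected wavelength):

lambda0 = 550.0            # design wavelength in nm (illustrative assumption)
nc = 1.38                  # index of the MgF2 monolayer, as in the cell below
espesor_minimo = lambda0 / (4 * nc)
print(espesor_minimo)      # about 99.6 nm, consistent in magnitude with the 99.0 nm used below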
Task 3. Characterization of the antireflection coating at normal incidence
We will characterize the reflectance of the high-index lens with the antireflection coating as a function of wavelength across the visible range. We consider normal incidence and the thickness calculated in Task 2 (these are the conditions used in the design of the coating).
The following cell contains the code that computes and plots this reflectance (as a percentage).
The text after the # symbol is a comment.
|
# MODIFY THESE TWO PARAMETERS, THEN RUN THE CELL
########################################################
nL = 1.8 # Enter the index of the high-index lens
espesor1 = 99.0 # Enter the thickness of the layer (in nm)
# DO NOT CHANGE ANYTHING BELOW THIS LINE
##############################################################################################################################
%pylab inline
nc = 1.38 # Index of the monolayer (MgF2)
longitud_de_onda = linspace(400,750,100) # Create the wavelength values across the visible range (in nm)
# Reflection and transmission coefficients
rA = (1-nc)/(1+nc) # Reflection coefficient air --> monolayer
tA = 2*1/(1+nc) # Transmission coefficient air --> monolayer
rB = (nc-nL)/(nc+nL) # Reflection coefficient monolayer --> lens
tC = 2*nc/(nc+1) # Transmission coefficient monolayer --> air
# Phase difference and reflectance for the minimum thickness
desfase1 = (2*pi/longitud_de_onda)*2*nc*espesor1 + 0*pi # geometric phase difference + phase difference due to the reflections
Reflectancia_tratamiento1 = 100*( rA**2 + (tA*rB*tC)**2 + 2*sqrt( (rA**2)*(tA*rB*tC)**2 )*cos(desfase1) ) # Reflectance (%)
# Plot the reflectance as a function of wavelength
fig,ax = plt.subplots()
plot(longitud_de_onda,Reflectancia_tratamiento1,lw=2) # Plot the reflectance
xlabel('$\lambda$ (nm)',fontsize=16)
ylabel('Reflectancia (%)',fontsize=16) # Label the axes;
|
TratamientoAntirreflejante/Tratamiento_Antirreflejante_Ejercicio.ipynb
|
ecabreragranado/OpticaFisicaII
|
gpl-3.0
|