markdown
stringlengths
0
37k
code
stringlengths
1
33.3k
path
stringlengths
8
215
repo_name
stringlengths
6
77
license
stringclasses
15 values
Iterable dict values:
# Python 2 only: for value in heights.itervalues(): ... # Idiomatic Python 3 for value in heights.values(): # extra memory overhead on Py2 ... # Python 2 and 3: option 1 from builtins import dict heights = dict(Fred=175, Anne=166, Joe=192) for key in heights.values(): # efficient on Py2 and Py3 ......
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Iterable dict items:
# Python 2 only: for (key, value) in heights.iteritems(): ... # Python 2 and 3: option 1 for (key, value) in heights.items(): # inefficient on Py2 ... # Python 2 and 3: option 2 from future.utils import viewitems for (key, value) in viewitems(heights): # also behaves like a set ... # Python 2 a...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
dict keys/values/items as a list dict keys as a list:
# Python 2 only: keylist = heights.keys() assert isinstance(keylist, list) # Python 2 and 3: keylist = list(heights) assert isinstance(keylist, list)
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
dict values as a list:
# Python 2 only: heights = {'Fred': 175, 'Anne': 166, 'Joe': 192} valuelist = heights.values() assert isinstance(valuelist, list) # Python 2 and 3: option 1 valuelist = list(heights.values()) # inefficient on Py2 # Python 2 and 3: option 2 from builtins import dict heights = dict(Fred=175, Anne=166, Joe=192) valu...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
dict items as a list:
# Python 2 and 3: option 1 itemlist = list(heights.items()) # inefficient on Py2 # Python 2 and 3: option 2 from future.utils import listitems itemlist = listitems(heights) # Python 2 and 3: option 3 from future.utils import iteritems # or from six import iteritems itemlist = list(iteritems(heights))
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Custom class behaviour Custom iterators
# Python 2 only class Upper(object): def __init__(self, iterable): self._iter = iter(iterable) def next(self): # Py2-style return self._iter.next().upper() def __iter__(self): return self itr = Upper('hello') assert itr.next() == 'H' # Py2-style assert list(itr) == list...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Custom __str__ methods
# Python 2 only: class MyClass(object): def __unicode__(self): return 'Unicode string: \u5b54\u5b50' def __str__(self): return unicode(self).encode('utf-8') a = MyClass() print(a) # prints encoded string # Python 2 and 3: from future.utils import python_2_unicode_compatible @python_2_unico...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Custom __nonzero__ vs __bool__ method:
# Python 2 only: class AllOrNothing(object): def __init__(self, l): self.l = l def __nonzero__(self): return all(self.l) container = AllOrNothing([0, 100, 200]) assert not bool(container) # Python 2 and 3: from builtins import object class AllOrNothing(object): def __init__(self, l): ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Lists versus iterators xrange
# Python 2 only: for i in xrange(10**8): ... # Python 2 and 3: forward-compatible from builtins import range for i in range(10**8): ... # Python 2 and 3: backward-compatible from past.builtins import xrange for i in xrange(10**8): ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
range
# Python 2 only mylist = range(5) assert mylist == [0, 1, 2, 3, 4] # Python 2 and 3: forward-compatible: option 1 mylist = list(range(5)) # copies memory on Py2 assert mylist == [0, 1, 2, 3, 4] # Python 2 and 3: forward-compatible: option 2 from builtins import range mylist = list(range(5)) assert mylist ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
map
# Python 2 only: mynewlist = map(f, myoldlist) assert mynewlist == [f(x) for x in myoldlist] # Python 2 and 3: option 1 # Idiomatic Py3, but inefficient on Py2 mynewlist = list(map(f, myoldlist)) assert mynewlist == [f(x) for x in myoldlist] # Python 2 and 3: option 2 from builtins import map mynewlist = list(map(f,...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
imap
# Python 2 only: from itertools import imap myiter = imap(func, myoldlist) assert isinstance(myiter, iter) # Python 3 only: myiter = map(func, myoldlist) assert isinstance(myiter, iter) # Python 2 and 3: option 1 from builtins import map myiter = map(func, myoldlist) assert isinstance(myiter, iter) # Python 2 and ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
zip, izip As above with zip and itertools.izip. filter, ifilter As above with filter and itertools.ifilter too. Other builtins File IO with open()
# Python 2 only f = open('myfile.txt') data = f.read() # as a byte string text = data.decode('utf-8') # Python 2 and 3: alternative 1 from io import open f = open('myfile.txt', 'rb') data = f.read() # as bytes text = data.decode('utf-8') # unicode, not bytes # Python 2 and 3: alternative 2 ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
reduce()
# Python 2 only: assert reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) == 1+2+3+4+5 # Python 2 and 3: from functools import reduce assert reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) == 1+2+3+4+5
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
raw_input()
# Python 2 only: name = raw_input('What is your name? ') assert isinstance(name, str) # native str # Python 2 and 3: from builtins import input name = input('What is your name? ') assert isinstance(name, str) # native str on Py2 and Py3
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
input()
# Python 2 only: input("Type something safe please: ") # Python 2 and 3 from builtins import input eval(input("Type something safe please: "))
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Warning: using either of these is unsafe with untrusted input. file()
# Python 2 only: f = file(pathname) # Python 2 and 3: f = open(pathname) # But preferably, use this: from io import open f = open(pathname, 'rb') # if f.read() should return bytes # or f = open(pathname, 'rt') # if f.read() should return unicode text
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
exec
# Python 2 only: exec 'x = 10' # Python 2 and 3: exec('x = 10') # Python 2 only: g = globals() exec 'x = 10' in g # Python 2 and 3: g = globals() exec('x = 10', g) # Python 2 only: l = locals() exec 'x = 10' in g, l # Python 2 and 3: exec('x = 10', g, l)
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
But note that Py3's exec() is less powerful (and less dangerous) than Py2's exec statement. execfile()
# Python 2 only: execfile('myfile.py') # Python 2 and 3: alternative 1 from past.builtins import execfile execfile('myfile.py') # Python 2 and 3: alternative 2 exec(compile(open('myfile.py').read())) # This can sometimes cause this: # SyntaxError: function ... uses import * and bare exec ... # See https://githu...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
unichr()
# Python 2 only: assert unichr(8364) == '€' # Python 3 only: assert chr(8364) == '€' # Python 2 and 3: from builtins import chr assert chr(8364) == '€'
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
intern()
# Python 2 only: intern('mystring') # Python 3 only: from sys import intern intern('mystring') # Python 2 and 3: alternative 1 from past.builtins import intern intern('mystring') # Python 2 and 3: alternative 2 from six.moves import intern intern('mystring') # Python 2 and 3: alternative 3 from future.standard_libr...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
apply()
args = ('a', 'b') kwargs = {'kwarg1': True} # Python 2 only: apply(f, args, kwargs) # Python 2 and 3: alternative 1 f(*args, **kwargs) # Python 2 and 3: alternative 2 from past.builtins import apply apply(f, args, kwargs)
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
chr()
# Python 2 only: assert chr(64) == b'@' assert chr(200) == b'\xc8' # Python 3 only: option 1 assert chr(64).encode('latin-1') == b'@' assert chr(0xc8).encode('latin-1') == b'\xc8' # Python 2 and 3: option 1 from builtins import chr assert chr(64).encode('latin-1') == b'@' assert chr(0xc8).encode('latin-1') == b'\xc8...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
cmp()
# Python 2 only: assert cmp('a', 'b') < 0 and cmp('b', 'a') > 0 and cmp('c', 'c') == 0 # Python 2 and 3: alternative 1 from past.builtins import cmp assert cmp('a', 'b') < 0 and cmp('b', 'a') > 0 and cmp('c', 'c') == 0 # Python 2 and 3: alternative 2 cmp = lambda(x, y): (x > y) - (x < y) assert cmp('a', 'b') < 0 and ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
reload()
# Python 2 only: reload(mymodule) # Python 2 and 3 from imp import reload reload(mymodule)
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Standard library dbm modules
# Python 2 only import anydbm import whichdb import dbm import dumbdbm import gdbm # Python 2 and 3: alternative 1 from future import standard_library standard_library.install_aliases() import dbm import dbm.ndbm import dbm.dumb import dbm.gnu # Python 2 and 3: alternative 2 from future.moves import dbm from future....
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
commands / subprocess modules
# Python 2 only from commands import getoutput, getstatusoutput # Python 2 and 3 from future import standard_library standard_library.install_aliases() from subprocess import getoutput, getstatusoutput
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
subprocess.check_output()
# Python 2.7 and above from subprocess import check_output # Python 2.6 and above: alternative 1 from future.moves.subprocess import check_output # Python 2.6 and above: alternative 2 from future import standard_library standard_library.install_aliases() from subprocess import check_output
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
collections: Counter, OrderedDict, ChainMap
# Python 2.7 and above from collections import Counter, OrderedDict, ChainMap # Python 2.6 and above: alternative 1 from future.backports import Counter, OrderedDict, ChainMap # Python 2.6 and above: alternative 2 from future import standard_library standard_library.install_aliases() from collections import Counter,...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
StringIO module
# Python 2 only from StringIO import StringIO from cStringIO import StringIO # Python 2 and 3 from io import BytesIO # and refactor StringIO() calls to BytesIO() if passing byte-strings
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
http module
# Python 2 only: import httplib import Cookie import cookielib import BaseHTTPServer import SimpleHTTPServer import CGIHttpServer # Python 2 and 3 (after ``pip install future``): import http.client import http.cookies import http.cookiejar import http.server
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
xmlrpc module
# Python 2 only: import DocXMLRPCServer import SimpleXMLRPCServer # Python 2 and 3 (after ``pip install future``): import xmlrpc.server # Python 2 only: import xmlrpclib # Python 2 and 3 (after ``pip install future``): import xmlrpc.client
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
html escaping and entities
# Python 2 and 3: from cgi import escape # Safer (Python 2 and 3, after ``pip install future``): from html import escape # Python 2 only: from htmlentitydefs import codepoint2name, entitydefs, name2codepoint # Python 2 and 3 (after ``pip install future``): from html.entities import codepoint2name, entitydefs, name2c...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
html parsing
# Python 2 only: from HTMLParser import HTMLParser # Python 2 and 3 (after ``pip install future``) from html.parser import HTMLParser # Python 2 and 3 (alternative 2): from future.moves.html.parser import HTMLParser
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
urllib module urllib is the hardest module to use from Python 2/3 compatible code. You may like to use Requests (http://python-requests.org) instead.
# Python 2 only: from urlparse import urlparse from urllib import urlencode from urllib2 import urlopen, Request, HTTPError # Python 3 only: from urllib.parse import urlparse, urlencode from urllib.request import urlopen, Request from urllib.error import HTTPError # Python 2 and 3: easiest option from future.standard...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Tkinter
# Python 2 only: import Tkinter import Dialog import FileDialog import ScrolledText import SimpleDialog import Tix import Tkconstants import Tkdnd import tkColorChooser import tkCommonDialog import tkFileDialog import tkFont import tkMessageBox import tkSimpleDialog import ttk # Python 2 and 3 (after ``pip instal...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
socketserver
# Python 2 only: import SocketServer # Python 2 and 3 (after ``pip install future``): import socketserver
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
copy_reg, copyreg
# Python 2 only: import copy_reg # Python 2 and 3 (after ``pip install future``): import copyreg
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
configparser
# Python 2 only: from ConfigParser import ConfigParser # Python 2 and 3 (after ``pip install future``): from configparser import ConfigParser
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
queue
# Python 2 only: from Queue import Queue, heapq, deque # Python 2 and 3 (after ``pip install future``): from queue import Queue, heapq, deque
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
repr, reprlib
# Python 2 only: from repr import aRepr, repr # Python 2 and 3 (after ``pip install future``): from reprlib import aRepr, repr
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
UserDict, UserList, UserString
# Python 2 only: from UserDict import UserDict from UserList import UserList from UserString import UserString # Python 3 only: from collections import UserDict, UserList, UserString # Python 2 and 3: alternative 1 from future.moves.collections import UserDict, UserList, UserString # Python 2 and 3: alternative 2 fr...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
itertools: filterfalse, zip_longest
# Python 2 only: from itertools import ifilterfalse, izip_longest # Python 3 only: from itertools import filterfalse, zip_longest # Python 2 and 3: alternative 1 from future.moves.itertools import filterfalse, zip_longest # Python 2 and 3: alternative 2 from six.moves import filterfalse, zip_longest # Python 2 and ...
docs/notebooks/Writing Python 2-3 compatible code.ipynb
QuLogic/python-future
mit
Uploaded RH and temp data into Python¶ First I upload my data set(s). I am working with environmental data from different locations in the church at differnet dates. Files include: environmental characteristics (CO2, temperature (deg C), and relative humidity (RH) (%) measurements). I can discard the CO2_2 column valu...
#I import a temp and RH data file env=pd.read_table('../Data/CO2May.csv', sep=',') #assigning columns names env.columns=[['test', 'time','temp C', 'RH %', 'CO2_1', 'CO2_2']] #I display my dataframe env #Plot CO2 plt.plot(env['CO2_1'], color='navy') plt.title('CO2 Concentration Spikes with Chapel Population') plt....
organ_pitch/Scripts/upload_env_data.ipynb
taliamo/Final_Project
mit
Visualizing the expected pitch values by time 1. Plot calculated frequency, CO2 (ppm), and measured frequency values
print(calc_freq) #define variables from dataframe columns CO2_1 = env[['CO2_1']] calc_freq=env[['calc_freq']] #measured_pitch = output_from_'pitch_data.py' #want to set x-axis as date_time #how do I format the ax2 y axis scale def make_plot(variable_1, variable_2): '''Make a three variable plot with two axes'...
organ_pitch/Scripts/upload_env_data.ipynb
taliamo/Final_Project
mit
Here we see the relationship between CO2 concentration in parts per million and the expected changes in pitch. Measured pitch values did not match the time of sampling with that of CO2, so therefore could not be plotted. Measured pitch data would have been "ax3". End of script
#def make_fig(datasets, variable_1, variable_2, savename): #twinx layering ax1=plt.subplot() ax2=ax1.twinx() #plot 2 variables in predertermined plot above ax1.plot(dataset.index, variable_1, 'k-', linewidth=2) ax2.plot(dataset.index, variable_2, ) #moving plots lines variable_2_spine=ax2.spines['right'] variable_2_...
organ_pitch/Scripts/upload_env_data.ipynb
taliamo/Final_Project
mit
Lerne Lineare Regression auf Daten
#Lerne Lineare Regression print "Anzahl der Trainingsinstanzen:\t%d"%(X.shape[0]) print "Anzahl der Features:\t\t%d"%(X.shape[1]) model = LinearRegression() model.fit(X,y) #Plotte Daten und die gelernte Funktion plot_data(X,y,model,interactive=True);
RidgeRegression.ipynb
dominikgrimm/ridge_and_svm
mit
<span style="color:orange">Model beschreibt die zugrundeliegenden Daten nur schlecht -> Model ist Unterangepasst!</span> Polynomiale Regression Polynomiale Regression durch hinzufügen von Features höherer Ordnung, z.B. Polynom des 100. Grades: $\mathbf{y} = b + w_1 \mathbf{x}_1 + w_2 \mathbf{x}_1^2 + w_3 \mathbf{x}_...
#Funktion um eine Polynomielle Regression unterschiedlichen Grades zu plotten def render_polynomial_regression(degree=150): #Lerne Lineare Regression auf polynomiellen Features transformer = PolynomialFeatures(degree=degree, include_bias=False) scaler = StandardScaler() model = LinearRegression() #...
RidgeRegression.ipynb
dominikgrimm/ridge_and_svm
mit
<span style="color:orange">Model beschreibt die Daten zu gut --> Model ist Üeberangepasst und führt zu einer schlechten Generalisierung!</span> Einführung in Ridge Regression Ridge Regression Loss ist definiert als: $\mathcal{L}{Ridge}(\mathbf{w})=\frac{1}{n}\sum{i=1}^n \left[y_i - (b - \mathbf{w}^T \mathbf{x}i) \rig...
#Lerne Ridge Regression auf polynomiellen Features mit alpha=1.1 ridge_regression = Pipeline(( ('make_poly_features',PolynomialFeatures(degree=100, include_bias=False)), ("scale_features",StandardScaler()), ("run_ridgereg",Ridge(alpha=1.1)), )) ridge_regression.fit(X,y) plot_data(X,y,ridge_regression,inte...
RidgeRegression.ipynb
dominikgrimm/ridge_and_svm
mit
<span style="color:green">Optimale Abwägung zwischen zu einfachem und zu komplexem Model durch L2-Regularisierung! </span> Effekt von $\alpha$ auf die Gewichte
#Funktion um den Effekt von alpha auf die Gewichte zu illustrieren def plot_effect_of_alpha(interactive=False): coefs = [] alphas = sp.logspace(5,-6,200) poly_feat = PolynomialFeatures(degree=10, include_bias=False) scaler = StandardScaler() for alpha in alphas: model = Ridge(alpha=alpha) ...
RidgeRegression.ipynb
dominikgrimm/ridge_and_svm
mit
Create a gensim LSI document similarity model
from seldon.text import DocumentSimilarity,DefaultJsonCorpus import logging logger = logging.getLogger() logger.setLevel(logging.INFO) corpus = DefaultJsonCorpus(docs) ds = DocumentSimilarity(model_type='gensim_lsi') ds.fit(corpus) print "done"
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Run accuracy tests Run a test over the document to compute average jaccard similarity to the 1-nearest neighbour for each document using the "tags" field of the meta data as the ground truth.
ds.score()
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Run a test again but use the Annoy approximate nearest neighbour index that would have been built. Should be much faster.
ds.score(approx=True)
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Run single nearest neighbour query Run a nearest neighbour query on a single document and print the title and tag meta data
query_doc=6023 print "Query doc: ",ds.get_meta(query_doc)['title'],"Tagged:",ds.get_meta(query_doc)['tags'] neighbours = ds.nn(query_doc,k=5,translate_id=True,approx=True) print neighbours for (doc_id,_) in neighbours: j = ds.get_meta(doc_id) print "Doc id",doc_id,j['title'],"Tagged:",j['tags']
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Save recommender Save the recommender to the filesystem in reuters_recommender folder
import seldon rw = seldon.Recommender_wrapper() rw.save_recommender(ds,"reuters_recommender") print "done"
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Start a microservice to serve the recommender
from seldon.microservice import Microservices m = Microservices() app = m.create_recommendation_microservice("reuters_recommender") app.run(host="0.0.0.0",port=5000,debug=False)
python/examples/doc_similarity_reuters.ipynb
SeldonIO/seldon-server
apache-2.0
Maquetación Python 3 (Markdown)
# Los HEADERS o encabezados se definen mediante #, existen 6 niveles siendo un sólo # el de mayor tamaño y ###### el de menor # Ejemplo:
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Encabezado de tres "###" Encabezado de cinco "#####"
# Emphasis o estilos del texto # *cursiva* # **negrita** # ~~tachado~~ # Ejemplos:
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
texto en cursiva texto en negrita asteriscos y cursiva en una sola línea ~~texto tachado~~
# Como crear listas ordenadas o elementos no númerados # Ejemplos:
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Mediante número seguido de punto se define un elemento ordenado de una lista Segundo elemento del listado (2.) Tercer elemento (3.) Elemento no numerado (mediante "*", también son válidos "-" y "+") Elemento de segundo orden con numeración Elemento 1 Elemento 2 Elemento 3 Sub-elemento 1 Sub-elemento 2
# En iPython se puede escribir código HTML para maquetar o presentar datos # por ejemplo para definir un encabezado se puede escribir <h4>h1 Heading</h4> o, como ya hemos visto, #### Heading 1 # Ejemplo:
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Encabezado mediante MARKDOWN <h4>Encabezado mediante HTML</h4> '<!-- Se puede comentar texto como si de JAVA se tratase -->' <pre> <code> // Comentarios línea 1 de código línea 2 de código línea 3 de código </code> </pre>
# Creación de párrafos mediante MARKDOWN # > especifica el primer nivel de párrafo, sucesivos > profundizan en la sangría de los mismos
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Creación de párrafos Párrafo 1 Párrafo 2 Párrafo 3
# Mediante el uso de ''' ''' se puede mantener una estructura de un comentario, # por ejemplo cuando se escribe código para que se vea legible.
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Ejemplo de comentario de un código js grunt.initConfig({ assemble: { options: { assets: 'docs/assets', data: 'src/data/*.{json,yml}', helpers: 'src/custom-helpers.js', partials: ['src/partials/**/*.{hbs,md}'] }, pages: { options: { layout: 'default.hbs' }, ...
# Creación de tablas en MARKDOWN # | Option | Description | # | ------ | ----------- | # Si se usa : en la linea anterior se alinea el texto a izquiera o derecha, con : a ambos lados se alinea centrado # | ------: | :----------- | # | data: | path to data files to supply the data that will be passed into templates. |...
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
| Opción | Descripción | | :----: | :---------- | | datos 1 | texto 1 | | datos 2 | texto 2 | | datos 3 | texto 3 |
# Enlaces incrustados mediante MARKDOWN # [Texto](http://web "comentario mouseover")
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Enlace básico Enlace con información al realizar un mouseover
# Incrustado de imágenes # ![Texto](http://imagen "comentario mouseover")
python/howto/001_Markdown y maquetación.ipynb
xMyrst/BigData
gpl-3.0
Exercise 1 a)
std = 0.1 means = np.array([[-0.5, -0.2], [0, 0.6], [0.5, 0]]) num_samples_per_mean = 30 num_samples = len(means) * num_samples_per_mean x = np.vstack([np.random.normal(mean, std, size=[num_samples_per_mean, 2]) for mean in means]) plt.scatter(x[:, 0], x[:, 1], label='data') plt.scatter(means[:, 0], means[:, 1], c='...
sheet04/4.ipynb
jrieke/machine-intelligence-2
mit
b)
def rbf_kernel(x_alpha, x_beta, sigma=1): return np.exp(-np.linalg.norm(x_alpha - x_beta)**2 / (2 * sigma**2)) rbf_kernel(x[0], x[1]), rbf_kernel(x[0], x[-1]) kernel_matrix = np.zeros((num_samples, num_samples)) for (i, j), value in np.ndenumerate(kernel_matrix): kernel_matrix[i, j] = rbf_kernel(x[i], x[j], ...
sheet04/4.ipynb
jrieke/machine-intelligence-2
mit
c)
grids_pc_values = [] # one grid for each PC, containing the projected values of the test points for this PC grid_x = np.linspace(-0.8, 0.8, 10) grid_y = np.linspace(-0.6, 1, 10) for evec in evecs[:8]: grid = np.zeros((len(grid_x), len(grid_y))) for (i, j), _ in np.ndenumerate(grid): vec = np.array([...
sheet04/4.ipynb
jrieke/machine-intelligence-2
mit
Vhanilla RNN class and functions
class RNN_cell(object): """ RNN cell object which takes 3 arguments for initialization. input_size = Input Vector size hidden_layer_size = Hidden layer size target_size = Output vector size """ def __init__(self, input_size, hidden_layer_size, target_size): # Initialization of gi...
Vhanilla_RNN/RNN.ipynb
KnHuq/Dynamic-Tensorflow-Tutorial
mit
Placeholder and initializers
hidden_layer_size = 110 input_size = 8 target_size = 10 y = tf.placeholder(tf.float32, shape=[None, target_size],name='inputs')
Vhanilla_RNN/RNN.ipynb
KnHuq/Dynamic-Tensorflow-Tutorial
mit
Models
#Initializing rnn object rnn=RNN_cell( input_size, hidden_layer_size, target_size) #Getting all outputs from rnn outputs = rnn.get_outputs() #Getting final output through indexing after reversing last_output = outputs[-1] #As rnn model output the final layer through Relu activation softmax is used for final output. ...
Vhanilla_RNN/RNN.ipynb
KnHuq/Dynamic-Tensorflow-Tutorial
mit
Dataset Preparation
sess=tf.InteractiveSession() sess.run(tf.global_variables_initializer()) #Using Sklearn MNIST dataset. digits = load_digits() X=digits.images Y_=digits.target # One hot encoding Y = sess.run(tf.one_hot(indices=Y_, depth=target_size)) #Getting Train and test Dataset X_train, X_test, y_train, y_test = train_test_split...
Vhanilla_RNN/RNN.ipynb
KnHuq/Dynamic-Tensorflow-Tutorial
mit
First we need to define materials that will be used in the problem. We'll create three distinct materials for water, clad and fuel.
# 1.6% enriched fuel fuel = openmc.Material(name='1.6% Fuel') fuel.set_density('g/cm3', 10.31341) fuel.add_nuclide('U235', 3.7503e-4) fuel.add_nuclide('U238', 2.2625e-2) fuel.add_nuclide('O16', 4.6007e-2) # borated water water = openmc.Material(name='Borated Water') water.set_density('g/cm3', 0.740582) water.add_nucli...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
With our materials, we can now create a Materials object that can be exported to an actual XML file.
# Instantiate a Materials collection materials_file = openmc.Materials([fuel, water, zircaloy]) # Export to "materials.xml" materials_file.export_to_xml()
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Now let's move on to the geometry. Our problem will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces -- in this case two cylinders and six reflective planes.
# Create cylinders for the fuel and clad fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.39218) clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.45720) # Create box to surround the geometry box = openmc.model.rectangular_prism(1.26, 1.26, boundary_type='reflective')
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
With the surfaces defined, we can now create cells that are defined by intersections of half-spaces created by the surfaces.
# Create a Universe to encapsulate a fuel pin pin_cell_universe = openmc.Universe(name='1.6% Fuel Pin') # Create fuel Cell fuel_cell = openmc.Cell(name='1.6% Fuel') fuel_cell.fill = fuel fuel_cell.region = -fuel_outer_radius pin_cell_universe.add_cell(fuel_cell) # Create a clad Cell clad_cell = openmc.Cell(name='1.6%...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
We now must create a geometry with the pin cell universe and export it to XML.
# Create Geometry and set root Universe openmc_geometry = openmc.Geometry(pin_cell_universe) # Export to "geometry.xml" openmc_geometry.export_to_xml()
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Next, we must define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 10,000 particles.
# OpenMC simulation parameters batches = 50 inactive = 10 particles = 10000 # Instantiate a Settings object settings_file = openmc.Settings() settings_file.batches = batches settings_file.inactive = inactive settings_file.particles = particles settings_file.output = {'tallies': True} # Create an initial uniform spati...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Now we are finally ready to make use of the openmc.mgxs module to generate multi-group cross sections! First, let's define "coarse" 2-group and "fine" 8-group structures using the built-in EnergyGroups class.
# Instantiate a "coarse" 2-group EnergyGroups object coarse_groups = mgxs.EnergyGroups([0., 0.625, 20.0e6]) # Instantiate a "fine" 8-group EnergyGroups object fine_groups = mgxs.EnergyGroups([0., 0.058, 0.14, 0.28, 0.625, 4.0, 5.53e3, 821.0e3, 20.0e6])
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Now we will instantiate a variety of MGXS objects needed to run an OpenMOC simulation to verify the accuracy of our cross sections. In particular, we define transport, fission, nu-fission, nu-scatter and chi cross sections for each of the three cells in the fuel pin with the 8-group structure as our energy groups.
# Extract all Cells filled by Materials openmc_cells = openmc_geometry.get_all_material_cells().values() # Create dictionary to store multi-group cross sections for all cells xs_library = {} # Instantiate 8-group cross sections for each cell for cell in openmc_cells: xs_library[cell.id] = {} xs_library[cell.i...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Next, we showcase the use of OpenMC's tally precision trigger feature in conjunction with the openmc.mgxs module. In particular, we will assign a tally trigger of 1E-2 on the standard deviation for each of the tallies used to compute multi-group cross sections.
# Create a tally trigger for +/- 0.01 on each tally used to compute the multi-group cross sections tally_trigger = openmc.Trigger('std_dev', 1e-2) # Add the tally trigger to each of the multi-group cross section tallies for cell in openmc_cells: for mgxs_type in xs_library[cell.id]: xs_library[cell.id][mgx...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Now, we must loop over all cells to set the cross section domains to the various cells - fuel, clad and moderator - included in the geometry. In addition, we will set each cross section to tally cross sections on a per-nuclide basis through the use of the MGXS class' boolean by_nuclide instance attribute.
# Instantiate an empty Tallies object tallies_file = openmc.Tallies() # Iterate over all cells and cross section types for cell in openmc_cells: for rxn_type in xs_library[cell.id]: # Set the cross sections domain to the cell xs_library[cell.id][rxn_type].domain = cell # Tally cro...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Now we have a complete set of inputs, so we can go ahead and run our simulation.
# Run OpenMC openmc.run()
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Tally Data Processing Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object.
# Load the last statepoint file sp = openmc.StatePoint('statepoint.082.h5')
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
The statepoint is now ready to be analyzed by our multi-group cross sections. We simply have to load the tallies from the StatePoint into each object as follows and our MGXS objects will compute the cross sections for us under-the-hood.
# Iterate over all cells and cross section types for cell in openmc_cells: for rxn_type in xs_library[cell.id]: xs_library[cell.id][rxn_type].load_from_statepoint(sp)
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
That's it! Our multi-group cross sections are now ready for the big spotlight. This time we have cross sections in three distinct spatial zones - fuel, clad and moderator - on a per-nuclide basis. Extracting and Storing MGXS Data Let's first inspect one of our cross sections by printing it to the screen as a microscopi...
nufission = xs_library[fuel_cell.id]['nu-fission'] nufission.print_xs(xs_type='micro', nuclides=['U235', 'U238'])
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Our multi-group cross sections are capable of summing across all nuclides to provide us with macroscopic cross sections as well.
nufission = xs_library[fuel_cell.id]['nu-fission'] nufission.print_xs(xs_type='macro', nuclides='sum')
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Although a printed report is nice, it is not scalable or flexible. Let's extract the microscopic cross section data for the moderator as a Pandas DataFrame.
nuscatter = xs_library[moderator_cell.id]['nu-scatter'] df = nuscatter.get_pandas_dataframe(xs_type='micro') df.head(10)
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Next, we illustrate how one can easily take multi-group cross sections and condense them down to a coarser energy group structure. The MGXS class includes a get_condensed_xs(...) method which takes an EnergyGroups parameter with a coarse(r) group structure and returns a new MGXS condensed to the coarse groups. We illust...
# Extract the 8-group transport cross section for the fuel fine_xs = xs_library[fuel_cell.id]['transport'] # Condense to the 2-group structure condensed_xs = fine_xs.get_condensed_xs(coarse_groups)
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Group condensation is as simple as that! We now have a new coarse 2-group TransportXS in addition to our original 8-group TransportXS. Let's inspect the 2-group TransportXS by printing it to the screen and extracting a Pandas DataFrame as we have already learned how to do.
condensed_xs.print_xs() df = condensed_xs.get_pandas_dataframe(xs_type='micro') df
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Verification with OpenMOC Now, let's verify our cross sections using OpenMOC. First, we construct an equivalent OpenMOC geometry.
# Create an OpenMOC Geometry from the OpenMC Geometry openmoc_geometry = get_openmoc_geometry(sp.summary.geometry)
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Next, we can inject the multi-group cross sections into the equivalent fuel pin cell OpenMOC geometry.
# Get all OpenMOC cells in the geometry openmoc_cells = openmoc_geometry.getRootUniverse().getAllCells() # Inject multi-group cross sections into OpenMOC Materials for cell_id, cell in openmoc_cells.items(): # Ignore the root cell if cell.getName() == 'root cell': continue # Get a referenc...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
We are now ready to run OpenMOC to verify our cross-sections from OpenMC.
# Generate tracks for OpenMOC track_generator = openmoc.TrackGenerator(openmoc_geometry, num_azim=128, azim_spacing=0.1) track_generator.generateTracks() # Run OpenMOC solver = openmoc.CPUSolver(track_generator) solver.computeEigenvalue()
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
We report the eigenvalues computed by OpenMC and OpenMOC here together to summarize our results.
# Print report of keff and bias with OpenMC openmoc_keff = solver.getKeff() openmc_keff = sp.k_combined.n bias = (openmoc_keff - openmc_keff) * 1e5 print('openmc keff = {0:1.6f}'.format(openmc_keff)) print('openmoc keff = {0:1.6f}'.format(openmoc_keff)) print('bias [pcm]: {0:1.1f}'.format(bias))
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
As a sanity check, let's run a simulation with the coarse 2-group cross sections to ensure that they also produce a reasonable result.
openmoc_geometry = get_openmoc_geometry(sp.summary.geometry) openmoc_cells = openmoc_geometry.getRootUniverse().getAllCells() # Inject multi-group cross sections into OpenMOC Materials for cell_id, cell in openmoc_cells.items(): # Ignore the root cell if cell.getName() == 'root cell': continue ...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
There is a non-trivial bias in both the 2-group and 8-group cases. In the case of a pin cell, one can show that these biases do not converge to <100 pcm with more particle histories. For heterogeneous geometries, additional measures must be taken to address the following three sources of bias: Appropriate transport-co...
# Create a figure of the U-235 continuous-energy fission cross section fig = openmc.plot_xs('U235', ['fission']) # Get the axis to use for plotting the MGXS ax = fig.gca() # Extract energy group bounds and MGXS values to plot fission = xs_library[fuel_cell.id]['fission'] energy_groups = fission.energy_groups x = ene...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Another useful type of illustration is scattering matrix sparsity structures. First, we extract Pandas DataFrames for the H-1 and O-16 scattering matrices.
# Construct a Pandas DataFrame for the microscopic nu-scattering matrix nuscatter = xs_library[moderator_cell.id]['nu-scatter'] df = nuscatter.get_pandas_dataframe(xs_type='micro') # Slice DataFrame in two for each nuclide's mean values h1 = df[df['nuclide'] == 'H1']['mean'] o16 = df[df['nuclide'] == 'O16']['mean'] #...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Matplotlib's imshow routine can be used to plot the matrices to illustrate their sparsity structures.
# Create plot of the H-1 scattering matrix fig = plt.subplot(121) fig.imshow(h1, interpolation='nearest', cmap='jet') plt.title('H-1 Scattering Matrix') plt.xlabel('Group Out') plt.ylabel('Group In') # Create plot of the O-16 scattering matrix fig2 = plt.subplot(122) fig2.imshow(o16, interpolation='nearest', cmap='jet...
examples/jupyter/mgxs-part-ii.ipynb
smharper/openmc
mit
Compute the model The simplest way to perform the Noddy simulation through Python is to call the executable. One way that should be fairly platform independent is to use Python's own subprocess module:
# Change to sandbox directory to store results os.chdir(os.path.join(repo_path, 'sandbox')) # Path to example directory in this repository example_directory = os.path.join(repo_path,'examples') # Compute noddy model for history file history_file = 'strike_slip.his' history = os.path.join(example_directory, history_fil...
docs/notebooks/9-Topology.ipynb
flohorovicic/pynoddy
gpl-2.0
For convenience, the model computations are wrapped into a Python function in pynoddy:
pynoddy.compute_model(history, output_name) pynoddy.compute_topology(root_name, files)
docs/notebooks/9-Topology.ipynb
flohorovicic/pynoddy
gpl-2.0