markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
QUESTION 3
import numpy as np

# Cross product of two 3-vectors: cross[i] follows the right-hand rule.
A = np.array([2, 7, 4])
B = np.array([3, 9, 8])
cross = np.cross(A, B)
print(cross)
[20 -4 -3]
Apache-2.0
PRELIM_EXAM.ipynb
Singko25/Linear-Algebra-58020
from google.colab import drive drive.mount('/content/drive') import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchvision.datasets as dset import torchvision.transforms as T from torch.utils.data import TensorDataset from torch.utils.data import DataLoader from torch...
_____no_output_____
MIT
Mean & SD.ipynb
sharlenechen0113/Real-Estate-Price-Prediction
Install Python. Run `jupyter notebook --no-browser`. In your favorite browser, type http://localhost:8888 (or the port that is assigned). Basic usage of Jupyter notebooks: - create a new document by clicking New Notebook - start typing code in the shaded textbox - execute the code
# Evaluate the quadratic f(x) = a*x**2 + b*x + c at one point, then print
# a small multiplication table for a = 1..4 (with b = 1 throughout).
x = 0.1
N = 3  # kept: later cells in the notebook reuse N
a = 1
b = 0
c = -1
print('f(' + str(x) + ') = ' + str(a*x**2 + b*x + c))
b = 1
for a in range(1, 5):
    # same four values the original printed explicitly: a*b .. a*(b+3)
    print(*(a * (b + k) for k in range(4)))
(1, 2, 3, 4) (2, 4, 6, 8) (3, 6, 9, 12) (4, 8, 12, 16)
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Fibonacci Series
N = 0 a_1 = 1 a_2 = 0 x = 1 if N>0: print('x_' + str(0) + ' = ' + str(x) ) for i in range(1,N): x = a_1 + a_2 print('x_' + str(i) + ' = ' + str(x) ) a_2 = a_1 a_1 = x l = -1 r = 1 delta = 0.1 steps = (r-l)/delta+1 print '-'*20 print('| '), print('x'), print('| '), print('3*x**2 + 2*x +...
1 112.0 2 125.44 3 140.4928 4 157.351936 5 176.23416832 6 197.382268518 7 221.068140741 8 247.596317629 9 277.307875745
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Arrays, lists
# Lists, indexing, and three ways to traverse a list in reverse order.
a = [1, 2, 5, 7, 5, 3.2, 7]
names = ['Ali', 'Veli', 'Fatma', 'Asli']

print(names[3])      # last element, by direct index
print(len(names))

# 1) explicit reverse index range
for i in range(len(names) - 1, -1, -1):
    print(names[i])

# 2) index arithmetic.  BUG FIX: the original printed
#    names[len(names) - i], which is out of range at i == 0;
#    the correct offset is len(names) - 1 - i.
for i in range(len(names)):
    print(names[len(names) - 1 - i])

# 3) idiomatic: reversed() yields the elements back to front
for n in reversed(names):
    print(n)
Asli Fatma Veli Ali
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Average and standard deviation
# Two-pass mean and (population) variance of a list of numbers.
#x = [0.1,3,-2.1,5,12,3,17]
x = [1, -1, 0]

total = 0.0
for value in x:
    total += value
mean = total / len(x)

sq_dev = 0.0
for value in x:
    sq_dev += (value - mean) ** 2
variance = sq_dev / len(x)

print('mean = ')
print(mean)
print('variance = ')
print(variance)
mean = 0.0 variance = 0.666666666667
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Find the minimum in an array
# Find the minimum of a list with a linear scan, then demo list methods.
a = [2, 5, 1.2, 0, -4, 3]
mn = a[0]
for value in a[1:]:
    if value < mn:
        mn = value
print(mn)

a.sort()
a.append(-7)
v = a.pop()       # removes the -7 just appended
a.reverse()
v = a.pop(0)      # removes the (now-first) largest element
a.sort            # NOTE: missing parentheses -- a no-op, not a call
a = 5
a.bit_length      # likewise unevaluated (missing parentheses)
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Homework: Value counts given an array of integers
a = [5, 3, 1, 1, 6, 3, 2] ua = [] for j in a: found = False for i in ua: if j==i: found = True; break; if not found: ua.append(j) print(ua) for i in ua: s = 0 for j in a: if i==j: s = s+1 print(i, s...
[True, True, False, False, True, True, False, True, False] [False, False, True, True, False, False, True, False, False]
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Generate random walk in an array
import random

# Gaussian random walk: x_0 = 0 and x_{k+1} = x_k + w_k with
# w_k ~ N(mu, sig); `a` collects all N + 1 positions.
N = 10
mu = 0
sig = 1

x = 0
a = [x]
for _ in range(N):
    x += random.gauss(mu, sig)
    a.append(x)

print(a)
len(a)
[0, 0.07173293905450501, -0.3652340160453349, -0.07610430577230803, -1.4172015782500376, -0.31469586619290335, -1.4458834127459201, -0.7189045208807692, 0.9895551731951309, 0.1012103597338051, -1.0353093339238497]
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
List Comprehension
N = 100 mu = 0 sig = 1 a = [random.gauss(mu, sig) for i in range(N)] for i in range(len(a)-1): a[i+1] = a[i] + a[i+1] %matplotlib inline import matplotlib.pylab as plt plt.plot(a) plt.show()
/Users/cemgil/anaconda/envs/py27/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment. warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Moving Average
# Window Lenght W = 20 y = [] for i in range(len(a)): s = 0 n = 0 for j in range(W): if i-j < 0: break; s = s + a[i-j] n = n + 1 y.append(s/n) plt.plot(a) plt.plot(y) plt.show()
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Moving average, second version
# Window Lenght W = 20 y = [] s = 0 n = 0 for i in range(len(a)): s = s + a[i] if i>=W: s = s - a[i-W] else: n = n + 1 y.append(s/n) plt.plot(a) plt.plot(y) plt.show() def mean(a): s = 0 for x in a: s = s+x return s/float(len(a)) def var(a): m...
2.5 1.25
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Mean and Variance, online calculation
def mean(a):
    """Online (incremental) mean of a sequence.

    Maintains the invariant that after processing element i, mu is the
    mean of a[:i + 1]; returns 0.0 for an empty sequence.
    """
    mu = 0.0
    for i, value in enumerate(a):
        mu = i / (i + 1.0) * mu + 1.0 / (i + 1.0) * value
    return mu


a = [3, 4, 1, 2]
#print(a)
print(mean(a))
2.5
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Implement the recursive formula for the variance
for i in range(1,len(a)+1): print(i) a = [i**2 for i in range(10)] a st = 'if' if st == 'if': print('if') elif st == 'elif': print('elif') else: print('not elif') if x<10 and x>3: for i in range(10): if i%2: continue print i x del x from math import exp import math as m ...
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Catalog
def fun(x, par): print(x, par['volatility']) params = {'volatility': 0.1, 'interest_rate': 0.08} sig = params['volatility'] r = params['interest_rate'] fun(3, params) plate = {'Istanbul':34} city = 'Istanbul' print 'the number plate for', city,'is', plate[city] plate = {'Istanbul':34, 'Adana': '01', 'Ankara': ...
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Tuples (Immutable Arrays, no change possible after creation)
a = ('Ankara', '06') a.count('Istanbul') %matplotlib inline import numpy as np import matplotlib.pylab as plt x = np.arange(-2,2,0.1) plt.plot(x,x) plt.plot(x,x**2) plt.plot(x,np.sin(x)) plt.show()
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Numpy arrays versus matrices
A = np.random.rand(3,5) x = np.random.rand(5,1) print(A.dot(x)) A = np.mat(A) x = np.mat(x) print(A*x) a = np.mat(np.random.rand(3,1)) b = np.mat(np.random.rand(3,1)) print(a) print(b) a.T*b N = 1000 D = 3 X = np.random.rand(N, D) mu = X.mean(axis=0, keepdims=True) #print(mu) print((X - mu).T.dot(X-mu)/(N-1.)) n...
[[ 1 2 3 4 5 6 7 8 9 10] [ 2 4 6 8 10 12 14 16 18 20] [ 3 6 9 12 15 18 21 24 27 30] [ 4 8 12 16 20 24 28 32 36 40] [ 5 10 15 20 25 30 35 40 45 50] [ 6 12 18 24 30 36 42 48 54 60] [ 7 14 21 28 35 42 49 56 63 70] [ 8 16 24 32 ...
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
B&S with Monte Carlo Call and Put pricing, use a catalog and numpy, avoid using for loops
import numpy as np def European(Param, S0=1., T=1., Strike=1.,N=10000 ): ''' Price_Call, Price_Put = European(Param, S0, T, Strike,N) Param: Market parameters, a catalog with fields Param['InterestRate'] : Yearly risk free interest rate Param['Volatility'] : S0 : ...
European Call= 15.5726197769 Put = 5.13556380233 Asian Call= 8.16477817074 Put = 3.17271035914 Lookback Call= 25.6819276647 Put = 12.5838549789 FloatingLookback Call= 23.0385882044 Put = 15.3296952253
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Next week Assignment:Consolidate all pricing methods into one single function avoiding code repetitions.
def OptionPricer(type_of_option, Params): ''' Price_Call, Price_Put = OptionPricer(type_of_option, Param, S0, T, Strike, Steps, N) type_of_option = 'European' 'Asian', 'Lookback', 'FloatingLookback' Param: Parameter catalog with fields Param['InterestRate'] :...
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Next week: Kalman Filtering (Learn Numpy and matplotlib)
th = 0.5 A = np.mat([[np.cos(th), np.sin(th)],[-np.sin(th), np.cos(th)]]) x = np.mat([1,0]).T x = np.mat([[1],[0]]) for t in range(10): x = A*x + 0*np.random.randn(2,1) print(x) name = raw_input("What is your name? ") print name def fun(x): print x, x = map(fun,range(1,10+1)) x = map(fun,range(1,10+...
_____no_output_____
MIT
fe588/fe588_introduction.ipynb
bkoyuncu/notes
Importing classes from the `tcalc` module
from TCalc.tcalc import eyepiece, telescope, barlow_lens, focal_reducer
_____no_output_____
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
To quickly access the docstring, run `help(classname)`
help(eyepiece)
Help on class eyepiece in module TCalc.tcalc: class eyepiece(builtins.object) | eyepiece(f_e, fov_e=50) | | Class representing a single eyepiece | Args: | f_e: focal length of the eyepiece (mm) | fov_e: field of view of the eyepiece (deg). Defaults to 50 degrees. | | Methods defined here: |...
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
As an example, let's estimate the specifications of Celestron's 8 SE telescope.
c8 = telescope(D_o=203.2, f_o=2032, user_D_eye=None, user_age=22) # adding configuration of 8in scope omni_40 = eyepiece(40, 52) # defining 40 mm eyepiece omni_25 = eyepiece(25, 52) # defining 25 mm eyepiece # adding eyepiece to the telescope c8.add_eyepiece(omni_40, id='omni_40', select=True) c8.add_eyepiece(omni_25, ...
The telescope has the following layout: Aperture diameter: 203.2 mm Focal length: 2032 mm, corresponding to a focal ratio of 10.0 'barlow 1', a Barlow lens, has been added to the optical path. This increases the focal length by 2 This results in Focal length: 4064 mm, corresponding to a foca...
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
You can notice that if you used a *2x Barlow lens* on a *40mm eyepiece*, the brightness of the object will be decreased by **4 times!** This way you can simulate different scenarios and find out which accessories are optimal for your purpose. This will save you both time and money on costly accessories! For advanced users, t...
c8.show_resolving_power() c8.show_magnification_limits() c8.show_eyepiece_limits()
_____no_output_____
MIT
docs/tutorials/TCalc_tutorial.ipynb
Bhavesh012/Telescope-Calculator
Week 6 - SMM695Matteo DevigiliJune, 28th 2021[_PySpark_](https://spark.apache.org/docs/latest/api/python/index.html): during this lecture, we will approach Spark through Python **Agenda**:1. Introduction to Spark1. Installing PySpark1. PySpark Basics1. PySpark and Pandas1. PySpark and SQL1. Load data from your DBMS I...
#to create a spark session object from pyspark.sql import SparkSession # functions import pyspark.sql.functions as F # data types from pyspark.sql.types import * # import datetime from datetime import date as dt
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
* More info on **Functions** at these [link-1](https://spark.apache.org/docs/latest/api/python/pyspark.sql.htmlmodule-pyspark.sql.functions) & [link-2](https://spark.apache.org/docs/2.3.0/api/sql/index.htmlyear)* More info on **Data Types** at this [link](https://spark.apache.org/docs/latest/sql-ref-datatypes.html) Op...
# to open a Session spark = SparkSession.builder.appName('last_dance').getOrCreate()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
**Spark UI**The spark UI is useful to monitor your application. You have the following tabs:* *Jobs*: info concerning Spark jobs* *Stages*: info on individual stages and their tasks* *Storage*: info on data that is currently in our spark application* *Environment*: info on configurations and current settings of our app...
spark
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Create DataframeIn order to create a dataframe from scratch, we need to:1. Create a schema, passing: * Column names * Data types1. Pass values as an array of tuples
# Here, I define a schema # .add(field, data_type=None, nullable=True, metadata=None) schema = StructType().add("id", "integer", True).add("first_name", "string", True).add( "last_name", "string", True).add("dob", "date", True) ''' schema = StructType().add("id", IntegerType(), True).add("first_name", StringType(...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
**Transformations*** Immutability: once created, data structures can not be changed* Lazy evaluation: computational instructions will be executed at the very last **Actions*** view data* collect data* write to output data sources PySpark and Pandas Load a csv Loading a csv file from you computer, you need to type:* P...
# import pandas import pandas as pd # import SparkFiles from pyspark import SparkFiles # target dataset url = 'https://raw.githubusercontent.com/fivethirtyeight/data/master/bechdel/movies.csv' # loading data with pandas db = pd.read_csv(url) # loading data with pyspark spark.sparkContext.addFile(url) df = spark.read...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Inspecting dataframes
# pandas info db.info() # pyspark schema df.printSchema() # pandas fetch 5 db.head(5) # pyspark fetch 5 df.show(5) df.take(5) # pandas filtering: db[db.year == 1970] # pyspark filtering: df[df.year == 1970].show() # get columns and data types print(""" Pandas db.columns: =================== {} PySpark df.columns: ===...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Columns
# pandas add a column db['newcol'] = db.domgross/db.intgross # pyspark add a column df=df.withColumn('newcol', df.domgross/df.intgross) # pandas rename columns db.rename(columns={'newcol': 'dgs/igs'}, inplace=True) # pyspark rename columns df=df.withColumnRenamed('newcol', 'dgs/igs')
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Drop
# pandas drop `code' column db.drop('code', axis=1, inplace=True) # pyspark drop `code' column df=df.drop('code') # pandas dropna() db.dropna(subset=['domgross'], inplace=True) # pyspark dropna() df=df.dropna(subset='domgross')
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Stats
# pandas describe db.describe() # pyspark describe df.describe(['year', 'budget']).show()
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Pyspark and SQL
# pyspark rename 'budget_2013$' df=df.withColumnRenamed('budget_2013$', 'budget_2013') # Create a temporary table df.createOrReplaceTempView('bechdel') # Run a simple SQL command sql = spark.sql("""SELECT imdb, year, title, budget FROM bechdel LIMIT(5)""") sql.show() # AVG budget differences sql_avg = spark.sql( ...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
Load data from DBMS To run the following you need to restart the notebook.
# to create a spark session object from pyspark.sql import SparkSession
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
PostgreSQL To interact with postgre you need to: * Download the *postgresql-42.2.22.jar file* [here](https://jdbc.postgresql.org/download.html)* Include the path to the downloaded jar file into SparkSession()
# Open a session running data from PostgreSQL spark_postgre = SparkSession \ .builder \ .appName("last_dance_postgre") \ .config("spark.jars", "/Users/matteodevigili/py3venv/dms695/share/py4j/postgresql-42.2.22.jar") \ .getOrCreate() spark_postgre # Read data from PostgreSQL running at localhost df = sp...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
MongoDB For further reference check the [Python Guide provided by Mongo](https://docs.mongodb.com/spark-connector/current/python-api/) or the [website for the mongo-spark connector](https://spark-packages.org/package/mongodb/mongo-spark).
# add path to Mongo spark_mongo = SparkSession \ .builder \ .appName("last_dance_mongo") \ .config("spark.mongodb.input.uri", "mongodb://127.0.0.1/amazon.music") \ .config("spark.mongodb.output.uri", "mongodb://127.0.0.1/amazon.music") \ .config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-...
_____no_output_____
MIT
week-6/sc_6.ipynb
mattDevigili/dms-smm695
______Universidad Tecnológica Nacional, Buenos Aires__\__Ingeniería Industrial__\__Cátedra de Investigación Operativa__\__Autor: Rodrigo Maranzana______ Ejercicio 3 Un agente comercial realiza su trabajo en tres ciudades A, B y C. Para evitar desplazamientos innecesarios está todo el día en la misma ciudad y allí pern...
import numpy as np
_____no_output_____
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Ingresamos los datos de la matriz de transición en una matriz numpy:
# Transition matrix of the three-city Markov chain (each row sums to 1).
T = np.array([
    [0.1, 0.3, 0.6],
    [0.2, 0.2, 0.6],
    [0.2, 0.4, 0.4],
])
# Show it:
print(f'Matriz de transición: \n{T}')
Matriz de transición: [[0.1 0.3 0.6] [0.2 0.2 0.6] [0.2 0.4 0.4]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Ejercicio A En primer lugar, calculamos la matriz de transición habiendo pasado 4 días: elevamos la matriz a la cuarta usando el método de la potencia de álgebra lineal de la librería Numpy.
# Cálculo de la matriz de transición a tiempo 4: T4 = np.linalg.matrix_power(T, 4) # printeamos la matriz de transicion de 4 pasos: print(f'Matriz de transición a tiempo 4: \n{T4}\n')
Matriz de transición a tiempo 4: [[0.1819 0.3189 0.4992] [0.1818 0.319 0.4992] [0.1818 0.3174 0.5008]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Sabiendo que $p_0$ considera que el agente está en el nodo C:$ p_0 = (0, 0, 1) $
# Initial state vector: the agent starts in city C, so all probability
# mass sits on the third component.
p_0 = np.array([0, 0, 1])
# Show the initial state:
print(f'Vector de estado a tiempo 0: \n{p_0}\n')
Vector de estado a tiempo 0: [0 0 1]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Calculamos: $ p_0 T^4 = p_4 $
# Cálculo del estado a tiempo 4, p_4: p_4 = np.dot(p_0, T4) # printeamos p4: print(f'Vector de estado a tiempo 4: \n{p_4}\n')
Vector de estado a tiempo 4: [0.1818 0.3174 0.5008]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Dado el vector $ p_4 $, nos quedamos con el componente perteneciente al estado C.
# Componente del nodo C: p_4_c = p_4[2] # printeamos lo obtenido: print(f'Probabilidad de estar en c habiendo iniciado en c: \n{p_4_c}\n')
Probabilidad de estar en c habiendo iniciado en c: 0.5008
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Forma alternativa de resolución:El resultado es el mismo si consideramos que la componente ${T^4}_{cc}$ es la probabilidad de transición del nodo c al mismo nodo habiendo pasado 4 ciclos.Veamos cómo se obtiene esa componente:
# Componente de cc de la matriz de transición a tiempo 4: T4cc = T4[2,2] print('\n ** Probabilidad de estar en c habiendo iniciado en c: \n %.5f' % T4cc)
** Probabilidad de estar en c habiendo iniciado en c: 0.50080
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Ejercicio B Dada una matriz $A$ proveniente del sistema de ecuaciones que resuelve $\pi T = \pi$
# Coefficient matrix of the steady-state system pi T = pi, with the
# normalisation equation sum(pi) = 1 appended as the last row.
A = np.array([
    [-0.9, 0.2, 0.2],
    [0.3, -0.8, 0.4],
    [0.6, 0.6, -0.6],
    [1, 1, 1],
])
print(f'Matriz asociada al sistema lineal de ecuaciones: \n{A}')
Matriz asociada al sistema lineal de ecuaciones: [[-0.9 0.2 0.2] [ 0.3 -0.8 0.4] [ 0.6 0.6 -0.6] [ 1. 1. 1. ]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Y dado un vector $B$ relacionado con los términos independientes del sistema de ecuaciones anteriormente mencionado.
# Right-hand side: zeros for the balance equations plus a 1 for the
# normalisation row sum(pi) = 1.
B = np.array([0] * 3 + [1])
# Show B:
print(f'Vector de términos independientes: \n{B}')
Vector de términos independientes: [0 0 0 1]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Dado que el solver de numpy solamente admite sistemas lineales cuadrados por el algoritmo que usa para la resolución [1], debemos eliminar una de las filas (cualquiera) de la matriz homogénea y quedarnos con la fila relacionada a la ecuación $ \sum_i{\pi_i} = 1$.Hacemos lo mismo para el vector de términos independiente...
# Copio la matriz A original, para que no se modifique. A_s = A.copy() # Eliminamos la primer fila de la matriz A: A_s = np.delete(A_s, 0, 0) # Printeamos: print(f'Matriz asociada al sistema lineal de ecuaciones: \n{A_s}') print(f'\n -> Dimensión: {A_s.shape}') # Copio el vector B original, para que no se modifique....
Vector de términos independientes: [0 0 1] -> Dimensión: (3,)
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Cumpliendo con un sistema cuadrado, usamos el método solve de numpy para obtener $x$ del sistema $Ax = B$
# Solve the (now square) steady-state system A_s x = B_s.
x = np.linalg.solve(A_s, B_s)
print('\n ** Vector solución de estado estable: \n %s' % x)
** Vector solución de estado estable: [0.18181818 0.31818182 0.5 ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Forma alternativa: usando una matriz no cuadradaComo explicamos anteriormente no podemos usar el método $solve$ en matrices no cuadradas. En su lugar podemos usar el método de los mínimos cuadrados para aproximar la solución[2]. Este método no tiene restricciones en cuanto a la dimensión de la matriz.El desarrollo del...
# Least-squares solution of the overdetermined system A x = B;
# rcond=None opts into the new default and silences the legacy warning.
x_lstsq, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
print('\n ** Vector solución de estado estable: \n %s' % x_lstsq)
** Vector solución de estado estable: [0.18181818 0.31818182 0.5 ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Cálculo auxiliar: partiendo directamente de la matriz de transiciónEn la resolución original, usamos una matriz A relacionada al sistema lineal de ecuaciones que resolvimos a mano. Ahora veremos otra forma de llegar a la solución solamente con los datos dados y tratamiento de matrices.Partiendo del sistema original: $...
# Primero calculamos la traspuesta de la matriz de transición: Tt = np.transpose(T) print(f'\nT traspuesta: \n{Tt}') # Luego con calculamos la matriz A, sabiendo que es la traspuesta de T menos la identidad. A1 = Tt - np.identity(Tt.shape[0]) print(f'\nMatriz A: \n{A1}')
T traspuesta: [[0.1 0.2 0.2] [0.3 0.2 0.4] [0.6 0.6 0.4]] Matriz A: [[-0.9 0.2 0.2] [ 0.3 -0.8 0.4] [ 0.6 0.6 -0.6]]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Seguimos con: $B = 0$
# The right-hand side of the homogeneous system is the zero vector.
B1 = np.array([0.0] * 3)
print(f'\nVector B: \n{B1}')
Vector B: [0. 0. 0.]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
A partir de aca, simplemente aplicamos el método que ya sabemos. Agregamos la información correspondiente a: $\sum_i{\pi_i} = 1$.
# Copio la matriz A1 original, para que no se modifique. A1_s = A1.copy() # Agregamos las probabilidades a la matriz A eq_suma_p = np.array([[1, 1, 1]]) A1_s = np.concatenate((A1_s, eq_suma_p), axis=0) # Printeamos: print(f'Matriz A: \n{A1_s}') # Copio el vector B1 original, para que no se modifique. B1_s = B1.copy...
Vector B: [0. 0. 0. 1.]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Resolvemos por mínimos cuadrados:
# Resolvemos con método de mínimos cuadrados: x_lstsq, _, _, _ = np.linalg.lstsq(A1_s, B1_s, rcond=None) # Printeamos la solucion: print(f'\nVector solución de estado estable: {x_lstsq}')
Vector solución de estado estable: [0.18181818 0.31818182 0.5 ]
Apache-2.0
04_markov/.ipynb_checkpoints/ejercicio_3-checkpoint.ipynb
juanntripaldi/pyOperativ
Wavefront set inpainting real phantom In this notebook we are implementing a Wavefront set inpainting algorithm based on a hallucination network
%matplotlib inline import os os.environ["CUDA_VISIBLE_DEVICES"]="0" # Import the needed modules from data.data_factory import generate_realphantom_WFinpaint, DataGenerator_realphantom_WFinpaint from ellipse.ellipseWF_factory import plot_WF import matplotlib.pyplot as plt import numpy.random as rnd import numpy as np ...
/store/kepler/datastore/andrade/GitHub_repos/Joint_CTWF_Recon/WF_inpaint/data/data_factory.py:7: UserWarning: This call to matplotlib.use() has no effect because the backend has already been chosen; matplotlib.use() must be called *before* pylab, matplotlib.pyplot, or matplotlib.backends is imported for the first time...
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
Data generator
batch_size = 1 size = 256 nClasses = 180 lowd = 40 y_arr, x_true_arr =generate_realphantom_WFinpaint(batch_size, size, nClasses, lowd) plt.figure(figsize=(6,6)) plt.axis('off') plot_WF(y_arr[0,:,:,0]) plt.figure(figsize=(6,6)) plt.axis('off') plot_WF(x_true_arr[0,:,:,0])
_____no_output_____
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
Load the model
# Tensorflow and seed seed_value = 0 import random random.seed(seed_value) import tensorflow as tf tf.set_random_seed(seed_value) # Importing relevant keras modules from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger from tensorflow.keras.models import load_model from shared.shared import create_increasi...
Epoch 1/10000 111/112 [============================>.] - ETA: 13s - loss: 0.9985 - my_mean_squared_error: 111.1464 - mean_squared_error: 111.1464 - mean_absolute_error: 0.9985 - l2_on_wedge: 107.8654 - my_psnr: -5.8870
MIT
WF_inpaint/WF_inpaint_realphantom_unet_train.ipynb
arsenal9971/DeeMicrolocalReconstruction
7. Vertical Vibration of Quarter Car ModelThis notebook introduces the base excitation system by examning the behavior of a quarter car model.After the completion of this assignment students will be able to:- excite a system with a sinusoidal input- understand the difference in transient and steady state solutions- cr...
import numpy as np import matplotlib.pyplot as plt %matplotlib notebook from resonance.linear_systems import SimpleQuarterCarSystem sys = SimpleQuarterCarSystem()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The simple quarter car model has a suspension stiffness and damping, along with the sprung car mass in kilograms, and a travel speed parameter in meters per second.
sys.constants sys.coordinates sys.speeds
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
A sinusoidal road. The road is described as: $$y(t) = Y \sin(\omega_b t)$$ where $Y$ is the amplitude of the sinusoidal road undulations and $\omega_b$ is the frequency, a function of the car's speed. If the distance between the peaks (amplitude 0.01 meters) of the sinusoidal road is 6 meters and the car is traveling at...
Y = 0.01 # m v = sys.constants['travel_speed'] bump_distance = 6 # m wb = v / bump_distance * 2 * np.pi # rad /s print(wb)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now with the amplitude and frequency set you can use the `sinusoidal_base_displacing_response()` function to simulate the system.
traj = sys.sinusoidal_base_displacing_response(Y, wb, 20.0) traj.head() traj.plot(subplots=True);
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
We've written an animation for you. You can play it with:
sys.animate_configuration(fps=20)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
**Exercise**Try different travel speeds and see what kind of behavior you can observe. Make sure to set the `travel_speed` constant and the frequency value for `sinusoidal_base_displacing_response()` to be consistent. TransmissibilityWhen designing a car the designer wants the riders to feel comfortable and to isolate...
from scipy.optimize import curve_fit def cosine_func(times, amp, freq, phase_angle): return amp * np.cos(freq * times - phase_angle) frequencies = np.linspace(1.0, 20.0, num=100) amplitudes = [] for omega in frequencies: traj = sys.sinusoidal_base_displacing_response(Y, omega, 20.0) popt, pcov = cu...
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The second thing to investigate is the *force transmissibility*. This is the ratio of the force applied by the suspension to the sprung car. Riders will feel this force when the car travels over bumps. Reducing this is also preferable. The force applied to the car can be compared to the **Exercise**: Create a measureme...
Y = 0.01 # m bump_distance = 6 # m def force_on_car(suspension_damping, suspension_stiffness, car_vertical_position, car_vertical_velocity, travel_speed, time): wb = travel_speed / bump_distance * 2 * np.pi y = Y * np.sin(wb * time) yd = Y * wb * np.cos(wb * ti...
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Force transmissibility will be visited more in your next homework. Arbitrary Periodic Forcing (Fourier Series)Fourier discovered that any periodic function with a period $T$ can be described by an infinite series of sums of sines and cosines. See the wikipedia article for more info (https://en.wikipedia.org/wiki/Four...
import sympy as sm
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The function `init_printing()` enables LaTeX based rendering in the Jupyter notebook of all SymPy objects.
sm.init_printing()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Symbols can be created by using the `symbols()` function.
x, y, z = sm.symbols('x, y, z') x, y, z
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `integrate()` function allows you to do symbolic indefinite or definite integrals. Note that the constants of integration are not included in indefinite integrals.
sm.integrate(x * y, x)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `Integral` class creates and unevaluated integral, where as the `integrate()` function automatically evaluates the integral.
expr = sm.Integral(x * y, x) expr
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
To evaluate the unevaluated form you call the `.doit()` method. Note that all unevaluated SymPy objects have this method.
expr.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
This shows how to create an unevaluated definite integral, store it in a variable, and then evaluate it.
expr = sm.Integral(x * y, (x, 0, 5)) expr expr.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Fourier Coefficients for the Sawtooth function. Now let's compute the Fourier coefficients for a sawtooth function. The function that describes the sawtooth is:$$F(t) = \begin{cases} A \left( \frac{4t}{T} - 1 \right) & 0 \leq t \leq T/2 \\ A \left( 3 - \frac{4t}{T} \right) & T/2 \leq t \leq T \end{cases}$$w...
A, T, wT, t = sm.symbols('A, T, omega_T, t', real=True, positive=True) A, T, wT, t
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The first Fourier coefficient $a_0$ describes the average value of the periodic function. and is:$$a_0 = \frac{2}{T} \int_0^T F(t) dt$$This integral will have to be done in two parts:$$a_0 = a_{01} + a_{02} = \frac{2}{T} \int_0^{T/2} F(t) dt + \frac{2}{T} \int_{T/2}^T F(t) dt$$These two integrals are evaluated below. N...
ao_1 = 2 / T * sm.Integral(A * (4 * t / T - 1), (t, 0, T / 2)) ao_1 ao_1.doit() ao_2 = 2 / T * sm.Integral(A * (3 - 4 * t / T), (t, T / 2, T)) ao_2 ao_2.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
But SymPy can also handle piecewise directly. The following shows how to define a piecewise function.
F_1 = A * (4 * t / T - 1) F_2 = A * (3 - 4 * t / T) F = sm.Piecewise((F_1, t<=T/2), (F_2, T/2<t)) F F_of_t_only = F.xreplace({A: 0.01, T: 2 * sm.pi / wb}) F_of_t_only sm.plot(F_of_t_only, (t, 0, 2 * np.pi / wb))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The integral can be taken of the entire piecewise function in one call.
sm.integrate(F, (t, 0, T))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now the Fourier coefficients $a_n$ and $b_n$ can be computed.$$a_n = \frac{2}{T}\int_0^T F(t) \cos n\omega_Tt dt \\b_n = \frac{2}{T}\int_0^T F(t) \sin n\omega_Tt dt$$
n = sm.symbols('n', real=True, positive=True)
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
For $a_n$:
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T)) an an.doit()
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
This can be simplified:
an = an.doit().simplify() an
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now substitute the $2\pi/T$ for $\omega_T$.
an = an.subs({wT: 2 * sm.pi / T}) an
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Let's see how this function varies with increasing $n$. We will use a loop but the SymPy expressions will not automatically display because they are inside a loop. So we need to use SymPy's `latex()` function and the IPython display tools. SymPy's `latex()` function transforms the SymPy expression into a string of mat...
sm.latex(an, mode='inline')
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The `display()` and `LaTeX()` functions then turn the LaTeX string in to a displayed version.
from IPython.display import display, Latex
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Now we can see how $a_n$ varies with $n=1,2,\ldots$.
# Show a_1 through a_5 by substituting successive integer values for n.
for k in range(1, 6):
    coeff = an.subs({n: k})
    display(Latex('$a_{} = $'.format(k) + sm.latex(coeff, mode='inline')))
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
For even $n$ values the coefficient is zero and for odd values it varies with the inverse of $n^2$. More precisely:$$a_n =\begin{cases}0 & \textrm{if }n\textrm{ is even} \\-\frac{8A}{n^2\pi^2} & \textrm{if }n\textrm{ is odd}\end{cases}$$SymPy can actually reduce this further if you set the assumption that $n$ is an i...
# Redeclare n with the integer assumption so SymPy can reduce the
# trigonometric terms of the coefficient further.
n = sm.symbols('n', real=True, positive=True, integer=True)
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T))
an = an.doit().simplify()
an.subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The odd and even versions can be computed by setting the respective assumptions.
# With n additionally assumed odd, the coefficient collapses to a single
# closed-form term (the even case is zero).
n = sm.symbols('n', real=True, positive=True, integer=True, odd=True)
an = 2 / T * sm.Integral(F * sm.cos(n * wT * t), (t, 0, T))
an = an.doit().simplify()
an.subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Note that $b_n$ is always zero:
# Sine Fourier coefficient b_n; evaluating shows it is identically zero
# for this forcing function.
bn = 2 / T * sm.Integral(F * sm.sin(n * wT * t), (t, 0, T))
bn
bn.doit().simplify().subs({wT: 2 * sm.pi / T})
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Numerical evaluation of the Fourier SeriesNow the Fourier coefficients can be used to plot the approximation of the sawtooth forcing function.
import numpy as np
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
The following function plots the actual sawtooth function. It does it all in one line by cleverly using the absolute value and the modulo functions.
def sawtooth(A, T, t):
    """Return a sawtooth wave of amplitude A and period T sampled at times t."""
    # |t % T - T/2| folds time into a triangle wave; the scale and offset
    # turn it into a ramp that sweeps between -A and +A each period.
    return (4 * A / T) * (T / 2 - np.abs(t % T - T / 2)) - A


A = 1
T = 2
t = np.linspace(0, 5, num=500)

plt.figure()
plt.plot(t, sawtooth(A, T, t));
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
ExerciseWrite a function that computes the Fourier approximation of the sawtooth function for a given value of $n$, i.e. using a finite number of terms. Then plot it for $n=2, 4, 6, 8, 10$ on top of the actual sawtooth function. How many terms of the infinite series are needed to get a good sawtooth?```pythondef sawto...
def sawtooth_approximation(n, A, T, t): # odd values of indexing variable up to n n = np.arange(1, n+1)[:, np.newaxis] # cos coefficients an = A *(8 * (-1)**n - 8) / 2 / np.pi**2 / n**2 # sawtooth frequency wT = 2 * np.pi / T # sum of n cos functions f = np.sum(an * np.cos(n * wT * t), a...
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Below is an interactive plot that shows the same thing as above.
A = 1 T = 2 t = np.linspace(0, 5, num=500) fig, ax = plt.subplots(1, 1) f = sawtooth(A, T, t) saw_tooth_lines = ax.plot(t, f, color='k') n = 2 f_approx = sawtooth_approximation(n, A, T, t) approx_lines = ax.plot(t, f_approx) leg = ax.legend(['true', 'approx, n = {}'.format(n)]) # zoom in a bit on the interestin...
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Apply the sawtooth to the quarter carNow that you know the Fourier series coefficients. Calculate them for a suitable number of terms and simulate them with the `sys.periodic_base_displacing_response()` function.Your code should look something like:```pythondef fourier_coeffs(A, T, N): write your code herea0, an, ...
def fourier_coeffs(A, T, N): n = np.arange(1, N+1) an = A *(8 * (-1)**n - 8) / 2 / np.pi**2 / n**2 return 0, an, np.zeros_like(an) a0, an, bn = fourier_coeffs(0.01, 2 * np.pi / wb, 100) traj = sys.periodic_base_displacing_response(a0, an, bn, wb, 20.0) traj.plot(subplots=True) sys.animate_configuration(fp...
_____no_output_____
MIT
notebooks/07/07_vertical_vibration_of_a_quarter_car.ipynb
gbrault/resonance
Define the data directory of the preprocessed data
# Absolute path to the preprocessed dataset (tab-separated values despite the .csv suffix).
data_directory = "C:/Users/kwokp/OneDrive/Desktop/Study/zzz_application project/Final/data_after_preprocessing.csv"
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
We devide the data into 3 groups:* Group 1: full data* Group 2: data with four large categories which have more than 1000 companies each* Group 3: seven categories of data, number of companies in each category is same but small In the function selectGroup, giving 1, 2 or 3 as input parameter to selet the relevant data...
# read the data from directory, then select the group # of data we want to process. def selectGroup(directory, group_nr): data = pd.read_csv(directory, sep='\t') if group_nr == 1: return data if group_nr == 2: df_healthcare_group=data[data['Category'] == 'HEALTHCARE GROUP'].sample(n=1041,re...
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
List Occurrence of Words in Top 50 Keywords in Categories
#visualize top_n words with occurence def visulaze_topwords_occurence(top_n, word_list, occurence_list): objects = word_list y_pos = np.arange(len(word_list)) performance = occurence_list plt.figure(figsize=(10,24)) plt.barh(y_pos, performance, align='center', alpha=0.5) plt.yticks(y_pos, objec...
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
We remove the redundunt words which appears in multiple category . Main steps are as follows:1. select the group of data to do the test2. generate TF-IDF score matrix3. get the top 50 words in each category4. find the words which appears in more than one category's top-50 words, set them as stopwords5. remove these st...
#get the data, remove the frequent words which appear in more than one category, and update the tf-idf score matrix data = selectGroup(data_directory, 1) score_matrix, feature_extraction = tf_idf_func(data['clean'], 8000) sortedDict = get_top_keywords_with_frequence(50, score_matrix, data, feature_extraction) _, _, fre...
Cluster BUSINESS & FINANCIAL SERVICES learn,agreement,need,insurance,media,experience,financial,companies,clients,marketing Cluster CONSUMER GOODS GROUP read,sites,address,brand,organic,home,shipping,ingredients,foods,food Cluster CONSUMER SERVICES GROUP experience,media,sites,world,address,parties,people,day,agreem...
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Split the data 80% for training and 20% for testing
df_final = df_score_valid[df_score_valid.columns.difference(['Keep', 'Category'])] #remove columns'Keep' and 'Category' df_category = df_score_valid['Category'].reset_index(drop=True) msk = np.random.rand(len(df_final)) < 0.8 train_x = np.nan_to_num(df_final[msk]) test_x = np.nan_to_num(df_final[~msk]) train_y = df_c...
_____no_output_____
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Perform Linear SVM
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score #use svm classifier to classify TF-IDF of each website def linear_svc_classifier(train_x, train_y, test_x, test_y): print("start svm") classifier_svm = svm.LinearSVC() classifier_svm.fit(train_x, train_y) predictions = ...
start svm [[145 4 24 2 3 5 66] [ 4 28 16 0 3 7 5] [ 24 7 115 1 8 2 30] [ 6 0 0 21 0 2 3] [ 8 5 6 1 135 4 15] [ 15 3 6 3 1 45 9] [ 68 4 32 1 6 9 225]] precision recall f1-score support BUSINESS &...
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Perform KNN with 5 Neighbours
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score #use knn classifier to classify TF-IDF of each website def knn_classifier(x_train, y_train, x_test, y_test): print("start knn") modelknn = KNeighborsClassifier(n_neighbors=5) modelknn.fit(x_train, y_train) predictions =...
start knn [[153 7 22 4 5 4 54] [ 8 25 19 1 3 2 5] [ 33 17 89 1 12 2 33] [ 10 1 0 16 2 0 3] [ 18 5 8 0 133 3 7] [ 21 4 4 4 2 34 13] [106 6 40 2 8 7 176]] precision recall f1-score support BUSINESS &...
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Perform K means and Plot SSE, PCA and TSNE
from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import PCA from sklearn.manifold import TSNE import matplotlib.cm as cm import itertools #Find the optimal clusters from 2 to maximum of clusters of data group, plot respective SSE. def fi...
Fit 2 clusters Fit 4 clusters Fit 6 clusters Cluster 0 devices,storage,application,performance,networks,infrastructure,enterprise,solution,wireless,network Cluster 1 reserved,click,read,learn,world,copyright,need,home,wordpress,domain Cluster 2 dr,healthcare,treatment,cancer,care,health,patient,medical,clinical,pati...
MIT
Codes/2.2 Remove redundant words - SVM,KNN,Kmeans_v2.ipynb
matchlesswei/application_project_nlp_company_description
Carpetplots
import opengrid as og from opengrid.library import plotting as og_plot import pandas as pd from joule import meta, filter_meta plt = og.plot_style() #%matplotlib notebook #%matplotlib notebook for building in meta['RecordNumber'].unique(): ts = pd.read_pickle('data/Electricity_{}.pkl'.format(building)).sum(axis=1)*...
_____no_output_____
Apache-2.0
Carpet.ipynb
saroele/jouleboulevard
Zbozinek TD, Perez OD, Wise T, Fanselow M, & Mobbs D
import numpy as np import pandas as pd import matplotlib.pyplot as plt from theano import scan import theano.tensor as T import pymc3 as pm import theano import seaborn as sns import os, sys, subprocess
_____no_output_____
Apache-2.0
modeling/modeling code/Experiment_2_Direct_Associations.ipynb
tzbozinek/2nd-order-occasion-setting
Load Data
data = pd.read_csv(os.path.join('../data/', "2nd_POS_Modeling_Data_Direct_Associations.csv")) data['DV'] = ((data['DV'].values - 1) / 2) - 1 observed_R = data.pivot(columns = 'ID', index = 'trialseq', values = 'DV').values[:, np.newaxis, :] #values.T transposes the data, so you can make trials the first dimension or p...
_____no_output_____
Apache-2.0
modeling/modeling code/Experiment_2_Direct_Associations.ipynb
tzbozinek/2nd-order-occasion-setting