markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Upload dataNow provide the path to the csv file
f1 = "DN150417.csv" data = Table.read(f1, format='ascii.csv', guess=False, delimiter=',') slope = 50* u.deg v0 = [] vel_col = "D_DT_geo" h_col = "height" data.sort([h_col]) data = data[::-1]
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 1 This section calculates and plots $\alpha$ and $\beta$ values Section 1.1 - Normalising Data
alt = [] vel = [] # remove any nan values from velocity column for v in range(len(data[vel_col])): if data[vel_col][v] >1.: vel.append(data[vel_col][v]) alt.append(data[h_col][v]) vel = np.asarray(vel) alt = np.asarray(alt) # approximate initial velocity, if not already given if v0 == []...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 1.2 - Calculate $\alpha$ and $\beta$ values
Gparams= Q4_min(Vvalues, Yvalues) alpha_1 = Gparams[0] beta_1 = Gparams[1] print(f"alpha = {alpha_1}") print(f"beta = {beta_1}") print(f"using inital velocity v0 = {v0}") print(f"\nOther useful values:") print(f"log alpha = {np.log(alpha_1)}") print(f"log beta = {np.log(beta_1)}") print(f"log(alpha sin(slope)) = {np....
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 1.3 - Plotting plot the fit using the calculated $\alpha$ and $\beta$ values
plt.close() # plt.rcParams['figure.dpi'] = 10 plt.rcParams['figure.figsize'] = [5, 5] x = np.arange(0.1,1, 0.00005); #create a matrix of x values fun = lambda x:np.log(alpha_1) + beta_1 - np.log((scipy.special.expi(beta_1) - scipy.speci...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
determine the final mass, and if it lies above an expected final mass limit
plt.close() print("mf = the final mass boundary limit plotted,\nmu = the shape change coefficient (2/3 for uniform ablation across the surface for high spin, 0 flow no spin and fronal ablaiton only)\ncd = the drag coefficient (1 for sphere,\nrho = the density of the meteoroid\nA = the cross sectional area to volume rat...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 2 This section calculates and plots $\alpha$, $\beta$ and $V_0$ values Section 2.1 - smoothing data
alt1 = [] vel1 = [] # smooth data for v in range(1, len(vel)-1): vel1.append((vel[v-1]+vel[v]+vel[v+1]) / 3) alt1.append(alt[v]) alt_smooth = [] vel_smooth = [] # smooth data again for v in range(1, len(vel1)-1): vel_smooth.append((vel1[v-1]+vel1[v]+vel1[v+1]) / 3) alt_smooth.append(alt1[v]) ...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 2.2 - Calculate $\alpha$, $\beta$ and $V_0$ valuesThis calls the Q4_min_v function
Gparams= Q4_min_v(vel_smooth, alt_smooth, h0, v0) alpha_2 = Gparams[0] beta_2 = Gparams[1] v0_calc = Gparams[2] Yvalues_2 = [j / h0 for j in alt] Vvalues_2 = [j / v0_calc for j in vel] print(f"alpha = {alpha_2}") print(f"beta = {beta_2}") print(f"v0 = {v0_calc}") print(f"\nOther useful values:") print(f"log alpha...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 2.3 - Plotting
plt.close() # plt.rcParams['figure.dpi'] = 10 plt.rcParams['figure.figsize'] = [5, 5] x = np.arange(0.1,1, 0.00005); #create a matrix of x values fun = lambda x:np.log(alpha_2) + beta_2 - np.log((scipy.special.expi(beta_2) - scipy.speci...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
determine the final mass, and if it lies above an expected final mass limit
plt.close() print("mf = the final mass boundary limit plotted,\nmu = the shape change coefficient (2/3 for uniform ablation across the surface for high spin, 0 flow no spin and fronal ablaiton only)\ncd = the drag coefficient (1 for sphere,\nrho = the density of the meteoroid\nA = the cross sectional area to volume rat...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 3 This section allows you to manipulate $\alpha$, $\beta$ and $V_0$ values Section 3.1 - interactive $\alpha$, $\beta$ and $V_0$ determination
plt.close() x = np.arange(0.1,1, 0.00005); #create a matrix of x values def f(V0,alpha, beta):#, mu, cd):#, rho, A): cd = 1 A = 1.21 rho = 3500 mu =2/3. yvalues = [j / h0 for j in alt] vvalues = [j / V0 for j i...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 3.2 - interactive mass determination
plt.close() print("mf = the final mass boundary limit plotted,\nmu = the shape change coefficient (2/3 for uniform ablation across the surface for high spin, 0 flow no spin and fronal ablaiton only)\ncd = the drag coefficient (1 for sphere,\nrho = the density of the meteoroid\nA = the cross sectional area to volume rat...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Section 4 interactive mass determination for 3 sections combined
plt.close() print("mf = the final mass boundary limit plotted,\nmu = the shape change coefficient (2/3 for uniform ablation across the surface for high spin, 0 flow no spin and fronal ablaiton only)\ncd = the drag coefficient (1 for sphere,\nrho = the density of the meteoroid\nA = the cross sectional area to volume rat...
_____no_output_____
MIT
alpha_beta_v0_fun.ipynb
desertfireballnetwork/alpha_beta_modules
Train test split
X = pd.get_dummies(X_df,drop_first=True) print(type(X),X.shape) print(type(X_df),X_df.shape) #X = pd.get_dummies(X_df,drop_first=True) Y = Ybin Z = Zbin test_train_ratio = 0.5 #TODO: Can I do this in Keras? # TODO : what should be left to the user # split into train/test set X_train, X_test, y_train, y_test, Z_tra...
(16280, 100) (16280, 100) (16280, 100) (16280, 100)
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Performance metrics
def main_task_performance(X_test,y_test,y_hat): main_task_accuracy = accuracy_score(y_test, y_hat) return main_task_accuracy def bias_accuracy_performance(X_test,y_test,Z_test,trained_model): # predict on test set y_pred = trained_model.predict(X_test).ravel()#, index=y_test.index y_hat = (y_pred>...
_____no_output_____
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Architectures
def Classifier_arch(n_features): inputs = Input(shape=(n_features,)) dense1 = Dense(32, activation='relu')(inputs) dropout1 = Dropout(0.2)(dense1) dense2 = Dense(32, activation='relu')(dropout1) dropout2 = Dropout(0.2)(dense2) dense3 = Dense(32, activation="relu")(dropout2) dropout3 = Dropou...
_____no_output_____
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Creating and saving the classifer architecture
main_task_arch = Classifier_arch(n_features=X_train.shape[1]) main_task_arch_json_string = main_task_arch.to_json()
_____no_output_____
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Creating and saving the adversary architecture
adv_inputs = Input(shape=(1,)) adv_task_arch = Adversary_arch(adv_inputs) adv_task_arch_json_string = adv_task_arch.to_json()
_____no_output_____
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Try reading the classifier architecture and model/compile and check prediction results
main_task_ori = model_from_json(main_task_arch_json_string) # initialise NeuralNet Classifier main_task_ori.compile(loss='binary_crossentropy', optimizer='adam') # train on train set main_task_ori.fit(X_train, y_train, epochs=20, verbose=0) main_task_accuracy, p_rule_for_Y1 = bias_accuracy_performance(X_test,y_test,Z_...
Accuracy: 85.21 p_rule_for_Y1 29.30
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Combining the main task arch with the adversarial arch
# HIDE class FairClassifier(object): def __init__(self, tradeoff_lambda,main_task_arch_json_string,adv_task_arch_json_string,pre_load_flag=True): self.tradeoff_lambda = tradeoff_lambda clf_net = self._create_clf_net(main_task_arch_json_string) adv_net = self....
_____no_output_____
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Creating the combied architecture and checking the performance of main task after the pre-training using trained model weights
tradeoff_lambda = 100 pre_load_flag = True # initialise FairClassifier clf = FairClassifier(tradeoff_lambda=tradeoff_lambda, main_task_arch_json_string=main_task_arch_json_string, adv_task_arch_json_string=adv_task_arch_json_string,pre_load_flag=pre_load_flag) # pre-train...
Accuracy: 85.21 p_rule_for_Y1 29.30
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
without using trained model weights
tradeoff_lambda = 100 pre_load_flag = False # initialise FairClassifier clf = FairClassifier(tradeoff_lambda=tradeoff_lambda, main_task_arch_json_string=main_task_arch_json_string, adv_task_arch_json_string=adv_task_arch_json_string,pre_load_flag=pre_load_flag) # pre-trai...
Accuracy: 85.10 p_rule_for_Y1 29.19
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
Fair model - training adversarial and classifier together using trained model weights
tradeoff_lambda_v = [100] pre_load_flag = True for tradeoff_lambda in tradeoff_lambda_v: print('tradeoff_lambda = ', tradeoff_lambda) # initialise FairClassifier clf = FairClassifier(tradeoff_lambda=tradeoff_lambda, main_task_arch_json_string=main_task_arch_json_string, ...
Accuracy: 81.97 p_rule_for_Y1 93.76
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
without using trained model weights
tradeoff_lambda_v = [100] pre_load_flag = False for tradeoff_lambda in tradeoff_lambda_v: print('tradeoff_lambda = ', tradeoff_lambda) # initialise FairClassifier clf = FairClassifier(tradeoff_lambda=tradeoff_lambda, main_task_arch_json_string=main_task_arch_json_string, ...
Accuracy: 82.08 p_rule_for_Y1 93.47
MIT
step_by_step-10.ipynb
OzgunBu/Data-Fairness---part1
classifcation algorithms * Logistic regression* K Nearest Neighbor* Support vector machine* Kernal SVM* Naive bayes* Decision Trees* Random Forest loading libraries
import numpy as np import matplotlib.pyplot as plt import pandas as pd #packages for splitting dataset from sklearn.model_selection import train_test_split #Feature scaling from sklearn.preprocessing import StandardScaler #Model building from sklearn.linear_model import LogisticRegression from sklearn.neighbors import...
_____no_output_____
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Loading Dataset
dataset = pd.read_csv('Social_Network_Ads.csv')
_____no_output_____
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Exploratory data analysis
dataset.head(1) dataset.describe() dataset.shape #checking the shape of our dataframe o.p : 400 , 3 dataset.columns dataset["Age"].hist(bins = 75) dataset["EstimatedSalary"].hist(bins = 150) dataset["Purchased"].value_counts()
_____no_output_____
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Logistic Regression
#splitting data into dependent and independent variables X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values # Splitting the dataset into the Training set and Test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0) # Feature Scaling sc = StandardScaler() X_train...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
KNN
# Training the K-NN model on the Training set classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2) classifier.fit(X_train, y_train) # Predicting a new result print(classifier.predict(sc.transform([[30,87000]]))) # Predicting the Test set results y_pred = classifier.predict(X_test) print(np.co...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
support vector machine
# Training the SVM model on the Training set classifier = SVC(kernel = 'linear', random_state = 0) classifier.fit(X_train, y_train) # Predicting a new result print(classifier.predict(sc.transform([[30,87000]]))) # Predicting the Test set results y_pred = classifier.predict(X_test) print(np.concatenate((y_pred.reshape(l...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
kernal SVM
# Training the Kernel SVM model on the Training set classifier = SVC(kernel = 'rbf', random_state = 0) classifier.fit(X_train, y_train) # Predicting a new result print(classifier.predict(sc.transform([[30,87000]]))) # Predicting the Test set results y_pred = classifier.predict(X_test) print(np.concatenate((y_pred.resha...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Naive Bayes
from sklearn.naive_bayes import GaussianNB classifier = GaussianNB() classifier.fit(X_train, y_train) print(classifier.predict(sc.transform([[30,87000]]))) y_pred = classifier.predict(X_test) print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1)) cm = confusion_matrix(y_test, y_pred) pr...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Decision trees
from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0) classifier.fit(X_train, y_train) # Predicting a new result print(classifier.predict(sc.transform([[30,87000]]))) # Predicting the Test set results y_pred = classifier.predict(X_test) print(np.con...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Random Forest
from sklearn.ensemble import RandomForestClassifier classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0) classifier.fit(X_train, y_train) # Predicting a new result print(classifier.predict(sc.transform([[30,87000]]))) # Predicting the Test set results y_pred = classifier.predi...
*c* argument looks like a single numeric RGB or RGBA sequence, which should be avoided as value-mapping will have precedence in case its length matches with *x* & *y*. Please use the *color* keyword-argument or provide a 2-D array with a single row if you intend to specify the same RGB or RGBA value for all points. *c...
MIT
Classification.ipynb
ManishSreerangam/ClassificationAlgorithms
Visualization Matplotlib Objectives1. Create a basic line plot.1. Add labels and grid lines to the plot.1. Plot multiple series of data.1. Plot imshow, contour, and filled contour plots.*This notebook was modified from one developed by Unidata* Getting Help with MatplotlibHere are some important resources for learn...
%matplotlib inline
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Next we import the matplotlib library's `pyplot` interface; this interface is the simplest way to create new Matplotlib figures. To shorten this long name, we import it as `plt` to keep things short but clear.
import matplotlib.pyplot as plt import numpy as np
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Now we generate some data to use while experimenting with plotting:
times = np.array([ 93., 96., 99., 102., 105., 108., 111., 114., 117., 120., 123., 126., 129., 132., 135., 138., 141., 144., 147., 150., 153., 156., 159., 162.]) temps = np.array([310.7, 308.0, 296.4, 289.5, 288.5, 287.1, 301.1, 308.3, 311.5, 305.1, 295.6, 292.4, 2...
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Now we come to two quick lines to create a plot. Matplotlib has two core objects: the `Figure` and the `Axes`. The `Axes` is an individual plot with an x-axis, a y-axis, labels, etc; it has all of the various plotting methods we use. A `Figure` holds one or more `Axes` on which we draw; think of the `Figure` as the lev...
# Create a figure fig = plt.figure(figsize=(10, 6)) # Ask, out of a 1x1 grid, the first axes. ax = fig.add_subplot(1, 1, 1) # Plot times as x-variable and temperatures as y-variable ax.plot(times, temps)
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
From there, we can do things like ask the axis to add labels for x and y:
# Add some labels to the plot ax.set_xlabel('Time') ax.set_ylabel('Temperature') # Prompt the notebook to re-display the figure after we modify it fig
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
We can also add a title to the plot:
ax.set_title('GFS Temperature Forecast', fontdict={'size':16}) fig
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Of course, we can do so much more...
# Set up more temperature data temps_1000 = np.array([316.0, 316.3, 308.9, 304.0, 302.0, 300.8, 306.2, 309.8, 313.5, 313.3, 308.3, 304.9, 301.0, 299.2, 302.6, 309.0, 311.8, 304.7, 304.6, 301.8, 300.6, 299.9, 306.3, 311.3])
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Here we call `plot` more than once to plot multiple series of temperature on the same plot; when plotting we pass `label` to `plot` to facilitate automatic creation. This is added with the `legend` call. We also add gridlines to the plot using the `grid()` call.
fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1, 1, 1) # Plot two series of data # The label argument is used when generating a legend. ax.plot(times, temps, label='Temperature (surface)') ax.plot(times, temps_1000, label='Temperature (1000 mb)') # Add labels and title ax.set_xlabel('Time') ax.set_ylabel('Te...
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
We're not restricted to the default look of the plots, but rather we can override style attributes, such as `linestyle` and `color`. `color` can accept a wide array of options for color, such as `red` or `blue` or HTML color codes. Here we use some different shades of red taken from the Tableau color set in matplotlib,...
fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1, 1, 1) # Specify how our lines should look ax.plot(times, temps, color='tab:red', label='Temperature (surface)') ax.plot(times, temps_1000, color='tab:red', linestyle='--', label='Temperature (isobaric level)') # Same as above ax.set_xlabel('Time') ax.s...
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Exercise* Use `add_subplot` to create two different subplots on the figure* Create one subplot for temperature, and one for dewpoint* Set the title of each subplot as appropriate* Use `ax.set_xlim` and `ax.set_ylim` to control the plot boundaries* **BONUS:** Experiment with passing `sharex` and `sharey` to `add_subplo...
# Fake dewpoint data to plot dewpoint = 0.9 * temps dewpoint_1000 = 0.9 * temps_1000 # Create the figure fig = plt.figure(figsize=(10, 6)) # YOUR CODE GOES HERE
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Solution
# %load solutions/subplots.py
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Scatter PlotsMaybe it doesn't make sense to plot your data as a line plot, but with markers (a scatter plot). We can do this by setting the `linestyle` to none and specifying a marker type, size, color, etc.
fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1, 1, 1) # Specify no line with circle markers ax.plot(temps, temps_1000, linestyle='None', marker='o', markersize=5) ax.set_xlabel('Temperature (surface)') ax.set_ylabel('Temperature (1000 hPa)') ax.set_title('Temperature Cross Plot') ax.grid(True)
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
You can also use the `scatter` methods, which is slower, but will give you more control, such as being able to color the points individually based upon a third variable.
fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1, 1, 1) # Specify no line with circle markers ax.scatter(temps, temps_1000) ax.set_xlabel('Temperature (surface)') ax.set_ylabel('Temperature (1000 hPa)') ax.set_title('Temperature Cross Plot') ax.grid(True)
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Exercise* Beginning with our code above, add the `c` keyword argument to the `scatter` call and color the points by the difference between the surface and 1000 hPa temperature.* Add a 1:1 line to the plot (slope of 1, intercept of zero). Use a black dashed line.* **BONUS:** Change the color map to be something more ap...
fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1, 1, 1) # YOUR CODE GOES HERE ax.set_xlabel('Temperature (surface)') ax.set_ylabel('Temperature (1000 hPa)') ax.set_title('Temperature Cross Plot') ax.grid(True)
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Solution
# %load solutions/color_scatter.py
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
imshow/contour- `imshow` displays the values in an array as colored pixels, similar to a heat map.- `contour` creates contours around data.- `contourf` creates filled contours around data.First let's create some fake data to work with - let's use a bivariate normal distribution.
x = y = np.arange(-3.0, 3.0, 0.025) X, Y = np.meshgrid(x, y) Z1 = np.exp(-X**2 - Y**2) Z2 = np.exp(-(X - 1)**2 - (Y - 1)**2) Z = (Z1 - Z2) * 2
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Let's start with a simple imshow plot.
fig, ax = plt.subplots() im = ax.imshow(Z, interpolation='bilinear', cmap='RdYlGn', origin='lower', extent=[-3, 3, -3, 3])
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
We can also create contours around the data.
fig, ax = plt.subplots() ax.contour(X, Y, Z) fig, ax = plt.subplots() c = ax.contour(X, Y, Z, levels=np.arange(-2, 2, 0.25)) ax.clabel(c) fig, ax = plt.subplots() c = ax.contourf(X, Y, Z)
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Exercise* Create a figure using imshow and contour that is a heatmap in the colormap of your choice. Overlay black contours with a 0.5 contour interval.
# YOUR CODE GOES HERE
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
Solution
# %load solutions/contourf_contour.py
_____no_output_____
CC-BY-4.0
notebooks/bytopic/data-visualization/01_matplotlib.ipynb
khallock/ncar-python-tutorial
SimplyP v.0.2 (alpha)This notebook demonstrates basic usage of the [SimplyP model](https://github.com/LeahJB/SimplyP). Version 0.1 of the model is applied in> Jackson-Blake LA, Sample JE, Wade AJ, Helliwell RC, Skeffington RA. 2017. *Are our dynamic water quality models too complex? A comparison of a new parsimonious ...
# Path to the Excel file containing parameters for setup and all the model parameters params_fpath = (r'Parameters_v0-2A_Tarland.xlsx') # Plotting options: either 'paper' or 'notebook' fig_display_type = 'notebook' # Set within-notebook plots to be interactive (zoomable and pannable)? # If yes, comment out the line b...
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
2. Read input dataThe function `sp.read_input_data()` reads the Excel setup file and the datasets specified within it (i.e. input meteorology data and any observed data). If necessary, any derived inputs are calculated (e.g. snow accumulation and melt, PET).
# Import the simplyP module import simplyP as sp # Read in model inputs, parameters, and any observations. If necessary calculate derived inputs p_SU, dynamic_options, p, p_LU, p_SC, p_struc, met_df, obs_dict = sp.inputs.read_input_data(params_fpath)
Parameter values successfully read in Input meteorological data read in Snow accumulation and melt module run to estimate snowmelt inputs to the soil
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
3. Run the modelThe input data is passed to the model function `sp.run_simply_p()`. After running, check there are no pink warning messages below the code cell. Warning boxes likely suggest the ODE solver did not run propery, most likely due to issues with the input parameters.Model results will be saved as csvs if yo...
%%time # Run model df_TC_dict, df_R_dict, Kf, ode_output_dict = sp.run_simply_p(met_df, p_struc, p_SU, p_LU, p_SC, p, dynamic_options)
Starting model run for sub-catchment: 1 No reaches directly upstream of this reach Finished! Starting model run for sub-catchment: 2 Reaches directly upstream of this reach: [1.0] Finished! Kf (the soil P sorption coefficient; mm/kg): 0.00011315280464216634 Results saved to csv Wall time: 10.8 s
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
4. Explore model resultsThe following code cells summarise the model output graphically and, if there are observations, calculates goodness-of-fit statistics. If you opted to save graphs in the setup sheet in the parameter file, then these will be saved to file. 4.1. Results from snow moduleThis plot is only really re...
# Plot snowfall/melt sp.plot_snow(met_df, p_SU, fig_display_type)
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
4.2. Results from terrestrial processingResults from the terrestrial compartment. Acronyms in chart axes are as follows: ET: evapotranspiration, Q: water flow, SW: soil water, Agri: agricultural land, SN: semi-natural land, GW: groundwater, vol: water volume.If soil P dynamics are turned on ('Dynamic_EPC0' = 'y' in Se...
# Plot terrestrial sp.plot_terrestrial(p_SU, p_SC, p, df_TC_dict, met_df, fig_display_type)
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
4.3. Instream resultsChoose how many reaches you want to plot results for in the setup sheet of the parameter file. If you provide a file path to observations in the parameter setup sheet, then observations will also be plotted. Some plotting options are available in the parameter setup sheet (e.g. which variables to ...
# Plot in-stream # N.B. if you selected a log scale, you may get a warning if any of the simulated values are close to 0. sp.plot_in_stream(p_SU, obs_dict, df_R_dict, fig_display_type)
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
4.4. Calculate goodness-of-fit statisticsStatistics are only calculated for reaches and parameters which have more than 10 observations.Statistics in the table include: Nobs (number of observations), NSE (Nash-Sutcliffe Efficiency), log NSE (NSE on logged values), Spearman's r (Spearman's correlation coefficient), r$^...
# GoF summary gof_stats = sp.goodness_of_fit_stats(p_SU, df_R_dict, obs_dict) gof_stats
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
5. Optional: Calculate summed inputs to a downstream waterbody (e.g. a lake or an estuary)If you would like to aggregate inputs from a number of sub-catchments into a single time series (e.g. to look at total inputs to a receiving waterbody), then that can be done here. First, make sure you have correctly specified yo...
# Sum discharges and masses from reaches directly upstream, and calculate # in-stream concentrations df_summed = sp.sum_to_waterbody(p_struc, p_SU['n_SC'], df_R_dict, p['f_TDP']) # Plot results and save to csv if parameter 'save_output_csvs' set to 'y' in # parameter 'Setup' sheet if df_summed is not None: # Only plot...
_____no_output_____
MIT
Current_Release/v0-2A/Run_SimplyP_v0-2A_LongExample.ipynb
LeahJB/Simple_P_model
ๆŸๅคฑๅ‡ฝๆ•ฐๆ˜ฏๆจกๅž‹ไผ˜ๅŒ–็š„็›ฎๆ ‡๏ผŒ็”จไบŽ่กก้‡ๅœจๆ— ๆ•ฐ็š„ๅ‚ๆ•ฐๅ–ๅ€ผไธญ๏ผŒๅ“ชไธ€ไธชๆ˜ฏๆœ€็†ๆƒณ็š„ใ€‚ๆŸๅคฑๅ‡ฝๆ•ฐ็š„่ฎก็ฎ—ๅœจ่ฎญ็ปƒ่ฟ‡็จ‹็š„ไปฃ็ ไธญ๏ผŒๆฏไธ€่ฝฎ่ฎญ็ปƒไปฃ็ ็š„่ฟ‡็จ‹ๅ‡็›ธๅŒ๏ผšๅ…ˆๆ นๆฎ่พ“ๅ…ฅๆ•ฐๆฎๆญฃๅ‘่ฎก็ฎ—้ข„ๆต‹่พ“ๅ‡บ๏ผŒๅ†ๆ นๆฎ้ข„ๆต‹ๅ€ผๅ’Œ็œŸๅฎžๅ€ผ่ฎก็ฎ—ๆŸๅคฑ๏ผŒๆœ€ๅŽๆ นๆฎๆŸๅคฑๅๅ‘ไผ ๆ’ญๆขฏๅบฆๅนถๆ›ดๆ–ฐๅ‚ๆ•ฐใ€‚ๅœจไน‹ๅ‰็š„ๆ–นๆกˆไธญ๏ผŒๆˆ‘ไปฌๅค็”จไบ†ๆˆฟไปท้ข„ๆต‹ๆจกๅž‹็š„ๆŸๅคฑๅ‡ฝๆ•ฐ-ๅ‡ๆ–น่ฏฏๅทฎใ€‚่™ฝ็„ถไปŽ้ข„ๆต‹ๆ•ˆๆžœๆฅ็œ‹๏ผŒไฝฟ็”จๅ‡ๆ–น่ฏฏๅทฎไฝฟๅพ—ๆŸๅคฑไธๆ–ญไธ‹้™๏ผŒๆจกๅž‹็š„้ข„ๆต‹ๅ€ผ้€ๆธ้€ผ่ฟ‘็œŸๅฎžๅ€ผ๏ผŒไฝ†ๆจกๅž‹็š„ๆœ€็ปˆๆ•ˆๆžœไธๅคŸ็†ๆƒณใ€‚็ฉถๅ…ถๆ นๆœฌ๏ผŒไธๅŒ็š„ๆœบๅ™จๅญฆไน ไปปๅŠกๆœ‰ๅ„่‡ช้€‚ๅฎœ็š„ๆŸๅคฑๅ‡ฝๆ•ฐ๏ผŒ่ฟ™้‡Œๆˆ‘ไปฌ่ฏฆ็ป†ๅ‰–ๆžไธ€ไธ‹ๅ…ถไธญ็š„็ผ˜็”ฑ๏ผšๆˆฟไปท้ข„ๆต‹ๆ˜ฏๅ›žๅฝ’ไปปๅŠก๏ผŒ่€Œๆ‰‹ๅ†™ๆ•ฐๅญ—่ฏ†ๅˆซๅฑžไบŽๅˆ†็ฑปไปปๅŠกใ€‚ๅˆ†็ฑปไปปๅŠกไธญไฝฟ็”จๅ‡ๆ–น่ฏฏๅทฎไฝœไธบๆŸๅคฑๅญ˜ๅœจ้€ป่พ‘ๅ’Œๆ•ˆๆžœไธŠ็š„็ผบๆฌ ๏ผŒๆฏ”ๅฆ‚ๆˆฟไปทๅฏไปฅๆ˜ฏ0-9ไน‹้—ด็š„ไปปไฝ•ๆตฎ็‚นๆ•ฐ๏ผŒๆ‰‹ๅ†™ๆ•ฐๅญ—่ฏ†ๅˆซ็š„ๆ•ฐๅญ—ๅชๅฏ...
import matplotlib.pyplot as plt import numpy as np x = np.arange(0.01,1,0.01) y = np.log(x) plt.title("y=log(x)") plt.xlabel("x") plt.ylabel("y") plt.plot(x,y) plt.show() plt.figure()
_____no_output_____
Apache-2.0
chapter-2-step_to_deep_learning/2-5-mnist_recognition-Loss_Function.ipynb
lcsss1995/tutorials
ๅฆ‚่‡ช็„ถๅฏนๆ•ฐ็š„ๅ›พๅฝขๆ‰€็คบ๏ผŒๅฝ“x็ญ‰ไบŽ1ๆ—ถ๏ผŒyไธบ0๏ผ›้š็€xๅ‘0้ ่ฟ‘๏ผŒy้€ๆธๅ˜ๅฐใ€‚ๅ› ๆญค๏ผŒๆญฃ็กฎ่งฃๆ ‡็ญพๅฏนๅบ”็š„่พ“ๅ‡บ่ถŠๅคง๏ผŒไบคๅ‰็†ต็š„ๅ€ผ่ถŠๆŽฅ่ฟ‘0๏ผ›ๅฝ“่พ“ๅ‡บไธบ1ๆ—ถ๏ผŒไบคๅ‰็†ต่ฏฏๅทฎไธบ0ใ€‚ๅไน‹๏ผŒๅฆ‚ๆžœๆญฃ็กฎ่งฃๆ ‡็ญพๅฏนๅบ”็š„่พ“ๅ‡บ่ถŠๅฐ๏ผŒๅˆ™ไบคๅ‰็†ต็š„ๅ€ผ่ถŠๅคงใ€‚ๅœจๆ‰‹ๅ†™ๆ•ฐๅญ—่ฏ†ๅˆซไปปๅŠกไธญ๏ผŒๅฆ‚ๆžœๅœจ็Žฐๆœ‰ไปฃ็ ไธญๅฐ†ๆจกๅž‹็š„ๆŸๅคฑๅ‡ฝๆ•ฐๆ›ฟๆขๆˆไบคๅ‰็†ต๏ผˆcross_entropy๏ผ‰๏ผŒไป…ๆ”นๅŠจไธ‰่กŒไปฃ็ ๅณๅฏ๏ผšๅœจ่ฏปๅ–ๆ•ฐๆฎ้ƒจๅˆ†๏ผŒๅฐ†ๆ ‡็ญพ็š„็ฑปๅž‹่ฎพ็ฝฎๆˆint๏ผŒไฝ“็Žฐๅฎƒๆ˜ฏไธ€ไธชๆ ‡็ญพ่€Œไธๆ˜ฏๅฎžๆ•ฐๅ€ผ๏ผˆ้ฃžๆกจๆก†ๆžถ้ป˜่ฎคๅฐ†ๆ ‡็ญพๅค„็†ๆˆint64๏ผ‰๏ผ›ๅœจ็ฝ‘็ปœๅฎšไน‰้ƒจๅˆ†๏ผŒๅฐ†่พ“ๅ‡บๅฑ‚ๆ”นๆˆโ€œ่พ“ๅ‡บๅไธชๆ ‡็ญพ็š„ๆฆ‚็އโ€็š„ๆจกๅผ๏ผ›ไปฅๅŠๅœจ่ฎญ็ปƒ่ฟ‡็จ‹้ƒจๅˆ†๏ผŒๅฐ†ๆŸๅคฑๅ‡ฝๆ•ฐไปŽๅ‡ๆ–น่ฏฏๅทฎๆขๆˆไบคๅ‰็†ตใ€‚- ๆ•ฐๆฎๅค„็†้ƒจๅˆ†๏ผšlabel = np.reshape(labels[i], [1]).ast...
#ไฟฎๆ”นๆ ‡็ญพๆ•ฐๆฎ็š„ๆ ผๅผ๏ผŒไปŽfloat32ๅˆฐint64 import os import random import paddle import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC import numpy as np from PIL import Image import gzip import json # ๅฎšไน‰ๆ•ฐๆฎ้›†่ฏปๅ–ๅ™จ def load_data(mode='train'): # ๆ•ฐๆฎๆ–‡ไปถ datafile = './work/mnist.json.gz' print('load...
_____no_output_____
Apache-2.0
chapter-2-step_to_deep_learning/2-5-mnist_recognition-Loss_Function.ipynb
lcsss1995/tutorials
ๅฆ‚ไธ‹ๆ˜ฏๅœจ็ฝ‘็ปœๅฎšไน‰้ƒจๅˆ†๏ผŒไฟฎๆ”น่พ“ๅ‡บๅฑ‚็ป“ๆž„ใ€‚- ไปŽ๏ผšself.fc = FC(name_scope, size=1, act=None)- ๅˆฐ๏ผšself.fc = FC(name_scope, size=10, act='softmax')
# ๅฎšไน‰ๆจกๅž‹็ป“ๆž„ class MNIST(fluid.dygraph.Layer): def __init__(self, name_scope): super(MNIST, self).__init__(name_scope) name_scope = self.full_name() # ๅฎšไน‰ไธ€ไธชๅท็งฏๅฑ‚๏ผŒไฝฟ็”จreluๆฟ€ๆดปๅ‡ฝๆ•ฐ self.conv1 = Conv2D(name_scope, num_filters=20, filter_size=5, stride=1, padding=2, act='relu') # ๅฎšไน‰ไธ€ไธชๆฑ ๅŒ–...
_____no_output_____
Apache-2.0
chapter-2-step_to_deep_learning/2-5-mnist_recognition-Loss_Function.ipynb
lcsss1995/tutorials
ๅฆ‚ไธ‹ไปฃ็ ไป…ไฟฎๆ”น่ฎก็ฎ—ๆŸๅคฑ็š„ๅ‡ฝๆ•ฐ๏ผŒไปŽๅ‡ๆ–น่ฏฏๅทฎ๏ผˆๅธธ็”จไบŽๅ›žๅฝ’้—ฎ้ข˜๏ผ‰ๅˆฐไบคๅ‰็†ต่ฏฏๅทฎ๏ผˆๅธธ็”จไบŽๅˆ†็ฑป้—ฎ้ข˜๏ผ‰ใ€‚- ไปŽ๏ผšloss = fluid.layers.square_error_cost(predict, label)- ๅˆฐ๏ผšloss = fluid.layers.cross_entropy(predict, label)
#ไป…ไฟฎๆ”น่ฎก็ฎ—ๆŸๅคฑ็š„ๅ‡ฝๆ•ฐ๏ผŒไปŽๅ‡ๆ–น่ฏฏๅทฎ๏ผˆๅธธ็”จไบŽๅ›žๅฝ’้—ฎ้ข˜๏ผ‰ๅˆฐไบคๅ‰็†ต่ฏฏๅทฎ๏ผˆๅธธ็”จไบŽๅˆ†็ฑป้—ฎ้ข˜๏ผ‰ with fluid.dygraph.guard(): model = MNIST("mnist") model.train() #่ฐƒ็”จๅŠ ่ฝฝๆ•ฐๆฎ็š„ๅ‡ฝๆ•ฐ train_loader = load_data('train') optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01) EPOCH_NUM = 5 for epoch_id in range(EPOCH_NUM): for batch_id, data in e...
loading mnist dataset from ./work/mnist.json.gz ...... epoch: 0, batch: 0, loss is: [2.4464767] epoch: 0, batch: 200, loss is: [0.35378253] epoch: 0, batch: 400, loss is: [0.19394642] epoch: 1, batch: 0, loss is: [0.2654959] epoch: 1, batch: 200, loss is: [0.26667175] epoch: 1, batch: 400, loss is: [0.22529164] epoch: ...
Apache-2.0
chapter-2-step_to_deep_learning/2-5-mnist_recognition-Loss_Function.ipynb
lcsss1995/tutorials
่™ฝ็„ถไธŠ่ฟฐ่ฎญ็ปƒ่ฟ‡็จ‹็š„ๆŸๅคฑๆ˜Žๆ˜พๆฏ”ไฝฟ็”จๅ‡ๆ–น่ฏฏๅทฎ็ฎ—ๆณ•่ฆๅฐ๏ผŒไฝ†ๅ› ไธบๆŸๅคฑๅ‡ฝๆ•ฐ้‡็บฒ็š„ๅ˜ๅŒ–๏ผŒๆˆ‘ไปฌๆ— ๆณ•ไปŽๆฏ”่พƒไธคไธชไธๅŒ็š„Lossๅพ—ๅ‡บ่ฐๆ›ดๅŠ ไผ˜็ง€ใ€‚ๆ€Žไนˆ่งฃๅ†ณ่ฟ™ไธช้—ฎ้ข˜ๅ‘ข๏ผŸๆˆ‘ไปฌๅฏไปฅๅ›žๅฝ’ๅˆฐ้—ฎ้ข˜็š„็›ดๆŽฅ่กก้‡๏ผŒ่ฐ็š„ๅˆ†็ฑปๅ‡†็กฎ็އ้ซ˜ๆฅๅˆคๆ–ญใ€‚ๅœจๅŽ้ขไป‹็ปๅฎŒ่ฎก็ฎ—ๅ‡†็กฎ็އๅ’Œไฝœๅ›พ็š„ๅ†…ๅฎนๅŽ๏ผŒ่ฏป่€…ๅฏไปฅ่‡ช่กŒๆต‹่ฏ•้‡‡็”จไธๅŒๆŸๅคฑๅ‡ฝๆ•ฐไธ‹๏ผŒๆจกๅž‹ๅ‡†็กฎ็އ็š„้ซ˜ไฝŽใ€‚ๅ› ไธบๆˆ‘ไปฌไฟฎๆ”นไบ†ๆจกๅž‹็š„่พ“ๅ‡บๆ ผๅผ๏ผŒๆ‰€ไปฅไฝฟ็”จๆจกๅž‹ๅš้ข„ๆต‹ๆ—ถ็š„ไปฃ็ ไนŸ้œ€่ฆๅš็›ธๅบ”็š„่ฐƒๆ•ดใ€‚ไปŽๆจกๅž‹่พ“ๅ‡บ10ไธชๆ ‡็ญพ็š„ๆฆ‚็އไธญ้€‰ๆ‹ฉๆœ€ๅคง็š„๏ผŒๅฐ†ๅ…ถๆ ‡็ญพ็ผ–ๅท่พ“ๅ‡บใ€‚
# ่ฏปๅ–ไธ€ๅผ ๆœฌๅœฐ็š„ๆ ทไพ‹ๅ›พ็‰‡๏ผŒ่ฝฌๅ˜ๆˆๆจกๅž‹่พ“ๅ…ฅ็š„ๆ ผๅผ def load_image(img_path): # ไปŽimg_pathไธญ่ฏปๅ–ๅ›พๅƒ๏ผŒๅนถ่ฝฌไธบ็ฐๅบฆๅ›พ im = Image.open(img_path).convert('L') im.show() im = im.resize((28, 28), Image.ANTIALIAS) im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32) # ๅ›พๅƒๅฝ’ไธ€ๅŒ– im = 1.0 - im / 255. return im # ๅฎšไน‰้ข„ๆต‹่ฟ‡็จ‹ with fluid.dyg...
ๆœฌๆฌก้ข„ๆต‹็š„ๆ•ฐๅญ—ๆ˜ฏ: 0
Apache-2.0
chapter-2-step_to_deep_learning/2-5-mnist_recognition-Loss_Function.ipynb
lcsss1995/tutorials
Introduction In this notebook we'll have a very quick look at an image segmentation problem: locating the lateral ventricles in 2D slices of brain MRI images > **Note:** To run this notebook locally you have to have the fastai library installed. See https://docs.fast.ai for instructions if you want to install on your...
# This is a quick check of whether the notebook is currently running on Google Colaboratory, as that makes some difference for the code below. # We'll do this in every notebook of the course. if 'google.colab' in str(get_ipython()): print('The notebook is running on Colab. colab=True.') colab=True else: pri...
_____no_output_____
MIT
Lab3-DL/Extra-ELMED219-2022-Lab3-DL-Example-3-image_segmentation.ipynb
MMIV-ML/ELMED219-2022
Load data Based on the freely availabe brain MRI IXI dataset (https://brain-development.org/ixi-dataset/), we've created a segmentation task by using the (slow) software package FreeSurfer to segment the lateral ventricles.Our task is to take a T1w MRI image as input, and output a segmentation mask for the lateral ven...
DATA = Path('/data-ssd/data/IXI/FS_outputs/2D-images') DATA.ls() def get_brains_and_masks(ax): brain = sorted([str(b) for b in DATA.glob(f'*{ax}*') if 'brain' in str(b)]) mask = sorted([str(b) for b in (DATA/'ventricles').glob(f'*{ax}*') if 'mask' in str(b)]) df = pd.DataFrame(zip(brain, mask), columns...
_____no_output_____
MIT
Lab3-DL/Extra-ELMED219-2022-Lab3-DL-Example-3-image_segmentation.ipynb
MMIV-ML/ELMED219-2022
Create data loaders
size=224 bs=8 db = DataBlock(blocks=(ImageBlock(PILImageBW), MaskBlock(codes)), splitter=RandomSplitter(), get_x=ColReader('img'), get_y=ColReader('label'), batch_tfms=[*aug_transforms(size=size), Normalize.f...
_____no_output_____
MIT
Lab3-DL/Extra-ELMED219-2022-Lab3-DL-Example-3-image_segmentation.ipynb
MMIV-ML/ELMED219-2022
Construct and train a model
learn = unet_learner(dls, resnet34, loss_func=BCEWithLogitsLossFlat(), n_out=1, self_attention=True, act_cls=Mish, opt_func=ranger) lr = learn.lr_find() learn.fit_one_cycle(10, lr_max=lr)
_____no_output_____
MIT
Lab3-DL/Extra-ELMED219-2022-Lab3-DL-Example-3-image_segmentation.ipynb
MMIV-ML/ELMED219-2022
Evaluate results
learn.show_results(max_n=2, figsize=(12,12)) learn.show_results(max_n=2, figsize=(12,12)) learn.show_results(max_n=2, figsize=(12,12))
_____no_output_____
MIT
Lab3-DL/Extra-ELMED219-2022-Lab3-DL-Example-3-image_segmentation.ipynb
MMIV-ML/ELMED219-2022
Statistical modelling of coffee desk data Import and read data from csv file
!pip install statsmodels import pandas as pd import numpy as np from sklearn.model_selection import train_test_split import statsmodels.formula.api as smf coffee_df = pd.read_csv('data\coffee_desk_dataset_ead.csv') coffee_df.drop(columns='idx', axis=1, inplace=True) #dropping index not to be treated as vector dimension...
_____no_output_____
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
Train-val-test split
X_df = coffee_df.drop('price_per_kg', axis=1) #defining predictors y_df = coffee_df['price_per_kg'] #defining target variable X_train, X_test, y_train, y_test = train_test_split(X_df, y_df, test_size=0.1, random_state=True) #using random state to ensure I always have random division with the same random numbers train_d...
_____no_output_____
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
Initial stats modelling Forward stepwise selection1. Begins with a model that contains no variables (called the Null Model)2. Then starts adding the most significant variables one after the other3. Until a pre-specified stopping rule is reached or until all the variables under consideration are included in the model S...
model_0 = smf.ols(formula="price_per_kg ~ 1", data=train_df).fit() # the initial model with no variables print(model_0.summary()) model_0.ssr # sum in squares for the original model # process - not such a good variable, high granularity and high pvalues process_model = smf.ols(formula="price_per_kg ~ C(process)", data=...
OLS Regression Results ============================================================================== Dep. Variable: price_per_kg R-squared: 0.426 Model: OLS Adj. R-squared: 0.422 Meth...
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
**Observations:** We see that adding region of origin to roast decreases the ssr of the model
# Adding next best variable to the model model_3 = smf.ols(formula="price_per_kg ~ C(brewing_method) + C(roast) + C(pure_arabica)", data=train_df).fit() print(model_3.summary()) model_3.ssr # again the ssr decreased # Adding next best variable to the model model_4 = smf.ols(formula="price_per_kg ~ C(brewing_method) + C...
OLS Regression Results ============================================================================== Dep. Variable: price_per_kg R-squared: 0.461 Model: OLS Adj. R-squared: 0.454 Meth...
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
Backward stepwise selection
# Modelling with all variables # columns = ['process', 'brewing_method', 'roast', 'grind', 'origin', 'percentage_of_arabica', 'pure_arabica', 'roast_brew', 'Washed', 'Natural', 'Fermented_traditional', 'Fermented_closedtank', 'process_general', 'region_of_origin'] # for idx, col in enumerate(colu...
_____no_output_____
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
Observations:* variables chosen in the forward and backward stepwise selection differ by 1 variable: - Forward: brewing_method, roast, pure_arabica, grind - Backward: brewing_method, roast, pure_arabica, grind, Fermented_closedtank* the ssr for each model is different with backward being smaller: - Forward: ...
model_inter = smf.ols(formula="price_per_kg ~ C(brewing_method) * C(pure_arabica) + C(grind) + C(roast) + C(Fermented_closedtank)", data=train_df).fit() print(model_inter.summary())
OLS Regression Results ============================================================================== Dep. Variable: price_per_kg R-squared: 0.460 Model: OLS Adj. R-squared: 0.454 Meth...
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
The R^2 increased but the BIC and AIC values decreased, and the error message: "smallest eigenvalue is 9.97e-28. This might indicate that there are strong multicollinearity problems or that the design matrix is singular." indicates this is not a better solution to the chosen model from backward stepwise selection.
model_inter_2 = smf.ols(formula="price_per_kg ~ C(brewing_method) + C(grind) + C(roast) + C(Fermented_closedtank) * C(pure_arabica)", data=train_df).fit() print(model_inter_2.summary())
OLS Regression Results ============================================================================== Dep. Variable: price_per_kg R-squared: 0.447 Model: OLS Adj. R-squared: 0.441 Meth...
MIT
coffee_desk_data_statistical_modelling.ipynb
dominikacecylia/Coffee-regression-project
1์žฅ Python ์ž…๋ฌธPyTorchใ‚’ไฝฟใฃใŸใƒ‡ใ‚ฃใƒผใƒ—ใƒฉใƒผใƒ‹ใƒณใ‚ฐใƒปใƒ—ใƒญใ‚ฐใƒฉใƒŸใƒณใ‚ฐใง้‡่ฆใซใชใ‚‹ๆฆ‚ๅฟตใ ใ‘ใ‚’ๆŠœใๅ‡บใ—ใฆ่ชฌๆ˜Žใ™ใ‚‹
# ๅฟ…่ฆใƒฉใ‚คใƒ–ใƒฉใƒชใฎๅฐŽๅ…ฅ !pip install japanize_matplotlib | tail -n 1 pip install matplotlib pip install japanize_matplotlib # ๅฟ…่ฆใƒฉใ‚คใƒ–ใƒฉใƒชใฎใ‚คใƒณใƒใƒผใƒˆ %matplotlib inline import numpy as np import matplotlib.pyplot as plt import japanize_matplotlib # warning่กจ็คบoff import warnings warnings.simplefilter('ignore') # ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใƒ•ใ‚ฉใƒณใƒˆใ‚ตใ‚คใ‚บๅค‰ๆ›ด plt.rcParam...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
1.2 ์ปจํ…Œ์ด๋„ˆ ํƒ€์ž… ๋ณ€์ˆ˜์— ์ฃผ์˜Pythonใงใฏใ€ๅค‰ๆ•ฐใฏๅ˜ใซๅฎŸ้š›ใฎใƒ‡ใƒผใ‚ฟๆง‹้€ ใธใฎใƒใ‚คใƒณใ‚ฟใซ้ŽใŽใชใ„ใ€‚ Numpy้…ๅˆ—ใชใฉใงใฏใ€ใ“ใฎใ“ใจใ‚’ๆ„่ญ˜ใ—ใชใ„ใจๆ€ใ‚ใฌ็ตๆžœใ‚’ๆ‹›ใๅ ดๅˆใŒใ‚ใ‚‹ใ€‚ NumPyๅค‰ๆ•ฐ้–“
# ๋„˜ํŒŒ์ด ๋ฐฐ์—ด x๋ฅผ ์ •์˜ x = np.array([5, 7, 9]) # ๋ณ€์ˆ˜ y์— x๋ฅผ ๋Œ€์ž… y = x # ๊ฒฐ๊ณผ ํ™•์ธ print(x) print(y) # x์˜ ํŠน์ • ์š”์†Œ๋ฅผ ๋ณ€๊ฒฝ x[1] = -1 # y๋„ ๋”ฐ๋ผ์„œ ๊ฐ’์ด ๋ฐ”๋€œ print(x) print(y) # y๋„ ๋™์‹œ์— ๋ณ€ํ•˜๋ฉด ์•ˆ ๋˜๋Š” ๊ฒฝ์šฐ๋Š”, ๋Œ€์ž… ์‹œ copy ํ•จ์ˆ˜๋ฅผ ์ด์šฉ x = np.array([5, 7, 9]) y = x.copy() # x์˜ ํŠน์ • ์š”์†Œ ๊ฐ’์ด ๋ณ€ํ•ด๋„, y์—๋Š” ์˜ํ–ฅ์ด ์—†์Œ x[1] = -1 print(x) print(y)
[ 5 -1 9] [5 7 9]
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
ใƒ†ใƒณใ‚ฝใƒซใจNumPy้–“
import torch # x1: shape=[5]๊ฐ€ ๋˜๋Š” ๋ชจ๋“  ๊ฐ’์ด 1์ธ ํ…์„œ x1 = torch.ones(5) # ๊ฒฐ๊ณผ ํ™•์ธ print(x1) # x2: x1๋กœ๋ถ€ํ„ฐ ์ƒ์„ฑํ•œ ๋„˜ํŒŒ์ด ๋ฐฐ์—ด x2 = x1.data.numpy() # ๊ฒฐ๊ณผ ํ™•์ธ print(x2) # x1์˜ ๊ฐ’์„ ๋ณ€๊ฒฝ x1[1] = -1 # x2์˜ ๊ฐ’๋„ ๊ฐ™์ด ๋ณ€ํ•จ print(x1) print(x2) # ์•ˆ์ „ํ•œ ๋ฐฉ๋ฒ• # x1: ํ…์„œ x1 = torch.ones(5) # x2: x1๋ฅผ copyํ•œ ๋„˜ํŒŒ์ด x2 = x1.data.numpy().copy() x1[1] = -1 # ๊ฒฐ๊ณผ ํ™•์ธ print...
tensor([ 1., -1., 1., 1., 1.]) [1. 1. 1. 1. 1.]
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
1.3 โ€˜ํ•ฉ์„ฑ ํ•จ์ˆ˜โ€™๋ฅผ ํŒŒ์ด์ฌ์œผ๋กœ ๊ตฌํ˜„ํ•˜๊ธฐๆ•ฐๅญฆไธŠใฎๅˆๆˆ้–ขๆ•ฐใŒPythonใงใฉใ†ๅฎŸ่ฃ…ใ•ใ‚Œใ‚‹ใ‹็ขบ่ชใ™ใ‚‹ $f(x) = 2x^2 + 2$ใ‚’้–ขๆ•ฐใจใ—ใฆๅฎš็พฉใ™ใ‚‹
def f(x): return (2 * x**2 + 2) # ๋„˜ํŒŒ์ด ๋ฐฐ์—ด๋กœ x๋ฅผ ์ •์˜ x = np.arange(-2, 2.1, 0.25) print(x) # f(x)์˜ ๊ฒฐ๊ณผ๋ฅผ y์— ๋Œ€์ž… y = f(x) print(y) # ํ•จ์ˆ˜๋ฅผ ๊ทธ๋ž˜ํ”„๋กœ ๊ทธ๋ฆฌ๊ธฐ fig1 = plt.gcf() plt.plot(x, y) plt.show() fig1.savefig('ex01-09.tif', format='tif') # ์„ธ ๊ฐ€์ง€ ๊ธฐ๋ณธ ํ•จ์ˆ˜์˜ ์ •์˜ def f1(x): return(x**2) def f2(x): return(x*2) def f3(x): retu...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
1.5 ์ปค์Šคํ…€ ํด๋ž˜์Šค ์ •์˜ํ•˜๊ธฐPythonใงใฏใ€้–ขๆ•ฐใ‚‚ใพใŸใ€ๅค‰ๆ•ฐๅใฏๅ˜ใชใ‚‹ใƒใ‚คใƒณใ‚ฟใงใ€ๅฎŸไฝ“ใฏๅˆฅใซใ‚ใ‚‹ใ€‚ ใ“ใฎใ“ใจใ‚’ๅˆฉ็”จใ™ใ‚‹ใจใ€ใ€Œ้–ขๆ•ฐใ‚’ๅผ•ๆ•ฐใจใ™ใ‚‹้–ขๆ•ฐใ€ใ‚’ไฝœใ‚‹ใ“ใจใŒๅฏ่ƒฝใซใชใ‚‹ใ€‚ ใ“ใ“ใง้–ขๆ•ฐใ‚’ๆ•ฐๅ€คๅพฎๅˆ†ใ™ใ‚‹้–ขๆ•ฐ``diff``ใ‚’ๅฎš็พฉใ™ใ‚‹ใ€‚ ๆ•ฐๅ€คๅพฎๅˆ†ใฎ่จˆ็ฎ—ใซใฏใ€ๆ™ฎ้€šใฎๅพฎๅˆ†ใฎๅฎš็พฉๅผใ‚ˆใ‚Šใ„ใ„่ฟ‘ไผผๅผใงใ‚ใ‚‹ $f'(x) = \dfrac{f(x+h)-f(x-h)}{2h}$ใ‚’ๅˆฉ็”จใ™ใ‚‹ใ€‚
# ํ•จ์ˆ˜๋ฅผ ๋ฏธ๋ถ„ํ•˜๋Š” ํ•จ์ˆ˜ fdiff์˜ ์ •์˜ def fdiff(f): # ํ•จ์ˆ˜ f๋ฅผ ์ธ์ˆ˜๋กœ ๋ฏธ๋ถ„ํ•œ ๊ฒฐ๊ณผ ํ•จ์ˆ˜๋ฅผ diff ๋กœ ์ •์˜ def diff(x): h = 1e-6 return (f(x+h) - f(x-h)) / (2*h) # fdiff์˜ ๋ฐ˜ํ™˜์€ ๋ฏธ๋ถ„ํ•œ ๊ฒฐ๊ณผ ํ•จ์ˆ˜ diff return diff
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
2ๆฌก้–ขๆ•ฐfใซๅฏพใ—ใฆใ€ไปŠไฝœใฃใŸ้–ขๆ•ฐfdiffใ‚’้ฉ็”จใ—ใฆใ€ๆ•ฐๅ€คๅพฎๅˆ†่จˆ็ฎ—ใ‚’ใ—ใฆใฟใ‚‹ใ€‚
# 2์ฐจํ•จ์ˆ˜์˜ ์ˆ˜์น˜๋ฏธ๋ถ„ # f์˜ ๋ฏธ๋ถ„๊ฒฐ๊ณผ ํ•จ์ˆ˜ diff๋ฅผ ์ทจ๋“ diff = fdiff(f) # ๋ฏธ๋ถ„๊ฒฐ๊ณผ๋ฅผ ๊ณ„์‚ฐํ•˜๊ณ  y_dash์— ๋Œ€์ž… y_dash = diff(x) # ๊ฒฐ๊ณผ ํ™•์ธ print(y_dash) # ๊ฒฐ๊ณผ ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ fig1 = plt.gcf() plt.plot(x, y, label=r'y = f(x)', c='b') plt.plot(x, y_dash, label=r"y = f '(x)", c='k') plt.legend() plt.show() fig1.savefig('ex01-13.tif', format='tif')
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
ใ‚ทใ‚ฐใƒขใ‚คใƒ‰้–ขๆ•ฐ $g(x) = \dfrac{1}{1 + \exp(-x)}$ใซๅฏพใ—ใฆๅŒใ˜ใ“ใจใ‚’ใ‚„ใฃใฆใฟใ‚‹ใ€‚
# ์‹œ๊ทธ๋ชจ์ด๋“œ ํ•จ์ˆ˜์˜ ์ •์˜ def g(x): return 1 / (1 + np.exp(-x)) # ์‹œ๊ทธ๋ชจ์ด๋“œ ํ•จ์ˆ˜ ๊ณ„์‚ฐ y = g(x) print(y) # ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ fig1 = plt.gcf() plt.plot(x, y) plt.show() fig1.savefig('ex01-16.tif', format='tif', dpi=300) # ์‹œ๊ทธ๋ชจ์ด๋“œ ํ•จ์ˆ˜์˜ ์ˆ˜์น˜๋ฏธ๋ถ„ # g๋ฅผ ๋ฏธ๋ถ„ํ•œ ํ•จ์ˆ˜ ์ทจ๋“ diff = fdiff(g) # diff๋ฅผ ์‚ฌ์šฉํ•ด ๋ฏธ๋ถ„ ๊ฒฐ๊ณผ y_dash๋ฅผ ๊ณ„์‚ฐ y_dash = diff(x) # ๊ฒฐ๊ณผ ํ™•์ธ print(y_dash) # ็ตๆžœใฎใ‚ฐใƒฉใƒ•...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
ใ‚ทใ‚ฐใƒขใ‚คใƒ‰้–ขๆ•ฐใฎๅพฎๅˆ†็ตๆžœใฏ$y(1-y)$ใจใชใ‚‹ใ“ใจใŒใ‚ใ‹ใฃใฆใ„ใ‚‹ใ€‚ ใ“ใ‚ŒใฏyใฎไบŒๆฌก้–ขๆ•ฐใงใ€$y=\dfrac{1}{2}$ใฎๆ™‚ใซๆœ€ๅคงๅ€ค$\dfrac{1}{4}$ใ‚’ๅ–ใ‚‹ใ€‚ ไธŠใฎใ‚ฐใƒฉใƒ•ใฏใใฎ็ตๆžœใจไธ€่‡ดใ—ใฆใ„ใฆใ€ๆ•ฐๅ€คๅพฎๅˆ†ใŒๆญฃใ—ใใงใใฆใ„ใ‚‹ใ“ใจใŒใ‚ใ‹ใ‚‹ใ€‚ 1.5 ์ปค์Šคํ…€ ํด๋ž˜์Šค ์ •์˜ํ•˜๊ธฐ
# ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ์„ ์œ„ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ import matplotlib.pyplot as plt # ์›์„ ๊ทธ๋ฆฌ๊ธฐ ์œ„ํ•ด ํ•„์š”ํ•œ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ import matplotlib.patches as patches # Point ํด๋ž˜์Šค ์ •์˜ class Point: # ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ ์‹œ์— ๋‘๊ฐœ๋‘ ๊ฐœ์˜ ์ธ์ˆ˜ x์™€ y๋ฅผ ๊ฐ€์ง def __init__(self, x, y): # ์ธ์Šคํ„ด์Šค ์†์„ฑ x์— ์ฒซ ๋ฒˆ์งธ ์ธ์ˆ˜๋ฅผ ํ• ๋‹น self.x = x # ์ธ์Šคํ„ด์Šค ์†์„ฑ y์— ๋‘ ๋ฒˆ์งธ ์ธ์ˆ˜๋ฅผ ํ• ๋‹น self.y = y # draw...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
ใ“ใฎๆฎต้šŽใงdraw้–ขๆ•ฐใฏ่ฆชใงๅฎš็พฉใ—ใŸ้–ขๆ•ฐใŒๅ‘ผใฐใ‚Œใฆใ„ใ‚‹ใ“ใจใŒใ‚ใ‹ใ‚‹
# Point์˜ ์ž์‹ ํด๋ž˜์Šค Circle์˜ ์ •์˜ 2 class Circle2(Point): # Circle์€ ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ ์‹œ์— ์ธ์ˆ˜ x, y, r์„ ๊ฐ€์ง def __init__(self, x, y, r): # x์™€ y๋Š” ๋ถ€๋ชจ ํด๋ž˜์Šค์˜ ์†์„ฑ์œผ๋กœ ์„ค์ • super().__init__(x, y) # r์€ Circle์˜ ์†์„ฑ์œผ๋กœ ์„ค์ • self.r = r # draw ํ•จ์ˆ˜๋Š” ์ž์‹ ํด๋ž˜์Šค๋งŒ ๋”ฐ๋กœ ์›์„ ๊ทธ๋ฆผ def draw(self): # ์› ๊ทธ๋ฆฌ๊ธฐ c =...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
่ฆชใฎdarw้–ขๆ•ฐใฎไปฃใ‚ใ‚Šใซๅญใฎdraw้–ขๆ•ฐใŒๅ‘ผใฐใ‚ŒใŸใ“ใจใŒใ‚ใ‹ใ‚‹ ใงใฏใ€ใ“ใฎ้–ขๆ•ฐใจ่ฆชใฎ้–ขๆ•ฐใ‚’ไธกๆ–นๅ‘ผใณใŸใ„ใจใใฏใฉใ†ใ—ใŸใ‚‰ใ„ใ„ใ‹
# Point์˜ ์ž์‹ ํด๋ž˜์Šค Circle์˜ ์ •์˜ 3 class Circle3(Point): # Circle์€ ์ธ์Šคํ„ด์Šค ์ƒ์„ฑ ์‹œ์— ์ธ์ˆ˜ x, y, r์„ ๊ฐ€์ง def __init__(self, x, y, r): # x์™€ y๋Š” ๋ถ€๋ชจ ํด๋ž˜์Šค์˜ ์†์„ฑ์œผ๋กœ ์„ค์ • super().__init__(x, y) # r์€ Circle์˜ ์†์„ฑ์œผ๋กœ ์„ค์ • self.r = r # Circle์˜ draw ํ•จ์ˆ˜๋Š” ๋ถ€๋ชจ์˜ ํ•จ์ˆ˜๋ฅผ ํ˜ธ์ถœ ํ•œ ๋‹ค์Œ, ์› ๊ทธ๋ฆฌ๊ธฐ๋ฅผ ๋…์ž์ ์œผ๋กœ ์ˆ˜ํ–‰ํ•จ def draw(self): ...
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
็„กไบ‹ใ€ไธกๆ–นใ‚’ๅ‘ผใณๅ‡บใ™ใ“ใจใŒใงใใŸ 1.6 ์ธ์Šคํ„ด์Šค๋ฅผ ํ•จ์ˆ˜๋กœ ์‚ฌ์šฉํ•˜๋Š” ๋ฐฉ๋ฒ•
# ํ•จ์ˆ˜ ํด๋ž˜์Šค H์˜ ์ •์˜ class H: def __call__(self, x): return 2*x**2 + 2 # h๊ฐ€ ํ•จ์ˆ˜๋กœ ๋™์ž‘ํ•˜๋Š”์ง€ ํ™•์ธ # ๋„˜ํŒŒ์ด ๋ฐฐ์—ด x๋ฅผ ์ •์˜ x = np.arange(-2, 2.1, 0.25) print(x) # H ํด๋ž˜์Šค์˜ ์ธ์Šคํ„ด์Šค๋กœ h๋ฅผ ์ƒ์„ฑ h = H() # ํ•จ์ˆ˜ h ํ˜ธ์ถœ y = h(x) print(y) # ๊ทธ๋ž˜ํ”„ ์ถœ๋ ฅ fig1 = plt.gcf() plt.plot(x, y) plt.show() fig1.savefig('ex01-32.tif', format='tif', dpi=300)
_____no_output_____
Apache-2.0
notebooks/ch01_python.ipynb
ychoi-kr/pytorch_book_info
005.005 EDA - Shots on Target
import pathlib # import os import sys # import pickle # import inspect import pandas as pd import scipy.stats as stats from statsmodels.graphics.mosaicplot import mosaic import numpy as np import matplotlib.pyplot as plt import matplotlib.ticker as ticker import seaborn as sns %matplotlib inline # Load the "autorelo...
_____no_output_____
MIT
notebooks/005.005 EDA - Shots on Target.ipynb
DMacGillivray/soccer-predictions
NOTE - FUTURE DIRECTIONIf we assume that historical sots is a reasonably good predictor of future sotsAnd sots is related to goals scored, but it is related very noisily by the distribution shown aboveIf we approximate this as a skewed normal, or an exponentially modified gaussian, then we can predict goals from sotOR,...
fig, ax = plt.subplots(figsize=(10,7)) sns.set(style='ticks', context='talk') #iris = sns.load_dataset('iris') sns.swarmplot(df['h_ftGoals'], df['h_shots'])#'species', 'sepal_length', data=iris) ax.set_xlabel('Home Team Goals') ax.set_ylabel('Home Team Shots') ax.set_title('Home Team Shots vs Goals') sns.despine() ...
_____no_output_____
MIT
notebooks/005.005 EDA - Shots on Target.ipynb
DMacGillivray/soccer-predictions
Retrieving data
# Reading csv data file data = pd.read_csv('crypto_data.csv', index_col = 0) data
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Data Processing
# Keep only cryptocurrencies that are on trading data = data.loc[data['IsTrading'] == True] data.shape # Remove the "IsTrading" column data = data.drop(columns='IsTrading') data.shape # Removing rows with any null value data = data.dropna() data # Removing rows with cryptocurrencies without coins mined data = data.loc[...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Dimensionality Reduction
# Using PCA to reduce dimension and preserving 90% of the data # PCA model pca = PCA(n_components=.90) data_pca = pca.fit_transform(data_scaled) data_pca.shape # getting sum of the explained variance pca.explained_variance_.sum() # Initialize t-SNE model on the dataframe to reduce dimensions to 2 tsne = TSNE(perplexi...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Clustering Crytocurrencies Using K-Means
# Finding the best value for k using Elbow curve inertia = [] k = list(range(1, 11)) # Looking for the best k for i in k: km = KMeans(n_clusters=i, random_state=0) km.fit(pca_tsne_features) inertia.append(km.inertia_) # Define a DataFrame to plot the Elbow Curve using hvPlot elbow_data = {"k": k, "inerti...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Running K-Means with k = 6
kmodel = KMeans(n_clusters=6, random_state=0) # Fit the model kmodel.fit(pca_tsne_features) #predict clusters predictions = kmodel.predict(pca_tsne_features) predictions data['class'] = predictions data.head() #plot the reduced dimensions from tsne and colorcode them with the labels plt.scatter(pca_tsne_features[:, 0...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
It can be determined that the crypto dataset has no clear clusters to differentiate. The data is mostly one large, undifferentiated blob rather than distinct clusters. Reduce dimension with the number of principal components = 3
# Using dimension reduction with Principal component = 3 principal_component = PCA(n_components=3) data_reduced_comp = principal_component.fit_transform(data_scaled) data_reduced_comp.shape # Creating a DataFrame with the principal components data pca_df = pd.DataFrame(data=data_reduced_comp, columns=["PC 1", "PC 2",...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Visualizing Results
# 3D scatter with clusters fig = px.scatter_3d( clustered_df, x="PC 1", y="PC 2", z="PC 3", hover_name = "CoinName", hover_data= ['Algorithm'], color="Class", symbol="Class", ) fig.update_layout(legend=dict(x=0, y=1)) fig.show() # Table with tradable cryptos import hvplot.pandas colum...
_____no_output_____
MIT
crypto_clustering.ipynb
tsubedy/Unsupervised_learning
Decorator Application (Timing)
def timed(fn): from time import perf_counter from functools import wraps @wraps(fn) def inner(*args, **kwargs): start = perf_counter() result = fn(*args, **kwargs) end = perf_counter() elapsed = end -start args_ = [str(a) for a in args] kwargs_ = ['{0}={...
_____no_output_____
Unlicense
my_classes/ScopesClosuresAndDecorators/decorator_app_timing.ipynb
minefarmer/deep-Dive-1
1. recursion
2. loop
3. reduce
def calc_recursive_fib(n): if n <= 2: return 1 else: return calc_recursive_fib(n-1) + calc_recursive_fib(n-2) calc_recursive_fib(6) @timed def fib_recursive(n): return calc_recursive_fib(n) fib_recursive(6) fib_recursive(20) fib_recursive(25) fib_recursive(30) fib_recursive(36) fib_recursive...
_____no_output_____
Unlicense
my_classes/ScopesClosuresAndDecorators/decorator_app_timing.ipynb
minefarmer/deep-Dive-1