markdown stringlengths 0 1.02M | code stringlengths 0 832k | output stringlengths 0 1.02M | license stringlengths 3 36 | path stringlengths 6 265 | repo_name stringlengths 6 127 |
|---|---|---|---|---|---|
Load data | fold_set = pd.read_csv('../input/aptos-split-oldnew/5-fold.csv')
X_train = fold_set[fold_set['fold_2'] == 'train']
X_val = fold_set[fold_set['fold_2'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
# Preprocecss data
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
display(X_train.head()) | Number of train samples: 18697
Number of validation samples: 733
Number of test samples: 1928
| MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model parameters | # Model parameters
model_path = '../working/effNetB4_img256_noBen_fold3.h5'
FACTOR = 4
BATCH_SIZE = 8 * FACTOR
EPOCHS = 20
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-3/2 * FACTOR
WARMUP_LEARNING_RATE = 1e-3/2 * FACTOR
HEIGHT = 256
WIDTH = 256
CHANNELS = 3
TTA_STEPS = 5
ES_PATIENCE = 5
LR_WARMUP_EPOCHS = 5
STEP_SIZE = len(X_train) // BATCH_SIZE
TOTAL_STEPS = EPOCHS * STEP_SIZE
WARMUP_STEPS = LR_WARMUP_EPOCHS * STEP_SIZE | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Pre-procecess images | old_data_base_path = '../input/diabetic-retinopathy-resized/resized_train/resized_train/'
new_data_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
if img.ndim ==2:
mask = img>tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def circle_crop(img):
img = crop_image(img)
height, width, depth = img.shape
largest_side = np.max((height, width))
img = cv2.resize(img, (largest_side, largest_side))
height, width, depth = img.shape
x = width//2
y = height//2
r = np.amin((x, y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
img = crop_image(img)
return img
def preprocess_image(image_id, base_path, save_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = circle_crop(image)
image = cv2.resize(image, (HEIGHT, WIDTH))
# image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
cv2.imwrite(save_path + image_id, image)
def preprocess_data(df, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
df = df.reset_index()
for i in range(df.shape[0]):
item = df.iloc[i]
image_id = item['id_code']
item_set = item['fold_2']
item_data = item['data']
if item_set == 'train':
if item_data == 'new':
preprocess_image(image_id, new_data_base_path, train_dest_path)
if item_data == 'old':
preprocess_image(image_id, old_data_base_path, train_dest_path)
if item_set == 'validation':
if item_data == 'new':
preprocess_image(image_id, new_data_base_path, validation_dest_path)
if item_data == 'old':
preprocess_image(image_id, old_data_base_path, validation_dest_path)
def preprocess_test(df, base_path=test_base_path, save_path=test_dest_path, HEIGHT=HEIGHT, WIDTH=WIDTH, sigmaX=10):
df = df.reset_index()
for i in range(df.shape[0]):
image_id = df.iloc[i]['id_code']
preprocess_image(image_id, base_path, save_path)
n_cpu = mp.cpu_count()
train_n_cnt = X_train.shape[0] // n_cpu
val_n_cnt = X_val.shape[0] // n_cpu
test_n_cnt = test.shape[0] // n_cpu
# Pre-procecss old data train set
pool = mp.Pool(n_cpu)
dfs = [X_train.iloc[train_n_cnt*i:train_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_train.iloc[train_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()
# Pre-procecss validation set
pool = mp.Pool(n_cpu)
dfs = [X_val.iloc[val_n_cnt*i:val_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = X_val.iloc[val_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_data, [x_df for x_df in dfs])
pool.close()
# Pre-procecss test set
pool = mp.Pool(n_cpu)
dfs = [test.iloc[test_n_cnt*i:test_n_cnt*(i+1)] for i in range(n_cpu)]
dfs[-1] = test.iloc[test_n_cnt*(n_cpu-1):]
res = pool.map(preprocess_test, [x_df for x_df in dfs])
pool.close() | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Data generator | datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="raw",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed)
def classify(x):
if x < 0.5:
return 0
elif x < 1.5:
return 1
elif x < 2.5:
return 2
elif x < 3.5:
return 3
return 4
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
train_labels, train_preds = train
validation_labels, validation_preds = validation
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
def plot_metrics(history, figsize=(20, 14)):
fig, (ax1, ax2) = plt.subplots(2, 1, sharex='col', figsize=figsize)
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
def apply_tta(model, generator, steps=10):
step_size = generator.n//generator.batch_size
preds_tta = []
for i in range(steps):
generator.reset()
preds = model.predict_generator(generator, steps=step_size)
preds_tta.append(preds)
return np.mean(preds_tta, axis=0)
def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(np.append(train_preds, validation_preds), np.append(train_labels, validation_labels), weights='quadratic'))
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""
Cosine decay schedule with warm up period.
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
:param global_step {int}: global step.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:param global_step {int}: global step.
:Returns : a float representing learning rate.
:Raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger or equal to warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
np.pi *
(global_step - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger or equal to warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * global_step + warmup_learning_rate
learning_rate = np.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return np.where(global_step > total_steps, 0.0, learning_rate)
class WarmUpCosineDecayScheduler(Callback):
"""Cosine decay with warmup learning rate scheduler"""
def __init__(self,
learning_rate_base,
total_steps,
global_step_init=0,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0,
verbose=0):
"""
Constructor for cosine decay with warmup learning rate scheduler.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param global_step_init {int}: initial global step, e.g. from previous checkpoint.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:param verbose {int}: quiet, 1: update messages. (default: {0}).
"""
super(WarmUpCosineDecayScheduler, self).__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.global_step = global_step_init
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.verbose = verbose
self.learning_rates = []
def on_batch_end(self, batch, logs=None):
self.global_step = self.global_step + 1
lr = K.get_value(self.model.optimizer.lr)
self.learning_rates.append(lr)
def on_batch_begin(self, batch, logs=None):
lr = cosine_decay_with_warmup(global_step=self.global_step,
learning_rate_base=self.learning_rate_base,
total_steps=self.total_steps,
warmup_learning_rate=self.warmup_learning_rate,
warmup_steps=self.warmup_steps,
hold_base_rate_steps=self.hold_base_rate_steps)
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nBatch %02d: setting learning rate to %s.' % (self.global_step + 1, lr)) | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model | def create_model(input_shape):
input_tensor = Input(shape=input_shape)
base_model = EfficientNetB4(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/efficientnet-keras-weights-b0b5/efficientnet-b4_imagenet_1000_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
final_output = Dense(1, activation='linear', name='final_output')(x)
model = Model(input_tensor, final_output)
return model | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Train top layers | model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS))
for layer in model.layers:
layer.trainable = False
for i in range(-2, 0):
model.layers[i].trainable = True
metric_list = ["accuracy"]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=2).history | Epoch 1/5
- 282s - loss: 1.2568 - acc: 0.3093 - val_loss: 1.4649 - val_acc: 0.4148
Epoch 2/5
- 269s - loss: 1.0960 - acc: 0.3228 - val_loss: 1.2900 - val_acc: 0.2810
Epoch 3/5
- 267s - loss: 1.0743 - acc: 0.3280 - val_loss: 1.3244 - val_acc: 0.2967
Epoch 4/5
- 266s - loss: 1.0808 - acc: 0.3231 - val_loss: 1.2172 - val_acc: 0.3338
Epoch 5/5
- 267s - loss: 1.0612 - acc: 0.3245 - val_loss: 1.1476 - val_acc: 0.2853
| MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Fine-tune the model | for layer in model.layers:
layer.trainable = True
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
cosine_lr = WarmUpCosineDecayScheduler(learning_rate_base=LEARNING_RATE,
total_steps=TOTAL_STEPS,
warmup_learning_rate=0.0,
warmup_steps=WARMUP_STEPS,
hold_base_rate_steps=(3 * STEP_SIZE))
callback_list = [checkpoint, es, cosine_lr]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=metric_list)
model.summary()
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callback_list,
verbose=2).history
fig, ax = plt.subplots(1, 1, sharex='col', figsize=(20, 4))
ax.plot(cosine_lr.learning_rates)
ax.set_title('Fine-tune learning rates')
plt.xlabel('Steps')
plt.ylabel('Learning rate')
sns.despine()
plt.show() | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model loss graph | plot_metrics(history)
# Create empty arays to keep the predictions and labels
df_preds = pd.DataFrame(columns=['label', 'pred', 'set'])
train_generator.reset()
valid_generator.reset()
# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN + 1):
im, lbl = next(train_generator)
preds = model.predict(im, batch_size=train_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'train']
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID + 1):
im, lbl = next(valid_generator)
preds = model.predict(im, batch_size=valid_generator.batch_size)
for index in range(len(preds)):
df_preds.loc[len(df_preds)] = [lbl[index], preds[index][0], 'validation']
df_preds['label'] = df_preds['label'].astype('int')
# Classify predictions
df_preds['predictions'] = df_preds['pred'].apply(lambda x: classify(x))
train_preds = df_preds[df_preds['set'] == 'train']
validation_preds = df_preds[df_preds['set'] == 'validation'] | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Model Evaluation Confusion Matrix Original thresholds | plot_confusion_matrix((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions'])) | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Quadratic Weighted Kappa | evaluate_model((train_preds['label'], train_preds['predictions']), (validation_preds['label'], validation_preds['predictions'])) | Train Cohen Kappa score: 0.738
Validation Cohen Kappa score: 0.899
Complete set Cohen Kappa score: 0.746
| MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Apply model to test set and output predictions | preds = apply_tta(model, test_generator, TTA_STEPS)
predictions = [classify(x) for x in preds]
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path) | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
Predictions class distribution | fig = plt.subplots(sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d").set_title('Test')
sns.despine()
plt.show()
results.to_csv('submission.csv', index=False)
display(results.head()) | _____no_output_____ | MIT | Model backlog/EfficientNet/EfficientNetB4/5-Fold/274 - EfficientNetB4-Reg-Img256 Old&New Fold3.ipynb | ThinkBricks/APTOS2019BlindnessDetection |
# To keep the page organized do all imports here
from sqlalchemy import create_engine
import pandas as pd
from scipy import stats
# Database credentials
postgres_user = 'dabc_student'
postgres_pw = '7*.8G9QH21'
postgres_host = '142.93.121.174'
postgres_port = '5432'
postgres_db = 'kickstarterprojects'
# use the credentials to start a connection
engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(
postgres_user, postgres_pw, postgres_host, postgres_port, postgres_db))
projects_df = pd.read_sql_table('ksprojects', con=engine)
# remove the connection
engine.dispose()
#projects_df.shape
#describes column name and fill tyope
#projects_df.info()
#projects_df.head(2)
# count the number of unique values in this column
projects_df['category'].nunique()
# find the frequency of each value in the column
category_counts = projects_df['category'].value_counts()
# only print the first 10, because 158 are too many to print
#category_counts.head(10)
d | _____no_output_____ | CC-BY-3.0 | ksStatsPy.ipynb | tastiz/story_scape.html | |
Translate `dzn` to `smt2` for z3 Check Versions of Tools | import os
import subprocess
my_env = os.environ.copy()
output = subprocess.check_output(f'''/home/{my_env['USER']}/optimathsat/bin/optimathsat -version''', shell=True, universal_newlines=True)
output
output = subprocess.check_output(f'''/home/{my_env['USER']}/minizinc/build/minizinc --version''', shell=True, universal_newlines=True)
output
output = subprocess.check_output(f'''/home/{my_env['USER']}/z3/build/z3 --version''', shell=True, universal_newlines=True)
output | _____no_output_____ | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
First generate the FlatZinc files using the MiniZinc tool. Make sure that a `smt2` folder is located inside `./minizinc/share/minizinc/`. Else, to enable OptiMathSAT's support for global constraints download the [smt2.tar.gz](http://optimathsat.disi.unitn.it/data/smt2.tar.gz) package and unpack it there using```zshtar xf smt2.tar.gz $MINIZINC_PATH/share/minizinc/```If next output shows a list of `.mzn` files, then this dependency is satified. | output = subprocess.check_output(f'''ls -la /home/{my_env['USER']}/minizinc/share/minizinc/smt2/''', shell=True, universal_newlines=True)
print(output) | total 292
drwxr-xr-x 2 jovyan jovyan 4096 Jan 15 2018 .
drwxr-xr-x 11 jovyan jovyan 4096 Jul 11 12:34 ..
-rw-r--r-- 1 jovyan jovyan 328 Nov 13 2017 alldifferent_except_0.mzn
-rw-r--r-- 1 jovyan jovyan 382 Nov 13 2017 all_different_int.mzn
-rw-r--r-- 1 jovyan jovyan 396 Nov 13 2017 all_different_set.mzn
-rw-r--r-- 1 jovyan jovyan 270 Nov 13 2017 all_disjoint.mzn
-rw-r--r-- 1 jovyan jovyan 150 Nov 14 2017 all_equal_int.mzn
-rw-r--r-- 1 jovyan jovyan 164 Nov 13 2017 all_equal_set.mzn
-rw-r--r-- 1 jovyan jovyan 351 Nov 13 2017 among.mzn
-rw-r--r-- 1 jovyan jovyan 305 Nov 8 2017 arg_max_float.mzn
-rw-r--r-- 1 jovyan jovyan 291 Nov 8 2017 arg_max_int.mzn
-rw-r--r-- 1 jovyan jovyan 306 Nov 8 2017 arg_min_float.mzn
-rw-r--r-- 1 jovyan jovyan 291 Nov 8 2017 arg_min_int.mzn
-rw-r--r-- 1 jovyan jovyan 480 Nov 13 2017 at_least_int.mzn
-rw-r--r-- 1 jovyan jovyan 506 Nov 14 2017 at_least_set.mzn
-rw-r--r-- 1 jovyan jovyan 340 Nov 13 2017 at_most1.mzn
-rw-r--r-- 1 jovyan jovyan 474 Nov 13 2017 at_most_int.mzn
-rw-r--r-- 1 jovyan jovyan 502 Nov 13 2017 at_most_set.mzn
-rw-r--r-- 1 jovyan jovyan 1162 Nov 8 2017 bin_packing_capa.mzn
-rw-r--r-- 1 jovyan jovyan 1044 Nov 8 2017 bin_packing_load.mzn
-rw-r--r-- 1 jovyan jovyan 883 Nov 14 2017 bin_packing.mzn
-rw-r--r-- 1 jovyan jovyan 765 Nov 14 2017 comparison_rel_array.mzn
-rw-r--r-- 1 jovyan jovyan 350 Nov 13 2017 count_eq.mzn
-rw-r--r-- 1 jovyan jovyan 382 Nov 13 2017 count_geq.mzn
-rw-r--r-- 1 jovyan jovyan 375 Nov 13 2017 count_gt.mzn
-rw-r--r-- 1 jovyan jovyan 379 Nov 13 2017 count_leq.mzn
-rw-r--r-- 1 jovyan jovyan 371 Nov 13 2017 count_lt.mzn
-rw-r--r-- 1 jovyan jovyan 371 Nov 13 2017 count_neq.mzn
-rw-r--r-- 1 jovyan jovyan 398 Nov 13 2017 decreasing_bool.mzn
-rw-r--r-- 1 jovyan jovyan 404 Nov 13 2017 decreasing_float.mzn
-rw-r--r-- 1 jovyan jovyan 393 Nov 13 2017 decreasing_int.mzn
-rw-r--r-- 1 jovyan jovyan 408 Nov 14 2017 decreasing_set.mzn
-rw-r--r-- 1 jovyan jovyan 1589 Nov 13 2017 diffn_k.mzn
-rw-r--r-- 1 jovyan jovyan 853 Nov 14 2017 diffn.mzn
-rw-r--r-- 1 jovyan jovyan 1731 Nov 13 2017 diffn_nonstrict_k.mzn
-rw-r--r-- 1 jovyan jovyan 919 Nov 14 2017 diffn_nonstrict.mzn
-rw-r--r-- 1 jovyan jovyan 276 Nov 14 2017 disjoint.mzn
-rw-r--r-- 1 jovyan jovyan 836 Nov 8 2017 disjunctive.mzn
-rw-r--r-- 1 jovyan jovyan 748 Nov 8 2017 disjunctive_strict.mzn
-rw-r--r-- 1 jovyan jovyan 696 Nov 14 2017 distribute.mzn
-rw-r--r-- 1 jovyan jovyan 474 Nov 13 2017 exactly_int.mzn
-rw-r--r-- 1 jovyan jovyan 502 Nov 13 2017 exactly_set.mzn
-rw-r--r-- 1 jovyan jovyan 851 Nov 14 2017 global_cardinality_closed.mzn
-rw-r--r-- 1 jovyan jovyan 396 Nov 8 2017 global_cardinality_fn.mzn
-rw-r--r-- 1 jovyan jovyan 914 Nov 13 2017 global_cardinality_low_up_closed.mzn
-rw-r--r-- 1 jovyan jovyan 795 Nov 13 2017 global_cardinality_low_up.mzn
-rw-r--r-- 1 jovyan jovyan 717 Nov 14 2017 global_cardinality.mzn
-rw-r--r-- 1 jovyan jovyan 398 Nov 13 2017 increasing_bool.mzn
-rw-r--r-- 1 jovyan jovyan 403 Nov 13 2017 increasing_float.mzn
-rw-r--r-- 1 jovyan jovyan 394 Nov 13 2017 increasing_int.mzn
-rw-r--r-- 1 jovyan jovyan 408 Nov 13 2017 increasing_set.mzn
-rw-r--r-- 1 jovyan jovyan 728 Nov 14 2017 int_set_channel.mzn
-rw-r--r-- 1 jovyan jovyan 582 Nov 8 2017 inverse.mzn
-rw-r--r-- 1 jovyan jovyan 827 Nov 8 2017 inverse_set.mzn
-rw-r--r-- 1 jovyan jovyan 708 Nov 14 2017 link_set_to_booleans.mzn
-rw-r--r-- 1 jovyan jovyan 375 Nov 11 2017 maximum_float.mzn
-rw-r--r-- 1 jovyan jovyan 367 Nov 11 2017 maximum_int.mzn
-rw-r--r-- 1 jovyan jovyan 422 Nov 13 2017 member_bool.mzn
-rw-r--r-- 1 jovyan jovyan 431 Nov 13 2017 member_float.mzn
-rw-r--r-- 1 jovyan jovyan 414 Nov 13 2017 member_int.mzn
-rw-r--r-- 1 jovyan jovyan 442 Nov 13 2017 member_set.mzn
-rw-r--r-- 1 jovyan jovyan 372 Nov 11 2017 minimum_float.mzn
-rw-r--r-- 1 jovyan jovyan 367 Nov 11 2017 minimum_int.mzn
-rw-r--r-- 1 jovyan jovyan 283 Nov 13 2017 nvalue.mzn
-rw-r--r-- 1 jovyan jovyan 712 Nov 14 2017 range.mzn
-rw-r--r-- 1 jovyan jovyan 1751 Nov 13 2017 redefinitions-2.0.2.mzn
-rw-r--r-- 1 jovyan jovyan 1434 Nov 13 2017 redefinitions-2.0.mzn
-rw-r--r-- 1 jovyan jovyan 678 Nov 15 2017 redefinitions-2.1.mzn
-rw-r--r-- 1 jovyan jovyan 571 Nov 14 2017 roots.mzn
-rw-r--r-- 1 jovyan jovyan 764 Nov 8 2017 sum_pred.mzn
-rw-r--r-- 1 jovyan jovyan 445 Nov 15 2017 symmetric_all_different.mzn
-rw-r--r-- 1 jovyan jovyan 280 Nov 15 2017 value_precede_int.mzn
-rw-r--r-- 1 jovyan jovyan 294 Nov 15 2017 value_precede_set.mzn
| BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Transform `dzn` to `fzn` Using a `mzn` Model Then transform the desired `.dzn` file to `.fzn` using a `Mz.mzn` MiniZinc model. First list all `dzn` files contained in the `dzn_path` that should get processed. | import os
dzn_files = []
dzn_path = f'''/home/{my_env['USER']}/data/dzn/'''
for filename in os.listdir(dzn_path):
if filename.endswith(".dzn"):
dzn_files.append(filename)
len(dzn_files) | _____no_output_____ | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Model $Mz_1$ | import sys
fzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz1-noAbs/'''
minizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \
-Werror \
--compile --solver org.minizinc.mzn-fzn \
--search-dir /home/{my_env['USER']}/minizinc/share/minizinc/smt2/ \
/home/{my_env['USER']}/models/mzn/Mz1-noAbs.mzn '''
translate_count = 0
for dzn in dzn_files:
translate_count += 1
minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \
+ ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'
print(f'''\r({translate_count}/{len(dzn_files)}) Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')
sys.stdout.flush()
subprocess.check_output(minizinc_transform_cmd, shell=True,
universal_newlines=True) | (278/278) Translating /home/jovyan/data/dzn/R028.dzn to /home/jovyan/data/fzn/smt2/Mz1-noAbs/R028-dzn.fzn | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Model $Mz_2$ | import sys
fzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz2-noAbs/'''
minizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \
-Werror \
--compile --solver org.minizinc.mzn-fzn \
--search-dir /home/{my_env['USER']}/minizinc/share/minizinc/smt2/ \
/home/{my_env['USER']}/models/mzn/Mz2-noAbs.mzn '''
translate_count = 0
for dzn in dzn_files:
translate_count += 1
minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \
+ ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'
print(f'''\r({translate_count}/{len(dzn_files)}) Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')
sys.stdout.flush()
subprocess.check_output(minizinc_transform_cmd, shell=True,
universal_newlines=True) | (278/278) Translating /home/jovyan/data/dzn/R028.dzn to /home/jovyan/data/fzn/smt2/Mz2-noAbs/R028-dzn.fzn | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Translate `fzn` to `smt2` The generated `.fzn` files can be used to generate a `.smt2` files using the `fzn2smt2.py` script from this [project](https://github.com/PatrickTrentin88/fzn2omt).**NOTE**: Files `R001` (no cables) and `R002` (one one-sided cable) throw an error while translating. $Mz_1$ | import os
fzn_files = []
fzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz1-noAbs/'''
for filename in os.listdir(fzn_path):
if filename.endswith(".fzn"):
fzn_files.append(filename)
len(fzn_files)
smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs/'''
fzn2smt2_base_cmd = f'''/home/{my_env['USER']}/fzn2omt/bin/fzn2z3.py'''
translate_count = 0
my_env = os.environ.copy()
my_env['PATH'] = f'''/home/{my_env['USER']}/optimathsat/bin/:{my_env['PATH']}'''
my_env['PATH'] = f'''/home/{my_env['USER']}/z3/build/:{my_env['PATH']}'''
for fzn in fzn_files:
translate_count += 1
fzn2smt2_transform_cmd = f'''{fzn2smt2_base_cmd} {fzn_path}{fzn} --smt2 {smt2_path}{fzn.replace('.', '-')}.smt2'''
print(f'''\r({translate_count}/{len(fzn_files)}) Translating {fzn_path + fzn} to {smt2_path + fzn.replace('.', '-')}.smt2''', end='')
try:
output = subprocess.check_output(fzn2smt2_transform_cmd,
shell=True,env=my_env,
universal_newlines=True)
except Exception as e:
output = str(e.output)
print(f'''\r{output}''', end='')
sys.stdout.flush() | (278/278) Translating /home/jovyan/data/fzn/smt2/Mz1-noAbs/R079-dzn.fzn to /home/jovyan/data/smt2/z3/Mz1-noAbs/R079-dzn-fzn.smt2
| BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
$Mz_2$ | import os
fzn_files = []
fzn_path = f'''/home/{my_env['USER']}/data/fzn/smt2/Mz2-noAbs/'''
for filename in os.listdir(fzn_path):
if filename.endswith(".fzn"):
fzn_files.append(filename)
len(fzn_files)
smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs/'''
fzn2smt2_base_cmd = f'''/home/{my_env['USER']}/fzn2omt/bin/fzn2z3.py'''
translate_count = 0
my_env = os.environ.copy()
my_env['PATH'] = f'''/home/{my_env['USER']}/optimathsat/bin/:{my_env['PATH']}'''
my_env['PATH'] = f'''/home/{my_env['USER']}/z3/build/:{my_env['PATH']}'''
for fzn in fzn_files:
translate_count += 1
fzn2smt2_transform_cmd = f'''{fzn2smt2_base_cmd} {fzn_path}{fzn} --smt2 {smt2_path}{fzn.replace('.', '-')}.smt2'''
print(f'''\r({translate_count}/{len(fzn_files)}) Translating {fzn_path + fzn} to {smt2_path + fzn.replace('.', '-')}.smt2''', end='')
try:
output = subprocess.check_output(fzn2smt2_transform_cmd,
shell=True,env=my_env,
universal_newlines=True)
except Exception as e:
output = str(e.output)
print(f'''\r{output}''', end='')
sys.stdout.flush() | (278/278) Translating /home/jovyan/data/fzn/smt2/Mz2-noAbs/R079-dzn.fzn to /home/jovyan/data/smt2/z3/Mz2-noAbs/R079-dzn-fzn.smt2
| BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Adjust `smt2` Files According to Chapter 5.2- Add lower and upper bounds for the decision variable `pfc`- Add number of cavities as comments for later solution extraction (workaround) | import os
import re
def adjust_smt2_file(smt2_path: str, file: str, write_path: str):
    """Apply the chapter 5.2 adjustments to one translated ``.smt2`` file.

    Prepends the number of cavities ``k`` and the relevant ``pfc`` variable
    names as ``;;`` comments (a workaround for later solution extraction)
    and asserts lower/upper bounds (0 < X_INTRODUCED_{i-1}_ <= k) for every
    decision variable.  The last five lines of the input (the solver
    commands) are re-appended after the added assertions.

    Parameters
    ----------
    smt2_path : directory containing ``file``.
    file : name of the ``.smt2`` file to adjust.
    write_path : directory the adjusted file is written to (same stem).
    """
    with open(os.path.join(smt2_path, file)) as myfile:
        lines = myfile.readlines()
    # the final five lines hold the solver commands and must stay last
    additionalLines = lines[-5:]
    data = "".join(lines[:-5])
    filename = os.path.splitext(file)[0]
    newFile = open(os.path.join(write_path, filename + '.smt2'), "w+")
    # the cavity count k is encoded as an integer constant,
    # e.g. "(define-fun n02 () Int 7)" -- the capture group is its value
    matches = re.findall(r'\(define-fun .\d\d \(\) Int (\d+)\)', data)
    if matches:
        cavity_count = int(matches[0])
        newFile.write(f''';; k={cavity_count}\n''')
        newFile.write(f''';; Extract pfc from\n''')
        for i in range(0, cavity_count):
            newFile.write(f''';; X_INTRODUCED_{str(i)}_\n''')
        newFile.write(data)
        # bound every decision variable: 0 < X_INTRODUCED_{i-1}_ <= k
        for i in range(1, cavity_count + 1):
            newFile.write(f'''(define-fun lbound{str(i)} () Bool (> X_INTRODUCED_{str(i - 1)}_ 0))\n''')
            newFile.write(f'''(define-fun ubound{str(i)} () Bool (<= X_INTRODUCED_{str(i - 1)}_ {str(cavity_count)}))\n''')
            newFile.write(f'''(assert lbound{str(i)})\n''')
            newFile.write(f'''(assert ubound{str(i)})\n''')
    else:
        # bug fix: the original bare ``except`` path dropped the whole body,
        # leaving only the last five lines in the file -- keep the data and
        # just warn about the missing cavity count
        newFile.write(data)
        print(f'''\nCheck (unknown) for completeness - data missing?''')
    newFile.writelines(additionalLines)
    newFile.close()
newFile.close() | _____no_output_____ | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
$Mz_1$ | import os
# gather all translated Mz1 .smt2 files
smt2_files = []
smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs'''
for filename in os.listdir(smt2_path):
    if filename.endswith(".smt2"):
        smt2_files.append(filename)
len(smt2_files)
# rewrite every file in place (write_path == smt2_path) with the
# chapter 5.2 adjustments performed by adjust_smt2_file
fix_count = 0
for smt2 in smt2_files:
    fix_count += 1
    print(f'''\r{fix_count}/{len(smt2_files)} Fixing file {smt2}''', end='')
    adjust_smt2_file(smt2_path=smt2_path, file=smt2, write_path=f'''{smt2_path}''')
sys.stdout.flush() | 49/278 Fixing file R002-dzn-fzn.smt2
Check R002-dzn-fzn for completeness - data missing?
150/278 Fixing file R001-dzn-fzn.smt2
Check R001-dzn-fzn for completeness - data missing?
278/278 Fixing file R166-dzn-fzn.smt2 | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
$Mz_2$ | import os
# gather all translated Mz2 .smt2 files
smt2_files = []
smt2_path = f'''/home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs'''
for filename in os.listdir(smt2_path):
    if filename.endswith(".smt2"):
        smt2_files.append(filename)
len(smt2_files)
# rewrite every file in place (write_path == smt2_path) with the
# chapter 5.2 adjustments performed by adjust_smt2_file
fix_count = 0
for smt2 in smt2_files:
    fix_count += 1
    print(f'''\r{fix_count}/{len(smt2_files)} Fixing file {smt2}''', end='')
    adjust_smt2_file(smt2_path=smt2_path, file=smt2, write_path=f'''{smt2_path}''')
sys.stdout.flush() | 49/278 Fixing file R002-dzn-fzn.smt2
Check R002-dzn-fzn for completeness - data missing?
150/278 Fixing file R001-dzn-fzn.smt2
Check R001-dzn-fzn for completeness - data missing?
278/278 Fixing file R166-dzn-fzn.smt2 | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Test Generated `smt2` Files Using `z3` This should generate the `smt2` file without any error. If this was the case then the `z3` prover can be called on a file by running```zshz3 output/A001-dzn-smt2-fzn.smt2 ```yielding something similar to```zshz3 output/A001-dzn-smt2-fzn.smt2 sat(objectives (obj 41881))(model (define-fun X_INTRODUCED_981_ () Bool false) (define-fun X_INTRODUCED_348_ () Bool false) .....``` Test with `smt2` from $Mz_1$ | command = f'''/home/{my_env['USER']}/z3/build/z3 /home/{my_env['USER']}/data/smt2/z3/Mz1-noAbs/A001-dzn-fzn.smt2'''
# run z3 on one translated Mz1 instance as a smoke test;
# ``command`` is built in the (fused) line above
print(command)
try:
    result = subprocess.check_output(command, shell=True, universal_newlines=True)
except Exception as e:
    # NOTE(review): when this branch is taken ``result`` stays unbound, so
    # the following ``print(result)`` raises NameError; also assumes ``e``
    # has an ``output`` attribute (true for CalledProcessError only)
    print(e.output)
print(result) | _____no_output_____ | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Test with `smt2` from $Mz_2$ | result = subprocess.check_output(
f'''/home/{my_env['USER']}/z3/build/z3 \
/home/{my_env['USER']}/data/smt2/z3/Mz2-noAbs/v3/A004-dzn-fzn_v3.smt2''',
shell=True, universal_newlines=True)
print(result) | _____no_output_____ | BSD-3-Clause | translation_toolchain/scripts/TranslateDZN2SMT2_Z3.ipynb | kw90/ctw_translation_toolchain |
Cincinnati Salaries- https://data.cincinnati-oh.gov/Efficient-Service-Delivery/City-of-Cincinnati-Employees-w-Salaries/wmj4-ygbf | ! pip install sodapy
! pip install pandas
import pandas as pd
from sodapy import Socrata
# Unauthenticated client only works with public data sets. Note 'None'
# in place of application token, and no username or password:
client = Socrata("data.cincinnati-oh.gov", None)
# Example authenticated client (needed for non-public datasets):
# client = Socrata(data.cincinnati-oh.gov,
#                  MyAppToken,
#                  username="user@example.com",
#                  password="AFakePassword")
# First 10,000 results (limit=10000), returned as JSON from API / converted
# to a Python list of dictionaries by sodapy.
results = client.get("wmj4-ygbf", limit=10000)
# Convert to pandas DataFrame
results_df = pd.DataFrame.from_records(results)
results_df
max(pd.to_numeric(results_df['annual_rt'])) | _____no_output_____ | MIT | cincinnati_salaries.ipynb | doedotdev/cincinnati-salaries |
Welcome to Colaboratory!Colaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.With Colaboratory you can write and execute code, save and share your analyses, and access powerful computing resources, all for free from your browser. | #@title Introducing Colaboratory { display-mode: "form" }
#@markdown This 3-minute video gives an overview of the key features of Colaboratory:
from IPython.display import YouTubeVideo
YouTubeVideo('inN8seMm7UI', width=600, height=400) | _____no_output_____ | MIT | colaboratory_introduction.ipynb | karlkirschner/2020_Scientific_Programming |
Getting StartedThe document you are reading is a [Jupyter notebook](https://jupyter.org/), hosted in Colaboratory. It is not a static page, but an interactive environment that lets you write and execute code in Python and other languages.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result: | seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day | _____no_output_____ | MIT | colaboratory_introduction.ipynb | karlkirschner/2020_Scientific_Programming |
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter".All cells modify the same global state, so variables that you define by executing a cell can be used in other cells: | seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week | _____no_output_____ | MIT | colaboratory_introduction.ipynb | karlkirschner/2020_Scientific_Programming |
For more information about working with Colaboratory notebooks, see [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb). --- CellsA notebook is a list of cells. Cells contain either explanatory text or executable code and its output. Click a cell to select it. Code cellsBelow is a **code cell**. Once the toolbar button indicates CONNECTED, click in the cell to select it and execute the contents in the following ways:* Click the **Play icon** in the left gutter of the cell;* Type **Cmd/Ctrl+Enter** to run the cell in place;* Type **Shift+Enter** to run the cell and move focus to the next cell (adding one if none exists); or* Type **Alt+Enter** to run the cell and insert a new code cell immediately below it.There are additional options for running some or all cells in the **Runtime** menu. | a = 13
a | _____no_output_____ | MIT | colaboratory_introduction.ipynb | karlkirschner/2020_Scientific_Programming |
Table of Contents1 Leveraging Pre-trained Word Embedding for Text Classification1.1 Data Preparation1.2 Glove1.3 Model1.3.1 Model with Pretrained Embedding1.3.2 Model without Pretrained Embedding1.4 Submission1.5 Summary2 Reference | # code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', '..', 'notebook_format'))
from formats import load_style
load_style(plot_style=False)
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format='retina'
import os
import time
import numpy as np
import pandas as pd
from typing import List, Tuple, Dict
from sklearn.model_selection import train_test_split
from keras import layers
from keras.models import Model
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
from keras.preprocessing.sequence import pad_sequences
# prevent scientific notations
pd.set_option('display.float_format', lambda x: '%.3f' % x)
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,sklearn,keras | Using TensorFlow backend.
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
Leveraging Pre-trained Word Embedding for Text Classification There are two main ways to obtain word embeddings:- Learn it from scratch: We specify a neural network architecture and learn the word embeddings jointly with the main task at our hand (e.g. sentiment classification). i.e. we would start off with some random word embeddings, and it would update itself along with the word embeddings.- Transfer Learning: The whole idea behind transfer learning is to avoid reinventing the wheel as much as possible. It gives us the capability to transfer knowledge that was gained/learned in some other task and use it to improve the learning of another related task. In practice, one way to do this is for the embedding part of the neural network architecture, we load some other embeddings that were trained on a different machine learning task than the one we are trying to solve and use that to bootstrap the process.One area that transfer learning shines is when we have little training data available and using our data alone might not be enough to learn an appropriate task specific embedding/features for our vocabulary. In this case, leveraging a word embedding that captures generic aspect of the language can prove to be beneficial from both a performance and time perspective (i.e. we won't have to spend hours/days training a model from scratch to achieve a similar performance). Keep in mind that, as with all machine learning application, everything is still all about trial and error. What makes a embedding good depends heavily on the task at hand: The word embedding for a movie review sentiment classification model may look very different from a legal document classification model as the semantic of the corpus varies between these two tasks. Data Preparation We'll use the movie review sentiment analysis dataset from [Kaggle](https://www.kaggle.com/c/word2vec-nlp-tutorial/overview) for this example. 
It's a binary classification problem with AUC as the ultimate evaluation metric. The next few code chunk performs the usual text preprocessing, build up the word vocabulary and performing a train/test split. | data_dir = 'data'
submission_dir = 'submission'
input_path = os.path.join(data_dir, 'word2vec-nlp-tutorial', 'labeledTrainData.tsv')
df = pd.read_csv(input_path, delimiter='\t')
print(df.shape)
df.head()
raw_text = df['review'].iloc[0]
raw_text
import re
def clean_str(string: str) -> str:
    """Drop backslashes and quote characters, then trim and lowercase."""
    for pattern in (r"\\", r"\'", r"\""):
        string = re.sub(pattern, "", string)
    return string.strip().lower()
from bs4 import BeautifulSoup
def clean_text(df: pd.DataFrame,
               text_col: str,
               label_col: str) -> Tuple[List[str], List[int]]:
    """Strip HTML markup from every review and normalize it with
    ``clean_str``; returns the cleaned texts and their labels as two
    parallel lists."""
    texts: List[str] = []
    labels: List[int] = []
    for raw_text, label in zip(df[text_col], df[label_col]):
        markup_free = BeautifulSoup(raw_text).get_text()
        texts.append(clean_str(markup_free))
        labels.append(label)
    return texts, labels
# clean the raw reviews and their sentiment labels
text_col = 'review'
label_col = 'sentiment'
texts, labels = clean_text(df, text_col, label_col)
print('sample text: ', texts[0])
print('corresponding label:', labels[0])
# one-hot encode the labels and carve out a validation split
random_state = 1234
val_split = 0.2
labels = to_categorical(labels)
texts_train, texts_val, y_train, y_val = train_test_split(
    texts, labels,
    test_size=val_split,
    random_state=random_state)
print('labels shape:', labels.shape)
print('train size: ', len(texts_train))
print('validation size: ', len(texts_val))
# fit the vocabulary (top 20k words, out-of-vocabulary token '<unk>')
# on the training texts only
max_num_words = 20000
tokenizer = Tokenizer(num_words=max_num_words, oov_token='<unk>')
tokenizer.fit_on_texts(texts_train)
print('Found %s unique tokens.' % len(tokenizer.word_index))
# map texts to integer sequences, padded/truncated to a fixed length
max_sequence_len = 1000
sequences_train = tokenizer.texts_to_sequences(texts_train)
x_train = pad_sequences(sequences_train, maxlen=max_sequence_len)
sequences_val = tokenizer.texts_to_sequences(texts_val)
x_val = pad_sequences(sequences_val, maxlen=max_sequence_len)
sequences_train[0][:5] | _____no_output_____ | MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
Glove There are many different pretrained word embeddings online. The one we'll be using is from [Glove](https://nlp.stanford.edu/projects/glove/). Others include but not limited to [FastText](https://fasttext.cc/docs/en/crawl-vectors.html), [bpemb](https://github.com/bheinzerling/bpemb).If we look at the project's wiki page, we can find any different pretrained embeddings available for us to experiment. | import requests
from tqdm import tqdm
def download_glove(embedding_type: str='glove.6B.zip'):
    """
    download GloVe word vector representations, this step may take a while

    Parameters
    ----------
    embedding_type : str, default 'glove.6B.zip'
        Specifying different glove embeddings to download if not already there.
        {'glove.6B.zip', 'glove.42B.300d.zip', 'glove.840B.300d.zip', 'glove.twitter.27B.zip'}
        Be wary of the size. e.g. 'glove.6B.zip' is a 822 MB zipped, 2GB unzipped
    """
    base_url = 'http://nlp.stanford.edu/data/'
    # skip the download entirely when the archive is already present
    if not os.path.isfile(embedding_type):
        url = base_url + embedding_type
        # the following section is a pretty generic http get request for
        # saving large files, provides progress bars for checking progress
        response = requests.get(url, stream=True)
        response.raise_for_status()
        # Content-Length may be absent; tqdm then shows an unbounded bar
        content_len = response.headers.get('Content-Length')
        total = int(content_len) if content_len is not None else 0
        with tqdm(unit='B', total=total) as pbar, open(embedding_type, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive chunks
                    pbar.update(len(chunk))
                    f.write(chunk)
        if response.headers.get('Content-Type') == 'application/zip':
            from zipfile import ZipFile
            # NOTE(review): str.strip('.zip') strips a *character set*, not a
            # suffix; it yields 'glove.6B' here, but removesuffix('.zip')
            # would be the safer spelling
            with ZipFile(embedding_type, 'r') as f:
                f.extractall(embedding_type.strip('.zip'))
download_glove() | _____no_output_____ | MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
The way we'll leverage the pretrained embedding is to first read it in as a dictionary lookup, where the key is the word and the value is the corresponding word embedding. Then for each token in our vocabulary, we'll lookup this dictionary to see if there's a pretrained embedding available, if there is, we'll use the pretrained embedding, if there isn't, we'll leave the embedding for this word in its original randomly initialized form.The format for this particular pretrained embedding is for every line, we have a space delimited values, where the first token is the word, and the rest are its corresponding embedding values. e.g. the first line from the line looks like:```the -0.038194 -0.24487 0.72812 -0.39961 0.083172 0.043953 -0.39141 0.3344 -0.57545 0.087459 0.28787 -0.06731 0.30906 -0.26384 -0.13231 -0.20757 0.33395 -0.33848 -0.31743 -0.48336 0.1464 -0.37304 0.34577 0.052041 0.44946 -0.46971 0.02628 -0.54155 -0.15518 -0.14107 -0.039722 0.28277 0.14393 0.23464 -0.31021 0.086173 0.20397 0.52624 0.17164 -0.082378 -0.71787 -0.41531 0.20335 -0.12763 0.41367 0.55187 0.57908 -0.33477 -0.36559 -0.54857 -0.062892 0.26584 0.30205 0.99775 -0.80481 -3.0243 0.01254 -0.36942 2.2167 0.72201 -0.24978 0.92136 0.034514 0.46745 1.1079 -0.19358 -0.074575 0.23353 -0.052062 -0.22044 0.057162 -0.15806 -0.30798 -0.41625 0.37972 0.15006 -0.53212 -0.2055 -1.2526 0.071624 0.70565 0.49744 -0.42063 0.26148 -1.538 -0.30223 -0.073438 -0.28312 0.37104 -0.25217 0.016215 -0.017099 -0.38984 0.87424 -0.72569 -0.51058 -0.52028 -0.1459 0.8278 0.27062``` | def get_embedding_lookup(embedding_path) -> Dict[str, np.ndarray]:
    # maps each word in the pretrained file to its embedding vector
    embedding_lookup = {}
    with open(embedding_path) as f:
        for line in f:
            # each line: the word followed by its space-separated float values
            values = line.split()
            word = values[0]
            coef = np.array(values[1:], dtype=np.float32)
            embedding_lookup[word] = coef
    return embedding_lookup
def get_pretrained_embedding(embedding_path: str,
                             index2word: Dict[int, str],
                             max_features: int) -> np.ndarray:
    """Create the embedding matrix for our vocabulary.

    Rows whose word appears in the pretrained file receive the pretrained
    vector; the remaining rows keep a random draw from a normal
    distribution matching the pretrained weights' mean/std. Row 0 stays
    random because keras' Tokenizer reserves index 0 for padding.
    """
    lookup = get_embedding_lookup(embedding_path)
    stacked = np.stack(list(lookup.values()))
    dim = stacked.shape[1]
    matrix = np.random.normal(stacked.mean(), stacked.std(),
                              (max_features, dim)).astype(np.float32)
    # count how many of our tokens are covered by this pretrained file
    found = 0
    for idx in range(1, max_features):
        vector = lookup.get(index2word[idx])
        if vector is not None:
            matrix[idx] = vector
            found += 1
    print('number of words found:', found)
    return matrix
glove_path = os.path.join('glove.6B', 'glove.6B.100d.txt')
max_features = max_num_words + 1
pretrained_embedding = get_pretrained_embedding(glove_path, tokenizer.index_word, max_features)
pretrained_embedding.shape | number of words found: 19654
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
Model To train our text classifier, we specify a 1D convolutional network. Our embedding layer can either be initialized randomly or loaded from a pre-trained embedding. Note that for the pre-trained embedding case, apart from loading the weights, we also "freeze" the embedding layer, i.e. we set its trainable attribute to False. This idea is often times used in transfer learning, where when parts of a model are pre-trained (in our case, only our Embedding layer), and parts of it are randomly initialized, the pre-trained part should ideally not be trained together with the randomly initialized part. The rationale behind it is that a large gradient update triggered by the randomly initialized layer would become very disruptive to those pre-trained weights.Once we train the randomly initialized weights for a few iterations, we can then go about un-freezing the layers that were loaded with pre-trained weights, and do an update on the weight for the entire thing. The [keras documentation](https://keras.io/applications/fine-tune-inceptionv3-on-a-new-set-of-classes) also provides an example of how to do this, although the example is for image models, the same idea can also be applied here, and can be something that's worth experimenting. | def simple_text_cnn(max_sequence_len: int,
                    max_features: int,
                    num_classes: int,
                    optimizer: str='adam',
                    metrics: List[str]=['acc'],  # NOTE(review): mutable default; never mutated here, but None-then-['acc'] would be safer
                    pretrained_embedding: np.ndarray=None) -> Model:
    """Build and compile a 1D-CNN text classifier.

    When *pretrained_embedding* is given, the embedding layer is loaded
    with those weights and frozen (``trainable=False``); otherwise a
    100-dim embedding is learned from scratch together with the task.
    """
    sequence_input = layers.Input(shape=(max_sequence_len,), dtype='int32')
    if pretrained_embedding is None:
        # randomly initialized, trainable embedding learned with the task
        embedded_sequences = layers.Embedding(max_features, 100,
                                              name='embedding')(sequence_input)
    else:
        # frozen pretrained embedding: large gradient updates from the
        # randomly initialized layers must not disrupt the loaded weights
        embedded_sequences = layers.Embedding(max_features, pretrained_embedding.shape[1],
                                              weights=[pretrained_embedding],
                                              name='embedding',
                                              trainable=False)(sequence_input)
    # three conv/pool stages reduce the 1000-step inputs used in this
    # notebook down to a single position before the dense head
    conv1 = layers.Conv1D(128, 5, activation='relu')(embedded_sequences)
    pool1 = layers.MaxPooling1D(5)(conv1)
    conv2 = layers.Conv1D(128, 5, activation='relu')(pool1)
    pool2 = layers.MaxPooling1D(5)(conv2)
    conv3 = layers.Conv1D(128, 5, activation='relu')(pool2)
    pool3 = layers.MaxPooling1D(35)(conv3)
    flatten = layers.Flatten()(pool3)
    dense = layers.Dense(128, activation='relu')(flatten)
    preds = layers.Dense(num_classes, activation='softmax')(dense)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=metrics)
    return model
Model with Pretrained Embedding | num_classes = 2
model1 = simple_text_cnn(max_sequence_len, max_features, num_classes,
pretrained_embedding=pretrained_embedding)
model1.summary() | WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:66: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:541: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4432: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:190: The name tf.get_default_session is deprecated. Please use tf.compat.v1.get_default_session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:197: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:203: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:207: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:216: The name tf.is_variable_initialized is deprecated. Please use tf.compat.v1.is_variable_initialized instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:223: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:4267: The name tf.nn.max_pool is deprecated. Please use tf.nn.max_pool2d instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/optimizers.py:793: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3576: The name tf.log is deprecated. Please use tf.math.log instead.
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 1000) 0
_________________________________________________________________
embedding (Embedding) (None, 1000, 100) 2000100
_________________________________________________________________
conv1d_1 (Conv1D) (None, 996, 128) 64128
_________________________________________________________________
max_pooling1d_1 (MaxPooling1 (None, 199, 128) 0
_________________________________________________________________
conv1d_2 (Conv1D) (None, 195, 128) 82048
_________________________________________________________________
max_pooling1d_2 (MaxPooling1 (None, 39, 128) 0
_________________________________________________________________
conv1d_3 (Conv1D) (None, 35, 128) 82048
_________________________________________________________________
max_pooling1d_3 (MaxPooling1 (None, 1, 128) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 128) 0
_________________________________________________________________
dense_1 (Dense) (None, 128) 16512
_________________________________________________________________
dense_2 (Dense) (None, 2) 258
=================================================================
Total params: 2,245,094
Trainable params: 244,994
Non-trainable params: 2,000,100
_________________________________________________________________
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
We can confirm whether our embedding layer is trainable by looping through each layer and checking the trainable attribute. | df_model_layers = pd.DataFrame(
[(layer.name, layer.trainable, layer.count_params()) for layer in model1.layers],
columns=['layer', 'trainable', 'n_params']
)
df_model_layers
# time : 70
# test performance : auc 0.93212
# fit the frozen-embedding model and record the wall-clock training time
start = time.time()
history1 = model1.fit(x_train, y_train,
                      validation_data=(x_val, y_val),
                      batch_size=128,
                      epochs=8)
end = time.time()
elapse1 = end - start
elapse1 | WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1033: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:1020: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.
Train on 20000 samples, validate on 5000 samples
Epoch 1/8
20000/20000 [==============================] - 12s 604us/step - loss: 0.5854 - acc: 0.6748 - val_loss: 0.4772 - val_acc: 0.7808
Epoch 2/8
20000/20000 [==============================] - 8s 416us/step - loss: 0.4001 - acc: 0.8186 - val_loss: 0.3766 - val_acc: 0.8352
Epoch 3/8
20000/20000 [==============================] - 8s 414us/step - loss: 0.3428 - acc: 0.8507 - val_loss: 0.4276 - val_acc: 0.7966
Epoch 4/8
20000/20000 [==============================] - 8s 415us/step - loss: 0.2790 - acc: 0.8842 - val_loss: 0.3433 - val_acc: 0.8594
Epoch 5/8
20000/20000 [==============================] - 8s 415us/step - loss: 0.2469 - acc: 0.8987 - val_loss: 0.4015 - val_acc: 0.8310
Epoch 6/8
20000/20000 [==============================] - 8s 420us/step - loss: 0.1782 - acc: 0.9289 - val_loss: 0.4670 - val_acc: 0.8296
Epoch 7/8
20000/20000 [==============================] - 8s 419us/step - loss: 0.1017 - acc: 0.9643 - val_loss: 0.5965 - val_acc: 0.8146
Epoch 8/8
20000/20000 [==============================] - 8s 418us/step - loss: 0.0680 - acc: 0.9758 - val_loss: 0.6876 - val_acc: 0.8332
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
Model without Pretrained Embedding | num_classes = 2
# build the baseline model that learns its embedding from scratch
model2 = simple_text_cnn(max_sequence_len, max_features, num_classes)
model2.summary()
# time : 86 secs
# test performance : auc 0.92310
# NOTE(review): reuses ``history1``/``elapse1`` from the model1 cell,
# overwriting those results
start = time.time()
history1 = model2.fit(x_train, y_train,
                      validation_data=(x_val, y_val),
                      batch_size=128,
                      epochs=8)
end = time.time()
elapse1 = end - start
elapse1 | Train on 20000 samples, validate on 5000 samples
Epoch 1/8
20000/20000 [==============================] - 11s 570us/step - loss: 0.5010 - acc: 0.7065 - val_loss: 0.3016 - val_acc: 0.8730
Epoch 2/8
20000/20000 [==============================] - 11s 542us/step - loss: 0.2024 - acc: 0.9243 - val_loss: 0.2816 - val_acc: 0.8824
Epoch 3/8
20000/20000 [==============================] - 11s 538us/step - loss: 0.0806 - acc: 0.9734 - val_loss: 0.3552 - val_acc: 0.8812
Epoch 4/8
20000/20000 [==============================] - 11s 535us/step - loss: 0.0272 - acc: 0.9917 - val_loss: 0.4671 - val_acc: 0.8836
Epoch 5/8
20000/20000 [==============================] - 11s 543us/step - loss: 0.0088 - acc: 0.9973 - val_loss: 0.6534 - val_acc: 0.8788
Epoch 6/8
20000/20000 [==============================] - 11s 542us/step - loss: 0.0090 - acc: 0.9973 - val_loss: 0.7522 - val_acc: 0.8740
Epoch 7/8
20000/20000 [==============================] - 11s 542us/step - loss: 0.0104 - acc: 0.9967 - val_loss: 1.0453 - val_acc: 0.8480
Epoch 8/8
20000/20000 [==============================] - 11s 543us/step - loss: 0.0205 - acc: 0.9924 - val_loss: 0.6930 - val_acc: 0.8712
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
Submission For the submission section, we read in and preprocess the test data provided by the competition, then generate the predicted probability column for both the model that uses pretrained embedding and one that doesn't to compare their performance. | input_path = os.path.join(data_dir, 'word2vec-nlp-tutorial', 'testData.tsv')
df_test = pd.read_csv(input_path, delimiter='\t')
print(df_test.shape)
df_test.head()
def clean_text_without_label(df: pd.DataFrame, text_col: str) -> List[str]:
    """Same cleaning as ``clean_text`` (HTML strip + ``clean_str``) for a
    frame that has no sentiment column, e.g. the competition test set."""
    return [clean_str(BeautifulSoup(raw).get_text()) for raw in df[text_col]]
texts_test = clean_text_without_label(df_test, text_col)
sequences_test = tokenizer.texts_to_sequences(texts_test)
x_test = pad_sequences(sequences_test, maxlen=max_sequence_len)
len(x_test)
def create_submission(ids, predictions, ids_col, label_col, submission_path) -> pd.DataFrame:
    """Assemble a two-column submission frame and optionally write it to csv.

    Parameters
    ----------
    ids : sequence of sample identifiers.
    predictions : sequence of predicted values aligned with ``ids``.
    ids_col, label_col : column names for the output frame.
    submission_path : file to write; pass ``None`` to skip writing.

    Returns
    -------
    pd.DataFrame with columns ``[ids_col, label_col]``.
    """
    df_submission = pd.DataFrame({
        ids_col: ids,
        label_col: predictions
    }, columns=[ids_col, label_col])

    if submission_path is not None:
        # create the parent directory if need be, e.g. for
        # submission_path = submission/submission.csv
        directory = os.path.split(submission_path)[0]
        # bug fix: the original test ``(directory != '' or directory != '.')``
        # was always true, so a bare file name (directory == '') crashed in
        # os.makedirs(''); exist_ok makes the isdir pre-check unnecessary
        if directory:
            os.makedirs(directory, exist_ok=True)
        df_submission.to_csv(submission_path, index=False, header=True)
    return df_submission
# column names expected by the Kaggle submission format
ids_col = 'id'
label_col = 'sentiment'
ids = df_test[ids_col]
models = {
    'pretrained_embedding': model1,
    'without_pretrained_embedding': model2
}
# write one submission csv per model, using the positive-class probability
for model_name, model in models.items():
    print('generating submission for: ', model_name)
    submission_path = os.path.join(submission_dir, '{}_submission.csv'.format(model_name))
    predictions = model.predict(x_test, verbose=1)[:, 1]
    df_submission = create_submission(ids, predictions, ids_col, label_col, submission_path)
# sanity check to make sure the size and the output of the submission makes sense
print(df_submission.shape)
df_submission.head() | generating submission for: pretrained_embedding
25000/25000 [==============================] - 6s 228us/step
generating submission for: without_pretrained_embedding
25000/25000 [==============================] - 6s 222us/step
(25000, 2)
| MIT | keras/text_classification/keras_pretrained_embedding.ipynb | sindhu819/machine-learning-1 |
04 - Pandas: Working with time series data> *© 2021, Joris Van den Bossche and Stijn Van Hoey (, ). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*--- | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot') | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Introduction: `datetime` module Standard Python contains the `datetime` module to handle date and time data: | import datetime
dt = datetime.datetime(year=2016, month=12, day=19, hour=13, minute=30)
dt
print(dt) # .day,...
print(dt.strftime("%d %B %Y")) | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Dates and times in pandas The ``Timestamp`` object Pandas has its own date and time objects, which are compatible with the standard `datetime` objects, but provide some more functionality to work with. The `Timestamp` object can also be constructed from a string: | ts = pd.Timestamp('2016-12-19')
ts | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Like with `datetime.datetime` objects, there are several useful attributes available on the `Timestamp`. For example, we can get the month (experiment with tab completion!): | ts.month | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
There is also a `Timedelta` type, which can e.g. be used to add intervals of time: | ts + pd.Timedelta('5 days') | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Parsing datetime strings  Unfortunately, when working with real world data, you encounter many different `datetime` formats. Most of the time when you have to deal with them, they come in text format, e.g. from a `CSV` file. To work with those data in Pandas, we first have to *parse* the strings to actual `Timestamp` objects. REMEMBER: To convert string formatted dates to Timestamp objects: use the `pandas.to_datetime` function | pd.to_datetime("2016-12-09")
pd.to_datetime("09/12/2016")
pd.to_datetime("09/12/2016", dayfirst=True)
pd.to_datetime("09/12/2016", format="%d/%m/%Y") | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
A detailed overview of how to specify the `format` string, see the table in the python documentation: https://docs.python.org/3/library/datetime.htmlstrftime-and-strptime-behavior `Timestamp` data in a Series or DataFrame column | s = pd.Series(['2016-12-09 10:00:00', '2016-12-09 11:00:00', '2016-12-09 12:00:00'])
s | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
The `to_datetime` function can also be used to convert a full series of strings: | ts = pd.to_datetime(s)
ts | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Notice the data type of this series has changed: the `datetime64[ns]` dtype. This indicates that we have a series of actual datetime values. The same attributes as on single `Timestamp`s are also available on a Series with datetime data, using the **`.dt`** accessor: | ts.dt.hour
ts.dt.dayofweek | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
To quickly construct some regular time series data, the [``pd.date_range``](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.date_range.html) function comes in handy: | pd.Series(pd.date_range(start="2016-01-01", periods=10, freq='3H')) | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Time series data: `Timestamp` in the index River discharge example data For the following demonstration of the time series functionality, we use a sample of discharge data of the Maarkebeek (Flanders) with 3 hour averaged values, derived from the [Waterinfo website](https://www.waterinfo.be/). | data = pd.read_csv("data/vmm_flowdata.csv")
data.head() | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
We already know how to parse a date column with Pandas: | data['Time'] = pd.to_datetime(data['Time']) | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
With `set_index('datetime')`, we set the column with datetime values as the index, which can be done by both `Series` and `DataFrame`. | data = data.set_index("Time")
data | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
The steps above are provided as built-in functionality of `read_csv`: | data = pd.read_csv("data/vmm_flowdata.csv", index_col=0, parse_dates=True) | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
REMEMBER: `pd.read_csv` provides a lot of built-in functionality to support this kind of transformation when reading in a file! Check the help of the read_csv function... The DatetimeIndex When we ensure the DataFrame has a `DatetimeIndex`, time-series related functionality becomes available: | data.index
Similar to a Series with datetime data, there are some attributes of the timestamp values available: | data.index.day
data.index.dayofyear
data.index.year | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
The `plot` method will also adapt its labels (when you zoom in, you can see the different levels of detail of the datetime labels): | %matplotlib widget
data.plot()
# switching back to static inline plots (the default)
%matplotlib inline | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
We have too much data to sensibly plot on one figure. Let's see how we can easily select part of the data or aggregate the data to other time resolutions in the next sections. Selecting data from a time series We can use label based indexing on a timeseries as expected: | data[pd.Timestamp("2012-01-01 09:00"):pd.Timestamp("2012-01-01 19:00")] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
But, for convenience, indexing a time series also works with strings: | data["2012-01-01 09:00":"2012-01-01 19:00"] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
A nice feature is **"partial string" indexing**, where we can do implicit slicing by providing a partial datetime string.E.g. all data of 2013: | data['2013':] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Or all data of January up to March 2012: | data['2012-01':'2012-03'] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: select all data starting from 2012 | data['2012':] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: select all data in January for all different years | data[data.index.month == 1] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: select all data in April, May and June for all different years | data[data.index.month.isin([4, 5, 6])] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: select all 'daytime' data (between 8h and 20h) for all days | data[(data.index.hour > 8) & (data.index.hour < 20)] | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
The power of pandas: `resample` A very powerfull method is **`resample`: converting the frequency of the time series** (e.g. from hourly to daily data).The time series has a frequency of 1 hour. I want to change this to daily: | data.resample('D').mean().head() | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Other mathematical methods can also be specified: | data.resample('D').max().head() | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
REMEMBER: The string to specify the new time frequency: http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.htmloffset-aliases These strings can also be combined with numbers, eg `'10D'`... | data.resample('M').mean().plot() # 10D | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: Plot the monthly standard deviation of the columns | data.resample('M').std().plot() # 'A' | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: Plot the monthly mean and median values for the years 2011-2012 for 'L06_347'__Note__ Did you know agg to derive multiple statistics at the same time? | subset = data['2011':'2012']['L06_347']
subset.resample('M').agg(['mean', 'median']).plot() | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: plot the monthly mininum and maximum daily average value of the 'LS06_348' column | daily = data['LS06_348'].resample('D').mean() # daily averages calculated
daily.resample('M').agg(['min', 'max']).plot() # monthly minimum and maximum values of these daily averages | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
EXERCISE: Make a bar plot of the mean of the stations in year of 2013 | data['2013':'2013'].mean().plot(kind='barh') | _____no_output_____ | BSD-3-Clause | _solved/pandas_04_time_series_data.ipynb | jorisvandenbossche/FLAMES-python-data-wrangling |
Importing Spark NLP, Spark NLP for Healthcare and Spark OCR | import sparknlp
import sparknlp_jsl
import sparkocr
sparknlp_jsl.version()
sparknlp.version()
sparkocr.version() | _____no_output_____ | Apache-2.0 | platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb | fcivardi/spark-nlp-workshop |
Retrieving your license | import os, json
with open('/license.json', 'r') as f:
license_keys = json.load(f)
# Defining license key-value pairs as local variables
locals().update(license_keys)
# Adding license key-value pairs to environment variables
os.environ.update(license_keys) | _____no_output_____ | Apache-2.0 | platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb | fcivardi/spark-nlp-workshop |
Add a DNS entry for your Sagemaker instance | !echo "127.0.0.1 $HOSTNAME" >> /etc/hosts | _____no_output_____ | Apache-2.0 | platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb | fcivardi/spark-nlp-workshop |
Start your session | spark = sparkocr.start(secret=os.environ['SPARK_OCR_SECRET'], nlp_secret=['SECRET']) | Spark version: 3.0.2
Spark NLP version: 3.3.1
Spark OCR version: 3.8.0
| Apache-2.0 | platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb | fcivardi/spark-nlp-workshop |
Check everything is good and have fun! | spark | _____no_output_____ | Apache-2.0 | platforms/sagemaker-studio/SparkNLP_sagemaker.ipynb | fcivardi/spark-nlp-workshop |
Demonstrating how to get DonkeyCar Tub files into a PyTorch/fastai DataBlock | from fastai.data.all import *
from fastai.vision.all import *
from fastai.data.transforms import ColReader, Normalize, RandomSplitter
import torch
from torch import nn
from torch.nn import functional as F
from donkeycar.parts.tub_v2 import Tub
import pandas as pd
from pathlib import Path
from malpi.dk.train import preprocessFileList, get_data, get_learner, get_autoencoder, train_autoencoder
def learn_resnet():
    """Fine-tune a pretrained resnet18 on the module-level `dls`, then plot
    the loss curve and a grid of example predictions."""
    learner = cnn_learner(
        dls,
        resnet18,
        loss_func=MSELossFlat(),
        metrics=[rmse],
        cbs=ActivationStats(with_hist=True),
    )
    learner.fine_tune(5)
    learner.recorder.plot_loss()
    learner.show_results(figsize=(20, 10))
learn2.show_results(figsize=(20,10)) | _____no_output_____ | MIT | notebooks/DKDataBlock.ipynb | Bleyddyn/malpi |
The below code is modified from: https://github.com/cmasenas/fastai_navigation_training/blob/master/fastai_train.ipynb.TODO: Figure out how to have multiple output heads | def test_one_transform(name, inputs, df_all, batch_tfms, item_tfms, epochs, lr):
dls = get_data(inputs, df_all=df_all, batch_tfms=batch_tfms, item_tfms=item_tfms)
callbacks = [CSVLogger(f"Transform_{name}.csv", append=True)]
learn = get_learner(dls)
#learn.no_logging() #Try this to block logging when doing many training test runs
learn.fit_one_cycle(epochs, lr, cbs=callbacks)
#learn.recorder.plot_loss()
#learn.show_results(figsize=(20,10))
# Train multiple times using a list of Transforms, one at a time.
# Compare mean/stdev of best validation loss (or rmse?) for each Transform
df_all = get_dataframe("track1_warehouse.txt")
transforms = [None]
transforms.extend(aug_transforms(do_flip=False, size=128))
for tfm in transforms:
    name = "None" if tfm is None else str(tfm.__class__.__name__)
    print( f"Transform: {name}" )
    for i in range(5):
        print( f" Run {i+1}" )
        # Bug fix: the original call passed only 6 of the 7 positional
        # arguments of test_one_transform (raising a TypeError) and never
        # passed the transform under test. Pass tfm as the batch transform
        # and be explicit about the remaining arguments.
        test_one_transform(name, "track1_warehouse.txt", df_all,
                           batch_tfms=tfm, item_tfms=None, epochs=5, lr=3e-3)
def visualize_learner( learn ):
    """Scatter-plot predicted vs. target steering and throttle for one batch,
    then export the trained model.

    learn: a fastai Learner whose model outputs (steering, throttle) pairs.

    NOTE(review): the batch comes from the module-level `dls`, not
    `learn.dls`, and `learn.export()` at the end writes export.pkl as a side
    effect — confirm both are intended.
    """
    #dls=nav.dataloaders(df, bs=512)
    # Column 0 of preds/tgt is steering, column 1 is throttle.
    preds, tgt = learn.get_preds(dl=[dls.one_batch()])
    plt.title("Target vs Predicted Steering", fontsize=18, y=1.0)
    plt.xlabel("Target", fontsize=14, labelpad=15)
    plt.ylabel("Predicted", fontsize=14, labelpad=15)
    plt.plot(tgt.T[0], preds.T[0],'bo')
    # Red diagonal = perfect prediction; steering spans [-1, 1].
    plt.plot([-1,1],[-1,1],'r', linewidth = 4)
    plt.show()
    plt.title("Target vs Predicted Throttle", fontsize=18, y=1.02)
    plt.xlabel("Target", fontsize=14, labelpad=15)
    plt.ylabel("Predicted", fontsize=14, labelpad=15)
    plt.plot(tgt.T[1], preds.T[1],'bo')
    # Throttle spans [0, 1].
    plt.plot([0,1],[0,1],'r', linewidth = 4)
    plt.show()
    learn.export()
df_all = get_dataframe("track1_warehouse.txt")
dls = get_data("track1_warehouse.txt", df_all=df_all, batch_tfms=None)
learn = get_learner(dls)
learn.fit_one_cycle(15, 3e-3)
visualize_learner(learn)
learn.export('models/track1_v2.pkl')
def clear_pyplot_memory():
    """Reset pyplot state between figures so saved plots do not accumulate."""
    plt.clf()    # clear the current figure
    plt.cla()    # clear the current axes
    plt.close()  # close the figure window and release its memory
df_all = get_dataframe("track1_warehouse.txt")
transforms=[None,
RandomResizedCrop(128,p=1.0,min_scale=0.5,ratio=(0.9,1.1)),
RandomErasing(sh=0.2, max_count=6,p=1.0),
Brightness(max_lighting=0.4, p=1.0),
Contrast(max_lighting=0.4, p=1.0),
Saturation(max_lighting=0.4, p=1.0)]
#dls = get_data(None, df_all, item_tfms=item_tfms, batch_tfms=batch_tfms)
for tfm in transforms:
name = "None" if tfm is None else str(tfm.__class__.__name__)
if name == "RandomResizedCrop":
item_tfms = tfm
batch_tfms = None
else:
item_tfms = None
batch_tfms = tfm
dls = get_data("track1_warehouse.txt",
df_all=df_all,
item_tfms=item_tfms, batch_tfms=batch_tfms)
dls.show_batch(unique=True, show=True)
plt.savefig( f'Transform_{name}.png' )
#clear_pyplot_memory()
learn, dls = train_autoencoder( "tracks_all.txt", 5, 3e-3, name="ae_test1", verbose=False )
learn.recorder.plot_loss()
learn.show_results(figsize=(20,10))
#plt.savefig(name + '.png')
idx = 0
idx += 1
im1 = dls.one_batch()[0]
im1_out = learn.model.forward(im1)
show_image(im1[idx])
show_image(im1_out[idx])
from fastai.metrics import rmse
from typing import List, Callable, Union, Any, TypeVar, Tuple
Tensor = TypeVar('torch.tensor')
from abc import abstractmethod
class BaseVAE(nn.Module):
    """Abstract interface shared by all VAE variants.

    encode/decode/sample/generate raise NotImplementedError by default;
    forward and loss_function are marked abstract (note nn.Module does not
    use ABCMeta, so the decoration is documentation rather than enforcement).
    """

    def __init__(self) -> None:
        super(BaseVAE, self).__init__()

    def encode(self, input: Tensor) -> List[Tensor]:
        raise NotImplementedError

    def decode(self, input: Tensor) -> Any:
        raise NotImplementedError

    def sample(self, batch_size: int, current_device: int, **kwargs) -> Tensor:
        raise NotImplementedError

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        raise NotImplementedError

    @abstractmethod
    def forward(self, *inputs: Tensor) -> Tensor:
        pass

    @abstractmethod
    def loss_function(self, *inputs: Any, **kwargs) -> Tensor:
        pass


class VanillaVAE(BaseVAE):
    """Standard convolutional VAE (Kingma & Welling).

    The encoder is a stack of stride-2 Conv/BN/LeakyReLU blocks and the
    decoder mirrors it with transposed convolutions. The geometry assumes
    3 x 64 x 64 inputs: five stride-2 stages reduce 64x64 to a 2x2 spatial
    map, hence the `* 4` on the fully connected layers.
    """

    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 hidden_dims: List = None,
                 **kwargs) -> None:
        super(VanillaVAE, self).__init__()

        self.latent_dim = latent_dim
        self.kld_weight = 0.00025 # TODO calculate based on: #al_img.shape[0]/ self.num_train_imgs

        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Bug fix: work on a copy so the in-place reverse() below does not
        # mutate a caller-supplied list.
        hidden_dims = list(hidden_dims)
        # Channel count at the bottleneck, needed to un-flatten in decode()
        # (previously hard-coded to 512 there).
        self.bottleneck_channels = hidden_dims[-1]

        # Build Encoder: each stage halves the spatial resolution.
        modules = []
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)

        # 2x2 spatial map at the bottleneck (assumes 64x64 inputs), hence *4.
        self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)

        # Build Decoder (mirror of the encoder).
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)

        self.final_layer = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                                      kernel_size= 3, padding= 1),
                            nn.Tanh())

    def encode(self, input: Tensor) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent codes.
        :param input: (Tensor) Input tensor to encoder [N x C x H x W]
        :return: (Tensor) List of latent codes [mu, log_var]
        """
        result = self.encoder(input)
        result = torch.flatten(result, start_dim=1)

        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)

        return [mu, log_var]

    def decode(self, z: Tensor) -> Tensor:
        """
        Maps the given latent codes onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        result = self.decoder_input(z)
        # Un-flatten to the bottleneck shape (channels x 2 x 2).
        result = result.view(-1, self.bottleneck_channels, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result

    def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor:
        """
        Reparameterization trick: sample z ~ N(mu, sigma^2) as mu + sigma*eps
        with eps ~ N(0, 1), keeping the path differentiable w.r.t. mu/logvar.
        :param mu: (Tensor) Mean of the latent Gaussian [B x D]
        :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D]
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu

    def forward(self, input: Tensor, **kwargs) -> List[Tensor]:
        """Return [reconstruction, input, mu, log_var] (consumed by loss_function)."""
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]

    def loss_function(self,
                      *args,
                      **kwargs) -> Tensor:
        """
        Computes the VAE loss: reconstruction MSE + weighted KL divergence.

        KL(N(mu, sigma) || N(0, 1)) = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)

        :param args: args[0] is the list returned by forward(); args[1] is the target input.
        :return: scalar loss tensor
        """
        recons = args[0][0]
        input = args[1]
        mu = args[0][2]
        log_var = args[0][3]

        kld_weight = self.kld_weight # kwargs['M_N'] # Account for the minibatch samples from the dataset
        recons_loss =F.mse_loss(recons, input)

        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)

        loss = recons_loss + kld_weight * kld_loss
        return loss
        #return {'loss': loss, 'Reconstruction_Loss':recons_loss.detach(), 'KLD':-kld_loss.detach()}

    def sample(self,
               num_samples:int,
               current_device: int, **kwargs) -> Tensor:
        """
        Samples from the latent space and returns the corresponding image space map.
        :param num_samples: (Int) Number of samples
        :param current_device: (Int) Device to run the model
        :return: (Tensor)
        """
        z = torch.randn(num_samples,
                        self.latent_dim)
        z = z.to(current_device)
        samples = self.decode(z)
        return samples

    def generate(self, x: Tensor, **kwargs) -> Tensor:
        """
        Given an input image x, returns the reconstructed image
        :param x: (Tensor) [B x C x H x W]
        :return: (Tensor) [B x C x H x W]
        """
        return self.forward(x)[0]
input_file="track1_warehouse.txt"
item_tfms = [Resize(64,method="squish")]
dls = get_data(input_file, item_tfms=item_tfms, verbose=False, autoencoder=True)
vae = VanillaVAE(3, 64)
learn = Learner(dls, vae, loss_func=vae.loss_function)
learn.fit_one_cycle(5, 3e-3)
vae | _____no_output_____ | MIT | notebooks/DKDataBlock.ipynb | Bleyddyn/malpi |
Redis列表实现一次pop 弹出多条数据 | # 连接 Redis
import redis
client = redis.Redis(host='122.51.39.219', port=6379, password='leftright123')
# 注意:
# 这个 Redis 环境仅作为练习之用,每小时会清空一次,请勿存放重要数据。
# 准备数据
client.lpush('test_batch_pop', *list(range(10000)))
# 一条一条读取,非常耗时
import time
start = time.time()
while True:
data = client.lpop('test_batch_pop')
if not data:
break
end = time.time()
delta = end - start
print(f'循环读取10000条数据,使用 lpop 耗时:{delta}') | 循环读取10000条数据,使用 lpop 耗时:112.04084920883179
| MIT | 视频课件/Redis 的高级用法.ipynb | kingname/SourceCodeofMongoRedis |
为什么使用`lpop`读取10000条数据这么慢?因为`lpop`每次只弹出1条数据,每次弹出数据都要连接 Redis 。大量时间浪费在了网络传输上面。 如何实现批量弹出多条数据,并在同一次网络请求中返回?先使用 `lrange` 获取数据,再使用`ltrim`删除被获取的数据。 | # 复习一下 lrange 的用法
datas = client.lrange('test_batch_pop', 0, 9) # 读取前10条数据
datas
# 学习一下 ltrim 的用法
client.ltrim('test_batch_pop', 10, -1) # 删除前10条数据
# 验证一下数据是否被成功删除
length = client.llen('test_batch_pop')
print(f'现在列表里面还剩{length}条数据')
datas = client.lrange('test_batch_pop', 0, 9) # 读取前10条数据
datas
# 一种看起来正确的做法
def batch_pop_fake(key, n):
    """Batch-pop that LOOKS correct but is racy (kept as a counter-example).

    lrange and ltrim are two separate network round-trips: if process B runs
    its lrange between process A's lrange and ltrim, both processes receive
    the same n items, and the two ltrim calls then delete 2n items in total.
    See batch_pop_real for the pipeline-based correct version.
    """
    datas = client.lrange(key, 0, n - 1)
    client.ltrim(key, n, -1)
    return datas
batch_pop_fake('test_batch_pop', 10)
client.lrange('test_batch_pop', 0, 9) | _____no_output_____ | MIT | 视频课件/Redis 的高级用法.ipynb | kingname/SourceCodeofMongoRedis |
这种写法用什么问题在多个进程同时使用 batch_pop_fake 函数的时候,由于执行 lrange 与 ltrim 是在两条语句中,因此实际上会分成2个网络请求。那么当 A 进程刚刚执行完lrange,还没有来得及执行 ltrim 时,B 进程刚好过来执行 lrange,那么 AB 两个进程就会获得相同的数据。等 B 进程获取完成数据以后,A 进程的 ltrim 刚刚抵达,此时Redis 会删除前 n 条数据,然后 B 进程的 ltrim 也到了,再删除前 n 条数据。那么最终导致的结果就是,AB 两个进程同时拿到前 n 条数据,但是却有2n 条数据被删除。 使用 pipeline 打包多个命令到一个请求中pipeline 的使用方法如下:```pythonimport redisclient = redis.Redis()pipe = client.pipeline()pipe.lrange('key', 0, n - 1)pipe.ltrim('key', n, -1)result = pipe.execute()```pipe.execute()返回一个列表,这个列表每一项按顺序对应每一个命令的执行结果。在上面的例子中,result 是一个有两项的列表,第一项对应 lrange 的返回结果,第二项为 True,表示 ltrim 执行成功。 | # 真正可用的批量弹出数据函数
def batch_pop_real(key, n):
    """Atomically pop up to n items from the head of the Redis list at `key`.

    lrange + ltrim are packed into one pipeline, so both commands travel in a
    single request (executed as a MULTI/EXEC transaction by redis-py's
    default pipeline), and concurrent callers cannot read the same items.
    """
    pipeline = client.pipeline()
    pipeline.lrange(key, 0, n - 1)
    pipeline.ltrim(key, n, -1)
    popped, _trim_ok = pipeline.execute()
    return popped
# 清空列表并重新添加10000条数据
client.delete('test_batch_pop')
client.lpush('test_batch_pop', *list(range(10000)))
start = time.time()
while True:
datas = batch_pop_real('test_batch_pop', 1000)
if not datas:
break
for data in datas:
pass
end = time.time()
print(f'批量弹出10000条数据,耗时:{end - start}')
client.llen('test_batch_pop') | _____no_output_____ | MIT | 视频课件/Redis 的高级用法.ipynb | kingname/SourceCodeofMongoRedis |
Dataset Los datos son series temporales (casos semanales de Dengue) de distintos distritos de Paraguay | path = "./data/Notificaciones/"
filename_read = os.path.join(path,"normalizado.csv")
notificaciones = pd.read_csv(filename_read,delimiter=",",engine='python')
notificaciones.shape
listaMunicp = notificaciones['distrito_nombre'].tolist()
listaMunicp = list(dict.fromkeys(listaMunicp))
print('Son ', len(listaMunicp), ' distritos')
listaMunicp.sort()
print(listaMunicp) | Son 217 distritos
['1RO DE MARZO', '25 DE DICIEMBRE', '3 DE FEBRERO', 'ABAI', 'ACAHAY', 'ALBERDI', 'ALTO VERA', 'ALTOS', 'ANTEQUERA', 'AREGUA', 'ARROYOS Y ESTEROS', 'ASUNCION', 'ATYRA', 'AYOLAS', 'AZOTEY', 'BAHIA NEGRA', 'BELEN', 'BELLA VISTA', 'BENJAMIN ACEVAL', 'BORJA', 'BUENA VISTA', 'CAACUPE', 'CAAGUAZU', 'CAAZAPA', 'CABALLERO ALVAREZ', 'CAMBYRETA', 'CAPIATA', 'CAPIIBARY', 'CAPITAN BADO', 'CAPITAN MEZA', 'CAPITAN MIRANDA', 'CARAGUATAY', 'CARAPEGUA', 'CARAYAO', 'CARLOS ANTONIO LOPEZ', 'CARMELO PERALTA', 'CARMEN DEL PARANA', 'CECILIO BAEZ', 'CERRITO', 'CHACO', 'CHORE', 'COLONIA FRAM', 'COLONIA INDEPENDENCIA', 'CONCEPCION', 'CORONEL BOGADO', 'CORONEL MARTINEZ', 'CORONEL OVIEDO', 'CORPUS CHRISTI', 'CURUGUATY', 'DESMOCHADOS', 'DR BOTRELL', 'DR. JUAN MANUEL FRUTOS', 'EDELIRA', 'EMBOSCADA', 'ENCARNACION', 'ESCOBAR', 'EUGENIO A GARAY', 'EUSEBIO AYALA', 'FASSARDI', 'FELIX PEREZ CARDOZO', 'FERNANDO DE LA MORA', 'FILADELFIA', 'FUERTE OLIMPO', 'GENERAL AQUINO', 'GENERAL ARTIGAS', 'GENERAL BERNARDINO CABALLERO', 'GENERAL BRUGUEZ', 'GENERAL DELGADO', 'GENERAL DIAZ', 'GENERAL MORINIGO', 'GENERAL RESQUIN', 'GUARAMBARE', 'GUAYAIBI', 'GUAZUCUA', 'HERNANDARIAS', 'HOHENAU', 'HORQUETA', 'HUMAITA', 'ISLA PUCU', 'ISLA UMBU', 'ITA', 'ITACURUBI DE LA CORDILLERA', 'ITACURUBI DEL ROSARIO', 'ITAKYRY', 'ITANARA', 'ITAPE', 'ITAPUA POTY', 'ITAUGUA', 'ITURBE', 'J A SALDIVAR', 'JESUS', 'JOSE DOMINGO OCAMPOS', 'JUAN DE MENA', 'JUAN E. OLEARY', 'JUAN EULOGIO ESTIGARRIBIA', 'JUAN LEON MALLORQUIN', 'KATUETE', 'LA PALOMA', 'LA PASTORA', 'LA VICTORIA', 'LAMBARE', 'LAURELES', 'LEANDRO OVIEDO', 'LIMA', 'LIMOY PUEBLO', 'LIMPIO', 'LOMA GRANDE', 'LOMA PLATA', 'LORETO', 'LUQUE', 'MACIEL', 'MARIANO ROQUE ALONSO', 'MAURICIO JOSE TROCHE', 'MBARACAYU', 'MBOCAYATY', 'MBOCAYATY DEL YHAGUY', 'MCAL. ESTIGARRIBIA', 'MCAL. 
FRANCISCO SOLANO LOPEZ', 'MINGA GUAZU', 'MINGA PORA', 'MOISES BERTONI', 'NANAWA', 'NARANJAL', 'NATALICIO TALAVERA', 'NATALIO', 'NUEVA ALBORADA', 'NUEVA COLOMBIA', 'NUEVA ESPERANZA', 'NUEVA GERMANIA', 'NUEVA ITALIA', 'NUEVA LONDRES', 'OBLIGADO', 'PARAGUARI', 'PASO YOBAI', 'PEDRO JUAN CABALLERO', 'PILAR', 'PIRAPO', 'PIRAYU', 'PIRIBEBUY', 'POZO COLORADO', 'PUERTO FALCON', 'PUERTO PINASCO', 'QUIINDY', 'R I 3 CORRALES', 'RAUL ARSENIO OVIEDO', 'REPATRIACION', 'ROQUE GONZALEZ DE SANTA CRUZ', 'SALTO DEL GUAIRA', 'SAN ALBERTO', 'SAN ANTONIO', 'SAN BERNARDINO', 'SAN CARLOS', 'SAN COSME Y DAMIAN', 'SAN ESTANISLAO', 'SAN IGNACIO', 'SAN JOAQUIN', 'SAN JOSE DE LOS ARROYOS', 'SAN JOSE OBRERO', 'SAN JUAN BAUTISTA', 'SAN JUAN DEL PARANA', 'SAN JUAN NEPOMUCENO', 'SAN LAZARO', 'SAN LORENZO', 'SAN MIGUEL', 'SAN PATRICIO', 'SAN PEDRO', 'SAN PEDRO DEL PARANA', 'SAN PEDRO DEL YCUAMANDIYU', 'SAN RAFAEL DEL PARANA', 'SAN ROQUE GONZALEZ DE SANTACRUZ', 'SAN SALVADOR', 'SANTA ELENA', 'SANTA MARIA', 'SANTA RITA', 'SANTA ROSA', 'SANTA ROSA DEL AGUARAY', 'SANTA ROSA DEL MBUTUY', 'SANTA ROSA DEL MONDAY', 'SANTIAGO', 'SAPUCAI', 'SIMON BOLIVAR', 'TACUARAS', 'TACUATI', 'TAVAI', 'TAVAPY', 'TEBICUARY', 'TEBICUARYMI', 'TEMBIAPORA', 'TOBATI', 'TOMAS ROMERO PEREIRA', 'TRINIDAD', 'UNION', 'VALENZUELA', 'VAQUERIA', 'VILLA DEL ROSARIO', 'VILLA ELISA', 'VILLA HAYES', 'VILLA OLIVA', 'VILLALBIN', 'VILLARRICA', 'VILLETA', 'YAGUARON', 'YATAITY', 'YATAITY DEL NORTE', 'YATYTAY', 'YBY YAU', 'YBYRAROVANA', 'YBYTYMI', 'YEGROS', 'YGATIMI', 'YGUAZU', 'YHU', 'YPACARAI', 'YPANE', 'YPEJHU', 'YUTY', 'ZANJA PYTA']
| Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
A continuación tomamos las series temporales que leímos y vemos cómo quedan | timeSeries = pd.DataFrame()
for muni in listaMunicp:
municipio=notificaciones['distrito_nombre']==muni
notif_x_municp=notificaciones[municipio]
notif_x_municp = notif_x_municp.reset_index(drop=True)
notif_x_municp = notif_x_municp['incidencia']
notif_x_municp = notif_x_municp.replace('nan', np.nan).fillna(0.000001)
notif_x_municp = notif_x_municp.replace([np.inf, -np.inf], np.nan).fillna(0.000001)
timeSeries = timeSeries.append(notif_x_municp)
ax = sns.tsplot(ax=None, data=notif_x_municp.values, err_style="unit_traces")
plt.show()
#timeseries shape
n=217
timeSeries.shape
timeSeries.describe() | _____no_output_____ | Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
Análisis de grupos (Clustering) El Clustering o la clusterización es un proceso importante dentro del Machine learning. Este proceso desarrolla una acción fundamental que le permite a los algoritmos de aprendizaje automatizado entrenar y conocer de forma adecuada los datos con los que desarrollan sus actividades. Tiene como finalidad principal lograr el agrupamiento de conjuntos de objetos no etiquetados, para lograr construir subconjuntos de datos conocidos como Clusters. Cada cluster dentro de un grafo está formado por una colección de objetos o datos que a términos de análisis resultan similares entre si, pero que poseen elementos diferenciales con respecto a otros objetos pertenecientes al conjunto de datos y que pueden conformar un cluster independiente.  Aunque los datos no necesariamente son tan fáciles de agrupar  Métricas de similitud Para medir lo similares ( o disimilares) que son los individuos existe una enorme cantidad de índices de similaridad y de disimilaridad o divergencia. Todos ellos tienen propiedades y utilidades distintas y habrá que ser consciente de ellas para su correcta aplicación al caso que nos ocupe.La mayor parte de estos índices serán o bien, indicadores basados en la distancia (considerando a los individuos como vectores en el espacio de las variables) (en este sentido un elevado valor de la distancia entre dos individuos nos indicará un alto grado de disimilaridad entre ellos); o bien, indicadores basados en coeficientes de correlación ; o bien basados en tablas de datos de posesión o no de una serie de atributos. A continuación mostramos las funciones de: * Distancia Euclidiana* Error cuadrático medio* Fast Dynamic Time Warping* Correlación de Pearson y* Correlación de Spearman.Existen muchas otras métricas y depende de la naturaleza de cada problema decidir cuál usar. Por ejemplo, *Fast Dymanic Time Warping* es una medida de similitud diseña especialmente para series temporales. | #Euclidean
def euclidean(x, y):
    """Euclidean (L2) distance between two vectors; NaN results map to 1."""
    distance = np.linalg.norm(x - y)
    return 1 if math.isnan(distance) else distance
#RMSE
def rmse(x, y):
    """Root-mean-square error between two equal-length series.

    Computed directly with numpy — the original sklearn
    mean_squared_error + math.sqrt round trip is unnecessary for a
    one-liner. A NaN result maps to 1 so the value is still usable as a
    distance.
    """
    diff = np.asarray(x, dtype=float) - np.asarray(y, dtype=float)
    r = float(np.sqrt(np.mean(diff ** 2)))
    if math.isnan(r):
        r = 1
    return r
#Fast Dynamic time warping
def fast_DTW(x, y):
    """Fast Dynamic Time Warping distance between two series; NaN maps to 1.

    The warping path returned by fastdtw is discarded — only the distance is
    used by the pairwise-matrix loops.
    """
    distance, _path = fastdtw(x, y, dist=euclidean)
    return 1 if math.isnan(distance) else distance
#Correlation
def corr(x, y):
    """Pearson-correlation distance 1 - r, in [0, 2].

    r is computed from mean-centred vectors; each mean is computed once
    (the original recomputed mean(x) and mean(y) twice) and np.mean is used
    explicitly instead of a bare `mean` name. If r is NaN (e.g. a constant
    series gives a zero norm), r is treated as 0 and the distance is 1.
    """
    xc = x - np.mean(x)
    yc = y - np.mean(y)
    r = np.dot(xc, yc) / (np.linalg.norm(xc) * np.linalg.norm(yc))
    if math.isnan(r):
        r = 0
    return 1 - r
#Spearman
def scorr(x, y):
    """Spearman rank-correlation distance 1 - rho; a NaN rho is treated as 0."""
    rho = stats.spearmanr(x, y)[0]
    return 1 - (0 if math.isnan(rho) else rho)
# compute distances using LCSS
# function for LCSS computation
# based on implementation from
# https://rosettacode.org/wiki/Longest_common_subsequence
def lcs(a, b):
    """Length of the longest common subsequence of sequences a and b.

    Rolling two-row dynamic programme: `prev` holds LCS lengths of the
    previous prefix of `a` against every prefix of `b`.
    """
    width = len(b) + 1
    prev = [0] * width
    for x in a:
        cur = [0] * width
        for j, y in enumerate(b):
            cur[j + 1] = prev[j] + 1 if x == y else max(cur[j], prev[j + 1])
        prev = cur
    return prev[-1]
def discretise(x):
    """Map a float to an integer bucket of width 0.1 (truncating toward zero)."""
    scaled = x * 10
    return int(scaled)
def multidim_lcs(a, b):
    """LCSS-based distance between two multivariate series held as DataFrames.

    Both frames are discretised into 0.1-wide integer buckets, an LCS length
    is computed per dimension, and the normalised total becomes a distance in
    [0, 1] (0 = identical sequences in every dimension).
    """
    a = a.applymap(discretise)
    b = b.applymap(discretise)
    rows, dims = a.shape
    # NOTE(review): columns are addressed as a[i+2] — this assumes the frames'
    # column labels start at 2; confirm against how the frames are built upstream.
    lcss = [lcs(a[i+2], b[i+2]) for i in range(dims)]
    return 1 - sum(lcss) / (rows * dims)
#Distancias para kmeans
# Precompute the pairwise distance matrices consumed by the clustering runs
# below; each is an n x n symmetric-by-construction float matrix.
def _pairwise_matrix(metric):
    """Return the n x n matrix of `metric` over every pair of flattened series."""
    flat = [timeSeries.iloc[idx].values.flatten() for idx in range(n)]
    return np.array([[metric(flat[row], flat[col]) for col in range(n)]
                     for row in range(n)])

#Euclidean
euclidean_dist = _pairwise_matrix(euclidean)
#RMSE
rmse_dist = _pairwise_matrix(rmse)
#Corr
corr_dist = _pairwise_matrix(corr)
#scorr
scorr_dist = _pairwise_matrix(scorr)
#DTW
# Pairwise fast-DTW distance matrix; filled element by element by the
# nested loops that follow.
dtw_dist = np.zeros((n,n))
for i in range(0,n):
#print("i",i)
for j in range(0,n):
# print("j",j)
dtw_dist[i,j] = fast_DTW(timeSeries.iloc[i].values.flatten(), timeSeries.iloc[j].values.flatten()) | _____no_output_____ | Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
Determinar el número de clusters a formar La mayoría de las técnicas de clustering necesitan como *input* el número de clusters a formar, para eso lo que se hace es hacer una prueba con diferentes números de cluster y nos quedamos con el que dió menor error en general. Para medir ese error utilizamos **Silhouette score**. El **Silhoutte score** se puede utilizar para estudiar la distancia de separación entre los clusters resultantes, especialmente si no hay conocimiento previo de cuáles son los verdaderos grupos para cada objeto, que es el caso más común en aplicaciones reales. El Silhouette score $s(i)$ se calcula:\begin{equation}s(i)=\dfrac{b(i)-a(i)}{max(b(i),a(i))} \end{equation} Definamos $a (i)$ como la distancia media del punto $(i)$ a todos los demás puntos del grupo que se le asignó ($A$). Podemos interpretar $a (i)$ como qué tan bien se asigna el punto al grupo. Cuanto menor sea el valor, mejor será la asignación.De manera similar, definamos $b (i)$ como la distancia media del punto $(i)$ a otros puntos de su grupo vecino más cercano ($B$). El grupo ($B$) es el grupo al que no se asigna el punto $(i)$ pero su distancia es la más cercana entre todos los demás grupos. $ s (i) $ se encuentra en el rango de [-1,1]. | from yellowbrick.cluster import KElbowVisualizer
# Elbow analysis: sweep the candidate number of clusters (k from 3 to 20)
# with agglomerative clustering over the precomputed RMSE distance matrix.
model = AgglomerativeClustering()
visualizer = KElbowVisualizer(model, k=(3,20),metric='distortion', timings=False)
visualizer.fit(rmse_dist)        # Fit the data to the visualizer
visualizer.show() # Finalize and render the figure | /home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. Anything that cannot be imported from sklearn.metrics is now part of the private API.
warnings.warn(message, FutureWarning)
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
/home/jbogado/virtualenv_3.5/lib/python3.5/site-packages/scipy/cluster/hierarchy.py:830: ClusterWarning: scipy.cluster: The symmetric non-negative hollow observation matrix looks suspiciously like an uncondensed distance matrix
return linkage(y, method='ward', metric='euclidean')
| Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
Así tenemos que son 9 los grupos que formaremos | k=9 | _____no_output_____ | Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
Técnicas de clustering K-means El objetivo de este algoritmo es el de encontrar “K” grupos (clusters) entre los datos crudos. El algoritmo trabaja iterativamente para asignar a cada “punto” (las filas de nuestro conjunto de entrada forman una coordenada) uno de los “K” grupos basado en sus características. Son agrupados en base a la similitud de sus features (las columnas). Como resultado de ejecutar el algoritmo tendremos:* Los “centroids” de cada grupo que serán unas “coordenadas” de cada uno de los K conjuntos que se utilizarán para poder etiquetar nuevas muestras.* Etiquetas para el conjunto de datos de entrenamiento. Cada etiqueta perteneciente a uno de los K grupos formados.Los grupos se van definiendo de manera “orgánica”, es decir que se va ajustando su posición en cada iteración del proceso, hasta que converge el algoritmo. Una vez hallados los centroids deberemos analizarlos para ver cuales son sus características únicas, frente a la de los otros grupos.  En la figura de arriba vemos como los datos se agrupan según el *centroid* que está representado por una estrella. El algortimo inicializa los centroides aleatoriamente y va ajustandolo en cada iteracción, los puntos que están más cerca del *centroid* son los que pertenecen al mismo grupo. Clustering jerárquico  El algortimo de clúster jerárquico agrupa los datos basándose en la distancia entre cada uno y buscando que los datos que están dentro de un clúster sean los más similares entre sí.En una representación gráfica los elementos quedan anidados en jerarquías con forma de árbol. DBScan El agrupamiento espacial basado en densidad de aplicaciones con ruido o Density-based spatial clustering of applications with noise (DBSCAN) es un algoritmo de agrupamiento de datos (data clustering). Es un algoritmo de agrupamiento basado en densidad (density-based clustering) porque encuentra un número de grupos (clusters) comenzando por una estimación de la distribución de densidad de los nodos correspondientes. 
DBSCAN es uno de los algoritmos de agrupamiento más usados y citados en la literatura científica.  Los puntos marcados en rojo son puntos núcleo. Los puntos amarillos son densamente alcanzables desde rojo y densamente conectados con rojo, y pertenecen al mismo clúster. El punto azul es un punto ruidoso que no es núcleo ni densamente alcanzable. | #Experimentos
# Silhouette comparison of HAC / KMeans / DBSCAN over each precomputed
# pairwise distance matrix of the raw incidence series (k clusters).
print('Silhouette coefficient')  # fixed typo: "coefficent"
#HAC + euclidean
Z = hac.linkage(timeSeries, method='complete', metric=euclidean)
clusters = fcluster(Z, k, criterion='maxclust')
print("HAC + euclidean distance: ",silhouette_score(euclidean_dist, clusters))
#HAC + rmse
Z = hac.linkage(timeSeries, method='complete', metric=rmse)
clusters = fcluster(Z, k, criterion='maxclust')
print("HAC + rmse distance: ",silhouette_score( rmse_dist, clusters))
#HAC + corr
Z = hac.linkage(timeSeries, method='complete', metric=corr)
clusters = fcluster(Z, k, criterion='maxclust')
print("HAC + corr distance: ",silhouette_score( corr_dist, clusters))
#HAC + scorr
Z = hac.linkage(timeSeries, method='complete', metric=scorr)
clusters = fcluster(Z, k, criterion='maxclust')
print("HAC + scorr distance: ",silhouette_score( scorr_dist, clusters))
#HAC + LCSS (left disabled in the original experiments)
#Z = hac.linkage(timeSeries, method='complete', metric=multidim_lcs)
#clusters = fcluster(Z, k, criterion='maxclust')
#print("HAC + LCSS distance: ",silhouette_score( timeSeries, clusters, metric=multidim_lcs))
#HAC + DTW
Z = hac.linkage(timeSeries, method='complete', metric=fast_DTW)
clusters = fcluster(Z, k, criterion='maxclust')
print("HAC + DTW distance: ",silhouette_score( dtw_dist, clusters))
#Experimentos KMeans (each distance matrix is used directly as features)
km_euc = KMeans(n_clusters=k).fit_predict(euclidean_dist)
silhouette_avg=silhouette_score( euclidean_dist, km_euc)
print("KM + euclidian distance: ",silhouette_avg)
km_rmse = KMeans(n_clusters=k).fit_predict(rmse_dist)
print("KM + rmse distance: ",silhouette_score( rmse_dist, km_rmse))
km_corr = KMeans(n_clusters=k).fit_predict(corr_dist)
print("KM + corr distance: ",silhouette_score( corr_dist, km_corr))
km_scorr = KMeans(n_clusters=k).fit_predict(scorr_dist)
print("KM + scorr distance: ",silhouette_score( scorr_dist, km_scorr))
km_dtw = KMeans(n_clusters=k).fit_predict(dtw_dist)
# BUGFIX: previously scored the stale HAC labels (`clusters`) instead of km_dtw.
print("KM + dtw distance: ",silhouette_score( dtw_dist, km_dtw))
#Experimentos DBSCAN
DB_euc = DBSCAN(eps=3, min_samples=2).fit_predict(euclidean_dist)
silhouette_avg=silhouette_score( euclidean_dist, DB_euc)
print("DBSCAN + euclidian distance: ",silhouette_avg)
DB_rmse = DBSCAN(eps=12, min_samples=10).fit_predict(rmse_dist)
# NOTE(review): the rmse silhouette print was disabled and 0.0 reported
# instead — presumably the score failed for these labels; confirm.
#print("DBSCAN + rmse distance: ",silhouette_score( rmse_dist, DB_rmse))
print("DBSCAN + rmse distance: ",0.00000000)
DB_corr = DBSCAN(eps=3, min_samples=2).fit_predict(corr_dist)
print("DBSCAN + corr distance: ",silhouette_score( corr_dist, DB_corr))
DB_scorr = DBSCAN(eps=3, min_samples=2).fit_predict(scorr_dist)
print("DBSCAN + scorr distance: ",silhouette_score( scorr_dist, DB_scorr))
DB_dtw = DBSCAN(eps=3, min_samples=2).fit_predict(dtw_dist)
print("KM + dtw distance: ",silhouette_score( dtw_dist, DB_dtw)) | DBSCAN + euclidian distance: 0.8141967832004429
DBSCAN + rmse distance: 0.0
DBSCAN + corr distance: 0.4543067216391177
DBSCAN + scorr distance: 0.005463947798855316
KM + dtw distance: 0.5203731423414103
| Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
Clustering basado en propiedades Otro enfoque en el clustering es extraer ciertas propiedades de nuestros datos y hacer la agrupación basándonos en eso, el procedimiento es igual a como si estuviesemos trabajando con nuestros datos reales. | from tsfresh import extract_features
#features extraction
# tsfresh computes a large battery of scalar summary features per series;
# column_id="indice" identifies each individual series inside timeSeries.
extracted_features = extract_features(timeSeries, column_id="indice")
# Bare expressions below: in a notebook they display the feature-matrix
# shape and the generated feature names.
extracted_features.shape
list(extracted_features.columns.values)
n=217  # number of district series / rows in the feature table

# Build one row of scalar summary features per municipality incidence series.
calc = tsfresh.feature_extraction.feature_calculators
feature_rows = []
for muni in listaMunicp:
    serie = notificaciones[notificaciones['distrito_nombre'] == muni]
    serie = serie.reset_index(drop=True)['incidencia']
    # Clean the series: literal 'nan' strings and +/-inf are both mapped
    # to a tiny positive constant before feature extraction.
    serie = serie.replace('nan', np.nan).fillna(0.000001)
    serie = serie.replace([np.inf, -np.inf], np.nan).fillna(0.000001)
    feature_rows.append((
        calc.mean(serie),
        calc.variance(serie),
        calc.autocorrelation(serie, 1),    # lag-1 autocorrelation
        calc.number_peaks(serie, 20),      # peaks with support 20
        calc.sample_entropy(serie),
        calc.number_crossing_m(serie, 5),  # crossings of the level m=5
    ))
features = pd.DataFrame(feature_rows, columns=['Mean', 'Var', 'ACF1', 'Peak', 'Entropy', 'Cpoints'])
# print the data (bare expressions: notebook display)
features
features.iloc[1]
#Distancias para kmeans
# Pairwise distance matrices over the extracted per-district feature
# vectors; each n x n matrix is fed to KMeans/HAC/DBSCAN below.
#Euclidean
f_euclidean_dist = np.zeros((n,n))
for i in range(0,n):
    # BUGFIX: the inner loop previously started at 1 (range(1,n)), which left
    # column 0 of this matrix as zeros; every other matrix here uses range(0,n).
    for j in range(0,n):
        f_euclidean_dist[i,j] = euclidean(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())
#RMSE
f_rmse_dist = np.zeros((n,n))
for i in range(0,n):
    for j in range(0,n):
        f_rmse_dist[i,j] = rmse(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())
#Corr
# NOTE(review): the Pearson-correlation pass was disabled in the original
# run (loop commented out) — presumably corr() misbehaves on these feature
# vectors; confirm before re-enabling. f_corr_dist stays all zeros.
print('-------------------------------')
f_corr_dist = np.zeros((n,n))
#for i in range(0,n):
#    for j in range(0,n):
#        f_corr_dist[i,j] = corr(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())
#scorr
f_scorr_dist = np.zeros((n,n))
for i in range(0,n):
    for j in range(0,n):
        f_scorr_dist[i,j] = scorr(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())
#DTW
f_dtw_dist = np.zeros((n,n))
for i in range(0,n):
    for j in range(0,n):
        f_dtw_dist[i,j] = fast_DTW(features.iloc[i].values.flatten(), features.iloc[j].values.flatten())
from yellowbrick.cluster import KElbowVisualizer
# Repeat the elbow analysis on the feature-based Spearman distance matrix,
# this time sweeping a wider range of candidate cluster counts (3 to 50).
model = AgglomerativeClustering()
visualizer = KElbowVisualizer(model, k=(3,50),metric='distortion', timings=False)
visualizer.fit(f_scorr_dist)        # Fit the data to the visualizer
visualizer.show()        # Finalize and render the figure
k=9  # number of clusters chosen from the elbow plots
# Silhouette comparison of KMeans / HAC / DBSCAN, now over the
# feature-based distance matrices (k clusters).
#Experimentos KMeans
km_euc = KMeans(n_clusters=k).fit_predict(f_euclidean_dist)
silhouette_avg=silhouette_score( f_euclidean_dist, km_euc)
print("KM + euclidian distance: ",silhouette_avg)
km_rmse = KMeans(n_clusters=k).fit_predict(f_rmse_dist)
print("KM + rmse distance: ",silhouette_score( f_rmse_dist, km_rmse))
# Pearson-correlation runs stay disabled: f_corr_dist was never computed
# (its loop is commented out in the cell above).
#km_corr = KMeans(n_clusters=k).fit_predict(f_corr_dist)
#print("KM + corr distance: ",silhouette_score( f_corr_dist, km_corr))
km_scorr = KMeans(n_clusters=k).fit_predict(f_scorr_dist)
print("KM + scorr distance: ",silhouette_score( f_scorr_dist, km_scorr))
km_dtw = KMeans(n_clusters=k).fit_predict(f_dtw_dist)
# BUGFIX: previously scored the stale `clusters` labels (left over from the
# HAC run on the raw series) instead of km_dtw.
print("KM + dtw distance: ",silhouette_score( f_dtw_dist, km_dtw))
#Experimentos HAC
HAC_euc = AgglomerativeClustering(n_clusters=k).fit_predict(f_euclidean_dist)
silhouette_avg=silhouette_score( f_euclidean_dist, HAC_euc)
print("HAC + euclidian distance: ",silhouette_avg)
HAC_rmse = AgglomerativeClustering(n_clusters=k).fit_predict(f_rmse_dist)
print("HAC + rmse distance: ",silhouette_score( f_rmse_dist, HAC_rmse))
#HAC_corr = AgglomerativeClustering(n_clusters=k).fit_predict(f_corr_dist)
#print("HAC + corr distance: ",silhouette_score( f_corr_dist,HAC_corr))
print("HAC + corr distance: ",0.0)
HAC_scorr = AgglomerativeClustering(n_clusters=k).fit_predict(f_scorr_dist)
print("HAC + scorr distance: ",silhouette_score( f_scorr_dist, HAC_scorr))
HAC_dtw = AgglomerativeClustering(n_clusters=k).fit_predict(f_dtw_dist)
print("HAC + dtw distance: ",silhouette_score( f_dtw_dist, HAC_dtw))
#Experimentos DBSCAN
DB_euc = DBSCAN(eps=3, min_samples=2).fit_predict(f_euclidean_dist)
silhouette_avg=silhouette_score( f_euclidean_dist, DB_euc)
print("DBSCAN + euclidian distance: ",silhouette_avg)
DB_rmse = DBSCAN(eps=12, min_samples=10).fit_predict(f_rmse_dist)
#print("DBSCAN + rmse distance: ",silhouette_score( f_rmse_dist, DB_rmse))
#print("DBSCAN + rmse distance: ",0.00000000)
#DB_corr = DBSCAN(eps=3, min_samples=2).fit_predict(f_corr_dist)
#print("DBSCAN + corr distance: ",silhouette_score( f_corr_dist, DB_corr))
print("DBSCAN + corr distance: ",0.0)
DB_scorr = DBSCAN(eps=3, min_samples=2).fit_predict(f_scorr_dist)
print("DBSCAN + scorr distance: ",silhouette_score( f_scorr_dist, DB_scorr))
DB_dtw = DBSCAN(eps=3, min_samples=2).fit_predict(f_dtw_dist)
print("KM + dtw distance: ",silhouette_score( f_dtw_dist, DB_dtw)) | DBSCAN + euclidian distance: 0.7327015254414699
DBSCAN + corr distance: 0.0
DBSCAN + scorr distance: 0.982667657643341
KM + dtw distance: 0.6447434480812199
| Apache-2.0 | AlgoritmosClustering/ExperimentosClusters.ipynb | diegostaPy/UcomSeminario |
What is Colaboratory?Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required- Free access to GPUs- Easy sharingWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below! **Getting started**The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result: | seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day | _____no_output_____ | MIT | Welcome_To_Colaboratory.ipynb | user9990/Synthetic-data-gen |
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.Variables that you define in one cell can later be used in other cells: | seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week | _____no_output_____ | MIT | Welcome_To_Colaboratory.ipynb | user9990/Synthetic-data-gen |
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.comcreate=true).Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org). Data scienceWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing. | import numpy as np
from matplotlib import pyplot as plt
# Demo plot: 100 noisy samples around 200, shading the region above 195.
ys = 200 + np.random.randn(100)
x = list(range(len(ys)))  # idiom fix: was [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show() | _____no_output_____ | MIT | Welcome_To_Colaboratory.ipynb | user9990/Synthetic-data-gen |
Practice with conditionalsBefore we practice conditionals, let's review:To execute a command when a condition is true, use `if`:```if [condition]: [command]```To execute a command when a condition is true, and execute something else otherwise, use `if/else`:```if [condition]: [command 1]else: [command 2]```To execute a command when one condition is true, a different command if a second condition is true, and execute something else otherwise, use `if/elif/else`:```if [condition 1]: [command 1]elif [condition 2]: [command 2]else: [command 3]```Remember that commands in an `elif` will only run if the first condition is false AND the second condition is true. Let's say we are making a smoothie. In order to make a big enough smoothie, we want at least 4 cups of ingredients. | strawberries = 1
# Cup counts for each smoothie ingredient (strawberries = 1 is set just above).
bananas = 0.5
milk = 1
# create a variable ingredients that equals the sum of all our ingredients
ingredients = strawberries + bananas + milk  # 2.5 cups with the values above
# write an if statement that prints out "We have enough ingredients!" if we have at least 4 cups of ingredients
if ingredients >= 4:
print("We have enough ingredients!") | _____no_output_____ | CC-BY-4.0 | Practices/_Keys/KEY_Practice09_Conditionals.ipynb | ssorbetto/curriculum-notebooks |
The code above will let us know if we have enough ingredients for our smoothie. But, if we don't have enough ingredients, the code won't print anything. Our code would be more informative if it also told us when we didn't have enough ingredients. Next, let's write code that also lets us know when we _don't_ have enough ingredients. | # write code that prints "We have enough ingredients" if we have at least 4 cups of ingredients
# and also prints "We don't have enough ingredients" if we have less than 4 cups of ingredients
# The >= comparison counts exactly 4 cups as "enough".
if ingredients >=4:
    print("We have enough ingredients!")
else:
print("We do not have enough ingredients.") | We do not have enough ingredients.
| CC-BY-4.0 | Practices/_Keys/KEY_Practice09_Conditionals.ipynb | ssorbetto/curriculum-notebooks |
It might also be useful to know if we have exactly 4 cups of ingredients. Add to the code above so that it lets us know when we have more than enough ingredients, exactly enough ingredients, or not enough ingredients. | # write code that prints informative messages when we have more than 4 cups of ingredients,
# exactly 4 cups of ingredients, or less than 4 cups of ingredients
if ingredients > 4:
print("we have more than enough ingredients")
elif ingredients is 4:
print("we have exactly enough ingredients")
else:
print("we do not have enough ingredients") | we have exactly enough ingredients
| CC-BY-4.0 | Practices/_Keys/KEY_Practice09_Conditionals.ipynb | ssorbetto/curriculum-notebooks |
**Challenge**: Suppose our blender can only fit up to 6 cups inside. Add to the above code so that it also warns us when we have too many ingredients. | # write an if/elif/else style statement that does the following:
# prints a message when we have exactly 4 cups of ingredients saying we have exactly the right amount of ingredients
# prints a message when we have less than 4 cups of ingredients say we do not have enough
# prints a message when we have 4-6 cups of ingredients saying we have more than enough
# prints a message otherwise that says we have too many ingredients
if ingredients is 4:
print("we have exactly enough ingredients")
elif ingredients < 4:
print("we do not have enough ingredients")
elif ingredients > 4 and ingredients < 6:
print("we have more than enough ingredients")
else:
print("We have too many ingredients") | _____no_output_____ | CC-BY-4.0 | Practices/_Keys/KEY_Practice09_Conditionals.ipynb | ssorbetto/curriculum-notebooks |
Using "method chains" to create more readable code Game of Thrones example - slicing, group stats, and plottingI didn't find an off the shelf dataset to run our seminal analysis from last week, but I found [an analysis](https://www.kaggle.com/dhanushkishore/impact-of-game-of-thrones-on-us-baby-names) that explored if Game of Thrones prompted parents to start naming their children differently. The following is inspired by that, but uses pandas to acquire and wrangle our data in a "Tidyverse"-style (how R would do it) flow. | #TO USE datadotworld PACKAGE:
#1. create account at data.world, then run the next two lines:
#2. in terminal/powershell: pip install datadotworld[pandas]
#
# IF THIS DOESN'T WORK BC YOU GET AN ERROR ABOUT "CCHARDET", RUN:
# conda install -c conda-forge cchardet
# THEN RERUN: pip install datadotworld[pandas]
#
#3. in terminal/powershell: dw configure
#3a. copy in API token from data.world (get from settings > advanced)
import datadotworld as dw
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
baby_names = dw.load_dataset('nkrishnaswami/us-ssa-baby-names-national')
baby_names = baby_names.dataframes['names_ranks_counts'] | _____no_output_____ | MIT | content/03/02f_chains-Copy1.ipynb | schemesmith/ledatascifi-2021 |
Version 11. save a slice of the dataset with the names we want (using `.loc`)2. sometimes a name is used by boys and girls in the same year, so combine the counts so that we have one observation per name per year3. save the dataset and then call a plot function | # restrict by name and only keep years after 2000
somenames = baby_names.loc[( # formating inside this () is just to make it clearer to a reader
( # condition 1: one of these names, | means "or"
(baby_names['name'] == "Sansa") | (baby_names['name'] == "Daenerys") |
(baby_names['name'] == "Brienne") | (baby_names['name'] == "Cersei") | (baby_names['name'] == "Tyrion")
) # end condition 1
& # & means "and"
( # condition 2: these years
baby_names['year'] >= 2000) # end condition 2
)]
# if a name is used by F and M in a given year, combine the count variable
# Q: why is there a "reset_index"?
# A: groupby automatically turns the groups (here name and year) into the index
# reset_index makes the index simple integers 0, 1, 2 and also
# turns the the grouping variables back into normal columns
# A2: instead of reset_index, you can include `as_index=False` inside groupby!
# (I just learned that myself!)
somenames_agg = somenames.groupby(['name','year'])['count'].sum().reset_index().sort_values(['name','year'])
# plot
sns.lineplot(data=somenames_agg, hue='name',x='year',y='count')
plt.axvline(2011, 0,160,color='red') # add a line for when the show debuted
| _____no_output_____ | MIT | content/03/02f_chains-Copy1.ipynb | schemesmith/ledatascifi-2021 |
Version 2 - `query` > `loc`, for readabilitySame as V1, but step 1 uses `.query` to slice inside of `.loc`1. save a slice of the dataset with the names we want (using `.query`)2. sometimes a name is used by boys and girls in the same year, so combine the counts so that we have one observation per name per year3. save the dataset and then call a plot function | # use query instead to slice, and the rest is the same
somenames = baby_names.query('name in ["Sansa","Daenerys","Brienne","Cersei","Tyrion"] & \
year >= 2000') # this is one string with ' as the string start/end symbol. Inside, I can use
# normal quote marks for strings. Also, I can break it into multiple lines with \
somenames_agg = somenames.groupby(['name','year'])['count'].sum().reset_index().sort_values(['name','year'])
sns.lineplot(data=somenames_agg, hue='name',x='year',y='count')
plt.axvline(2011, 0,160,color='red') # add a line for when the show debuted
| _____no_output_____ | MIT | content/03/02f_chains-Copy1.ipynb | schemesmith/ledatascifi-2021 |
Version 3 - Method chaining!Method chaining: Call the object (`baby_names`) and then keep calling one method on it after another. - Python will call the methods from left to right. - There is no need to store the intermediate dataset (like `somenames` and `somenames_agg` above!) - --> Easier to read and write without "temp" objects all over the place - You can always save the dataset at an intermediate step if you need toSo, the first two steps are the same, just the methods will be chained. And then, a bonus trick to plotwithout saving.1. Slice with `.query` to GoT-related names2. Combine M and F gender counts if a name is used by both in the same year3. Plot without saving: "Pipe" in the plotting function The code below produces a plot identical to V1 and V2, **but it is unreadable. Don't try - I'm about to make this readable!** Just _one more_ iteration... | baby_names.query('name in ["Sansa","Daenerys","Brienne","Cersei","Tyrion"] & year >= 2000').groupby(['name','year'])['count'].sum().reset_index().pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')
plt.axvline(2011, 0,160,color='red') # add a line for when the show debuted | _____no_output_____ | MIT | content/03/02f_chains-Copy1.ipynb | schemesmith/ledatascifi-2021 |
To make this readable, we write a parentheses over multiple lines```( and python knows to execute the code inside as one line)```And as a result, we can write a long series of methods that is comprehensible, and if we want we can even comment on each line: | (baby_names
.query('name in ["Sansa","Daenerys","Brienne","Cersei","Tyrion"] & \
year >= 2000')
.groupby(['name','year'])['count'].sum() # for each name-year, combine M and F counts
.reset_index() # give us the column names back as they were (makes the plot call easy)
.pipe((sns.lineplot, 'data'),hue='name',x='year',y='count')
)
plt.axvline(2011, 0,160,color='red') # add a line for when the show debuted
plt.title("WOW THAT WAS EASY TO WRITE AND SHARE") | _____no_output_____ | MIT | content/03/02f_chains-Copy1.ipynb | schemesmith/ledatascifi-2021 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.