repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_ts.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_ts.py | """
Unit tests for time series forecast using sklearn and hyperopt
In this file, a simulated time series dataset is used to demonstrate the
use of hpsklearn for time series forecasting problems. More specifically,
it shows: how a time series dataset can be converted into an sklearn
compatible format; the use of the time series lag selector; and exogenous
data for training machine learning models.
Briefly, the following formula is used to generate the dataset:
y[t] = a1 * y[t - 1] + a2 * y[t - 2] +
b1 * X[t, 1] + b2 * X[t, 2] + b3 * X[t, 3] +
c + e
where y is the time series, X is an exogenous dataset, a1, a2, b1, b2, b3
and c are parameters and e is an error term with the following specifications:
a1 = .666 a2 = -.333 c = -.5
b1 = 1.5 b2 = -1.5 b3 = .5
X[t, 1] ~ uniform(.5, 1.5)
X[t, 2] ~ normal(2., 1.5)
X[t, 3] ~ normal(3., 2.5)
e ~ normal(0, 1.5)
The purpose of learning is to correctly identify the lag size and the values
of the parameters.
"""
from __future__ import print_function
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from hyperopt import tpe
from hpsklearn import HyperoptEstimator, svr_linear
from hpsklearn.components import ts_lagselector
class TimeSeriesForecast(unittest.TestCase):
def setUp(self):
'''Generate a simulated dataset and define utility functions
'''
ts_size = 1000
y = np.random.normal(0, 1.5, ts_size) # white noises, i.e. errors.
a = np.array([.666, -.333])
c = -.5
b = np.array([1.5, -1.5, .5])
x1 = np.random.uniform(.5, 1.5, ts_size)
x2 = np.random.normal(2., 1.5, ts_size)
x3 = np.random.normal(3., 2.5, ts_size)
X = np.c_[x1, x2, x3]
for i in range(2, ts_size):
y[i] += np.dot(a, y[[i - 1, i - 2]])
y += np.dot(X, b) + c
self.X = X
self.y = y
self.a = a
self.b = b
self.c = c
#### A utility function to convert time series to tabular data ####
def ts_to_lagged_df(ts, col_id, index=None, lag_size=5):
'''Convert a time series to lagged dataframe
Args:
ts (DataFrame): a dataframe, series or array containing the
time series. If it is a dataframe or array,
the 2nd dimension must have a size of 1.
col_id (str): if the col id is N, all columns will be named
as N1, N2, N3, ...
index ([DatetimeIndex]): if not provided, will use the index
from the input dataframe or series,
or integers if the input is array.
lag_size ([int]): the lag size to use
Returns: a dataframe containing the lagged values in columns.
'''
# Sanity check and extract data and index.
if isinstance(ts, pd.Series):
dat = ts.values.ravel()
if index is None:
df_idx = ts.index[lag_size:]
elif isinstance(ts, pd.DataFrame):
assert ts.shape[1] == 1
dat = ts.values.ravel()
if index is None:
df_idx = ts.index[lag_size:]
else:
assert isinstance(ts, np.ndarray)
assert ts.ndim <= 2
if ts.ndim == 2:
assert ts.shape[1] == 1
dat = ts.ravel()
if index is None:
df_idx = range(len(dat) - lag_size)
if index is not None:
assert len(index) == len(dat)
df_idx = index[lag_size:]
# Convert to lagged dataframe.
lagged_df = np.concatenate(
map(lambda x: dat[range(x - 1, x - 1 - lag_size, -1)],
range(lag_size, len(dat)))
).reshape((-1, lag_size))
lagnames = map(lambda x: col_id + str(x), range(1, lag_size + 1))
lagged_df = pd.DataFrame(lagged_df, index=df_idx, columns=lagnames)
return lagged_df
#### End definition ####
self.ts_to_lagged_df = ts_to_lagged_df
def test_svm_lagselector(self):
"""
Using SVM as regressors, I hope hyperopt can help me optimize its
hyperparameters and decide the lag size as well. I also have an
exogenous dataset that I want to use to help making predictions.
"""
# Convert time series to tabular.
max_lag_size = 10
lagged_y_df = self.ts_to_lagged_df(self.y, 'L', lag_size=max_lag_size)
# dim of lagged_y_df: [990, 10].
y_target = self.y[max_lag_size:]
# Setup train/test predictors and targets.
test_size = 300
X_train = lagged_y_df[:-test_size].values
X_test = lagged_y_df[-test_size:].values
y_train = y_target[:-test_size]
y_test = y_target[-test_size:]
EX_train = self.X[max_lag_size:-test_size, :]
EX_test = self.X[-test_size:, :]
# Optimize an SVM for forecasting.
svr_opt = HyperoptEstimator(
preprocessing=[ts_lagselector('lag', 1, 10)],
ex_preprocs=[ [] ], # must explicitly set EX preprocessings.
regressor=svr_linear('svm', max_iter=1e5),
algo=tpe.suggest,
max_evals=30,
verbose=True
)
svr_opt.fit(X_train, y_train, EX_list=[EX_train],
valid_size=.2, cv_shuffle=False)
# It's generally a good idea to turn off shuffling for time series
# forecasting to avoid over-optimistic scores.
ex_n_feas = EX_train.shape[1]
bm_ = svr_opt.best_model()
print('\n==== Time series forecast with SVM and lag selectors ====',
file=sys.stderr)
print('\nThe best model found:', bm_, file=sys.stderr)
print('=' * 40, file=sys.stderr)
print('Actual parameter values\n',
'lag size:', len(self.a), '\n',
'a:', self.a, 'b:', self.b, 'c:', self.c,
file=sys.stderr)
svr_mod = bm_['learner']
a_hat = np.round(svr_mod.coef_[0, :-ex_n_feas], 3)
b_hat = np.round(svr_mod.coef_[0, -ex_n_feas:], 3)
c_hat = np.round(svr_mod.intercept_, 3)
print('-' * 4, file=sys.stderr)
print('Estimated parameter values\n',
'lag size:', len(a_hat), '\n',
'a:', a_hat, 'b:', b_hat, 'c:', c_hat,
file=sys.stderr)
print('=' * 40, file=sys.stderr)
print('Best trial validation R2:',
1 - svr_opt.trials.best_trial['result']['loss'],
file=sys.stderr)
print('Test R2:', svr_opt.score(X_test, y_test, EX_list=[EX_test]),
file=sys.stderr)
if __name__ == '__main__':
unittest.main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/scikit-small-ensemble/setup.py | training/helpers/scikit-small-ensemble/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# Packaging metadata for scikit-small-ensemble.
setup(
    name='scikit-small-ensemble',
    version='0.0.2',
    author='Stewart Park',
    url='https://github.com/stewartpark/scikit-small-ensemble',
    author_email='hello@stewartjpark.com',
    license='MIT',
    # lz4 is used to compress pickled estimators in memory; joblib to
    # dump/memory-map them from disk (see scikit_ensemble.py).
    install_requires=['lz4', 'joblib'],
    packages=find_packages(),
    # Installed unzipped so package data stays accessible on the filesystem.
    zip_safe=False
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/scikit-small-ensemble/scikit_small_ensemble/scikit_ensemble.py | training/helpers/scikit-small-ensemble/scikit_small_ensemble/scikit_ensemble.py | import lz4 as zlib
import tempfile
import joblib
import os
try:
import _pickle as pickle
except ImportError:
try:
import cPickle as pickle
except ImportError:
print('cPickle is not installed. Using the builtin pickle instead.')
import pickle
class CompressedEstimators(object):
    """Sequence wrapper that keeps a leading fraction of estimators compressed.

    The first ``int(len(estimators) * ratio)`` items are pickled and
    compressed at construction time and transparently decompressed on access;
    the remaining items are stored as-is.
    """

    def __init__(self, estimators, ratio):
        # Items with index < cutoff are stored as compressed pickle bytes.
        self.cutoff = int(len(estimators) * ratio)
        self.estimators = [
            zlib.compress(pickle.dumps(x)) if i < self.cutoff else x
            for i, x in enumerate(estimators)
        ]

    def __getitem__(self, index):
        """Return the estimator at ``index``, decompressing if needed."""
        # Bug fix: normalize negative indices first; otherwise e.g. index -1
        # compares as < cutoff and an uncompressed item gets "decompressed".
        if index < 0:
            index += len(self.estimators)
        estimator = self.estimators[index]
        if index < self.cutoff:
            return pickle.loads(zlib.decompress(estimator))
        else:
            return estimator

    def __len__(self):
        return len(self.estimators)
class DiskEstimators(object):
    """Sequence wrapper that offloads a leading fraction of estimators to disk.

    The first ``int(len(estimators) * ratio)`` items are dumped with joblib
    into a temporary directory and loaded back memory-mapped on access; the
    remaining items stay in memory untouched.
    """

    def __init__(self, estimators, ratio):
        self.cutoff = int(len(estimators) * ratio)
        self.saved_dir = tempfile.mkdtemp()
        # compress=0 keeps the dumps uncompressed so they can be mmap-ed.
        for i in range(self.cutoff):
            joblib.dump(estimators[i], os.path.join(self.saved_dir, str(i)), compress=0)
        # Offloaded slots hold the file path; the rest hold the estimator.
        self.estimators = [
            os.path.join(self.saved_dir, str(i)) if i < self.cutoff else x
            for i, x in enumerate(estimators)
        ]

    def __getitem__(self, index):
        """Return the estimator at ``index``, loading from disk if needed."""
        # Bug fix: normalize negative indices first; otherwise e.g. index -1
        # compares as < cutoff and joblib.load is called on a live estimator.
        if index < 0:
            index += len(self.estimators)
        estimator = self.estimators[index]
        if index < self.cutoff:
            return joblib.load(estimator, mmap_mode='r')
        else:
            return estimator

    def __len__(self):
        return len(self.estimators)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/scikit-small-ensemble/scikit_small_ensemble/__init__.py | training/helpers/scikit-small-ensemble/scikit_small_ensemble/__init__.py | from __future__ import absolute_import
from scikit_small_ensemble.scikit_ensemble import CompressedEstimators, DiskEstimators
def compress(model, ratio=0.5):
    """Compress a fraction of an ensemble's fitted estimators in memory.

    Args:
        model: a fitted sklearn ensemble exposing an ``estimators_`` list.
        ratio (float): fraction (from the front) of estimators to compress.

    Raises:
        Exception: if the model's estimators are already compressed.
    """
    if isinstance(model.estimators_, CompressedEstimators):
        raise Exception("The model is already compressed.")
    # Bug fix: pass the estimator list, not the model itself --
    # CompressedEstimators calls len() on and iterates its first argument.
    model.estimators_ = CompressedEstimators(model.estimators_, ratio)
def memory_map(model, ratio=1.0):
    """Offload a fraction of an ensemble's fitted estimators to disk.

    Args:
        model: a fitted sklearn ensemble exposing an ``estimators_`` list.
        ratio (float): fraction (from the front) of estimators to offload.

    Raises:
        Exception: if the model's estimators are already memory mapped.
    """
    if isinstance(model.estimators_, DiskEstimators):
        raise Exception("The model is already memory mapped.")
    # Bug fix: pass the estimator list, not the model itself --
    # DiskEstimators calls len() on and indexes its first argument.
    model.estimators_ = DiskEstimators(model.estimators_, ratio)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/devol/setup.py | training/helpers/devol/setup.py | from setuptools import setup, find_packages
# Packaging metadata for devol (genetic CNN architecture search for Keras).
setup(name='devol',
      version='0.02',
      description='Genetic search for CNN classifier in Keras',
      # Bug fix: the URL was missing the colon after the scheme
      # ('https//...'), producing a dead link on PyPI.
      url='https://github.com/joedav/devol',
      author='Joe Davison',
      author_email='josephddavison@gmail.com',
      license='MIT',
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 3 - Alpha',

          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',

          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: MIT License',

          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 2.7',
      ],
      keywords='genetic algorithm',
      packages=['devol'],
      install_requires=['keras',],
      )
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/devol/devol/__init__.py | training/helpers/devol/devol/__init__.py | from .devol import DEvol
from .genome_handler import GenomeHandler
__all__ = ['DEvol', 'GenomeHandler']
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/devol/devol/genome_handler.py | training/helpers/devol/devol/genome_handler.py | import numpy as np
import random as rand
import math
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
class GenomeHandler:
    """
    Defines the configuration and handles the conversion and mutation of
    individual genomes. Should be created and passed to a `DEvol` instance.
    ---
    Genomes are represented as fixed-width lists of integers corresponding
    to sequential layers and properties. A model with 2 convolutional layers
    and 1 dense layer would look like:
    [<conv layer><conv layer><dense layer><optimizer>]
    The makeup of the convolutional layers and dense layers is defined in the
    GenomeHandler below under self.convolutional_layer_shape and
    self.dense_layer_shape. <optimizer> consists of just one property.
    """

    def __init__(self, max_conv_layers, max_dense_layers, max_filters,
                 max_dense_nodes, input_shape, n_classes,
                 batch_normalization=True, dropout=True, max_pooling=True,
                 optimizers=None, activations=None):
        """
        Creates a GenomeHandler according to the given constraints.
        Args:
            max_conv_layers: The maximum number of convolutional layers
            max_dense_layers: The maximum number of dense (fully connected)
                    layers, including output layer
            max_filters: The maximum number of conv filters (feature maps) in a
                    convolutional layer
            max_dense_nodes: The maximum number of nodes in a dense layer
            input_shape: The shape of the input
            n_classes: The number of classes
            batch_normalization (bool): whether the GP should include batch norm
            dropout (bool): whether the GP should include dropout
            max_pooling (bool): whether the GP should include max pooling layers
            optimizers (list): list of optimizers to be tried by the GP. By
                    default, the network uses Keras's built-in adam, rmsprop,
                    adagrad, and adadelta
            activations (list): list of activation functions to be tried by the
                    GP. By default, relu and sigmoid.
        """
        if max_dense_layers < 1:
            raise ValueError(
                "At least one dense layer is required for softmax layer"
            )
        # Filter counts are powers of two: 8 .. max_filters.
        if max_filters > 0:
            filter_range_max = int(math.log(max_filters, 2)) + 1
        else:
            filter_range_max = 0
        self.optimizer = optimizers or [
            'adam',
            'rmsprop',
            'adagrad',
            'adadelta'
        ]
        self.activation = activations or [
            'relu',
            'sigmoid',
        ]
        self.convolutional_layer_shape = [
            "active",
            "num filters",
            "batch normalization",
            "activation",
            "dropout",
            "max pooling",
        ]
        self.dense_layer_shape = [
            "active",
            "num nodes",
            "batch normalization",
            "activation",
            "dropout",
        ]
        self.layer_params = {
            "active": [0, 1],
            "num filters": [2**i for i in range(3, filter_range_max)],
            "num nodes": [2**i for i in range(4, int(math.log(max_dense_nodes, 2)) + 1)],
            "batch normalization": [0, (1 if batch_normalization else 0)],
            "activation": list(range(len(self.activation))),
            "dropout": [(i if dropout else 0) for i in range(11)],
            # Bug fix: this entry must always be a list -- the bare `0` used
            # when max_pooling is disabled made np.random.choice() crash in
            # generate()/mutate().
            "max pooling": list(range(3)) if max_pooling else [0],
        }
        self.convolution_layers = max_conv_layers
        self.convolution_layer_size = len(self.convolutional_layer_shape)
        self.dense_layers = max_dense_layers - 1  # this doesn't include the softmax layer, so -1
        self.dense_layer_size = len(self.dense_layer_shape)
        self.input_shape = input_shape
        self.n_classes = n_classes

    def convParam(self, i):
        """Return the allowed values for the i-th convolutional gene."""
        key = self.convolutional_layer_shape[i]
        return self.layer_params[key]

    def denseParam(self, i):
        """Return the allowed values for the i-th dense gene."""
        key = self.dense_layer_shape[i]
        return self.layer_params[key]

    def mutate(self, genome, num_mutations):
        """Randomly mutate up to ``num_mutations`` genes of ``genome`` in place.

        Genes of inactive layers are mostly left alone; with small probability
        (1%) an inactive layer is re-activated instead.
        """
        num_mutations = np.random.choice(num_mutations)
        for i in range(num_mutations):
            index = np.random.choice(list(range(1, len(genome))))
            if index < self.convolution_layer_size * self.convolution_layers:
                # Mutation falls in the convolutional section.
                if genome[index - index % self.convolution_layer_size]:
                    range_index = index % self.convolution_layer_size
                    choice_range = self.convParam(range_index)
                    genome[index] = np.random.choice(choice_range)
                elif rand.uniform(0, 1) <= 0.01:  # randomly flip deactivated layers
                    genome[index - index % self.convolution_layer_size] = 1
            elif index != len(genome) - 1:
                # Mutation falls in the dense section.
                offset = self.convolution_layer_size * self.convolution_layers
                new_index = (index - offset)
                present_index = new_index - new_index % self.dense_layer_size
                if genome[present_index + offset]:
                    range_index = new_index % self.dense_layer_size
                    choice_range = self.denseParam(range_index)
                    genome[index] = np.random.choice(choice_range)
                elif rand.uniform(0, 1) <= 0.01:
                    genome[present_index + offset] = 1
            else:
                # Last gene selects the optimizer.
                genome[index] = np.random.choice(list(range(len(self.optimizer))))
        return genome

    def decode(self, genome):
        """Build and compile a Keras model described by ``genome``."""
        if not self.is_compatible_genome(genome):
            raise ValueError("Invalid genome for specified configs")
        model = Sequential()
        offset = 0
        dim = min(self.input_shape[:-1])  # keep track of smallest dimension
        input_layer = True
        for i in range(self.convolution_layers):
            if genome[offset]:
                convolution = None
                if input_layer:
                    convolution = Convolution2D(
                        genome[offset + 1], (3, 3),
                        padding='same',
                        input_shape=self.input_shape
                    )
                    input_layer = False
                else:
                    convolution = Convolution2D(
                        genome[offset + 1], (3, 3),
                        padding='same'
                    )
                model.add(convolution)
                if genome[offset + 2]:
                    model.add(BatchNormalization())
                model.add(Activation(self.activation[genome[offset + 3]]))
                model.add(Dropout(float(genome[offset + 4] / 20.0)))
                max_pooling_type = genome[offset + 5]
                # must be large enough for a convolution
                if max_pooling_type == 1 and dim >= 5:
                    model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
                    dim = int(math.ceil(dim / 2))
            offset += self.convolution_layer_size
        if not input_layer:
            model.add(Flatten())
        for i in range(self.dense_layers):
            if genome[offset]:
                dense = None
                if input_layer:
                    dense = Dense(genome[offset + 1], input_shape=self.input_shape)
                    input_layer = False
                else:
                    dense = Dense(genome[offset + 1])
                model.add(dense)
                if genome[offset + 2]:
                    model.add(BatchNormalization())
                model.add(Activation(self.activation[genome[offset + 3]]))
                model.add(Dropout(float(genome[offset + 4] / 20.0)))
            offset += self.dense_layer_size
        model.add(Dense(self.n_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=self.optimizer[genome[offset]],
                      metrics=["accuracy"])
        return model

    def genome_representation(self):
        """Return human-readable names for each gene position (CSV header)."""
        encoding = []
        for i in range(self.convolution_layers):
            for key in self.convolutional_layer_shape:
                encoding.append("Conv" + str(i) + " " + key)
        for i in range(self.dense_layers):
            for key in self.dense_layer_shape:
                encoding.append("Dense" + str(i) + " " + key)
        encoding.append("Optimizer")
        return encoding

    def generate(self):
        """Sample a random genome; the first conv layer is forced active."""
        genome = []
        for i in range(self.convolution_layers):
            for key in self.convolutional_layer_shape:
                param = self.layer_params[key]
                genome.append(np.random.choice(param))
        for i in range(self.dense_layers):
            for key in self.dense_layer_shape:
                param = self.layer_params[key]
                genome.append(np.random.choice(param))
        genome.append(np.random.choice(list(range(len(self.optimizer)))))
        genome[0] = 1
        return genome

    def is_compatible_genome(self, genome):
        """Return True if ``genome`` has the right length and legal gene values."""
        expected_len = self.convolution_layers * self.convolution_layer_size \
            + self.dense_layers * self.dense_layer_size + 1
        if len(genome) != expected_len:
            return False
        ind = 0
        for i in range(self.convolution_layers):
            for j in range(self.convolution_layer_size):
                if genome[ind + j] not in self.convParam(j):
                    return False
            ind += self.convolution_layer_size
        for i in range(self.dense_layers):
            for j in range(self.dense_layer_size):
                if genome[ind + j] not in self.denseParam(j):
                    return False
            ind += self.dense_layer_size
        if genome[ind] not in range(len(self.optimizer)):
            return False
        return True

    def best_genome(self, csv_path, metric="accuracy", include_metrics=True):
        """Return the best genome recorded in a DEvol CSV log.

        Args:
            csv_path (str): path to the CSV written during the search.
            metric (str): "accuracy" (maximize, last column) or "loss"
                (minimize, second-to-last column).
            include_metrics (bool): append the row's loss/accuracy values.
        """
        # Bug fix: compare strings with ==, not `is` (object identity),
        # which is implementation-dependent and a SyntaxWarning on 3.8+.
        best = max if metric == "accuracy" else min
        col = -1 if metric == "accuracy" else -2
        data = np.genfromtxt(csv_path, delimiter=",")
        row = list(data[:, col]).index(best(data[:, col]))
        genome = list(map(int, data[row, :-2]))
        if include_metrics:
            genome += list(data[row, -2:])
        return genome

    def decode_best(self, csv_path, metric="accuracy"):
        """Decode the best genome from a CSV log into a compiled model."""
        return self.decode(self.best_genome(csv_path, metric, False))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/devol/devol/devol.py | training/helpers/devol/devol/devol.py | """
Run a genetic algorithm to find an appropriate architecture for some image
classification task with Keras+TF.
To use, define a `GenomeHandler` defined in genomehandler.py. Then pass it, with
training data, to a DEvol instance to run the genetic algorithm. See the readme
for more detailed instructions.
"""
from __future__ import print_function
import random as rand
import csv
import operator
import gc
import os
from datetime import datetime
from keras.callbacks import EarlyStopping
from keras.models import load_model
import keras.backend as K
from sklearn.metrics import log_loss
import numpy as np
if K.backend() == 'tensorflow':
import tensorflow as tf
__all__ = ['DEvol']
# Indexed by the boolean (objective == 'max'): position 0 is the "minimize"
# comparator/selector, position 1 the "maximize" one.
METRIC_OPS = [operator.__lt__, operator.__gt__]
METRIC_OBJECTIVES = [min, max]
class DEvol:
    """
    Object which carries out genetic search and returns top performing model
    upon completion.
    """

    def __init__(self, genome_handler, data_path=""):
        """
        Initialize a DEvol object which carries out the training and evaluation
        of a genetic search.
        Args:
            genome_handler (GenomeHandler): the genome handler object defining
                    the restrictions for the architecture search space
            data_path (str): the file which the genome encodings and metric data
                    will be stored in
        """
        self.genome_handler = genome_handler
        self.datafile = data_path or (datetime.now().ctime() + '.csv')
        self._bssf = -1  # best-score-so-far; -1 means "no model saved yet"
        if os.path.isfile(data_path) and os.stat(data_path).st_size > 1:
            raise ValueError(('Non-empty file %s already exists. Please change'
                              'file path to prevent overwritten genome data.'
                              % data_path))
        print("Genome encoding and metric data stored at", self.datafile, "\n")
        # Write the CSV header row once up front.
        with open(self.datafile, 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=',', quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
            metric_cols = ["Val Loss", "Val Accuracy"]
            genome = genome_handler.genome_representation() + metric_cols
            writer.writerow(genome)

    def set_objective(self, metric):
        """
        Set the metric for optimization. Can also be done by passing to
        `run`.
        Args:
            metric (str): either 'acc' to maximize classification accuracy, or
                    else 'loss' to minimize the loss function
        """
        if metric == 'acc':
            metric = 'accuracy'
        if metric not in ['loss', 'accuracy']:
            raise ValueError(('Invalid metric name {} provided - should be'
                              '"accuracy" or "loss"').format(metric))
        self._metric = metric
        self._objective = "max" if self._metric == "accuracy" else "min"
        self._metric_index = 1 if self._metric == 'loss' else -1
        self._metric_op = METRIC_OPS[self._objective == 'max']
        self._metric_objective = METRIC_OBJECTIVES[self._objective == 'max']

    def run(self, dataset, num_generations, pop_size, epochs, fitness=None,
            metric='accuracy'):
        """
        Run genetic search on dataset given number of generations and
        population size
        Args:
            dataset : tuple or list of numpy arrays in form ((train_data,
                    train_labels), (validation_data, validation_labels))
            num_generations (int): number of generations to search
            pop_size (int): initial population size
            epochs (int): epochs for each model eval, passed to keras model.fit
            fitness (None, optional): scoring function to be applied to
                    population scores, will be called on a numpy array which is
                    a min/max scaled version of evaluated model metrics, so It
                    should accept a real number including 0. If left as default
                    just the min/max scaled values will be used.
            metric (str, optional): must be "accuracy" or "loss" , defines what
                    to optimize during search
        Returns:
            keras model: best model found with weights
        """
        self.set_objective(metric)
        # If no validation data is given set it to None
        if len(dataset) == 2:
            (self.x_train, self.y_train), (self.x_test, self.y_test) = dataset
            self.x_val = None
            self.y_val = None
        else:
            (self.x_train, self.y_train), (self.x_test, self.y_test), (self.x_val, self.y_val) = dataset
        # generate and evaluate initial population
        members = self._generate_random_population(pop_size)
        pop = self._evaluate_population(members,
                                        epochs,
                                        fitness,
                                        0,
                                        num_generations)
        # evolve
        for gen in range(1, num_generations):
            members = self._reproduce(pop, gen)
            pop = self._evaluate_population(members,
                                            epochs,
                                            fitness,
                                            gen,
                                            num_generations)
        return load_model('best-model.h5')

    def _reproduce(self, pop, gen):
        """Produce the next generation: 95% crossover offspring + elites."""
        members = []
        # 95% of population from crossover
        for _ in range(int(len(pop) * 0.95)):
            members.append(self._crossover(pop.select(), pop.select()))
        # best models survive automatically
        members += pop.get_best(len(pop) - int(len(pop) * 0.95))
        # randomly mutate
        for imem, mem in enumerate(members):
            members[imem] = self._mutate(mem, gen)
        return members

    def _evaluate(self, genome, epochs):
        """Train/evaluate one genome; returns (model, loss, accuracy)."""
        model = self.genome_handler.decode(genome)
        loss, accuracy = None, None
        fit_params = {
            'x': self.x_train,
            'y': self.y_train,
            'validation_split': 0.1,
            'epochs': epochs,
            'verbose': 1,
            'callbacks': [
                EarlyStopping(monitor='val_loss', patience=1, verbose=1)
            ]
        }
        if self.x_val is not None:
            fit_params['validation_data'] = (self.x_val, self.y_val)
        try:
            model.fit(**fit_params)
            loss, accuracy = model.evaluate(self.x_test, self.y_test, verbose=0)
        except Exception as e:
            # Broken architectures get a pessimistic score instead of aborting.
            loss, accuracy = self._handle_broken_model(model, e)
        self._record_stats(model, genome, loss, accuracy)
        return model, loss, accuracy

    def _record_stats(self, model, genome, loss, accuracy):
        """Append genome + metrics to the CSV log; checkpoint the best model."""
        with open(self.datafile, 'a') as csvfile:
            writer = csv.writer(csvfile, delimiter=',',
                                quotechar='"', quoting=csv.QUOTE_MINIMAL)
            row = list(genome) + [loss, accuracy]
            writer.writerow(row)
        met = loss if self._metric == 'loss' else accuracy
        # Bug fix: compare values with ==/!=, not `is`/`is not`, which test
        # object identity and are unreliable for numbers (and a SyntaxWarning
        # on Python >= 3.8). Parentheses spell out the original precedence.
        if (self._bssf == -1 or
                (self._metric_op(met, self._bssf) and accuracy != 0)):
            try:
                os.remove('best-model.h5')
            except OSError:
                pass
            self._bssf = met
            model.save('best-model.h5')

    def _handle_broken_model(self, model, error):
        """Assign a worst-case score to a model that failed to train."""
        del model
        n = self.genome_handler.n_classes
        # Score as if the model always predicts the uniform distribution.
        loss = log_loss(np.concatenate(([1], np.zeros(n - 1))), np.ones(n) / n)
        accuracy = 1 / n
        gc.collect()
        if K.backend() == 'tensorflow':
            K.clear_session()
            tf.reset_default_graph()
        print('An error occurred and the model could not train:')
        print(error)
        print(('Model assigned poor score. Please ensure that your model'
               'constraints live within your computational resources.'))
        return loss, accuracy

    def _evaluate_population(self, members, epochs, fitness, igen, ngen):
        """Evaluate every member and wrap the results in a _Population."""
        fit = []
        for imem, mem in enumerate(members):
            self._print_evaluation(imem, len(members), igen, ngen)
            res = self._evaluate(mem, epochs)
            v = res[self._metric_index]
            del res
            fit.append(v)
        fit = np.array(fit)
        self._print_result(fit, igen)
        return _Population(members, fit, fitness, obj=self._objective)

    def _print_evaluation(self, imod, nmod, igen, ngen):
        """Log which model/generation is about to be evaluated."""
        fstr = '\nmodel {0}/{1} - generation {2}/{3}:\n'
        print(fstr.format(imod + 1, nmod, igen + 1, ngen))

    def _generate_random_population(self, size):
        """Sample ``size`` random genomes from the genome handler."""
        return [self.genome_handler.generate() for _ in range(size)]

    def _print_result(self, fitness, generation):
        """Log best/mean/std of the generation's metric values."""
        result_str = ('Generation {3}:\t\tbest {4}: {0:0.4f}\t\taverage:'
                      '{1:0.4f}\t\tstd: {2:0.4f}')
        print(result_str.format(self._metric_objective(fitness),
                                np.mean(fitness),
                                np.std(fitness),
                                generation + 1, self._metric))

    def _crossover(self, genome1, genome2):
        """Single-point crossover of two genomes at a random cut index."""
        cross_ind = rand.randint(0, len(genome1))
        child = genome1[:cross_ind] + genome2[cross_ind:]
        return child

    def _mutate(self, genome, generation):
        """Mutate a genome; mutation count grows as the search progresses."""
        # increase mutations as program continues
        num_mutations = max(3, generation // 4)
        return self.genome_handler.mutate(genome, num_mutations)
class _Population(object):
def __len__(self):
return len(self.members)
def __init__(self, members, fitnesses, score, obj='max'):
self.members = members
scores = fitnesses - fitnesses.min()
if scores.max() > 0:
scores /= scores.max()
if obj == 'min':
scores = 1 - scores
if score:
self.scores = score(scores)
else:
self.scores = scores
self.s_fit = sum(self.scores)
def get_best(self, n):
combined = [(self.members[i], self.scores[i])
for i in range(len(self.members))]
sorted(combined, key=(lambda x: x[1]), reverse=True)
return [x[0] for x in combined[:n]]
def select(self):
dart = rand.uniform(0, self.s_fit)
sum_fits = 0
for i in range(len(self.members)):
sum_fits += self.scores[i]
if sum_fits >= dart:
return self.members[i]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/devol/example/demo.py | training/helpers/devol/example/demo.py | from __future__ import print_function
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import backend as K
from devol import DEvol, GenomeHandler
# **Prepare dataset**
# This problem uses mnist, a handwritten digit classification problem used
# for many introductory deep learning examples. Here, we load the data and
# prepare it for use by the GPU. We also do a one-hot encoding of the labels.
K.set_image_data_format("channels_last")

(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add the trailing channel axis and scale pixel values into [0, 1].
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') / 255
# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
dataset = ((x_train, y_train), (x_test, y_test))

# **Prepare the genome configuration**
# The `GenomeHandler` class handles the constraints that are imposed upon
# models in a particular genetic program. See `genome-handler.py`
# for more information.
genome_handler = GenomeHandler(max_conv_layers=6,
                               max_dense_layers=2,  # includes final dense layer
                               max_filters=256,
                               max_dense_nodes=1024,
                               input_shape=x_train.shape[1:],
                               n_classes=10)

# **Create and run the genetic program**
# The next, and final, step is create a `DEvol` and run it. Here we specify
# a few settings pertaining to the genetic program. The program
# will save each genome's encoding, as well as the model's loss and
# accuracy, in a `.csv` file printed at the beginning of program.
# The best model is returned decoded and with `epochs` training done.
devol = DEvol(genome_handler)
model = devol.run(dataset=dataset,
                  num_generations=20,
                  pop_size=20,
                  epochs=5)
print(model.summary())
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/setup.py | training/helpers/autobazaar/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages, setup
# The long description shown on PyPI is assembled from the README + changelog.
with open('README.md') as readme_file:
    readme = readme_file.read()

with open('HISTORY.md') as history_file:
    history = history_file.read()

# Runtime dependencies, pinned to the tested version ranges.
install_requires = [
    'baytune>=0.2.1,<0.3',
    'mlblocks>=0.3.2,<0.4',
    'mlprimitives>=0.2.2,<0.3',
    'scikit-learn>=0.20,<0.21',
    'mit-d3m>=0.2.1,<0.3',
    'numpy>=1.16.5,<1.17',
    'gitpython>=3.1.1,<4'
]

tests_require = [
    'pytest>=3.4.2',
    'pytest-cov>=2.6.0',
]

setup_requires = [
    'pytest-runner>=2.11.1',
]

# Extra tools only needed for local development (docs, lint, release, CI).
development_requires = [
    # general
    'pip>=9.0.1',
    'bumpversion>=0.5.3,<0.6',
    'watchdog>=0.8.3,<0.11',

    # docs
    'm2r>=0.2.0,<0.3',
    'autodocsumm>=0.1.10,<0.2',
    'Sphinx>=1.7.1,<3',
    'sphinx_rtd_theme>=0.2.4,<0.5',

    # style check
    'flake8>=3.7.7,<4',
    'isort>=4.3.4,<5',

    # fix style issues
    'autoflake>=1.1,<2',
    'autopep8>=1.4.3,<2',

    # distribute on PyPI
    'twine>=1.10.0,<4',
    'wheel>=0.30.0',

    # Advanced testing
    'coverage>=4.5.1,<6',
    'tox>=2.9.1,<4',
]

setup(
    author='MIT Data To AI Lab',
    author_email='dailabmit@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    description='The Machine Learning Bazaar',
    # Exposes the `abz` command-line entry point.
    entry_points={
        'console_scripts': [
            'abz=autobazaar.__main__:main'
        ]
    },
    extras_require={
        'dev': development_requires + tests_require,
        'tests': tests_require,
    },
    include_package_data=True,
    install_requires=install_requires,
    license='MIT license',
    long_description=readme + '\n\n' + history,
    long_description_content_type='text/markdown',
    keywords='automl machine learning hyperparameters tuning classification regression autobazaar',
    name='autobazaar',
    packages=find_packages(include=['autobazaar', 'autobazaar.*']),
    python_requires='>=3.5',
    setup_requires=setup_requires,
    test_suite='tests',
    tests_require=tests_require,
    url='https://github.com/HDI-project/AutoBazaar',
    version='0.2.1-dev',
    zip_safe=False,
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/autobazaar/search.py | training/helpers/autobazaar/autobazaar/search.py | # -*- coding: utf-8 -*-
"""AutoBazaar Search Module.
This module contains the PipelineSearcher, which is the class that
contains the main logic of the Auto Machine Learning process.
"""
import functools
import gc
import itertools
import json
import logging
import os
import signal
import warnings
from collections import defaultdict
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
from btb import HyperParameter
from btb.tuning import GP, GPEi, Uniform
from mit_d3m.loaders import get_loader
from mlblocks.mlpipeline import MLPipeline
from sklearn.model_selection import KFold, StratifiedKFold

from autobazaar.pipeline import ABPipeline
from autobazaar.utils import ensure_dir, make_dumpable, remove_dots, restore_dots
LOGGER = logging.getLogger(__name__)

warnings.filterwarnings("ignore", category=DeprecationWarning)

# Directory that ships the built-in pipeline template JSON files.
TEMPLATES_DIR = os.path.join(os.path.dirname(__file__), 'templates')

# Prediction strategy used by the "trivial" baseline pipeline, keyed by task type.
TRIVIAL_PIPELINE_METHOD = {
    'classification': 'mode',
    'regression': 'median',
    'collaborativeFiltering': 'median',
    'graphMatching': 'mode',
}

# Supported BTB hyperparameter tuners, keyed by the name used on the CLI.
TUNERS = {
    'gp': GP,
    'gpei': GPEi,
    'uniform': Uniform
}

# Translation from MLBlocks hyperparameter type names to BTB type names.
PARAM_TYPES = {
    'str': 'string',
}
class StopSearch(KeyboardInterrupt):
    """Raised internally to abort the search loop early.

    Subclasses KeyboardInterrupt so it is handled by the same ``except``
    clause that deals with a manual interruption in ``search``.
    """
    pass
class UnsupportedProblem(Exception):
    """Raised when no template exists for a modality/task/subtask combination."""
    pass
def log_times(name, append=False):
    """Decorator factory that records a method's wall-clock run time on ``self``.

    The elapsed seconds are stored on the bound instance under the attribute
    *name*. With ``append=True`` the attribute is a list and every call
    appends a new timing; otherwise each call overwrites the previous value.

    Args:
        name (str): Attribute name under which to store the timing.
        append (bool): Whether to accumulate the timings in a list.

    Returns:
        callable: A decorator suitable for instance methods.
    """
    def decorator(wrapped):
        # functools.wraps preserves the wrapped method's name, docstring and
        # signature metadata, which the original version silently discarded.
        @functools.wraps(wrapped)
        def wrapper(self, *args, **kwargs):
            start = datetime.utcnow()
            result = wrapped(self, *args, **kwargs)
            elapsed = (datetime.utcnow() - start).total_seconds()
            if append:
                attribute = getattr(self, name, None)
                if attribute is None:
                    attribute = list()
                    setattr(self, name, attribute)

                attribute.append(elapsed)
            else:
                setattr(self, name, elapsed)

            return result

        return wrapper

    return decorator
class PipelineSearcher(object):
    """PipelineSearcher class.

    This class is responsible for searching the best pipeline to solve a
    given dataset and problem: it builds a trivial baseline, cross validates
    a template's defaults, then runs a BTB tuning loop bounded by a budget
    and/or SIGALRM-driven time checkpoints.
    """

    def __init__(self, pipelines_dir, db=None, test_id=None, tuner_type='gp',
                 cv_splits=5, random_state=0):
        # `db` is an optional MongoDB handle used to persist pipelines/templates.
        self._db = db
        self._pipelines_dir = pipelines_dir
        ensure_dir(self._pipelines_dir)
        self._cv_splits = cv_splits
        self._random_state = random_state
        self._tuner_type = tuner_type
        # KeyError here means an unknown tuner name was given.
        self._tuner_class = TUNERS[tuner_type]
        self._test_id = test_id

    def _dump_pipelines(self):
        """Fit (if needed) and dump to disk every pipeline queued in ``_to_dump``.

        Returns the list of detail dicts for the pipelines actually dumped,
        with the pipeline object replaced by its id.
        """
        LOGGER.info('Dumping best pipelines')
        dumped = list()
        gc.collect()
        for details in self._to_dump:
            pipeline = details['pipeline']
            if not pipeline.dumped:
                # Refit on the full training data before serializing.
                pipeline.fit(self.data_params)
                mlpipeline = pipeline.pipeline
                LOGGER.info("Dumping pipeline %s: %s", pipeline.id, pipeline.pipeline)
                LOGGER.info("Hyperparameters: %s", mlpipeline.get_hyperparameters())
                pipeline.dump(self._pipelines_dir)
                # Replace the object with its id so the details stay serializable.
                details['pipeline'] = pipeline.id
                dumped.append(details)
                gc.collect()
            else:
                LOGGER.info("Skipping already dumped pipeline %s", pipeline.id)

        return dumped

    def _set_for_dump(self, pipeline):
        """Queue *pipeline* for dumping along with the current search stats."""
        self._to_dump.append({
            'elapsed': (datetime.utcnow() - self.start_time).total_seconds(),
            # -1: do not count the trivial baseline as an iteration.
            'iterations': len(self.pipelines) - 1,
            'cv_score': self.best_pipeline.score,
            'rank': self.best_pipeline.rank,
            'pipeline': pipeline,
            'load_time': self.load_time,
            'trivial_time': self.trivial_time,
            'cv_time': np.sum(self.cv_times),
        })

    def _save_pipeline(self, pipeline):
        """Record *pipeline* in memory and, when configured, in the database."""
        pipeline_dict = pipeline.to_dict(True)
        pipeline_dict['_id'] = pipeline.id
        pipeline_dict['ts'] = datetime.utcnow()
        self.pipelines.append(pipeline_dict)
        if self._db:
            # MongoDB does not allow dots in document keys.
            insertable = remove_dots(pipeline_dict)
            insertable.pop('problem_doc')
            insertable['dataset'] = self.dataset_id
            insertable['tuner_type'] = self._tuner_type
            insertable['test_id'] = self._test_id
            self._db.pipelines.insert_one(insertable)

    @log_times('trivial_time')
    def _build_trivial_pipeline(self):
        """Build, cross validate and save the trivial baseline pipeline.

        Returns the pipeline, or None if building it crashed.
        """
        LOGGER.info("Building the Trivial pipeline")
        try:
            method = TRIVIAL_PIPELINE_METHOD.get(self.task_type)
            pipeline_dict = {
                'name': 'trivial.{}'.format(method),
                'primitives': ['mlprimitives.custom.trivial.TrivialPredictor'],
                'init_params': {
                    'mlprimitives.custom.trivial.TrivialPredictor': {
                        'method': method
                    }
                }
            }
            pipeline = ABPipeline(pipeline_dict, self.loader, self.metric, self.problem_doc)
            pipeline.cv_score(self.data_params.X, self.data_params.y,
                              self.data_params.context, cv=self.kf)
            self._save_pipeline(pipeline)
            return pipeline

        except Exception:
            # if the Trivial pipeline crashes we can do nothing,
            # so we just log the error and move on.
            LOGGER.exception("The Trivial pipeline crashed.")

    def _load_template_json(self, template_name):
        """Load a template dict from a JSON file, or None if it does not exist.

        *template_name* may be a direct path ending in '.json' or a
        'modality/task/...' name resolved inside TEMPLATES_DIR.
        """
        if template_name.endswith('.json'):
            template_filename = template_name
            name = template_name[:-5]
        else:
            name = template_name
            template_name = template_name.replace('/', '.') + '.json'
            template_filename = os.path.join(TEMPLATES_DIR, template_name)

        if os.path.exists(template_filename):
            with open(template_filename, 'r') as template_file:
                template_dict = json.load(template_file)
                template_dict['name'] = name
                return template_dict

    def _find_template(self, template_name):
        """Fetch the most recent template with the given name from the DB."""
        match = {
            'metadata.name': template_name
        }
        cursor = self._db.pipelines.find(match)
        templates = list(cursor.sort('metadata.insert_ts', -1).limit(1))
        if templates:
            template = templates[0]
            template['name'] = template.pop('metadata')['name']
            template['template'] = str(template.pop('_id'))
            # Dots were replaced by dashes for Mongo storage; restore them.
            return restore_dots(template)

    def _load_template(self, template_name):
        """Load a template by name, trying the database first, then JSON files."""
        if self._db:
            template = self._find_template(template_name)
            if template:
                return template

        return self._load_template_json(template_name)

    def _get_template(self, template_name=None):
        """Resolve the template to tune, either by name or by problem type.

        Raises:
            ValueError: if an explicit *template_name* cannot be found.
            UnsupportedProblem: if no default template exists for the problem.
        """
        if template_name:
            template = self._load_template(template_name)
            if not template:
                raise ValueError("Template {} not found".format(template_name))

            primitives = '\n'.join(template['primitives'])
            LOGGER.info('Using template %s:\n%s', template_name, primitives)
            return template

        else:
            problem_type = [
                self.data_modality,
                self.task_type,
                self.task_subtype
            ]
            for levels in reversed(range(1, 4)):
                # Try the following options:
                #     modality/task/subtask/default
                #     modality/task/default
                #     modality/default
                template_name = '/'.join(problem_type[:levels] + ['default'])
                template = self._load_template(template_name)
                if template:
                    primitives = '\n'.join(template['primitives'])
                    LOGGER.info('Using template %s:\n%s', template_name, primitives)
                    return template

            # Nothing has been found for this modality/task/subtask combination
            problem_type = '/'.join(problem_type)
            LOGGER.error('Problem type not supported %s', problem_type)
            raise UnsupportedProblem(problem_type)

    @log_times('cv_times', append=True)
    def _cv_pipeline(self, params=None):
        """Cross validate the template with *params*; None if scoring crashed."""
        pipeline_dict = self.template_dict.copy()
        if params:
            pipeline_dict['hyperparameters'] = params

        pipeline = ABPipeline(pipeline_dict, self.loader, self.metric, self.problem_doc)

        X = self.data_params.X
        y = self.data_params.y
        context = self.data_params.context
        try:
            pipeline.cv_score(X, y, context, cv=self.kf)
        except KeyboardInterrupt:
            raise
        except Exception:
            LOGGER.exception("Crash cross validating pipeline %s", pipeline.id)
            return None

        return pipeline

    def _create_tuner(self, pipeline):
        """Create the BTB tuner for the template, seeded with *pipeline*'s defaults."""
        # Build an MLPipeline to get the tunables and the default params
        mlpipeline = MLPipeline.from_dict(self.template_dict)
        tunable_hyperparameters = mlpipeline.get_tunable_hyperparameters()
        tunables = []
        tunable_keys = []
        for block_name, params in tunable_hyperparameters.items():
            for param_name, param_details in params.items():
                key = (block_name, param_name)
                param_type = param_details['type']
                # Translate MLBlocks type names into BTB type names.
                param_type = PARAM_TYPES.get(param_type, param_type)
                if param_type == 'bool':
                    param_range = [True, False]
                else:
                    param_range = param_details.get('range') or param_details.get('values')

                value = HyperParameter(param_type, param_range)
                tunables.append((key, value))
                tunable_keys.append(key)

        # Create the tuner
        LOGGER.info('Creating %s tuner', self._tuner_class.__name__)
        self.tuner = self._tuner_class(tunables)

        if pipeline:
            try:
                # Add the default params and the score obtained by them to the tuner.
                default_params = defaultdict(dict)
                for block_name, params in pipeline.pipeline.get_hyperparameters().items():
                    for param, value in params.items():
                        key = (block_name, param)
                        if key in tunable_keys:
                            if value is None:
                                # BTB cannot handle None values; skip seeding.
                                raise ValueError('None value is not supported')

                            default_params[key] = value

                if pipeline.rank is not None:
                    # Tuner maximizes, rank is "lower is better" -> invert.
                    self.tuner.add(default_params, 1 - pipeline.rank)

            except ValueError:
                pass

    def _set_checkpoint(self):
        """Arm a SIGALRM for the next checkpoint in the queue."""
        next_checkpoint = self.checkpoints.pop(0)
        # Checkpoints are absolute offsets; the alarm interval is relative.
        interval = next_checkpoint - self.current_checkpoint
        self._stop_time = datetime.utcnow() + timedelta(seconds=interval)
        LOGGER.info("Setting %s seconds checkpoint in %s seconds: %s",
                    next_checkpoint, interval, self._stop_time)
        signal.alarm(interval)
        self.current_checkpoint = next_checkpoint

    def _checkpoint(self, signum=None, frame=None, final=False):
        """SIGALRM handler: queue the current best pipeline for dumping.

        Also called directly with ``final=True`` at the end of the search.
        """
        signal.alarm(0)
        checkpoint_name = 'Final' if final else str(self.current_checkpoint) + ' seconds'
        LOGGER.info("%s checkpoint reached", checkpoint_name)
        try:
            if self.best_pipeline:
                self._set_for_dump(self.best_pipeline)
        except KeyboardInterrupt:
            raise
        except Exception:
            LOGGER.exception("Checkpoint dump crashed")

        if final or not bool(self.checkpoints):
            self.current_checkpoint = None
            # LOGGER.warn("Stopping Search")
            # raise StopSearch()
        else:
            self._set_checkpoint()

    def _check_stop(self):
        """Abort the loop if the last checkpoint's deadline has passed."""
        if self._stop_time and self._stop_time < datetime.utcnow():
            LOGGER.warn("Stop Time already passed. Stopping Search!")
            raise StopSearch()

    def _setup_search(self, d3mds, budget, checkpoints, template_name):
        """Initialize all per-search state from the D3MDS problem/dataset."""
        self.start_time = datetime.utcnow()
        self.cv_times = list()

        # Problem variables
        self.problem_id = d3mds.get_problem_id()
        self.task_type = d3mds.get_task_type()
        self.task_subtype = d3mds.problem.get_task_subtype()

        # TODO: put this in mit-d3m loaders
        if self.task_type == 'vertex_classification':
            self.task_type = 'vertex_nomination'

        self.problem_doc = d3mds.problem_doc

        # Dataset variables
        self.dataset_id = d3mds.dataset_id
        self.data_modality = d3mds.get_data_modality()

        # TODO: put this in mit-d3m loaders
        if self.data_modality == 'edgeList':
            self.data_modality = 'graph'

        self.metric = d3mds.get_metric()
        self.loader = get_loader(self.data_modality, self.task_type)

        self.best_pipeline = None
        self.pipelines = []
        self.checkpoints = sorted(checkpoints or [])
        self.current_checkpoint = 0
        self._to_dump = []

        # With neither checkpoints nor budget, run a single iteration.
        if not self.checkpoints and budget is None:
            self.budget = 1
        else:
            self.budget = budget

        self.template_dict = self._get_template(template_name)

        LOGGER.info("Running TA2 Search")
        LOGGER.info("Problem Id: %s", self.problem_id)
        LOGGER.info("    Data Modality: %s", self.data_modality)
        LOGGER.info("    Task type: %s", self.task_type)
        LOGGER.info("    Task subtype: %s", self.task_subtype)
        LOGGER.info("    Metric: %s", self.metric)
        LOGGER.info("    Checkpoints: %s", self.checkpoints)
        LOGGER.info("    Budget: %s", self.budget)

    @log_times('load_time')
    def _load_data(self, d3mds):
        """Load the training data; timing is stored in ``self.load_time``."""
        self.data_params = self.loader.load(d3mds)

    def _setup_cv(self):
        """Choose the cross-validation splitter.

        Stratified K-Fold is used for classification when every class has at
        least ``_cv_splits`` samples; otherwise plain K-Fold.
        """
        if isinstance(self.data_params.y, pd.Series):
            min_samples = self.data_params.y.value_counts().min()
        else:
            y = self.data_params.y
            min_samples = y.groupby(list(y.columns)).size().min()

        if self.task_type == 'classification' and min_samples >= self._cv_splits:
            self.kf = StratifiedKFold(
                n_splits=self._cv_splits,
                shuffle=True,
                random_state=self._random_state
            )
        else:
            self.kf = KFold(
                n_splits=self._cv_splits,
                shuffle=True,
                random_state=self._random_state
            )

    def search(self, d3mds, template_name=None, budget=None, checkpoints=None):
        """Search the best pipeline for the given D3MDS dataset/problem.

        Args:
            d3mds: mit-d3m dataset/problem wrapper to solve.
            template_name (str): optional explicit template to tune.
            budget (int): optional maximum number of tuning iterations.
            checkpoints (list): optional time offsets (seconds) at which the
                best pipeline so far is queued for dumping.

        Returns:
            list: details of the dumped pipelines (see ``_dump_pipelines``).
        """
        try:
            self._setup_search(d3mds, budget, checkpoints, template_name)
            self._load_data(d3mds)
            self._setup_cv()

            # Build the trivial pipeline
            self.best_pipeline = self._build_trivial_pipeline()

            # Do not continue if there is no budget or no fit data
            if budget == 0 or not len(self.data_params.X):
                raise StopSearch()

            # Build the default pipeline
            default_pipeline = self._cv_pipeline()
            if default_pipeline:
                self.best_pipeline = default_pipeline
                self._save_pipeline(default_pipeline)

            if budget == 1:
                raise StopSearch()
            elif budget is not None:
                iterator = range(budget - 1)
            else:
                iterator = itertools.count()   # infinite range

            # Build the tuner
            self._create_tuner(default_pipeline)

            LOGGER.info("Starting the tuning loop")
            if self.checkpoints:
                signal.signal(signal.SIGALRM, self._checkpoint)
                self._set_checkpoint()
            else:
                self._stop_time = None

            for iteration in iterator:
                self._check_stop()
                proposed_params = self.tuner.propose()
                params = make_dumpable(proposed_params)
                LOGGER.info("Cross validating pipeline %s", iteration + 1)
                pipeline = self._cv_pipeline(params)
                if pipeline and (pipeline.rank is not None):
                    self.tuner.add(proposed_params, 1 - pipeline.rank)
                    LOGGER.info("Saving pipeline %s: %s", iteration + 1, pipeline.id)
                    self._save_pipeline(pipeline)
                    if not self.best_pipeline or (pipeline.rank < self.best_pipeline.rank):
                        self.best_pipeline = pipeline
                        LOGGER.info('Best pipeline so far: %s; rank: %s, score: %s',
                                    self.best_pipeline, self.best_pipeline.rank,
                                    self.best_pipeline.score)
                else:
                    # Penalize crashed proposals so the tuner avoids that region.
                    self.tuner.add(proposed_params, -1000000)

        except KeyboardInterrupt:
            # Also catches StopSearch (a KeyboardInterrupt subclass).
            pass
        finally:
            signal.alarm(0)

        if self.current_checkpoint:
            self._checkpoint(final=True)
        elif self.best_pipeline and not checkpoints:
            self._set_for_dump(self.best_pipeline)

        if self.best_pipeline:
            LOGGER.info('Best pipeline for problem %s found: %s; rank: %s, score: %s',
                        self.problem_id, self.best_pipeline,
                        self.best_pipeline.rank, self.best_pipeline.score)
        else:
            LOGGER.info('No pipeline could be found for problem %s', self.problem_id)

        return self._dump_pipelines()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/autobazaar/__main__.py | training/helpers/autobazaar/autobazaar/__main__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""AutoBazaar Command Line Module."""
import argparse
import gc
import json
import os
import shutil
import socket
import sys
import traceback
import warnings
from datetime import datetime
import cloudpickle
import pandas as pd
from mit_d3m import metrics
from mit_d3m.dataset import D3MDS
from mit_d3m.db import get_db
from mit_d3m.stats import get_stats
from mit_d3m.utils import logging_setup, make_abs
import autobazaar
from autobazaar.search import TUNERS, PipelineSearcher
from autobazaar.utils import encode_score, make_keras_picklable
warnings.filterwarnings(action='ignore')
VERSION = autobazaar.get_version()
def _load_targets(datasets_dir, dataset, problem):
score_phase = 'SCORE'
if problem:
score_phase += '_' + problem
score_dir = os.path.join(datasets_dir, dataset, score_phase)
csv_path = os.path.join(score_dir, 'targets.csv')
if not os.path.exists(csv_path):
csv_path = os.path.join(score_dir, 'dataset_SCORE', 'tables', 'learningData.csv')
return pd.read_csv(csv_path, index_col='d3mIndex')
def _get_metric(problem_path):
    """Return the scoring function declared in the problem's problemDoc.json.

    Raises:
        Exception: if the problem declares more than one metric.
    """
    problem_schema = os.path.join(problem_path, 'problemDoc.json')
    with open(problem_schema, 'r') as f:
        problem_doc = json.load(f)

    problem_metrics = problem_doc['inputs']['performanceMetrics']
    if len(problem_metrics) > 1:
        raise Exception("Wrong number of metrics")

    # Look up the mit-d3m scorer for the declared metric name.
    return metrics.METRICS_DICT[problem_metrics[0]['metric']]
def _get_dataset_paths(datasets_dir, dataset, phase, problem):
if problem:
full_phase = phase + '_' + problem
else:
full_phase = phase
root_dir = os.path.join(datasets_dir, dataset, full_phase)
dataset_path = os.path.join(root_dir, 'dataset_' + phase)
problem_path = os.path.join(root_dir, 'problem_' + phase)
return dataset_path, problem_path
def _search_pipeline(dataset, problem, template, input_dir, output_dir,
                     budget, checkpoints, splits, db, tuner_type, test_id):
    """Run a PipelineSearcher over the TRAIN split of *dataset*.

    Returns the list of dumped pipeline detail dicts produced by the search.
    """
    dataset_path, problem_path = _get_dataset_paths(input_dir, dataset, 'TRAIN', problem)
    d3mds = D3MDS(dataset_path, problem_path)
    searcher = PipelineSearcher(
        output_dir,
        cv_splits=splits,
        db=db,
        tuner_type=tuner_type,
        test_id=test_id
    )
    return searcher.search(d3mds, template, budget=budget, checkpoints=checkpoints)
def _test_pipeline(dataset, problem, pipeline_id, input_dir, output_dir):
    """Load the dumped pipeline *pipeline_id* and predict on the TEST split."""
    dataset_path, problem_path = _get_dataset_paths(input_dir, dataset, 'TEST', problem)
    pipeline_path = os.path.join(output_dir, '{}.pkl'.format(pipeline_id))
    with open(pipeline_path, 'rb') as pipeline_pkl:
        # NOTE(review): unpickling executes arbitrary code; only load pipelines
        # produced by a trusted search run.
        pipeline = cloudpickle.load(pipeline_pkl)

    print('Executing best pipeline {}'.format(pipeline))
    d3mds = D3MDS(dataset_path, problem_path)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        predictions = pipeline.predict(d3mds)

    return predictions
def _score_predictions(dataset, problem, predictions, input_dir):
    """Score *predictions* for *dataset* against its ground-truth targets.

    Only single-target problems with matching indexes are supported.
    """
    dataset_path, problem_path = _get_dataset_paths(input_dir, dataset, 'TEST', problem)
    metric = _get_metric(problem_path)

    predictions = predictions.set_index('d3mIndex')
    targets = _load_targets(input_dir, dataset, problem)[predictions.columns]

    if len(targets.columns) > 1 or len(predictions.columns) > 1:
        raise Exception("I don't know how to handle these")

    if any(targets.index != predictions.index):
        raise Exception("Different indexes cannot be compared")

    # Reduce both frames to their single column as Series.
    targets = targets.iloc[:, 0]
    predictions = predictions.iloc[:, 0]

    score = encode_score(metric, targets, predictions)
    print("Score: {}".format(score))

    summary = {'predictions': predictions, 'targets': targets}
    print(pd.DataFrame(summary).describe())

    return score
def _format_exception(e):
error = '{}'.format(e.__class__.__name__)
str_e = str(e)
if str_e:
error += ' - ' + str_e
return error
def _box_print(message):
length = len(message) + 10
print(length * '#')
print('#### {} ####'.format(message))
print(length * '#')
def _insert_test(args, dataset):
    """Insert a 'running' test document for *dataset* into the tests collection."""
    insert_ts = datetime.utcnow()
    document = {
        'test_id': args.test_id,
        'dataset': dataset,
        'timeout': args.timeout,
        'checkpoints': args.checkpoints,
        'budget': args.budget,
        'template': args.template,
        'status': 'running',
        'insert_ts': insert_ts,
        'update_ts': insert_ts,
        'version': VERSION,
        'hostname': socket.gethostname(),
        'tuner_type': args.tuner_type,
        'splits': args.splits,
    }
    args.db.tests.insert_one(document)
def _update_test(args, dataset, error, step):
    """Mark the test document for *dataset* as 'done' or 'error' in the DB."""
    query = {
        'test_id': args.test_id,
        'dataset': dataset
    }
    update = {
        '$set': {
            'status': 'error' if error else 'done',
            'error': error,
            'step': step,
            'update_ts': datetime.utcnow()
        }
    }
    args.db.tests.update_one(query, update)
def _insert_test_result(args, result):
    """Store a single pipeline scoring result in the test_results collection."""
    document = result.copy()
    document['test_id'] = args.test_id
    document['insert_ts'] = datetime.utcnow()
    args.db.test_results.insert_one(document)
def _score_dataset(dataset, args):
    """Search, execute and score pipelines for a single dataset.

    Runs the SEARCH step once and then a TEST + SCORE step for every pipeline
    the search dumped. Errors in any step are captured into the result dicts
    instead of propagating. Returns a list of result dicts.
    """
    start_ts = datetime.utcnow()
    if args.db:
        _insert_test(args, dataset)

    # Template for per-pipeline result rows; filled in as steps complete.
    result_base = {
        'dataset': dataset,
        'score': None,
        'elapsed': None,
        'iterations': None,
        'error': None,
        'step': None,
        'load_time': None,
        'trivial_time': None,
        'cv_time': None,
        'cv_score': None,
        'rank': None
    }
    results = []
    step = None
    error = None
    try:
        step = 'SEARCH'
        _box_print('Searching {}'.format(dataset))

        # cleanup
        if not args.keep:
            shutil.rmtree(args.output, ignore_errors=True)

        search_results = _search_pipeline(
            dataset, args.problem, args.template, args.input, args.output, args.budget,
            args.checkpoints, args.splits, args.db, args.tuner_type, args.test_id
        )
        gc.collect()

        for search_result in search_results or []:
            result = result_base.copy()
            result.update(search_result)
            results.append(result)

            pipeline = result['pipeline']
            try:
                step = 'TEST'
                _box_print('Executing {}'.format(dataset))
                predictions = _test_pipeline(dataset, args.problem, pipeline,
                                             args.input, args.output)

                step = 'SCORE'
                _box_print('Scoring {}'.format(dataset))
                score = _score_predictions(dataset, args.problem,
                                           predictions, args.input)
                result['score'] = score
                gc.collect()

            except Exception as e:
                # A failing pipeline is recorded but does not stop the others.
                error = _format_exception(e)
                print("Scoring pipeline {} for dataset {} failed on step {} with error {}"
                      .format(pipeline, dataset, step, error))
                traceback.print_exc()

                result['error'] = error
                result['step'] = step

            if args.db:
                _insert_test_result(args, result)

    except Exception as e:
        # The search itself failed; record one error row for the dataset.
        error = _format_exception(e)
        print("Dataset {} failed on step {} with error {}".format(dataset, step, error))
        traceback.print_exc()

        result_base['step'] = step
        result_base['error'] = error
        result_base['elapsed'] = (datetime.utcnow() - start_ts).total_seconds()
        results.append(result_base)

    if args.db:
        _update_test(args, dataset, error, step)

    return results
def _prepare_search(args):
    """Validate and normalize the parsed CLI args in place before searching.

    Resolves the dataset list, opens the optional DB connection, converts
    checkpoints/timeout into a list of integers and generates a test_id.
    """
    make_keras_picklable()
    if not args.datasets and not args.all:
        print('Please provide at least one dataset name or add the --all option')
        sys.exit(1)

    args.datasets = _get_datasets(args)

    if args.db:
        # Replace the boolean flag with an actual database connection.
        args.db = get_db(
            config=args.db_config,
            database=args.db_name,
            host=args.db_host,
            port=args.db_port,
            user=args.db_user,
            password=args.db_password
        )

    if args.checkpoints:
        args.checkpoints = [int(c) for c in args.checkpoints.split(',')]
    elif args.timeout:
        # A plain timeout behaves like a single final checkpoint.
        args.checkpoints = [args.timeout]

    if args.test_id is None:
        args.test_id = datetime.utcnow().strftime('%Y%m%d%H%M%S%f')
def _score_datasets(args):
    """Score every selected dataset, resuming from an existing report if given.

    Datasets already present in the report with a non-null score are skipped;
    previously failed rows are dropped and re-scored. After each dataset the
    report is re-saved to ``args.report`` (when given) so progress survives
    interruption.

    Returns:
        pandas.DataFrame: the accumulated report.
    """
    if args.report and os.path.exists(args.report):
        report = pd.read_csv(args.report)
    else:
        report = pd.DataFrame(columns=['dataset'])

    for dataset, row in args.datasets.iterrows():
        dataset_score = report[report.dataset == dataset]
        if dataset_score.empty or dataset_score.score.isnull().values[0]:
            if not dataset_score.empty:
                # clean-up: drop the previous failed attempt before re-scoring
                report = report[report.dataset != dataset].copy()

            scores = _score_dataset(dataset, args)
            if scores:
                scores = pd.DataFrame(scores)
                scores = scores.merge(pd.DataFrame([row]), left_on='dataset', right_index=True)
                # DataFrame.append was deprecated in pandas 1.4 and removed in
                # 2.0; pd.concat is the supported equivalent.
                report = pd.concat([report, scores], ignore_index=True, sort=False)
                report = report.reindex(REPORT_COLUMNS, axis=1)

                if args.report:
                    report.to_csv(args.report, index=False)

    return report
def _search(args):
    """Entry point for the ``search`` command: score datasets, print a summary."""
    _prepare_search(args)
    print("{} - Processing Datasets: {}".format(args.test_id, args.datasets.index.values))
    report = _score_datasets(args)
    report = report.reindex(REPORT_COLUMNS, axis=1)
    # Skip the 'dataset' column: it becomes the index of the printed table.
    columns = REPORT_COLUMNS[1:]
    print(report.set_index('dataset').to_string(columns=columns))
def _get_datasets(args):
    """Resolve the datasets to process into a stats DataFrame indexed by name.

    Applies the --all/--exclude selection and the modality/type/subtype
    filters. Exits the process when nothing matches.
    """
    if args.all:
        datasets = [
            d for d in os.listdir(args.input)
            if os.path.isdir(os.path.join(args.input, d))
        ]
    else:
        datasets = args.datasets

    # The `list` subcommand has no --exclude option, hence the getattr.
    exclude = getattr(args, 'exclude', None) or []
    datasets = [dataset for dataset in datasets if dataset not in exclude]

    try:
        summary = get_stats(datasets, args.input)
    except KeyError:
        print("No matching datasets found")
        sys.exit(1)

    summary = summary.set_index('dataset').reindex(datasets)
    # Drop datasets for which no stats could be computed.
    summary = summary[~summary.data_modality.isnull()]

    for field in ['data_modality', 'task_type', 'task_subtype']:
        value = getattr(args, field)
        if value:
            summary = summary[summary[field] == value]

    if summary.empty:
        print("No matching datasets found")
        sys.exit(1)

    return summary
# Column order used for the CSV report and the console summary table.
REPORT_COLUMNS = [
    'dataset',
    'pipeline',
    'score',
    'rank',
    'cv_score',
    'metric',
    'data_modality',
    'task_type',
    'task_subtype',
    'elapsed',
    'iterations',
    'load_time',
    'trivial_time',
    'cv_time',
    'error',
    'step'
]
def _list(args):
    """Entry point for the ``list`` command: print or save matching datasets."""
    # Force discovery of every dataset in the input folder.
    args.all = True
    datasets = _get_datasets(args)
    datasets = datasets.reset_index().sort_values('dataset').set_index('dataset')
    columns = [
        'data_modality', 'task_type', 'task_subtype', 'metric', 'size_human', 'train_samples'
    ]
    datasets = datasets.reindex(columns, axis=1)
    if args.report:
        print("Storing datasets as {}".format(args.report))
        datasets[columns].to_csv(args.report, index=True)
    else:
        print(datasets.to_string(columns=columns, index=True))
class ArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that splits each line of an @args-file on whitespace."""

    def convert_arg_line_to_args(self, arg_line):
        # Every whitespace-separated token on the line becomes one argument.
        tokens = arg_line.split()
        return tokens
def _path_type(string):
    """argparse ``type`` callback that converts *string* to an absolute path."""
    try:
        return make_abs(string)
    except ValueError:
        error = "Not a valid path: '{0}'.".format(string)
        raise argparse.ArgumentTypeError(error)
def _get_parser():
    """Build the argparse parser for the ``abz`` command line tool.

    Shared option groups are declared as parent parsers and composed into
    the ``list`` and ``search`` subcommands.
    """
    # Logging
    logging_args = ArgumentParser(add_help=False)
    logging_args.add_argument('-v', '--verbose', action='count', default=0)
    logging_args.add_argument('-l', '--logfile')

    # Report
    report_args = ArgumentParser(add_help=False)
    report_args.add_argument('-r', '--report', type=_path_type,
                             help='Store results in the given CVS file.')

    # Dataset Selection
    dataset_args = ArgumentParser(add_help=False)
    dataset_args.add_argument('-i', '--input', default='input', type=_path_type,
                              help='Input datasets folder. Defaults to `input`.')
    dataset_args.add_argument('-o', '--output', type=_path_type,
                              help='Output pipelines folder. Defaults to `output`.',
                              default='output')
    dataset_args.add_argument('-p', '--problem', default='',
                              help='Problem suffix. Only needed if the dataset has more than one.')
    dataset_args.add_argument('-M', '--data-modality', type=str,
                              help='Only process datasets of the given Data Modality.')
    dataset_args.add_argument('-T', '--task-type', type=str,
                              help='Only process datasets of the given Task type')
    dataset_args.add_argument('-S', '--task-subtype', type=str,
                              help='Only process datasets of the given Task Subtype')

    # Search Configuration
    search_args = ArgumentParser(add_help=False)
    search_args.add_argument('-b', '--budget', type=int,
                             help='If given, maximum number tuning iterations to perform.')
    search_args.add_argument('-s', '--splits', type=int, default=5,
                             help='Number of Cross Validation Folds. Defaults to 5')
    search_args.add_argument('-c', '--checkpoints',
                             help=('Comma separated list of time checkpoints in seconds where '
                                   'the best pipeline so far will be dumped and stored.'))
    search_args.add_argument('-t', '--timeout', type=int,
                             help='Timeout in seconds. Ignored if checkpoints are given.')
    search_args.add_argument('-u', '--tuner-type', default='gp', choices=TUNERS.keys(),
                             help='Type of tuner to use. Defaults to "gp"')
    search_args.add_argument('--template',
                             help='Template to use. If not given, use the most appropriate one.')
    search_args.add_argument('-e', '--exclude', nargs='+',
                             help='Exclude these datasets. Useful in combination with --all.')
    search_args.add_argument('-a', '--all', action='store_true',
                             help='Process all the datasets found in the input folder.')
    search_args.add_argument('-k', '--keep', action='store_true',
                             help='Keep previous results in the output folder.')
    search_args.add_argument('--test-id', help='test_id associated with this run.')
    search_args.add_argument('datasets', nargs='*',
                             help='Datasets to process. Ignored if --all use used.')

    # Backend configuration
    db_args = ArgumentParser(add_help=False)
    db_args.add_argument('--db', action='store_true',
                         help='Use a MongoDB backend to store the results.')
    db_args.add_argument('--db-config', help='MongoDB configuraiton JSON file.')
    db_args.add_argument('--db-host', default='localhost')
    db_args.add_argument('--db-port', default=27017, type=int)
    db_args.add_argument('--db-name', default='autobazaar')
    db_args.add_argument('--db-user')
    db_args.add_argument('--db-password')

    parser = ArgumentParser(
        prog='autobazaar',
        description='AutoBazaar Experiments Suite',
        fromfile_prefix_chars='@',
        parents=[logging_args]
    )
    parser.add_argument('--version', action='version',
                        version='%(prog)s {version}'.format(version=VERSION))

    subparsers = parser.add_subparsers(title='command', help='Command to execute')
    parser.set_defaults(command=None)

    list_ = subparsers.add_parser('list', parents=[logging_args, dataset_args, report_args],
                                  help='List the available datasets that match the conditions.')
    list_.set_defaults(command=_list)

    search_parents = [
        logging_args,
        dataset_args,
        search_args,
        report_args,
        db_args
    ]
    search_ = subparsers.add_parser('search', parents=search_parents,
                                    help='Search the best pipeline for the given datasets.')
    search_.set_defaults(command=_search)

    return parser
def main():
    """CLI entry point: parse args, set up logging, dispatch the subcommand."""
    parser = _get_parser()
    args = parser.parse_args()
    if not args.command:
        # No subcommand given: show usage and exit.
        parser.print_help()
        parser.exit()

    logging_setup(args.verbose, args.logfile)
    gc.enable()

    args.command(args)


if __name__ == '__main__':
    main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/autobazaar/utils.py | training/helpers/autobazaar/autobazaar/utils.py | # -*- coding: utf-8 -*-
import os
import tempfile
from collections import defaultdict
from datetime import datetime
import numpy as np
from sklearn.preprocessing import LabelEncoder
def encode_score(scorer, expected, observed):
    """Apply *scorer* to the targets, label-encoding string values first."""
    if expected.dtype == 'object':
        # Map string labels to integers so numeric scorers can handle them.
        encoder = LabelEncoder()
        expected = encoder.fit_transform(expected)
        observed = encoder.transform(observed)

    return scorer(expected, observed)
def ensure_dir(directory):
    """Create *directory* (and any missing parents) if it does not exist yet.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation (the previous check-then-makedirs
    version could raise FileExistsError under that race).
    """
    os.makedirs(directory, exist_ok=True)
def make_dumpable(params, datetimes=False):
    """Get nested dicts of params to allow json dumping.

    Also work around this: https://github.com/HDI-Project/BTB/issues/79
    And convert numpy types to primitive types.

    Optionally dump datetimes to ISO format.

    Args:
        params (dict):
            Params dictionary with tuples as keys.
        datetimes (bool):
            whether to convert datetimes to ISO strings or not.

    Returns:
        dict:
            Dumpable params as a tree of dicts and nested sub-dicts.
    """
    nested_params = defaultdict(dict)
    # Keys are (block_name, param_name) tuples; nest them as block -> param.
    # NOTE: the elif order matters — ndarray must be checked before the
    # `== 'None'` comparison, which would otherwise broadcast over arrays.
    for (block, param), value in params.items():
        if isinstance(value, np.integer):
            value = int(value)
        elif isinstance(value, np.floating):
            value = float(value)
        elif isinstance(value, np.ndarray):
            value = value.tolist()
        elif isinstance(value, np.bool_):
            value = bool(value)
        elif value == 'None':
            # BTB workaround: the string 'None' stands in for a null value.
            value = None
        elif datetimes and isinstance(value, datetime):
            value = value.isoformat()

        nested_params[block][param] = value

    return nested_params
def _walk(document, transform):
if not isinstance(document, dict):
return document
new_doc = dict()
for key, value in document.items():
if isinstance(value, dict):
value = _walk(value, transform)
elif isinstance(value, list):
value = [_walk(v, transform) for v in value]
new_key, new_value = transform(key, value)
new_doc[new_key] = new_value
return new_doc
def remove_dots(document):
    """Replace dots with dashes in every key of *document*, recursively."""
    def undot(key, value):
        return key.replace('.', '-'), value

    return _walk(document, undot)
def restore_dots(document):
    """Replace dashes with dots in every key of *document*, recursively."""
    def redot(key, value):
        return key.replace('-', '.'), value

    return _walk(document, redot)
def make_keras_picklable():
    """Make the keras models picklable.

    Monkeypatches ``keras.models.Model`` with ``__getstate__``/``__setstate__``
    that round-trip the model through a temporary HDF5 file.
    """
    import keras.models  # noqa: lazy import slow dependencies

    def __getstate__(self):
        # Serialize the model into an in-memory HDF5 byte string.
        model_str = ""
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            keras.models.save_model(self, fd.name, overwrite=True)
            model_str = fd.read()

        return {'model_str': model_str}

    def __setstate__(self, state):
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
            fd.write(state['model_str'])
            fd.flush()
            try:
                model = keras.models.load_model(fd.name)
            except ValueError:
                # Mobilenet models need their custom objects registered
                # before they can be deserialized.
                from keras.applications import mobilenet
                from keras.utils.generic_utils import CustomObjectScope
                scope = {
                    'relu6': mobilenet.relu6,
                    'DepthwiseConv2D': mobilenet.DepthwiseConv2D
                }
                with CustomObjectScope(scope):
                    model = keras.models.load_model(fd.name)

        # Adopt the restored model's state wholesale.
        self.__dict__ = model.__dict__

    cls = keras.models.Model
    cls.__getstate__ = __getstate__
    cls.__setstate__ = __setstate__
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/autobazaar/pipeline.py | training/helpers/autobazaar/autobazaar/pipeline.py | # -*- coding: utf-8 -*-
"""AutoBazaar Pipeline Module."""
import json
import logging
import os
import random
import uuid
from collections import Counter
import cloudpickle
import numpy as np
import pandas as pd
from mit_d3m.loaders import get_loader
from mit_d3m.metrics import METRICS_DICT
from mlblocks import MLPipeline
from autobazaar.utils import encode_score
LOGGER = logging.getLogger(__name__)
class ABPipeline(object):
"""AutoBazaar Pipeline Class."""
    def _extract_hyperparameters(self, preprocessing_primitives):
        """Split hyperparameters into preprocessing and tunable groups.

        Block names are made unique by suffixing '#<count>' for repeated
        primitives, matching the MLPipeline block naming convention.

        Returns:
            tuple: (preprocessing hyperparameters dict, remaining
            hyperparameters dict for the tunable part).
        """
        block_names_count = Counter()
        block_names = list()
        for primitive in preprocessing_primitives:
            block_names_count.update([primitive])
            block_count = block_names_count[primitive]
            block_names.append('{}#{}'.format(primitive, block_count))

        pre_params = dict()
        hyperparameters = self.pipeline_dict['hyperparameters'].copy()
        for block_name in block_names:
            # Move each preprocessing block's params out of the shared dict.
            block_params = hyperparameters.pop(block_name, None)
            if block_params:
                pre_params[block_name] = block_params

        return pre_params, hyperparameters
def __init__(self, pipeline_dict, loader, metric, problem_doc):
self.pipeline_dict = pipeline_dict
self.name = pipeline_dict['name']
self.template = pipeline_dict.get('template')
self.loader = loader
self.metric = metric
self.problem_doc = problem_doc
preprocessing_blocks = self.pipeline_dict.get('preprocessing_blocks')
if preprocessing_blocks:
preprocessing = pipeline_dict.copy()
preprocessing_primitives = preprocessing['primitives'][:preprocessing_blocks]
preprocessing['primitives'] = preprocessing_primitives
self._preprocessing = preprocessing
tunable = pipeline_dict.copy()
tunable_primitives = tunable['primitives'][preprocessing_blocks:]
tunable['primitives'] = tunable_primitives
self._tunable = tunable
pre_params, tun_params = self._extract_hyperparameters(preprocessing_primitives)
self._preprocessing['hyperparameters'] = pre_params
self._tunable['hyperparameters'] = tun_params
else:
self._preprocessing = None
self._tunable = pipeline_dict
self.id = str(uuid.uuid4())
self.cv_scores = list()
self.rank = None
self.score = None
self.dumped = False
self.fitted = False
self.pipeline = MLPipeline.from_dict(pipeline_dict)
def fit(self, data_params):
"""Fit the pipeline on the given params."""
X, y = data_params.X, data_params.y
self.pipeline = MLPipeline.from_dict(self.pipeline_dict)
self.pipeline.fit(X, y, **data_params.context)
self.fitted = True
def predict(self, d3mds):
"""Get predictions for the given D3MDS."""
data_params = self.loader.load(d3mds)
predictions = self.pipeline.predict(data_params.X, **data_params.context)
out_df = pd.DataFrame()
out_df['d3mIndex'] = data_params.y.index
out_df[d3mds.target_column] = predictions
return out_df
def _get_split(self, X, y, indexes):
if hasattr(X, 'iloc'):
X = X.iloc[indexes]
else:
X = X[indexes]
if y is not None:
if hasattr(y, 'iloc'):
y = y.iloc[indexes]
else:
y = y[indexes]
return X, y
def _get_score(self):
score = np.mean(self.cv_scores)
std = np.std(self.cv_scores)
if 'Error' in self.metric:
rank = score
elif score <= 1:
rank = 1 - score
else:
raise ValueError("Found a score > 1 in a maximization problem: {}".format(score))
return score, std, rank
def preprocess(self, X, y, context):
"""Execute the preprocessing steps of the pipeline."""
if self._preprocessing:
LOGGER.info("Executing preprocessing pipeline")
pipeline = MLPipeline.from_dict(self._preprocessing)
pipeline.fit(X, y, **context)
return pipeline.predict(X, **context)
else:
LOGGER.info("No preprocessing steps found")
return X
def cv_score(self, X, y, context, metric=None, cv=None):
"""Cross Validate this pipeline."""
scorer = METRICS_DICT[metric or self.metric]
LOGGER.debug('CV Scoring pipeline %s', self)
self.cv_scores = list()
for fold, (train_index, test_index) in enumerate(cv.split(X, y)):
LOGGER.debug('Scoring fold: %s', fold)
X_train, y_train = self._get_split(X, y, train_index)
X_test, y_test = self._get_split(X, y, test_index)
pipeline = MLPipeline.from_dict(self._tunable)
pipeline.fit(X_train, y_train, **context)
pred = pipeline.predict(X_test, **context)
score = encode_score(scorer, y_test, pred)
self.cv_scores.append(score)
LOGGER.debug('Fold %s score: %s', fold, score)
score, std, rank = self._get_score()
LOGGER.debug('CV score: %s +/- %s; rank: %s', score, std, rank)
self.score = score
self.std = std
self.rank = rank + random.random() * 1.e-12 # to avoid collisions
def to_dict(self, problem_doc=False):
"""Return the details of this pipeline in a dict."""
pipeline_dict = self.pipeline.to_dict().copy()
pipeline_dict.update({
'id': self.id,
'name': self.name,
'template': self.template,
'loader': self.loader.to_dict(),
'score': self.score,
'rank': self.rank,
'metric': self.metric
})
if problem_doc:
pipeline_dict['problem_doc'] = self.problem_doc
return pipeline_dict
def __repr__(self):
return 'ABPipeline({})'.format(json.dumps(self.to_dict(), indent=4))
@classmethod
def from_dict(cls, pipeline_dict):
"""Load a pipeline from a dict."""
pipeline_dict = pipeline_dict.copy()
loader = get_loader(**pipeline_dict.pop('loader'))
metric = pipeline_dict['metric']
problem_doc = pipeline_dict.pop('problem_doc')
return cls(pipeline_dict, loader, metric, problem_doc)
def dump(self, output_dir, rank=None):
"""Dump this pipeline using pickle."""
if rank is None:
rank = self.rank
LOGGER.info('Dumping pipeline with rank %s: %s', rank, self.id)
self.dumped = True
pickle_path = os.path.join(output_dir, '{}.pkl'.format(self.id))
with open(pickle_path, "wb") as pickle_file:
LOGGER.info("Outputting pipeline %s", pickle_file.name)
cloudpickle.dump(self, pickle_file)
json_path = os.path.join(output_dir, '{}.json'.format(self.id))
self.pipeline.save(json_path)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/autobazaar/__init__.py | training/helpers/autobazaar/autobazaar/__init__.py | # -*- coding: utf-8 -*-
"""
AutoBazaar top module.
AutoBazaar is an AutoML system created to execute the experiments associated with the
[The Machine Learning Bazaar Paper: Harnessing the ML Ecosystem for Effective System
Development](https://arxiv.org/pdf/1905.08942.pdf)
by the [Human-Data Interaction (HDI) Project](https://hdi-dai.lids.mit.edu/) at LIDS, MIT.
* Free software: MIT license
* Documentation: https://HDI-Project.github.io/AutoBazaar
"""
import os
import git
__author__ = 'MIT Data To AI Lab'
__copyright__ = 'Copyright (c) 2019, MIT Data To AI Lab'
__email__ = 'dailabmit@gmail.com'
__license__ = 'MIT'
__version__ = '0.2.1-dev'
def _get_commit():
    """Return the short hash of the current git HEAD, or None.

    The hash is the first seven characters of HEAD's SHA, with a trailing
    ``'*'`` appended when tracked files have uncommitted changes. ``None``
    is returned when the package does not live inside a git repository
    (e.g. an installed distribution).
    """
    try:
        package_dir = os.path.dirname(__file__)
        repository = git.Repo(package_dir, search_parent_directories=True)
        short_sha = repository.commit().hexsha[:7]
        if repository.is_dirty(untracked_files=False):
            # mark a dirty working tree with an asterisk
            return short_sha + '*'

        return short_sha
    except git.InvalidGitRepositoryError:
        return None
def get_version():
    """Return the package version, suffixed with the git commit if known."""
    commit = _get_commit()
    # Outside a git checkout there is no commit, so fall back to the bare
    # version string.
    return '{} - {}'.format(__version__, commit) if commit else __version__
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autobazaar/docs/conf.py | training/helpers/autobazaar/docs/conf.py | # -*- coding: utf-8 -*-
#
# AutoBazaar documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 6 13:06:48 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sphinx_rtd_theme # For read the docs theme
import autobazaar
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'm2r',
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'autodocsumm',
]
autodoc_default_options = {
'autosummary': True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AutoBazaar'
slug = 'autobazaar'
title = project + ' Documentation'
copyright = '2019, MIT Data to AI Lab'
author = 'Carles Sala, Micah Smith, Max Kanter, Kalyan Veeramachaneni'
description = 'The Machine Learning Bazaar Experiments'
user = 'HDI-project'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = autobazaar.__version__
# The full version, including alpha/beta/rc tags.
release = autobazaar.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['.py', '_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Readthedocs additions
html_context = {
'display_github': True,
'github_user': user,
'github_repo': project,
'github_version': 'master',
'conf_py_path': '/docs/',
}
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'collapse_navigation': False,
'display_version': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'images/dai-logo-white.ico'
html_logo = 'images/dai-logo-white-200.png'
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = slug + 'doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [(
master_doc,
slug + '.tex',
title,
author,
'manual'
)]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
master_doc,
slug,
title,
[author],
1
)]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
master_doc,
slug,
title,
author,
slug,
description,
'Miscellaneous'
)]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/train_autobazaar_old.py | training/helpers/archived/train_autobazaar_old.py | import warnings, datetime, uuid, os, json, shutil, pickle, random
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
import pandas as pd
import csv, io
'''
Taken from the example here:
https://github.com/HDI-Project/BTB/blob/master/notebooks/BTBSession%20-%20Example.ipynb
Note that autobazaar is used as the primary model trainer for BTB sessions.
https://github.com/HDI-Project/AutoBazaar
Tutorial:
https://hdi-project.github.io/AutoBazaar/readme.html#install
Data: Must be formatted (https://github.com/mitll/d3m-schema/blob/master/documentation/datasetSchema.md)
Case 1: Single table
In many openml and other tabular cases, all the learning data is contained in a single tabular file. In this case, an example dataset will look like the following.
─ 196_autoMpg
├── 196_autoMpg_dataset
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
├── 196_autoMpg_problem
│ ├── dataSplits.csv
│ └── problemDoc.json
├── SCORE
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ ├── problem_TEST
│ │ ├── dataSplits.csv
│ │ └── problemDoc.json
│ └── targets.csv
├── TEST
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ └── problem_TEST
│ ├── dataSplits.csv
│ └── problemDoc.json
└── TRAIN
├── dataset_TRAIN
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
└── problem_TRAIN
├── dataSplits.csv
└── problemDoc.json
'''
def prev_dir(directory):
    """Return the parent of *directory* by dropping its last '/'-separated part.

    Behaves like ``os.path.dirname`` for simple slash-separated paths:
    ``prev_dir('a/b/c') == 'a/b'``; a path with no slash yields ''.

    Parameters
    ----------
    directory : str path using '/' separators.

    Returns
    -------
    str: everything before the final '/' (empty string if there is none).
    """
    return '/'.join(directory.split('/')[:-1])
def convert_(X_train, y_train, labels):
    """Convert a feature matrix and target list into a D3M-style DataFrame.

    Builds one column per feature label (all labels except the last, which
    is assumed to name the target), then appends the targets as 'class_'
    and a running integer index as 'd3mIndex'.

    Parameters
    ----------
    X_train : list of feature rows (one list of values per sample)
    y_train : list of target values, parallel to X_train
    labels : list of column names; only labels[:-1] are used as features

    Returns
    -------
    pandas.DataFrame with the feature columns plus 'class_' and 'd3mIndex'.
    """
    feature_list = labels
    data = dict()
    indices = list(range(len(X_train)))
    for i in range(len(X_train)):
        for j in range(len(feature_list) - 1):
            try:
                # append each cell to its column (O(1) per cell, instead of
                # rebuilding the list with `+ [x]` on every append)
                data.setdefault(feature_list[j], []).append(X_train[i][j])
            except IndexError:
                # row i is shorter than the feature list; skip missing cells
                # (the original silently swallowed this with a bare except)
                pass
    data['class_'] = y_train
    data['d3mIndex'] = indices
    return pd.DataFrame(data, columns=list(data))
def split_data(data):
    """Randomly split *data* 80/20 into train/test and write D3M split files.

    Writes three files into the current working directory:
      * ``dataSplits.csv`` -- d3mIndex/type/repeat/fold rows marking each
        sample as TRAIN or TEST
      * ``train.csv`` / ``test.csv`` -- the corresponding row subsets

    Parameters
    ----------
    data : pandas.DataFrame holding all samples.

    Returns
    -------
    str: the name of the split file ('dataSplits.csv').
    """
    train_num = int(0.80 * len(data))
    test_num = len(data) - train_num
    print('TRAINING SAMPLES')
    print(train_num)
    print('TESTING SAMPLES')
    print(test_num)

    # shuffle the row indices so the split is random; the first train_num
    # shuffled indices become TRAIN, the rest TEST
    i_list = list(range(train_num + test_num))
    random.shuffle(i_list)
    train_rows = i_list[:train_num]
    test_rows = i_list[train_num:]
    rows = [[idx, 'TRAIN', 0, 0] for idx in train_rows]
    rows += [[idx, 'TEST', 0, 0] for idx in test_rows]

    filename = 'dataSplits.csv'
    # newline='' is required for csv.writer; without it blank rows appear
    # in the output on Windows
    with open(filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(['d3mIndex', 'type', 'repeat', 'fold'])
        csvwriter.writerows(rows)

    # write the corresponding row subsets
    data.iloc[train_rows, :].to_csv('train.csv')
    data.iloc[test_rows, :].to_csv('test.csv')

    return filename
def create_dataset_json(foldername, trainingcsv):
    """Write the D3M ``datasetDoc.json`` describing *trainingcsv*.

    Every column is declared as a real-valued attribute except ``class_``,
    which is marked as the suggested prediction target.

    Parameters
    ----------
    foldername : str used to derive the dataset ID ('<foldername>_dataset')
    trainingcsv : path of the learning-data CSV whose header names the columns

    Returns
    -------
    (dataset_id, filename, i1): the dataset ID, the written file name
    ('datasetDoc.json'), and the column index of the 'class_' target
    (-1 when no 'class_' column exists; previously this was a NameError).
    """
    dataset_name = foldername
    dataset_id = "%s_dataset" % (foldername)
    columns = list()
    colnames = list(pd.read_csv(trainingcsv))
    i1 = -1  # index of the 'class_' target column, if found
    for i in range(len(colnames)):
        if colnames[i] != 'class_':
            columns.append({"colIndex": i,
                            "colName": colnames[i],
                            "colType": "real",
                            "role": ["attribute"]})
        else:
            columns.append({"colIndex": i,
                            "colName": 'class_',
                            "colType": "real",
                            "role": ["suggestedTarget"]})
            i1 = i

    data = {"about":
            {
                "datasetID": dataset_id,
                "datasetName": dataset_name,
                "humanSubjectsResearch": False,
                "license": "CC",
                "datasetSchemaVersion": "3.0",
                "redacted": False
            },
            "dataResources":
            [
                {
                    "resID": "0",
                    "resPath": 'tables/learningData.csv',
                    "resType": "table",
                    "resFormat": ["text/csv"],
                    "isCollection": False,
                    "columns": columns,
                }
            ]
            }

    filename = 'datasetDoc.json'
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)

    return dataset_id, filename, i1
def create_problem_json(mtype, folder, i1):
    """Write the D3M ``problemDoc.json`` for the current training session.

    Parameters
    ----------
    mtype : 'classification' or 'regression'
    folder : base name used to derive the problem/dataset IDs
    i1 : column index of the 'class_' target in the learning data

    Returns
    -------
    str: the name of the written file ('problemDoc.json'); previously the
    function returned None even though callers assigned the result as a
    filename.

    Raises
    ------
    ValueError: if *mtype* is not a recognized task type (previously an
    unknown mtype crashed later with a NameError on `data`).
    """
    if mtype == 'classification':
        task_type = 'classification'
        task_subtype = 'multiClass'
        metric = 'accuracy'
    elif mtype == 'regression':
        task_type = 'regression'
        task_subtype = 'univariate'
        metric = 'meanSquaredError'
    else:
        raise ValueError("mtype must be 'classification' or 'regression', got %r" % (mtype,))

    data = {
        "about": {
            "problemID": "%s_problem" % (folder),
            "problemName": "%s_problem" % (folder),
            "problemDescription": "not applicable",
            "taskType": task_type,
            "taskSubType": task_subtype,
            "problemVersion": "1.0",
            "problemSchemaVersion": "3.0"
        },
        "inputs": {
            "data": [
                {
                    # must match the datasetID written by create_dataset_json;
                    # the classification branch previously omitted '_dataset'
                    "datasetID": "%s_dataset" % (folder),
                    "targets": [
                        {
                            "targetIndex": 0,
                            "resID": "0",
                            "colIndex": i1,
                            "colName": "class_"
                        }
                    ]
                }
            ],
            "dataSplits": {
                "method": "holdOut",
                "testSize": 0.2,
                "stratified": True,
                "numRepeats": 0,
                "randomSeed": 42,
                "splitsFile": "dataSplits.csv"
            },
            "performanceMetrics": [
                {
                    "metric": metric
                }
            ]
        },
        "expectedOutputs": {
            "predictionsFile": "predictions.csv"
        }
    }

    filename = 'problemDoc.json'
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)

    return filename
def train_autobazaar(alldata, labels, mtype, jsonfile, problemtype, default_features, settings):
    """Train and persist the best AutoBazaar (abz) pipeline for a dataset.

    Builds a D3M-formatted dataset tree from the featurized samples, runs
    the ``abz search`` CLI over it, evaluates the best pipeline it produced
    and copies the resulting model + metadata into '<problemtype>_models'.

    Parameters
    ----------
    alldata : feature matrix (list of sample feature vectors)
    labels : target values, parallel to alldata
    mtype : 'c' for classification or 'r' for regression
    jsonfile : name of the settings .json file (used to derive model names)
    problemtype : sample type (e.g. 'audio'); also names the models folder
    default_features : feature set identifier used during featurization
    settings : settings dict stored alongside the trained model

    Returns
    -------
    (model_name, model_dir): file name of the pickled model and the
    directory it was saved into.
    """
    print('installing package configuration')

    # create file names
    model_name = jsonfile[0:-5] + '_' + str(default_features).replace("'", '').replace('"', '') + '_btb'
    if mtype == 'c':
        model_name = model_name + '_classification'
        mtype = 'classification'
    elif mtype == 'r':
        model_name = model_name + '_regression'
        mtype = 'regression'

    folder = model_name
    jsonfilename = model_name + '.json'
    csvfilename = model_name + '.csv'
    model_name = model_name + '.pickle'

    # this should be the model directory
    hostdir = os.getcwd()

    # open a sample featurization to recover the feature column labels
    labels_dir = prev_dir(hostdir) + '/train_dir/' + jsonfilename.split('_')[0]
    os.chdir(labels_dir)
    features_file = ''
    for item in os.listdir():
        if item.endswith('.json'):
            features_file = item
    labels_ = json.load(open(features_file))['features'][problemtype][default_features]['labels']
    os.chdir(hostdir)

    # make a clean temporary folder for the training session
    if os.path.exists(folder):
        shutil.rmtree(folder)
    os.mkdir(folder)
    os.chdir(folder)

    # make the data arrays
    print('creating training data...')
    all_data = convert_(alldata, labels, labels_)
    all_data.to_csv(csvfilename, index=False)
    data = pd.read_csv(csvfilename)

    # create required .JSON files and the train/test split
    dataset_id, dataset_filename, i1 = create_dataset_json(folder, csvfilename)
    problem_filename = create_problem_json(mtype, folder, i1)
    split_data(data)

    # directory that now holds the generated D3M artifacts
    abz_dir = os.getcwd()

    # build the D3M directory tree expected by `abz search`:
    # <folder>/{<folder>_dataset, <folder>_problem, TRAIN, TEST, SCORE}
    dataset_folder = folder + '_dataset'
    problem_folder = folder + '_problem'

    # <folder>_dataset/: datasetDoc.json + tables/learningData.csv
    os.mkdir(dataset_folder)
    os.chdir(dataset_folder)
    os.mkdir('tables')
    shutil.copy(abz_dir + '/datasetDoc.json', os.getcwd() + '/datasetDoc.json')
    shutil.copy(abz_dir + '/' + csvfilename, os.getcwd() + '/tables/' + csvfilename)
    os.chdir('tables')
    os.rename(csvfilename, 'learningData.csv')

    # <folder>_problem/: problemDoc.json + dataSplits.csv
    os.chdir(abz_dir)
    os.mkdir(problem_folder)
    os.chdir(problem_folder)
    shutil.copy(abz_dir + '/problemDoc.json', os.getcwd() + '/problemDoc.json')
    shutil.copy(abz_dir + '/dataSplits.csv', os.getcwd() + '/dataSplits.csv')

    # TEST/ tree
    os.chdir(abz_dir)
    os.mkdir('TEST')
    os.chdir('TEST')
    os.mkdir('dataset_TEST')
    shutil.copy(abz_dir + '/' + dataset_folder + '/datasetDoc.json', os.getcwd() + '/dataset_TEST/datasetDoc.json')
    os.mkdir('problem_TEST')
    shutil.copy(abz_dir + '/' + problem_folder + '/problemDoc.json', os.getcwd() + '/problem_TEST/problemDoc.json')
    shutil.copy(abz_dir + '/' + problem_folder + '/dataSplits.csv', os.getcwd() + '/problem_TEST/dataSplits.csv')
    os.chdir('dataset_TEST')
    os.mkdir('tables')
    shutil.copy(abz_dir + '/test.csv', os.getcwd() + '/tables/test.csv')
    os.chdir('tables')
    os.rename('test.csv', 'learningData.csv')

    # TRAIN/ tree
    os.chdir(abz_dir)
    os.mkdir('TRAIN')
    os.chdir('TRAIN')
    os.mkdir('dataset_TRAIN')
    os.chdir('dataset_TRAIN')
    os.mkdir('tables')
    shutil.copy(abz_dir + '/datasetDoc.json', os.getcwd() + '/datasetDoc.json')
    shutil.copy(abz_dir + '/train.csv', os.getcwd() + '/tables/train.csv')
    os.chdir('tables')
    os.rename('train.csv', 'learningData.csv')
    os.chdir(abz_dir + '/TRAIN')
    os.mkdir('problem_TRAIN')
    shutil.copy(abz_dir + '/' + problem_folder + '/problemDoc.json', os.getcwd() + '/problem_TRAIN/problemDoc.json')
    shutil.copy(abz_dir + '/' + problem_folder + '/dataSplits.csv', os.getcwd() + '/problem_TRAIN/dataSplits.csv')

    # SCORE/ tree (copies of the TEST artifacts)
    os.chdir(abz_dir)
    os.mkdir('SCORE')
    os.chdir('SCORE')
    shutil.copytree(abz_dir + '/TEST/dataset_TEST', os.getcwd() + '/dataset_SCORE')
    shutil.copytree(abz_dir + '/TEST/problem_TEST', os.getcwd() + '/problem_SCORE')
    os.chdir(hostdir)

    # stage the dataset under input/ where `abz search` expects it
    if not os.path.exists('input'):
        os.mkdir('input')
    if os.path.exists(os.getcwd() + '/input/' + folder):
        shutil.rmtree(os.getcwd() + '/input/' + folder)
    shutil.copytree(folder, os.getcwd() + '/input/' + folder)

    os.system('abz search %s -c20,30,40 -b10' % (folder))

    # collect the best pipeline's params (.json) and model (.pkl) from output/
    os.chdir('output')
    g = dict()
    picklefile = ''
    for item in os.listdir():
        if item.endswith('.json'):
            g = json.load(open(item))
        elif item.endswith('.pkl'):
            picklefile = folder + '.pickle'
            shutil.copy(os.getcwd() + '/' + item, os.getcwd() + '/' + picklefile)

    model = pickle.load(open(picklefile, 'rb'))

    # load some training data in
    X_train, X_test, y_train, y_test = train_test_split(alldata, labels, train_size=0.750, test_size=0.250)

    # make some predictions and compute the appropriate error measure
    y_pred = model.predict(X_test)
    if mtype == 'classification':
        accuracy = accuracy_score(y_test, y_pred)
        data = {'sample type': problemtype,
                'feature_set': default_features,
                'model name': picklefile,
                'accuracy': float(accuracy),
                'model_type': 'autobazaar_%s' % (mtype),
                'settings': settings,
                'training params': g}
    elif mtype == 'regression':
        # fixed: previously compared against the undefined name `y_true`
        mse_error = mean_squared_error(y_test, y_pred)
        data = {'sample type': problemtype,
                'feature_set': default_features,
                'model name': picklefile,
                'mse_error': float(mse_error),
                'model_type': 'autobazaar_%s' % (mtype),
                'settings': settings,
                'training params': g}

    # write the metadata next to the model; use a separate name so the
    # `jsonfile` parameter is no longer shadowed by an open file object
    json_name = folder + '.json'
    with open(json_name, 'w') as metadata_file:
        json.dump(data, metadata_file)

    # transfer the model + metadata into <problemtype>_models/
    os.chdir(hostdir)
    if not os.path.exists(problemtype + '_models'):
        os.mkdir(problemtype + '_models')
    os.chdir(problemtype + '_models')
    shutil.copy(hostdir + '/output/' + picklefile, os.getcwd() + '/' + picklefile)
    # fixed: previously passed the (closed) file object instead of its path
    shutil.copy(hostdir + '/output/' + json_name, os.getcwd() + '/' + json_name)

    # delete the intermediate directories
    os.chdir(hostdir)
    shutil.rmtree('input')
    shutil.rmtree('output')
    shutil.rmtree(folder)

    # go back to the model directory and report where the model lives
    os.chdir(problemtype + '_models')
    model_dir = hostdir + '/%s_models/' % (problemtype)
    model_name = picklefile

    return model_name, model_dir
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/train_autobazaar.py | training/helpers/archived/train_autobazaar.py | import warnings, datetime, uuid, os, json, shutil, pickle, random
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
import pandas as pd
import csv, io, time
def prev_dir(directory):
    """Return the parent of *directory* by dropping its last '/'-separated part.

    Behaves like ``os.path.dirname`` for simple slash-separated paths:
    ``prev_dir('a/b/c') == 'a/b'``; a path with no slash yields ''.

    Parameters
    ----------
    directory : str path using '/' separators.

    Returns
    -------
    str: everything before the final '/' (empty string if there is none).
    """
    return '/'.join(directory.split('/')[:-1])
# On import, install the bundled AutoBazaar package in development mode so
# the `abz` command used later is available; then return to the original
# working directory.
print('installing package configuration')
curdir=os.getcwd()
os.chdir(prev_dir(curdir)+'/training/helpers/autobazaar')
os.system('make install-develop')
os.chdir(curdir)
'''
Taken from the example here:
https://github.com/HDI-Project/BTB/blob/master/notebooks/BTBSession%20-%20Example.ipynb
Note that autobazaar is used as the primary model trainer for BTB sessions.
https://github.com/HDI-Project/AutoBazaar
Tutorial:
https://hdi-project.github.io/AutoBazaar/readme.html#install
Data: Must be formatted (https://github.com/mitll/d3m-schema/blob/master/documentation/datasetSchema.md)
Case 1: Single table
In many openml and other tabular cases, all the learning data is contained in a single tabular file. In this case, an example dataset will look like the following.
─ 196_autoMpg
├── 196_autoMpg_dataset
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
├── 196_autoMpg_problem
│ ├── dataSplits.csv
│ └── problemDoc.json
├── SCORE
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ ├── problem_TEST
│ │ ├── dataSplits.csv
│ │ └── problemDoc.json
│ └── targets.csv
├── TEST
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ └── problem_TEST
│ ├── dataSplits.csv
│ └── problemDoc.json
└── TRAIN
├── dataset_TRAIN
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
└── problem_TRAIN
├── dataSplits.csv
└── problemDoc.json
'''
def convert_(X_train, y_train, labels):
    """Convert a feature matrix and target list into a D3M-style DataFrame.

    Builds one column per feature label (all labels except the last, which
    is assumed to name the target), then appends the targets as 'class_'
    and a running integer index as 'd3mIndex'.

    Parameters
    ----------
    X_train : list of feature rows (one list of values per sample)
    y_train : list of target values, parallel to X_train
    labels : list of column names; only labels[:-1] are used as features

    Returns
    -------
    pandas.DataFrame with the feature columns plus 'class_' and 'd3mIndex'.
    """
    feature_list = labels
    data = dict()
    indices = list(range(len(X_train)))
    for i in range(len(X_train)):
        for j in range(len(feature_list) - 1):
            try:
                # append each cell to its column (O(1) per cell, instead of
                # rebuilding the list with `+ [x]` on every append)
                data.setdefault(feature_list[j], []).append(X_train[i][j])
            except IndexError:
                # row i is shorter than the feature list; skip missing cells
                # (the original silently swallowed this with a bare except)
                pass
    data['class_'] = y_train
    data['d3mIndex'] = indices
    return pd.DataFrame(data, columns=list(data))
def split_data(data):
    """Randomly split *data* 80/20 into train/test and write D3M split files.

    Writes three files into the current working directory:
      * ``dataSplits.csv`` -- d3mIndex/type/repeat/fold rows marking each
        sample as TRAIN or TEST
      * ``train.csv`` / ``test.csv`` -- the corresponding row subsets

    Parameters
    ----------
    data : pandas.DataFrame holding all samples.

    Returns
    -------
    str: the name of the split file ('dataSplits.csv').
    """
    train_num = int(0.80 * len(data))
    test_num = len(data) - train_num
    print('TRAINING SAMPLES')
    print(train_num)
    print('TESTING SAMPLES')
    print(test_num)

    # shuffle the row indices so the split is random; the first train_num
    # shuffled indices become TRAIN, the rest TEST
    i_list = list(range(train_num + test_num))
    random.shuffle(i_list)
    train_rows = i_list[:train_num]
    test_rows = i_list[train_num:]
    rows = [[idx, 'TRAIN', 0, 0] for idx in train_rows]
    rows += [[idx, 'TEST', 0, 0] for idx in test_rows]

    filename = 'dataSplits.csv'
    # newline='' is required for csv.writer; without it blank rows appear
    # in the output on Windows
    with open(filename, 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(['d3mIndex', 'type', 'repeat', 'fold'])
        csvwriter.writerows(rows)

    # write the corresponding row subsets
    data.iloc[train_rows, :].to_csv('train.csv')
    data.iloc[test_rows, :].to_csv('test.csv')

    return filename
def create_dataset_json(foldername, trainingcsv):
    """Write the D3M ``datasetDoc.json`` describing *trainingcsv*.

    Every column is declared as a real-valued attribute except ``class_``,
    which is marked as the suggested prediction target.

    Parameters
    ----------
    foldername : str used to derive the dataset ID ('<foldername>_dataset')
    trainingcsv : path of the learning-data CSV whose header names the columns

    Returns
    -------
    (dataset_id, filename, i1): the dataset ID, the written file name
    ('datasetDoc.json'), and the column index of the 'class_' target
    (-1 when no 'class_' column exists; previously this was a NameError).
    """
    dataset_name = foldername
    dataset_id = "%s_dataset" % (foldername)
    columns = list()
    colnames = list(pd.read_csv(trainingcsv))
    i1 = -1  # index of the 'class_' target column, if found
    for i in range(len(colnames)):
        if colnames[i] != 'class_':
            columns.append({"colIndex": i,
                            "colName": colnames[i],
                            "colType": "real",
                            "role": ["attribute"]})
        else:
            columns.append({"colIndex": i,
                            "colName": 'class_',
                            "colType": "real",
                            "role": ["suggestedTarget"]})
            i1 = i

    data = {"about":
            {
                "datasetID": dataset_id,
                "datasetName": dataset_name,
                "humanSubjectsResearch": False,
                "license": "CC",
                "datasetSchemaVersion": "3.0",
                "redacted": False
            },
            "dataResources":
            [
                {
                    "resID": "0",
                    "resPath": 'tables/learningData.csv',
                    "resType": "table",
                    "resFormat": ["text/csv"],
                    "isCollection": False,
                    "columns": columns,
                }
            ]
            }

    filename = 'datasetDoc.json'
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)

    return dataset_id, filename, i1
def create_problem_json(mtype, folder, i1):
    """Write the D3M ``problemDoc.json`` for the current training session.

    Parameters
    ----------
    mtype : 'c' (classification) or 'r' (regression)
    folder : base name used to derive the problem/dataset IDs
    i1 : column index of the 'class_' target in the learning data

    Returns
    -------
    str: the name of the written file ('problemDoc.json'); previously the
    function returned None even though callers assigned the result as a
    filename.

    Raises
    ------
    ValueError: if *mtype* is not 'c' or 'r' (previously an unknown mtype
    crashed later with a NameError on `data`).
    """
    if mtype == 'c':
        task_type = 'classification'
        task_subtype = 'multiClass'
        metric = 'accuracy'
    elif mtype == 'r':
        task_type = 'regression'
        task_subtype = 'univariate'
        metric = 'meanSquaredError'
    else:
        raise ValueError("mtype must be 'c' or 'r', got %r" % (mtype,))

    data = {
        "about": {
            "problemID": "%s_problem" % (folder),
            "problemName": "%s_problem" % (folder),
            "problemDescription": "not applicable",
            "taskType": task_type,
            "taskSubType": task_subtype,
            "problemVersion": "1.0",
            "problemSchemaVersion": "3.0"
        },
        "inputs": {
            "data": [
                {
                    # must match the datasetID written by create_dataset_json;
                    # the classification branch previously omitted '_dataset'
                    "datasetID": "%s_dataset" % (folder),
                    "targets": [
                        {
                            "targetIndex": 0,
                            "resID": "0",
                            "colIndex": i1,
                            "colName": "class_"
                        }
                    ]
                }
            ],
            "dataSplits": {
                "method": "holdOut",
                "testSize": 0.2,
                "stratified": True,
                "numRepeats": 0,
                "randomSeed": 42,
                "splitsFile": "dataSplits.csv"
            },
            "performanceMetrics": [
                {
                    "metric": metric
                }
            ]
        },
        "expectedOutputs": {
            "predictionsFile": "predictions.csv"
        }
    }

    filename = 'problemDoc.json'
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)

    return filename
def train_autobazaar(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# create file names
model_name=common_name_model+'.pickle'
folder=common_name_model
# this should be the model directory
hostdir=os.getcwd()
# make a temporary folder for the training session
try:
os.mkdir(folder)
os.chdir(folder)
except:
shutil.rmtree(folder)
os.mkdir(folder)
os.chdir(folder)
# make the data arrays
print('creating training data...')
if transform_model != '':
csvfilename=common_name_model+'_all.csv'
shutil.copy(hostdir+'/'+model_session+'/data/'+common_name_model.split('_')[0]+'_all.csv', os.getcwd()+'/'+csvfilename)
else:
csvfilename=common_name_model+'_all_transformed.csv'
shutil.copy(hostdir+'/'+model_session+'/data/'+common_name_model.split('_')[0]+'_all_transformed.csv', os.getcwd()+'/'+csvfilename)
# create required .JSON files
data=pd.read_csv(csvfilename)
dataset_id, dataset_filename, i1=create_dataset_json(folder, csvfilename)
problem_filename=create_problem_json(mtype, folder, i1)
split_data(data)
# get the current directory
abz_dir=os.getcwd()
# make necessary directories
# now create proper tree structure
'''
─ 196_autoMpg
├── 196_autoMpg_dataset
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
├── 196_autoMpg_problem
│ ├── dataSplits.csv
│ └── problemDoc.json
├── SCORE
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ ├── problem_TEST
│ │ ├── dataSplits.csv
│ │ └── problemDoc.json
│ └── targets.csv
├── TEST
│ ├── dataset_TEST
│ │ ├── datasetDoc.json
│ │ └── tables
│ │ └── learningData.csv
│ └── problem_TEST
│ ├── dataSplits.csv
│ └── problemDoc.json
└── TRAIN
├── dataset_TRAIN
│ ├── datasetDoc.json
│ └── tables
│ └── learningData.csv
└── problem_TRAIN
├── dataSplits.csv
└── problemDoc.json
'''
dataset_folder=folder+'_dataset'
problem_folder=folder+'_problem'
# make datasets folder
os.mkdir(dataset_folder)
os.chdir(dataset_folder)
os.mkdir('tables')
shutil.copy(abz_dir+'/datasetDoc.json', os.getcwd()+'/datasetDoc.json')
shutil.copy(abz_dir+'/'+csvfilename, os.getcwd()+'/tables/'+csvfilename)
os.chdir('tables')
os.rename(csvfilename, 'learningData.csv')
# make problem folder
os.chdir(abz_dir)
os.mkdir(problem_folder)
os.chdir(problem_folder)
shutil.copy(abz_dir+'/problemDoc.json', os.getcwd()+'/problemDoc.json')
shutil.copy(abz_dir+'/dataSplits.csv', os.getcwd()+'/dataSplits.csv')
os.chdir(abz_dir)
os.mkdir('TEST')
os.chdir('TEST')
os.mkdir('dataset_TEST')
shutil.copy(abz_dir+'/'+dataset_folder+'/datasetDoc.json', os.getcwd()+'/dataset_TEST/datasetDoc.json')
os.mkdir('problem_TEST')
shutil.copy(abz_dir+'/'+problem_folder+'/problemDoc.json',os.getcwd()+'/problem_TEST/problemDoc.json')
shutil.copy(abz_dir+'/'+problem_folder+'/dataSplits.csv', os.getcwd()+'/problem_TEST/dataSplits.csv')
os.chdir('dataset_TEST')
os.mkdir('tables')
shutil.copy(abz_dir+'/test.csv', os.getcwd()+'/tables/test.csv')
os.chdir('tables')
os.rename('test.csv', 'learningData.csv')
os.chdir(abz_dir)
os.mkdir('TRAIN')
os.chdir('TRAIN')
os.mkdir('dataset_TRAIN')
os.chdir('dataset_TRAIN')
os.mkdir('tables')
shutil.copy(abz_dir+'/datasetDoc.json', os.getcwd()+'/datasetDoc.json')
shutil.copy(abz_dir+'/train.csv', os.getcwd()+'/tables/train.csv')
os.chdir('tables')
os.rename('train.csv','learningData.csv')
os.chdir(abz_dir+'/TRAIN')
os.mkdir('problem_TRAIN')
shutil.copy(abz_dir+'/'+problem_folder+'/problemDoc.json',os.getcwd()+'/problem_TRAIN/problemDoc.json')
shutil.copy(abz_dir+'/'+problem_folder+'/dataSplits.csv', os.getcwd()+'/problem_TRAIN/dataSplits.csv')
os.chdir(abz_dir)
os.mkdir('SCORE')
os.chdir('SCORE')
shutil.copytree(abz_dir+'/TEST/dataset_TEST',os.getcwd()+'/dataset_SCORE')
shutil.copytree(abz_dir+'/TEST/problem_TEST', os.getcwd()+'/problem_SCORE')
os.chdir(hostdir)
# this works for really any input configuration - regression or classificatoin (as this is covered in config files)
try:
os.mkdir('input')
except:
pass
# remove if file exists
try:
shutil.copytree(folder, os.getcwd()+'/input/'+folder)
except:
shutil.rmtree(os.getcwd()+'/input/'+folder)
shutil.copytree(folder, os.getcwd()+'/input/'+folder)
print(os.getcwd())
time.sleep(30)
os.system('abz search %s -c20,30,40 -b10'%(folder))
# now go to output folder
os.chdir('output')
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i].endswith('.json'):
g=json.load(open(listdir[i]))
# os.remove(listdir[i])
elif listdir[i].endswith('.pkl'):
picklefile=folder+'.pickle'
shutil.copy(os.getcwd()+'/'+listdir[i],hostdir+'/'+model_name)
files.append(model_name)
model_dir=hostdir
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/WIP_train_mlbox-WIP.py | training/helpers/archived/WIP_train_mlbox-WIP.py | import os, json, shutil, pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_log_error
import pandas as pd
print('installing library')
os.system('pip3 install mlbox==0.8.4')
from mlbox.preprocessing import *
from mlbox.optimisation import *
from mlbox.prediction import *
'''
From the documentation: https://mlbox.readthedocs.io/en/latest/
'''
# install mlblocks
def train_mlbox(alldata, labels, mtype, jsonfile, problemtype, default_features, settings):
# name model
modelname=jsonfile[0:-5]+'_mlbox_'+str(default_features).replace("'",'').replace('"','')
# training and testing sets
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, train_size=0.750, test_size=0.250)
df = {"train" : pd.DataFrame(X_train), "target" : pd.DataFrame(y_train), "test" : pd.DataFrame(X_test)}
print(df)
if mtype=='c':
# rename files with classification
modelname=modelname+'_classification'
model_name=modelname+'.pickle'
jsonfilename=modelname+'.json'
# from sklearn.datasets import load_boston
# dataset = load_boston()
# df = {"train" : pd.DataFrame(dataset.data), "target" : pd.Series(dataset.target)}
# print(df['train'][0])
# print(type(df['train'][0]))
# data = Drift_thresholder().fit_transform(df) #deleting non-stable variables
space = {
'ne__numerical_strategy' : {"space" : [0, 'mean']},
'ce__strategy' : {"space" : ["label_encoding", "random_projection", "entity_embedding"]},
'fs__strategy' : {"space" : ["variance", "rf_feature_importance"]},
'fs__threshold': {"search" : "choice", "space" : [0.1, 0.2, 0.3]},
'est__strategy' : {"space" : ["LightGBM"]},
'est__max_depth' : {"search" : "choice", "space" : [5,6]},
'est__subsample' : {"search" : "uniform", "space" : [0.6,0.9]}
}
best = Optimiser().optimise(space, df, max_evals = 5)
mse_ =Optimiser().evaluate(best, df)
pipeline = Predictor().fit_predict(best, df)
print(best)
print(mse_)
# saving model
print('saving model')
modelfile=open(model_name,'wb')
pickle.dump(pipeline, modelfile)
modelfile.close()
# SAVE JSON FILE
print('saving .JSON file (%s)'%(jsonfilename))
jsonfile=open(jsonfilename,'w')
data={'sample type': problemtype,
'feature_set':default_features,
'model name':jsonfilename[0:-5]+'.pickle',
'accuracy':accuracy,
'model type':'mlblocks_regression',
'settings': settings,
}
json.dump(data,jsonfile)
jsonfile.close()
if mtype=='r':
# rename files with regression
modelname=modelname+'_regression'
model_name=modelname+'.pickle'
jsonfilename=modelname+'.json'
params = {"ne__numerical_strategy" : 0,
"ce__strategy" : "label_encoding",
"fs__threshold" : 0.1,
"stck__base_estimators" : [Regressor(strategy="RandomForest"), Regressor(strategy="ExtraTrees")],
"est__strategy" : "Linear"}
best = Optimiser().optimise(params, df, max_evals = 5)
mse_error =Optimiser().evaluate(best, df)
# saving model
print('saving model')
modelfile=open(model_name,'wb')
pickle.dump(pipeline, modelfile)
modelfile.close()
# save JSON
print('saving .JSON file (%s)'%(jsonfilename))
jsonfile=open(jsonfilename,'w')
data={'sample type': problemtype,
'feature_set':default_features,
'model name':jsonfilename[0:-5]+'.pickle',
'mse_error':mse_error,
'model type':'mlblocks_regression',
'settings': settings,
}
json.dump(data,jsonfile)
jsonfile.close()
cur_dir2=os.getcwd()
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
# now move all the files over to proper model directory
shutil.copy(cur_dir2+'/'+model_name, os.getcwd()+'/'+model_name)
shutil.copy(cur_dir2+'/'+jsonfilename, os.getcwd()+'/'+jsonfilename)
os.remove(cur_dir2+'/'+model_name)
os.remove(cur_dir2+'/'+jsonfilename)
# get model directory
model_dir=os.getcwd()
return model_name, model_dir | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/train_pLDA.py | training/helpers/archived/train_pLDA.py | '''
PLDA implementation from
https://github.com/RaviSoji/plda/blob/master/mnist_demo/mnist_demo.ipynb
'''
import os, sys, pickle
import helpers.plda.plda as plda
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
def train_pLDA(alldata, labels):
# get train and test data
training_data, testing_data, training_labels, testing_labels = train_test_split(alldata, labels, train_size=0.750, test_size=0.250)
training_data = training_data.reshape(training_data.shape)
testing_data = testing_data.reshape(testing_data.shape)
# optimize number of principal components in terms of accuracy
acclist=list()
compnum=list()
for i in range(2, len(training_data[0]),1):
#try:
classifier = plda.Classifier()
numcomponents=i
classifier.fit_model(training_data, training_labels, n_principal_components=numcomponents)
predictions, log_p_predictions = classifier.predict(testing_data)
accuracy=(testing_labels == predictions).mean()
print(accuracy)
if accuracy > 1:
pass
else:
acclist.append(accuracy)
print(accuracy)
print(i)
#except:
# if dimension too high, break it
#print('error')
maxacc=max(acclist)
numcomponents=acclist.index(maxacc)+1
# now retrain with proper parameters
classifier = plda.Classifier()
classifier.fit_model(training_data, training_labels, n_principal_components=numcomponents)
predictions, log_p_predictions = classifier.predict(testing_data)
accuracy=(testing_labels == predictions).mean()
print('max acc %s with %s components'%(maxacc, numcomponents))
Psi = classifier.model.Psi
A = classifier.model.A
inv_A = classifier.model.inv_A
m = classifier.model.m
# Indices of the subspace used for classification.
relevant_U_dims = classifier.model.relevant_U_dims
# # Prior Gaussian Parameters
# classifier.model.prior_params.keys()
# # Posterior Gaussian Parameters
# classifier.model.posterior_params.keys()
# classifier.model.posterior_params[0].keys()
# # Posterior Predictive Gaussian Parameters
# classifier.model.posterior_predictive_params.keys()
# classifier.model.posterior_predictive_params[0].keys()
'''
Transforming Data to PLDA Space
There are 4 "spaces" that result from the transformations the model performs:
Data space ('D'),
Preprocessed data space ('X'),
Latent space ('U'), and
The "effective" subspace of the latent space ('U_model'), which is essentially the set of dimensions the model actually uses for prediction.
'''
# U_model = classifier.model.transform(training_data, from_space='D', to_space='U_model')
# print(training_data.shape)
# print(U_model.shape)
# D = classifier.model.transform(U_model, from_space='U_model', to_space='D')
# print(U_model.shape)
# print(D.shape)
# create dump of classifier
print('saving classifier to disk')
f=open('plda_classifier.pickle','wb')
pickle.dump(classifier,f)
f.close() | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/train_autosklearn.py | training/helpers/archived/train_autosklearn.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import GridSearchCV, train_test_split
import autosklearn.classification as asklc
import sklearn.metrics
import os, shutil
def train_autosklearn(alldata, labels, mtype, jsonfile, problemtype, default_features):
foldername=jsonfile[0:-5]+'_autosklearn_%s'%(default_features)
X_train, X_test, y_train, y_test = train_test_split(alldata,
labels,
train_size=0.750,
test_size=0.250,
random_state=42,
shuffle=True)
feature_types = (['numerical'] * len(X_train[0]))
automl = asklc.AutoSklearnClassifier(
time_left_for_this_task=60,
per_run_time_limit=300,
ml_memory_limit=10240,
tmp_folder=os.getcwd()+'/'+foldername+'_tmp',
output_folder=os.getcwd()+'/'+foldername,
delete_tmp_folder_after_terminate=False,
delete_output_folder_after_terminate=False)
automl.fit(X_train,
y_train,
dataset_name=jsonfile[0:-5],
feat_type=feature_types)
y_predictions = automl.predict(X_test)
acc= sklearn.metrics.accuracy_score(y_true=y_test,
y_pred=y_predictions)
print("Accuracy:", acc)
print('saving classifier to disk')
f=open(modelname+'.pickle','wb')
pickle.dump(automl,f)
f.close()
data={'sample type': problemtype,
'training script': 'autosklearn',
'feature_set':default_features,
'model name':modelname+'.pickle',
'accuracy':acc,
'model type':'sc_'+classifiername,
}
g2=open(modelname+'.json','w')
json.dump(data,g2)
g2.close()
cur_dir2=os.getcwd()
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
# now move all the files over to proper model directory
shutil.move(cur_dir2+'/'+modelname+'.json', os.getcwd()+'/'+modelname+'.json')
shutil.move(cur_dir2+'/'+modelname+'.pickle', os.getcwd()+'/'+modelname+'.pickle')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/archived/train_autokeras.py | training/helpers/archived/train_autokeras.py | '''
@Train_autokeras script.
Take in a dataset,
convert it to pytorch dataloader format,
ingest it in autokeras,
output model in './models directory'
This will make it easier to deploy automated machine learning models
into the future.
Note that grid search can be expensive + take up to 24 hours on most
GPUs / CPUs to optimize a model.
'''
from autokeras import MlpModule, CnnModule
from autokeras.backend.torch.loss_function import classification_loss
from autokeras.backend.torch.loss_function import regression_loss
from autokeras.nn.metric import Accuracy
from autokeras.utils import pickle_from_file
from sklearn.model_selection import train_test_split
# pre processing
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from pandas import Series
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing.data import QuantileTransformer
import numpy as np
from keras.utils import to_categorical
import keras.models
from keras import layers
from keras.models import Sequential,model_from_json
from keras.layers import Dense, Dropout
import torch, time, shutil, os
import torch.utils.data as utils
# skip the CNN for neural architecture search because it doesn't work unless an image type really.
def train_autokeras(classes, alldata, labels, mtype, jsonfile, problemtype, default_features):
## this is a CNN architecture
modelname=jsonfile[0:-5]+'_autokeras_%s'%(default_features)
TEST_FOLDER = modelname
x_train, x_test, y_train, y_test = train_test_split(alldata, labels, train_size=0.750, test_size=0.250)
# we have to do some odd re-shapes to get the data loader to work for the autokeras module (keep this in mind when loading new data in)
x_train=x_train.reshape(x_train.shape+(1,))
y_train=y_train.reshape(y_train.shape+(1,)+(1,))
x_test=x_test.reshape(x_test.shape+(1,))
y_test=y_test.reshape(y_test.shape+(1,)+(1,))
print(x_train.shape)
print(y_train.shape)
tensor_x = torch.stack([torch.Tensor(i) for i in x_train]) # transform to torch tensors
tensor_y = torch.stack([torch.Tensor(i) for i in y_train])
my_dataset = utils.TensorDataset(tensor_x, tensor_y) # create your datset
training_data = utils.DataLoader(my_dataset) # create your dataloader
tensor_x = torch.stack([torch.Tensor(i) for i in x_test]) # transform to torch tensors
tensor_y = torch.stack([torch.Tensor(i) for i in y_test])
my_dataset = utils.TensorDataset(tensor_x,tensor_y) # create your datset
test_data = utils.DataLoader(my_dataset) # create your dataloader
print(test_data)
input_shape=x_train[0].shape
n_output_node=1
# cnnModule = CnnModule(loss=classification_loss, metric=Accuracy, searcher_args={}, path=TEST_FOLDER, verbose=False)
if mtype == 'c':
# metric = Accuracy is for classification
# loss = classiciation_loss for classification
mlpModule = MlpModule(loss=classification_loss, metric=Accuracy, searcher_args={}, path=TEST_FOLDER, verbose=True)
elif mtype == 'r':
# metric = MSE for regression
# loss = regression_loss for regression
mlpModule = MlpModule(loss=regression_loss, metric=MSE, searcher_args={}, path=TEST_FOLDER, verbose=True)
timelimit=60
print('training MLP model for %s hours'%(timelimit/(60*60)))
mlpModule.fit(n_output_node, input_shape, training_data, test_data, time_limit=timelimit)
mlpModule.final_fit(training_data, test_data, trainer_args=None, retrain=False)
# # serialize model to JSON
# mlpModule.export_autokeras_model(modelname+'.pickle')
# print("\n Saved %s.pickle model to disk"%(modelname))
# # test opening model and making predictions
# model=pickle_from_file(modelname+'.pickle')
# results=model.evaluate(x_test, y_test)
# print(results)
cur_dir2=os.getcwd()
try:
os.chdir(problemtype+'_models')
except:
os.mkdir(problemtype+'_models')
os.chdir(problemtype+'_models')
# now move all the files over to proper model directory
shutil.copytree(cur_dir2+'/'+TEST_FOLDER, os.getcwd() + '/'+TEST_FOLDER)
shutil.rmtree(cur_dir2+'/'+TEST_FOLDER)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/setup.py | training/helpers/hyperband/setup.py | from __future__ import print_function
import sys
from setuptools import setup, find_packages
with open('requirements.txt') as f:
INSTALL_REQUIRES = [l.strip() for l in f.readlines() if l]
try:
import numpy
except ImportError:
print('numpy is required during installation')
sys.exit(1)
try:
import scipy
except ImportError:
print('scipy is required during installation')
sys.exit(1)
setup(name='scikit-hyperband',
version='0.0.1',
description='A scikit-learn compatible implementation of hyperband for model selection',
author='Thomas Huijskens',
packages=find_packages(),
install_requires=INSTALL_REQUIRES,
author_email='thomas_huijskens@hotmail.com',
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/hyperband/search.py | training/helpers/hyperband/hyperband/search.py | """
=========
Hyperband
=========
This module contains a scikit-learn compatible implementation of the hyperband
algorithm[^1].
Compared to the civismlext implementation, this supports multimetric scoring,
and the option to turn the last round of hyperband (the randomized search
round) off.
References
----------
.. [1] Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A.,
2017. Hyperband: A novel bandit-based approach to hyperparameter
optimization. The Journal of Machine Learning Research, 18(1),
pp.6765-6816.
"""
import copy
import numpy as np
from scipy.stats import rankdata
from sklearn.utils import check_random_state
from sklearn.model_selection._search import BaseSearchCV, ParameterSampler
__all__ = ['HyperbandSearchCV']
class HyperbandSearchCV(BaseSearchCV):
"""Hyperband search on hyper parameters.
HyperbandSearchCV implements a ``fit`` and a ``score`` method.
It also implements ``predict``, ``predict_proba``, ``decision_function``,
``transform`` and ``inverse_transform`` if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings using the hyperband
algorithm [1]_ .
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the scikit-learn `User Guide
<http://scikit-learn.org/stable/modules/grid_search.html#randomized-parameter-search>`_.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
resource_param : str, default='n_estimators'
The name of the cost parameter for the estimator ``estimator``
to be fitted. Typically, this is the number of decision trees
``n_estimators`` in an ensemble or the number of iterations
for estimators trained with stochastic gradient descent.
eta : float, default=3
The inverse of the proportion of configurations that are discarded
in each round of hyperband.
min_iter : int, default=1
The minimum amount of resource that should be allocated to the cost
parameter ``resource_param`` for a single configuration of the
hyperparameters.
max_iter : int, default=81
The maximum amount of resource that can be allocated to the cost
parameter ``resource_param`` for a single configuration of the
hyperparameters.
skip_last : int, default=0
The number of last rounds to skip. For example, this can be used
to skip the last round of hyperband, which is standard randomized
search. It can also be used to inspect intermediate results,
although warm-starting HyperbandSearchCV is not supported.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`sklearn.model_selection.StratifiedKFold`
is used. In all other cases, :class:`sklearn.model_selection.KFold` is used.
Refer `User Guide <http://scikit-learn.org/stable/modules/cross_validation.html>`_
for the various cross-validation strategies that can be used here.
refit : boolean, or string default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``HyperbandSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_parameters_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : boolean, optional, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.8, 0.7, 0.8, 0.9],
'split1_test_score' : [0.82, 0.5, 0.7, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.82],
'std_test_score' : [0.02, 0.01, 0.03, 0.03],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.8, 0.9, 0.7],
'split1_train_score' : [0.82, 0.5, 0.7],
'mean_train_score' : [0.81, 0.7, 0.7],
'std_train_score' : [0.03, 0.03, 0.04],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.007, 0.06, 0.04, 0.04],
'std_score_time' : [0.001, 0.002, 0.003, 0.005],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
References
----------
.. [1] Li, L., Jamieson, K., DeSalvo, G., Rostamizadeh, A. and Talwalkar, A.,
2017. Hyperband: A novel bandit-based approach to hyperparameter
optimization. The Journal of Machine Learning Research, 18(1),
pp.6765-6816.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`sklearn.model_selection.GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`sklearn.model_selection.ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions,
resource_param='n_estimators', eta=3, min_iter=1,
max_iter=81, skip_last=0, scoring=None, n_jobs=1,
iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise', return_train_score=False):
self.param_distributions = param_distributions
self.resource_param = resource_param
self.eta = eta
self.min_iter = min_iter
self.max_iter = max_iter
self.skip_last = skip_last
self.random_state = random_state
super(HyperbandSearchCV, self).__init__(
estimator=estimator, scoring=scoring, n_jobs=n_jobs,
iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _run_search(self, evaluate_candidates):
self._validate_input()
s_max = int(np.floor(np.log(self.max_iter / self.min_iter) / np.log(self.eta)))
B = (s_max + 1) * self.max_iter
refit_metric = self.refit if self.multimetric_ else 'score'
random_state = check_random_state(self.random_state)
if self.skip_last > s_max:
raise ValueError('skip_last is higher than the total number of rounds')
for round_index, s in enumerate(reversed(range(s_max + 1))):
n = int(np.ceil(int(B / self.max_iter / (s + 1)) * np.power(self.eta, s)))
# initial number of iterations per config
r = self.max_iter / np.power(self.eta, s)
configurations = list(ParameterSampler(param_distributions=self.param_distributions,
n_iter=n,
random_state=random_state))
if self.verbose > 0:
print('Starting bracket {0} (out of {1}) of hyperband'
.format(round_index + 1, s_max + 1))
for i in range((s + 1) - self.skip_last):
n_configs = np.floor(n / np.power(self.eta, i)) # n_i
n_iterations = int(r * np.power(self.eta, i)) # r_i
n_to_keep = int(np.floor(n_configs / self.eta))
if self.verbose > 0:
msg = ('Starting successive halving iteration {0} out of'
' {1}. Fitting {2} configurations, with'
' resource_param {3} set to {4}')
if n_to_keep > 0:
msg += ', and keeping the best {5} configurations.'
msg = msg.format(i + 1, s + 1, len(configurations),
self.resource_param, n_iterations,
n_to_keep)
print(msg)
# Set the cost parameter for every configuration
parameters = copy.deepcopy(configurations)
for configuration in parameters:
configuration[self.resource_param] = n_iterations
results = evaluate_candidates(parameters)
if n_to_keep > 0:
top_configurations = [x for _, x in sorted(zip(results['rank_test_%s' % refit_metric],
results['params']),
key=lambda x: x[0])]
configurations = top_configurations[:n_to_keep]
if self.skip_last > 0:
print('Skipping the last {0} successive halving iterations'
.format(self.skip_last))
def fit(self, X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
super().fit(X, y, groups, **fit_params)
s_max = int(np.floor(np.log(self.max_iter / self.min_iter) / np.log(self.eta)))
B = (s_max + 1) * self.max_iter
brackets = []
for round_index, s in enumerate(reversed(range(s_max + 1))):
n = int(np.ceil(int(B / self.max_iter / (s + 1)) * np.power(self.eta, s)))
n_configs = int(sum([np.floor(n / np.power(self.eta, i))
for i in range((s + 1) - self.skip_last)]))
bracket = (round_index + 1) * np.ones(n_configs)
brackets.append(bracket)
self.cv_results_['hyperband_bracket'] = np.hstack(brackets)
return self
def _validate_input(self):
if not isinstance(self.min_iter, int) or self.min_iter <= 0:
raise ValueError('min_iter should be a positive integer, got %s' %
self.min_iter)
if not isinstance(self.max_iter, int) or self.max_iter <= 0:
raise ValueError('max_iter should be a positive integer, got %s' %
self.max_iter)
if self.max_iter < self.min_iter:
raise ValueError('max_iter should be bigger than min_iter, got'
'max_iter=%d and min_iter=%d' % (self.max_iter,
self.min_iter))
if not isinstance(self.skip_last, int) or self.skip_last < 0:
raise ValueError('skip_last should be an integer, got %s' %
self.skip_last)
if not isinstance(self.eta, int) or not self.eta > 1:
raise ValueError('eta should be a positive integer, got %s' %
self.eta)
if self.resource_param not in self.estimator.get_params().keys():
raise ValueError('resource_param is set to %s, but base_estimator %s '
'does not have a parameter with that name' %
(self.resource_param,
self.estimator.__class__.__name__))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/hyperband/__init__.py | training/helpers/hyperband/hyperband/__init__.py | """
"""
from .search import HyperbandSearchCV
__all__ = ['HyperbandSearchCV']
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/hyperband/tests/__init__.py | training/helpers/hyperband/hyperband/tests/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/hyperband/tests/test_validation.py | training/helpers/hyperband/hyperband/tests/test_validation.py | from nose.tools import raises
from hyperband import HyperbandSearchCV
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint as sp_randint
def setup():
model = RandomForestClassifier()
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(2, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
return model, param_dist
@raises(ValueError)
def test_check_min_iter():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, min_iter=-1)._validate_input()
@raises(ValueError)
def test_check_max_iter():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, max_iter=-1)._validate_input()
@raises(ValueError)
def test_check_min_iter_smaller_max_iter():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, min_iter=30, max_iter=15)._validate_input()
@raises(ValueError)
def test_check_skip_last():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, skip_last=-1)._validate_input()
@raises(ValueError)
def test_check_eta():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, eta=0)._validate_input()
@raises(ValueError)
def test_check_resource_param():
model, param_dist = setup()
HyperbandSearchCV(model, param_dist, resource_param='wrong_name')._validate_input()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/hyperband/tests/test_hyperband.py | training/helpers/hyperband/hyperband/tests/test_hyperband.py | from nose.tools import raises
from hyperband import HyperbandSearchCV
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import randint as sp_randint
from sklearn.datasets import load_digits
from sklearn.utils import check_random_state
def setup():
model = RandomForestClassifier()
rng = check_random_state(42)
param_dist = {'max_depth': [3, None],
'max_features': sp_randint(1, 11),
'min_samples_split': sp_randint(2, 11),
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']}
digits = load_digits()
X, y = digits.data, digits.target
return model, param_dist, X, y, rng
def test_multimetric_hyperband():
model, param_dist, X, y, rng = setup()
# multimetric scoring is only supported for 1-D classification
first_label = (y == 1)
y[first_label] = 1
y[~first_label] = 0
multimetric = [
'roc_auc',
'accuracy'
]
search = HyperbandSearchCV(model, param_dist, refit='roc_auc', scoring=multimetric,
random_state=rng)
search.fit(X, y)
assert('mean_test_roc_auc' in search.cv_results_.keys())
assert('mean_test_accuracy' in search.cv_results_.keys())
assert (len(search.cv_results_['hyperband_bracket']) == 187)
def test_min_resource_param():
model, param_dist, X, y, rng = setup()
search = HyperbandSearchCV(model, param_dist, min_iter=3, random_state=rng,
verbose=1)
search.fit(X, y)
assert(search.cv_results_['param_n_estimators'].data.min() == 3)
@raises(ValueError)
def test_skip_last_raise():
model, param_dist, X, y, rng = setup()
search = HyperbandSearchCV(model, param_dist, skip_last=10, random_state=rng)
search.fit(X, y)
def test_skip_last():
model, param_dist, X, y, rng = setup()
search = HyperbandSearchCV(model, param_dist, skip_last=1, random_state=rng)
search.fit(X, y)
# 177 Because in every round the last search is dropped
# 187 - (1 + 1 + 1 + 2 + 5)
assert (len(search.cv_results_['hyperband_bracket']) == 177)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/examples/random_forest_example.py | training/helpers/hyperband/examples/random_forest_example.py | """
==================================================================
Tuning the hyperparameters of a random forest model with hyperband
==================================================================
"""
from hyperband import HyperbandSearchCV
from scipy.stats import randint as sp_randint
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelBinarizer
if __name__ == '__main__':
model = RandomForestClassifier()
param_dist = {
'max_depth': [3, None],
'max_features': sp_randint(1, 11),
'min_samples_split': sp_randint(2, 11),
'min_samples_leaf': sp_randint(1, 11),
'bootstrap': [True, False],
'criterion': ['gini', 'entropy']
}
digits = load_digits()
X, y = digits.data, digits.target
y = LabelBinarizer().fit_transform(y)
search = HyperbandSearchCV(model, param_dist,
resource_param='n_estimators',
scoring='roc_auc',
n_jobs=1,
verbose=1)
search.fit(X, y)
print(search.best_params_)
print(search.best_score_)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperband/doc/conf.py | training/helpers/hyperband/doc/conf.py | # -*- coding: utf-8 -*-
#
# project-template documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery'
]
numpydoc_show_class_members = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples',
'backreferences_dir': os.path.join('generated'),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-hyperband'
copyright = u'2018, Thomas Huijskens'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'project-templatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scikit-hyperband.tex', u'scikit-hyperband Documentation',
u'Thomas Huijskens', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-hyperband', u'scikit-hyperband Documentation',
[u'Thomas Huijskens'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scikit-hyperband', u'scikit-hyperband Documentation',
u'Thomas Huijskens', 'scikit-hyperband', 'scikit-learn compatible implementation of hyperband.',
'Miscellaneous'),
]
# def generate_example_rst(app, what, name, obj, options, lines):
# # generate empty examples files, so that we don't get
# # inclusion errors if there are no examples for a class / module
# examples_path = os.path.join(app.srcdir, "modules", "generated",
# "%s.examples" % name)
# if not os.path.exists(examples_path):
# # touch file
# open(examples_path, 'w').close()
#
#
# def setup(app):
# app.connect('autodoc-process-docstring', generate_example_rst)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/gender_tpot_regression/model/gender_tpot_regression.py | training/helpers/gender_tpot_regression/model/gender_tpot_regression.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.feature_selection import SelectPercentile, f_regression
from sklearn.linear_model import ElasticNetCV
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
g=json.load(open('gender_tpot_regression.json'))
tpot_data=np.array(g['labels'])
features=np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: -0.13558964188025885
exported_pipeline = make_pipeline(
SelectPercentile(score_func=f_regression, percentile=19),
StackingEstimator(estimator=ElasticNetCV(l1_ratio=0.2, tol=0.0001)),
StackingEstimator(estimator=ExtraTreesRegressor(bootstrap=True, max_features=0.7000000000000001, min_samples_leaf=14, min_samples_split=15, n_estimators=100)),
ElasticNetCV(l1_ratio=0.9, tol=0.001)
)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
f=open('gender_tpot_regression.pickle','wb')
pickle.dump(exported_pipeline,f)
f.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/gender_tpot_classifier/model/gender_tpot_classifier.py | training/helpers/gender_tpot_classifier/model/gender_tpot_classifier.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
g=json.load(open('gender_tpot_classifier.json'))
tpot_data=np.array(g['labels'])
features=np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: 0.8276292335115866
exported_pipeline = make_pipeline(
Normalizer(norm="max"),
LinearSVC(C=20.0, dual=True, loss="hinge", penalty="l2", tol=0.0001)
)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
f=open('gender_tpot_classifier.pickle','wb')
pickle.dump(exported_pipeline,f)
f.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autokaggle/tabular_supervised.py | training/helpers/autokaggle/tabular_supervised.py | from abc import abstractmethod
import os
from lightgbm import LGBMClassifier, LGBMRegressor
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import roc_auc_score, f1_score, mean_squared_error
import numpy as np
import random
from autokaggle.tabular_preprocessor import TabularPreprocessor
from autokaggle.utils import rand_temp_folder_generator, ensure_dir
class TabularSupervised:
def __init__(self, path=None, verbose=True):
"""
Initialization function for tabular supervised learner.
"""
self.verbose = verbose
self.is_trained = False
self.clf = None
self.objective = None
self.tabular_preprocessor = None
self.path = path if path is not None else rand_temp_folder_generator()
ensure_dir(self.path)
if self.verbose:
print('Path:', path)
self.save_filename = os.path.join(self.path, 'lgbm.txt')
self.time_limit = None
self.lgbm = None
def search(self, search_space, search_iter, n_estimators, x, y):
if 'n_estimators' in search_space:
del search_space['n_estimators']
params = {
'boosting_type': ['gbdt'],
'min_child_weight': [5],
'min_split_gain': [1.0],
'subsample': [0.8],
'colsample_bytree': [0.6],
'max_depth': [10],
'n_estimators': n_estimators,
'num_leaves': [70],
'learning_rate': [0.04],
}
params.update(search_space)
if self.verbose:
print(params)
folds = 3
score_metric, skf = self.get_skf(folds)
random_search = RandomizedSearchCV(self.lgbm, param_distributions=params, n_iter=search_iter,
scoring=score_metric,
n_jobs=1, cv=skf, verbose=0, random_state=1001)
random_search.fit(x, y)
self.clf = random_search.best_estimator_
return random_search.best_params_
@abstractmethod
def get_skf(self, folds):
pass
def fit(self, x, y, time_limit=None, data_info=None):
"""
This function should train the model parameters.
Args:
x: A numpy.ndarray instance containing the training data.
y: training label vector.
time_limit: remaining time budget.
data_info: meta-features of the dataset, which is an numpy.ndarray describing the
feature type of each column in raw_x. The feature type include:
'TIME' for temporal feature, 'NUM' for other numerical feature,
and 'CAT' for categorical feature.
Both inputs X and y are numpy arrays.
If fit is called multiple times on incremental data (train, test1, test2, etc.)
you should warm-start your training from the pre-trained model. Past data will
NOT be available for re-training.
"""
if time_limit is None:
time_limit = 24 * 60 * 60
self.time_limit = time_limit
self.init_lgbm(y)
self.tabular_preprocessor = TabularPreprocessor()
if x.shape[1] == 0:
raise ValueError("No feature exist!")
x = self.tabular_preprocessor.fit(x, y, self.time_limit, data_info)
if x.shape[0] > 600:
grid_train_percentage = max(600.0 / x.shape[0], 0.1)
else:
grid_train_percentage = 1
grid_n = int(x.shape[0] * grid_train_percentage)
idx = random.sample(list(range(x.shape[0])), grid_n)
grid_train_x = x[idx, :]
grid_train_y = y[idx]
while x.shape[0] < 60:
x = np.concatenate([x, x], axis=0)
y = np.concatenate([y, y], axis=0)
response_rate = sum(y) / len(y)
if not self.is_trained:
# Two-step cross-validation for hyperparameter selection
if self.verbose:
print('-----------------Search Regularization Params---------------------')
if response_rate < 0.005:
depth_choice = [5]
else:
depth_choice = [8, 10]
params = {
'min_split_gain': [0.1],
'max_depth': depth_choice,
'min_child_weight': [5, 10, 30, 50, 60, 80, 100],
'colsample_bytree': [0.6, 0.7],
'learning_rate': [0.3],
'subsample': [0.8],
'num_leaves': [80],
}
search_iter = 14
n_estimators_choice = [50]
best_param = self.search(
params,
search_iter,
n_estimators_choice,
grid_train_x, grid_train_y)
if self.verbose:
print('-----------------Search Learning Rate---------------------')
for key, value in best_param.items():
best_param[key] = [value]
best_param['learning_rate'] = [0.03, 0.045, 0.06, 0.075, 0.85, 0.95, 0.105, 0.12]
n_estimators_choice = [100, 150, 200]
search_iter = 16
self.search(
best_param,
search_iter,
n_estimators_choice,
grid_train_x, grid_train_y)
if self.verbose:
print('self.clf', self.clf)
self.is_trained = True
# Fit Model
self.clf.fit(x, y)
self.clf.booster_.save_model(self.save_filename)
if self.verbose:
print("The whole available data is: ")
print("Real-FIT: dim(X)= [{:d}, {:d}]".format(x.shape[0], x.shape[1]))
print('Feature Importance:')
print(self.clf.feature_importances_)
@abstractmethod
def init_lgbm(self, y):
pass
def predict(self, x_test):
"""
This function should provide predictions of labels on (test) data.
The function predict eventually casdn return probabilities or continuous values.
"""
x_test = self.tabular_preprocessor.encode(x_test)
y = self.clf.predict(x_test, )
if y is None:
raise ValueError("Tabular predictor does not exist")
return y
@abstractmethod
def evaluate(self, x_test, y_test):
pass
def final_fit(self, x_train, y_train):
x_train = self.tabular_preprocessor.encode(x_train)
self.clf.fit(x_train, y_train)
class TabularRegressor(TabularSupervised):
"""TabularRegressor class.
It is used for tabular data regression with lightgbm regressor.
"""
def __init__(self, path=None):
super().__init__(path)
self.objective = 'regression'
def evaluate(self, x_test, y_test):
y_pred = self.predict(x_test)
return mean_squared_error(y_test, y_pred)
def init_lgbm(self, y):
self.lgbm = LGBMRegressor(silent=False,
verbose=-1,
n_jobs=1,
objective=self.objective)
def get_skf(self, folds):
return 'neg_mean_squared_error', KFold(n_splits=folds, shuffle=True, random_state=1001)
class TabularClassifier(TabularSupervised):
"""TabularClassifier class.
It is used for tabular data classification with lightgbm classifier.
"""
def init_lgbm(self, y):
n_classes = len(set(y))
if n_classes == 2:
self.objective = 'binary'
self.lgbm = LGBMClassifier(silent=False,
verbose=-1,
n_jobs=1,
objective=self.objective)
else:
self.objective = 'multiclass'
self.lgbm = LGBMClassifier(silent=False,
verbose=-1,
n_jobs=1,
num_class=n_classes,
objective=self.objective)
def evaluate(self, x_test, y_test):
if self.verbose:
print('objective:', self.objective)
y_pred = self.predict(x_test)
results = None
if self.objective == 'binary':
results = roc_auc_score(y_test, y_pred)
elif self.objective == 'multiclass':
results = f1_score(y_test, y_pred, average='weighted')
return results
def get_skf(self, folds):
if self.lgbm.objective == 'binary':
score_metric = 'roc_auc'
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=1001)
else:
score_metric = 'f1_weighted'
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=1001)
return score_metric, skf
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autokaggle/utils.py | training/helpers/autokaggle/utils.py | import os
import tempfile
import string
import random
def ensure_dir(directory):
"""Create directory if it does not exist."""
if not os.path.exists(directory):
os.makedirs(directory)
def temp_path_generator():
sys_temp = tempfile.gettempdir()
path = os.path.join(sys_temp, 'autokaggle')
return path
def rand_temp_folder_generator():
"""Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
chars = string.ascii_uppercase + string.digits
size = 6
random_suffix = ''.join(random.choice(chars) for _ in range(size))
sys_temp = temp_path_generator()
path = sys_temp + '_' + random_suffix
ensure_dir(path)
return path
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/autokaggle/tabular_preprocessor.py | training/helpers/autokaggle/tabular_preprocessor.py | import numpy as np
from pandas import DataFrame
from scipy.stats import pearsonr
# Cardinality threshold: categorical columns with more distinct values than
# this are treated as "high level" and frequency-encoded instead of
# label-encoded (see TabularPreprocessor.cat_to_num).
LEVEL_HIGH = 32
def parallel_function(labels, first_batch_keys, task):
    """Apply one categorical-encoding task to a column (or column pair).

    Args:
        labels: numpy array holding the column data for this task — a 1-D
            column for 'label'/'frequency', a 2-column array for the
            pairwise tasks.
        first_batch_keys: task-dependent payload; an int threshold for
            'label'/'frequency', or a list [y, col_idx(s)..., mark] for the
            'train_*' tasks.
        task: one of 'label', 'frequency', 'num_cat', 'cat_cat',
            'train_num_cat', 'train_cat_cat'.

    Returns:
        A 2-D encoded column, a [stats, mark] pair for the 'train_*'
        tasks, or None for an unknown task name.
    """
    if task == 'label':
        # Shift label codes so they start at zero, then return as a column.
        if min(labels) > first_batch_keys:
            labels = labels - np.min(labels)
        return labels.reshape(labels.shape[0], 1)
    elif task == 'frequency':
        # Encode each category by its relative frequency scaled by the
        # number of distinct levels.
        cat_dict = {}
        n_rows = labels.shape[0]
        labels = np.expand_dims(labels, axis=1)
        if min(labels) > first_batch_keys:
            labels = labels - np.min(labels)
        frequencies = np.zeros((n_rows, 1))
        # First pass: count occurrences of each category value.
        for row_index in range(n_rows):
            key = labels[row_index, 0]
            if key in cat_dict:
                cat_dict[key] += 1
            else:
                cat_dict[key] = 1
        n_level = len(cat_dict)
        key_to_frequency = {}
        for key in cat_dict.keys():
            key_to_frequency[key] = cat_dict[key] / n_rows * n_level
        # Second pass: map every row to its category's frequency score.
        for row_index in range(n_rows):
            key = labels[row_index, 0]
            frequencies[row_index][0] = key_to_frequency[key]
        return frequencies
    elif task == 'num_cat':
        # Target-mean style encoding: mean of numeric column 0 grouped by
        # categorical column 1.
        df = DataFrame(data=labels)
        return df.join(df.groupby(1)[0].mean(),
                       rsuffix='r',
                       on=1).values[:, -1:]
    elif task == 'cat_cat':
        # Count encoding for a pair of categorical columns.
        df = DataFrame(data=labels)
        df[3] = list(range(len(labels)))
        return df.join(df.groupby([0, 1]).count(),
                       rsuffix='r',
                       on=(0, 1)).values[:, -1:]
    elif task == 'train_num_cat':
        # Same encoding as 'num_cat', but also score the engineered feature
        # by |Pearson correlation| with the target y.
        y = first_batch_keys[0]
        df = DataFrame(data=labels)
        fe = df.join(df.groupby(1)[0].mean(),
                     rsuffix='r',
                     on=1).values[:, -1:]
        mu = abs(pearsonr(np.squeeze(np.array(fe)), y)[0])
        if np.isnan(mu):
            mu = 0
        return [[first_batch_keys[1], first_batch_keys[2], mu, mu], first_batch_keys[3]]
    elif task == 'train_cat_cat':
        # Same encoding as 'cat_cat', plus the |Pearson correlation| score
        # of the engineered feature against y.
        y = first_batch_keys[0]
        df = DataFrame(data=labels)
        df[3] = list(range(len(labels)))
        fe = df.join(df.groupby([0, 1]).count(),
                     rsuffix='r',
                     on=(0, 1)).values[:, -1:]
        mu = abs(pearsonr(np.squeeze(np.array(fe)), y)[0])
        if np.isnan(mu):
            mu = 0
        return [[first_batch_keys[1], first_batch_keys[2], mu], first_batch_keys[3]]
    # Unknown task name.
    return None
def call_parallel(tasks):
    """Run each (data, payload, task-name) triple sequentially.

    Despite the name, this executes serially and simply collects the
    per-task results in order.
    """
    return [parallel_function(data, payload, task_name)
            for data, payload, task_name in tasks]
class TabularPreprocessor:
    """Feature preprocessor for mixed TIME/NUM/CAT tabular data.

    `fit()` learns categorical encodings and engineered feature pairs on
    the training data; `encode()` replays the learned transformation on
    new data.  Columns are ordered TIME, then NUM, then CAT after
    `extract_data()`.
    """

    def __init__(self):
        """
        Initialization function for tabular preprocessor.
        """
        self.num_cat_pair = {}        # mark -> tuple describing an encoding task
        self.total_samples = 0
        self.cat_to_int_label = {}    # per-column map: raw category string -> int code
        self.n_first_batch_keys = {}  # per-column cardinality seen during fit
        self.high_level_cat_keys = [] # columns with > 100 distinct categories
        self.feature_add_high_cat = 0
        self.feature_add_cat_num = 0
        self.feature_add_cat_cat = 0
        self.order_num_cat_pair = {}  # sorted marks, fixing task order for encode()
        self.rest = None              # indices of non-constant columns kept by remove_useless
        self.budget = None            # time budget in seconds
        self.data_info = None         # per-column 'TIME'/'NUM'/'CAT' tags
        self.n_time = None
        self.n_num = None
        self.n_cat = None

    def remove_useless(self, x):
        # Keep only columns whose values are not constant; remember the
        # surviving indices so encode() can apply the same selection.
        self.rest = np.where(np.max(x, 0) - np.min(x, 0) != 0)[0]
        return x[:, self.rest]

    def process_time(self, x):
        # Append pairwise differences between (at most 10) time columns as
        # extra features.
        cols = range(self.n_time)
        if len(cols) > 10:
            cols = cols[:10]
        x_time = x[:, cols]
        for i in cols:
            for j in range(i + 1, len(cols)):
                x = np.append(x, np.expand_dims(x_time[:, i] - x_time[:, j], 1), 1)
        return x

    def extract_data(self, raw_x):
        """Stack TIME/NUM/CAT parts and integer-code categorical values.

        New category strings get fresh codes appended to
        `self.cat_to_int_label`, so repeated calls (fit then encode) share
        a consistent coding.
        """
        # only get numerical variables
        ret = np.concatenate([raw_x['TIME'], raw_x['NUM'], raw_x['CAT']], axis=1)
        n_rows = ret.shape[0]
        n_num_col = ret.shape[1] - self.n_cat
        n_cat_col = self.n_cat
        if n_cat_col <= 0:
            return ret.astype(np.float64)
        # preprocess (multi-value) categorical data
        for col_index in range(n_num_col, n_num_col + n_cat_col):
            for row_index in range(n_rows):
                key = str(ret[row_index, col_index])
                if key in self.cat_to_int_label[col_index]:
                    ret[row_index, col_index] = self.cat_to_int_label[col_index][key]
                    continue
                new_value = len(self.cat_to_int_label[col_index])
                self.cat_to_int_label[col_index][key] = new_value
                ret[row_index, col_index] = new_value
        return ret.astype(np.float64)

    def cat_to_num(self, x, y=None):
        """Encode categorical columns and engineered pairs numerically.

        When `y` is given (training), this first decides the encoding plan:
        label encoding for low-cardinality columns, frequency encoding for
        high-cardinality ones, plus cat-cat and num-cat pair features
        selected by Pearson correlation with `y`.  With `y` None (encoding
        new data), the previously stored plan is replayed.
        """
        if y is not None:
            mark = self.n_time + self.n_num
            # One encoding task per categorical column: 1-tuple => label
            # encoding, 2-tuple => frequency encoding (high cardinality).
            for col_index in range(self.n_time + self.n_num, self.n_time + self.n_num + self.n_cat):
                if self.n_first_batch_keys[col_index] <= LEVEL_HIGH:
                    self.num_cat_pair[mark] = (col_index,)
                    mark += 1
                else:
                    self.num_cat_pair[mark] = (col_index, col_index)
                    mark += 1
            # Candidate cat-cat pair features among high-cardinality columns.
            mark_1 = 0
            tasks = []
            for i, cat_col_index1 in enumerate(self.high_level_cat_keys):
                for cat_col_index2 in self.high_level_cat_keys[i + 1:]:
                    tasks.append((x[:, (cat_col_index1, cat_col_index2)],
                                  [y, cat_col_index1, cat_col_index2, mark_1],
                                  'train_cat_cat'))
                    mark_1 += 1
            all_results = call_parallel(tasks)
            # Keep only pairs whose correlation with y exceeds 0.001, then
            # the top `feature_add_cat_cat` of those.
            num_cat_pair_1 = {}
            pearsonr_dict_1 = {}
            for result in all_results:
                if result[0][-1] > 0.001:
                    pearsonr_dict_1[result[1]] = result[0][-1]
                    num_cat_pair_1[result[1]] = result[0]
            pearsonr_high_1 = sorted(pearsonr_dict_1, key=pearsonr_dict_1.get, reverse=True)[:self.feature_add_cat_cat]
            num_cat_pair_1 = {key: num_cat_pair_1[key] for key in pearsonr_high_1}
            num_cat_pair_1 = {i + mark: num_cat_pair_1[key] for i, key in enumerate(num_cat_pair_1)}
            self.num_cat_pair.update(num_cat_pair_1)
            mark += len(pearsonr_high_1)
            # Candidate num-cat pair features (numeric column grouped by a
            # high-cardinality categorical column).
            mark_2 = 0
            tasks_2 = []
            for cat_col_index in self.high_level_cat_keys:
                for num_col_index in range(self.n_time, self.n_time + self.n_num):
                    tasks_2.append((x[:, (num_col_index, cat_col_index)],
                                    [y, num_col_index, cat_col_index, mark_2],
                                    'train_num_cat'))
                    mark_2 += 1
            all_results = call_parallel(tasks_2)
            num_cat_pair_2 = {}
            pearsonr_dict_2 = {}
            for result in all_results:
                if result[0][-1] > 0.001:
                    pearsonr_dict_2[result[1]] = result[0][-1]
                    num_cat_pair_2[result[1]] = result[0]
            pearsonr_high_2 = sorted(pearsonr_dict_2, key=pearsonr_dict_2.get, reverse=True)[:self.feature_add_cat_num]
            num_cat_pair_2 = {key: num_cat_pair_2[key] for key in pearsonr_high_2}
            num_cat_pair_2 = {i + mark: num_cat_pair_2[key] for i, key in enumerate(num_cat_pair_2)}
            self.num_cat_pair.update(num_cat_pair_2)
            # Freeze the task order so encode() reproduces the same layout.
            self.order_num_cat_pair = sorted(list(self.num_cat_pair.keys()))
            print('num_cat_pair_2:', num_cat_pair_2)
        # Replay the stored plan: tuple length selects the task type.
        tasks = []
        for key in self.order_num_cat_pair:
            if len(self.num_cat_pair[key]) == 1:
                (col_index,) = self.num_cat_pair[key]
                tasks.append((x[:, col_index], self.n_first_batch_keys[col_index], 'label'))
            if len(self.num_cat_pair[key]) == 2:
                (col_index, col_index) = self.num_cat_pair[key]
                tasks.append((x[:, col_index], self.n_first_batch_keys[col_index], 'frequency'))
            if len(self.num_cat_pair[key]) == 3:
                (cat_col_index1, cat_col_index2, mu) = self.num_cat_pair[key]
                tasks.append((x[:, (cat_col_index1,
                                    cat_col_index2)], self.n_first_batch_keys[cat_col_index1], 'cat_cat'))
            elif len(self.num_cat_pair[key]) == 4:
                (num_col_index, cat_col_index, mu, a) = self.num_cat_pair[key]
                tasks.append((x[:, (num_col_index, cat_col_index)], self.n_first_batch_keys[cat_col_index], 'num_cat'))
        results = call_parallel(tasks)
        # Prepend the untouched numeric/time columns, then the encodings.
        all_num = x.shape[1] - self.n_cat
        results = [x[:, :all_num]] + results
        ret = np.concatenate(results, axis=1)
        return ret

    def fit(self, raw_x, y, time_limit, data_info):
        """
        This function should train the model parameters.
        Args:
            raw_x: a numpy.ndarray instance containing the training data.
            y: training label vector.
            time_limit: remaining time budget.
            data_info: meta-features of the dataset, which is an numpy.ndarray describing the
             feature type of each column in raw_x. The feature type include:
                     'TIME' for temporal feature, 'NUM' for other numerical feature,
                     and 'CAT' for categorical feature.
        Returns:
            The transformed training matrix.
        """
        # Get Meta-Feature
        self.budget = time_limit
        self.data_info = data_info if data_info is not None else self.extract_data_info(raw_x)
        # NOTE(review): 'QQ*' prints look like leftover debug output.
        print('QQ: {}'.format(self.data_info))
        self.n_time = sum(self.data_info == 'TIME')
        self.n_num = sum(self.data_info == 'NUM')
        self.n_cat = sum(self.data_info == 'CAT')
        self.total_samples = raw_x.shape[0]
        print('QQ1: {}'.format(self.n_time))
        print('QQ2: {}'.format(self.n_num))
        print('QQ3: {}'.format(self.n_cat))
        raw_x = {'TIME': raw_x[:, self.data_info == 'TIME'],
                 'NUM': raw_x[:, self.data_info == 'NUM'],
                 'CAT': raw_x[:, self.data_info == 'CAT']}
        for col_index in range(self.n_num + self.n_time, self.n_num + self.n_time + self.n_cat):
            self.cat_to_int_label[col_index] = {}
        x = self.extract_data(raw_x)
        # Skip expensive pairwise feature engineering when the data is large
        # relative to the time budget.
        d_size = x.shape[0] * x.shape[1] / self.budget
        if d_size > 35000:
            self.feature_add_high_cat = 0
        else:
            self.feature_add_high_cat = 10
        # Iterate cat features
        for col_index in range(self.n_num + self.n_time, self.n_num + self.n_time + self.n_cat):
            self.n_first_batch_keys[col_index] = len(self.cat_to_int_label[col_index])
        high_level_cat_keys_tmp = sorted(self.n_first_batch_keys, key=self.n_first_batch_keys.get, reverse=True)[
                                  :self.feature_add_high_cat]
        for i in high_level_cat_keys_tmp:
            if self.n_first_batch_keys[i] > 1e2:
                self.high_level_cat_keys.append(i)
        # Convert NaN to zeros
        x = np.nan_to_num(x)
        # Encode high-order categorical data to numerical with frequency
        x = self.cat_to_num(x, y)
        x = self.process_time(x)
        x = self.remove_useless(x)
        return x

    def encode(self, raw_x, time_limit=None):
        """
        This function should train the model parameters.
        Args:
            raw_x: a numpy.ndarray instance containing the training/testing data.
            time_limit: remaining time budget.
        Both inputs X and y are numpy arrays.
        If fit is called multiple times on incremental data (train, test1, test2, etc.)
        you should warm-start your training from the pre-trained model. Past data will
        NOT be available for re-training.
        """
        # Get Meta-Feature
        if time_limit is None:
            if self.budget is None:
                time_limit = 24 * 60 * 60
                self.budget = time_limit
        else:
            self.budget = time_limit
        raw_x = {'TIME': raw_x[:, self.data_info == 'TIME'],
                 'NUM': raw_x[:, self.data_info == 'NUM'],
                 'CAT': raw_x[:, self.data_info == 'CAT']}
        x = self.extract_data(raw_x)
        # Convert NaN to zeros
        x = np.nan_to_num(x)
        # Encode high-order categorical data to numerical with frequency
        x = self.cat_to_num(x)
        x = self.process_time(x)
        if self.rest is not None:
            x = x[:, self.rest]
        return x

    @staticmethod
    def extract_data_info(raw_x):
        """
        This function extracts the data info automatically based on the type of each feature in raw_x.
        Args:
            raw_x: a numpy.ndarray instance containing the training data.
        """
        data_info = []
        row_num, col_num = raw_x.shape
        for col_idx in range(col_num):
            try:
                # Columns castable to float are numeric; others categorical.
                # NOTE(review): np.float is deprecated/removed in newer numpy.
                raw_x[:, col_idx].astype(np.float)
                data_info.append('NUM')
            except:
                data_info.append('CAT')
        return np.array(data_info)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/plda/__init__.py | training/helpers/plda/__init__.py | # Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .plda import Classifier
from .plda import Model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/plda/plda/model.py | training/helpers/plda/plda/model.py | # Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from sklearn.decomposition import PCA
from scipy.stats import multivariate_normal as gaussian
from .optimizer import get_prior_params
from .optimizer import get_posterior_params
from .optimizer import get_posterior_predictive_params
from .optimizer import optimize_maximum_likelihood
from .optimizer import calc_scatter_matrices
def get_space_walk(from_space, to_space):
    """Yield (source, target) hops stepping between adjacent PLDA spaces.

    Spaces are ordered 'U_model' <-> 'U' <-> 'X' <-> 'D'; the walk moves
    one adjacent space at a time in whichever direction is required.
    """
    ordering = ['U_model', 'U', 'X', 'D']
    assert from_space in ordering and to_space in ordering
    start, stop = ordering.index(from_space), ordering.index(to_space)
    if stop < start:
        # Walk the chain in the opposite direction.
        ordering = ordering[::-1]
        start, stop = ordering.index(from_space), ordering.index(to_space)
    hops = ordering[start:stop + 1]
    return zip(hops[:-1], hops[1:])
def transform_D_to_X(data, pca):
    """Map raw data into X space; identity when no PCA preprocessor is set."""
    if pca is None:
        return data
    return pca.transform(data)
def transform_X_to_U(data, inv_A, m):
    """Map X-space data to latent U space: (data - m) @ inv_A.T."""
    centered = data - m
    return np.matmul(centered, inv_A.T)
def transform_U_to_U_model(data, relevant_U_dims):
    """Project latent U vectors onto the model-relevant dimensions only."""
    selected = data[..., relevant_U_dims]
    return selected
def transform_U_model_to_U(data, relevant_U_dims, U_dimensionality):
    """Embed model-space vectors back into full U space, zero elsewhere."""
    full_shape = data.shape[:-1] + (U_dimensionality,)
    U = np.zeros(full_shape)
    U[..., relevant_U_dims] = data
    return U
def transform_U_to_X(data, A, m):
    """Map latent U-space data back to X space: m + data @ A.T."""
    return np.matmul(data, A.T) + m
def transform_X_to_D(data, pca):
    """Map X-space data back to the raw data space; identity without PCA."""
    if pca is None:
        return data
    return pca.inverse_transform(data)
class Model:
    """Probabilistic LDA model (Ioffe 2006), fit by maximum likelihood.

    Works across four related representations:
        'D' (raw data) <-> 'X' (PCA-preprocessed) <-> 'U' (latent)
        <-> 'U_model' (the latent dims the model actually uses).
    """

    def __init__(self, row_wise_data, labels, n_principal_components=None):
        # Data must be row-wise: one sample per row, one label per sample.
        assert len(row_wise_data.shape) == 2
        assert len(labels) == row_wise_data.shape[0]
        self.pca = None               # optional PCA preprocessor (D <-> X)
        self.m = None                 # prior mean in X space
        self.A = None                 # U -> X transform
        self.Psi = None               # prior covariance of class centers (U space)
        self.relevant_U_dims = None   # latent dims with non-zero prior variance
        self.inv_A = None             # X -> U transform
        self.prior_params = None
        self.posterior_params = None
        self.posterior_predictive_params = None
        self.fit(row_wise_data, labels, n_principal_components)

    def calc_logp_posterior(self, v_model, category):
        """Log density of class center `v_model` under `category`'s posterior."""
        assert v_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.posterior_params[category]['mean']
        cov_diag = self.posterior_params[category]['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(v_model)

    def calc_logp_posterior_predictive(self, U_model, category):
        """Log posterior predictive density of `U_model` under `category`."""
        assert U_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.posterior_predictive_params[category]['mean']
        cov_diag = self.posterior_predictive_params[category]['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(U_model)

    def calc_logp_marginal_likelihood(self, U_model):
        """ Computes the log marginal likelihood on axis=-2. """
        assert U_model.shape[-1] == self.get_dimensionality('U_model')
        if len(U_model.shape) == 1:
            U_model = U_model[None, :]
        n = U_model.shape[-2]
        psi_diag = self.prior_params['cov_diag']
        n_psi_plus_eye = n * psi_diag + 1
        # Closed form obtained by integrating the class center out of the
        # diagonal-Gaussian model (see Ioffe 2006).
        log_constant = -.5 * n * np.log(2 * np.pi)
        log_constant += -.5 * np.log(n_psi_plus_eye)
        sum_of_squares = np.sum(U_model ** 2, axis=-2)
        log_exponent_1 = -.5 * sum_of_squares
        mean = U_model.mean(axis=-2)
        log_exponent_2 = .5 * (n ** 2 * psi_diag * mean ** 2)
        log_exponent_2 /= n_psi_plus_eye
        logp_ml = log_constant + log_exponent_1 + log_exponent_2
        # Latent dimensions are independent, so per-dim log densities sum.
        logp_ml = np.sum(logp_ml, axis=-1)
        return logp_ml

    def calc_logp_prior(self, v_model):
        """Log density of a class center under the fitted zero-mean prior."""
        assert v_model.shape[-1] == self.get_dimensionality('U_model')
        mean = self.prior_params['mean']
        cov_diag = self.prior_params['cov_diag']
        return gaussian(mean, np.diag(cov_diag)).logpdf(v_model)

    def fit(self, data, labels, n_principal_components=None):
        """Fit all model parameters, optionally PCA-reducing the data first."""
        if n_principal_components is None:
            # Default to the rank of the within-class scatter so the
            # preprocessed covariance is full rank.
            S_b, S_w = calc_scatter_matrices(data, labels)
            matrix_rank = np.linalg.matrix_rank(S_w)
        else:
            matrix_rank = n_principal_components
        if matrix_rank != data.shape[-1]:
            self.pca = PCA(n_components=matrix_rank)
            self.pca.fit(data)
        X = self.transform(data, from_space='D', to_space='X')
        self.m, self.A, self.Psi, self.relevant_U_dims, self.inv_A = \
            optimize_maximum_likelihood(X, labels)
        U_model = self.transform(X, from_space='X', to_space='U_model')
        self.prior_params = \
            get_prior_params(self.Psi, self.relevant_U_dims)
        self.posterior_params = \
            get_posterior_params(U_model, labels, self.prior_params)
        self.posterior_predictive_params = \
            get_posterior_predictive_params(self.posterior_params)

    def get_dimensionality(self, space):
        """Number of feature dimensions in the requested space."""
        if space == 'U_model':
            return self.relevant_U_dims.shape[0]
        elif space == 'U':
            return self.A.shape[0]
        elif space == 'X':
            return self.A.shape[0]
        elif space == 'D':
            if self.pca is None:
                return self.m.shape[0]
            else:
                return self.pca.n_features_
        else:
            raise ValueError

    def transform(self, data, from_space, to_space):
        """ Potential_spaces: 'D' <---> 'X' <---> 'U' <---> 'U_model'.
        DESCRIPTION
         There are 6 basic transformations to move back and forth
          between the data space, 'D', and the model's space, 'U_model':
         1. From D to X.
             (i.e. from data space to preprocessed space)
             Uses the minimum number of components from
              Principal Components Analysis that
              captures 100% of the variance in the data.
         2. From X to U.
             (i.e. from preprocessed space to latent space)
             See the bottom of p.533 of Ioffe 2006.
         3. From U to U_model.
             (i.e. from latent space to the model space)
             See Fig 2 on p.537 of Ioffe 2006.
         4. From U_model to U.
             (i.e. from the model space to latent space)
         5. From U to X.
             (i.e. from the latent space to the preprocessed space)
         6. From X to D.
             (i.e. from the preprocessed space to the data space)
        """
        if len(data.shape) == 1:
            data = data[None, :]
        if from_space == 'D' and to_space == 'X':
            return transform_D_to_X(data, self.pca)
        elif from_space == 'X' and to_space == 'U':
            return transform_X_to_U(data, self.inv_A, self.m)
        elif from_space == 'U' and to_space == 'U_model':
            return transform_U_to_U_model(data, self.relevant_U_dims)
        elif from_space == 'U_model' and to_space == 'U':
            dim = self.get_dimensionality('U')
            return transform_U_model_to_U(data, self.relevant_U_dims, dim)
        elif from_space == 'U' and to_space == 'X':
            return transform_U_to_X(data, self.A, self.m)
        elif from_space == 'X' and to_space == 'D':
            return transform_X_to_D(data, self.pca)
        else:
            # Non-adjacent spaces: chain the single-hop transforms.
            transformed = data
            for space_1, space_2 in get_space_walk(from_space, to_space):
                transformed = self.transform(transformed, space_1, space_2)
            return transformed
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/plda/plda/optimizer.py | training/helpers/plda/plda/optimizer.py | # Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from scipy.linalg import eigh
def optimize_maximum_likelihood(X, labels):
    """Fit PLDA parameters by maximum likelihood (Fig. 2, p.537, Ioffe 2006).

    Args:
        X: row-wise data array of shape (n_samples, n_dims); np.cov(X.T)
            should be full rank before calling this.
        labels: per-row category labels, aligned with the rows of X.

    Returns:
        Tuple (m, A, Psi, relevant_U_dims, inv_A):
            m: fitted prior mean (mean of the rows of X).
            A: latent-U-to-X transform.
            Psi: prior covariance of category means in U space.
            relevant_U_dims: the "effective" latent dimensions.
            inv_A: X-to-latent-U transform (inverse of A).
    """
    assert len(X.shape) == 2
    assert X.shape[0] == len(labels)

    prior_mean = calc_m(X)
    S_b, S_w = calc_scatter_matrices(X, labels)
    W = calc_W(S_b, S_w)

    Lambda_b = calc_Lambda_b(S_b, W)
    Lambda_w = calc_Lambda_w(S_w, W)
    n_avg = calc_n_avg(labels)

    A = calc_A(n_avg, Lambda_w, W)
    Psi = calc_Psi(Lambda_w, Lambda_b, n_avg)
    return prior_mean, A, Psi, get_relevant_U_dims(Psi), np.linalg.inv(A)
def as_dictionary_of_dictionaries(labels, means, cov_diags):
    """Bundle per-category Gaussian parameters into a nested dictionary."""
    assert len(labels) == len(means) == len(cov_diags)
    return {label: {'mean': mean, 'cov_diag': cov_diag}
            for label, mean, cov_diag in zip(labels, means, cov_diags)}
def calc_A(n_avg, Lambda_w, W):
    """Latent-to-X transform A (Fig. 2 on p.537 of Ioffe 2006)."""
    # Lambda_w is expected to be (near) diagonal after diagonalization by W.
    scale = (n_avg / (n_avg - 1) * Lambda_w.diagonal()) ** .5
    return np.linalg.inv(W.T) * scale
def calc_Lambda_b(S_b, W):
    """Between-class scatter diagonalized by W: W.T @ S_b @ W."""
    return W.T @ S_b @ W
def calc_Lambda_w(S_w, W):
    """Within-class scatter diagonalized by W: W.T @ S_w @ W."""
    return W.T @ S_w @ W
def calc_m(X):
    """Mean of the row vectors of X (the fitted prior mean)."""
    assert len(X.shape) == 2
    return np.mean(X, axis=0)
def calc_n_avg(Y):
    """Average number of examples per category (the n "hack", Fig. 2, Ioffe 2006)."""
    n_categories = np.unique(Y).shape[0]
    return len(Y) / n_categories
def calc_Psi(Lambda_w, Lambda_b, n_avg):
    """Prior covariance of class centers in U space (Fig. 2, Ioffe 2006).

    Negative entries are clamped to zero, which is what makes some latent
    dimensions irrelevant to the model.
    """
    # Lambda_b and Lambda_w are expected to be (near) diagonal.
    psi_diag = (n_avg - 1) / n_avg * Lambda_b.diagonal() / Lambda_w.diagonal()
    psi_diag = psi_diag - 1 / n_avg
    psi_diag[psi_diag <= 0] = 0
    return np.diag(psi_diag)
def calc_scatter_matrices(X, Y):
    """Between-class (S_b) and within-class (S_w) scatter (Eq. 1, Ioffe 2006)."""
    assert len(X.shape) == 2
    assert X.shape[0] == len(Y)

    labels = np.asarray(Y)
    categories = np.unique(labels)
    N = X.shape[0]
    overall_mean = calc_m(X)

    class_means, class_sizes, class_covs = [], [], []
    for category in categories:
        members = labels == category
        X_k = X[members]
        class_means.append(X_k.mean(axis=0))
        class_sizes.append(members.sum())
        class_covs.append(np.cov(X_k.T))

    class_sizes = np.asarray(class_sizes)
    class_means = np.asarray(class_means)

    centered_means = class_means - overall_mean
    # Size-weighted outer products of the centered class means.
    S_b = np.matmul(centered_means.T * (class_sizes / N), centered_means)
    # Size-weighted sum of per-class covariances (np.cov uses ddof=1).
    S_w = np.sum(np.asarray(class_covs) * ((class_sizes - 1) / N)[:, None, None],
                 axis=0)
    return S_b, S_w
def calc_W(S_b, S_w):
    """Eigenvectors of the generalized problem S_b v = w S_w v (Fig. 2, Ioffe 2006)."""
    _, eigenvectors = eigh(S_b, S_w)
    return eigenvectors
def get_posterior_params(U_model, Y, prior_params):
    """Posterior Gaussian parameters of each category's latent center.

    With identity likelihood covariance and a zero-mean diagonal prior,
    each posterior is diagonal with cov = psi / (1 + n_k * psi) and
    mean = cov * sum of the category's U_model rows.
    """
    labels = np.asarray(Y)
    prior_cov_diagonal = prior_params['cov_diag']
    categories, means, cov_diags = [], [], []
    for category in np.unique(labels):
        members = labels == category
        n_k = members.sum()
        posterior_cov = prior_cov_diagonal / (1 + n_k * prior_cov_diagonal)
        posterior_mean = U_model[members].sum(axis=0) * posterior_cov
        categories.append(category)
        means.append(posterior_mean)
        cov_diags.append(posterior_cov)
    return as_dictionary_of_dictionaries(categories, means, cov_diags)
def get_posterior_predictive_params(posterior_params):
    """Posterior predictive parameters per category.

    The likelihood covariance matrix is the identity, so each category's
    predictive covariance is its posterior covariance plus 1.

    Unlike the previous implementation — which used a shallow `dict.copy()`
    and then did `cov_diag += 1` in place, silently corrupting the caller's
    stored posterior parameters — this builds fresh inner dictionaries and
    new `cov_diag` values, leaving `posterior_params` untouched.
    """
    pp_params = {}
    for category, params in posterior_params.items():
        params = dict(params)  # shallow copy of the inner dict
        params['cov_diag'] = params['cov_diag'] + 1  # new array/value, no mutation
        pp_params[category] = params
    return pp_params
def get_prior_params(Psi, dims):
    """Zero-mean prior over the relevant latent dims (Eq. 2, Ioffe 2006)."""
    return {'mean': np.zeros(dims.shape),
            'cov_diag': Psi.diagonal()[dims]}
def get_relevant_U_dims(Psi):
    """Indices of the non-zero diagonal entries of Psi, always as a 1-D array."""
    dims = np.squeeze(np.argwhere(Psi.diagonal() != 0))
    # np.squeeze collapses a single hit to a 0-d array; restore 1-D shape.
    return dims.reshape(1,) if dims.shape == () else dims
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/plda/plda/__init__.py | training/helpers/plda/plda/__init__.py | # Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .classifier import Classifier
from .model import Model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/plda/plda/classifier.py | training/helpers/plda/plda/classifier.py | # Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from scipy.special import logsumexp
from .model import Model
class Classifier:
    """MAP classifier built on top of a fitted plda Model (Ioffe 2006)."""

    def __init__(self):
        self.model = None

    def fit_model(self, X, Y, n_principal_components=None):
        """Train the underlying PLDA model on row-wise data X with labels Y."""
        self.model = Model(X, Y, n_principal_components)

    def predict(self, data, space='D', normalize_logps=False):
        """Classify row-wise `data` into the categories seen during training.

        `data` is expressed in `space` ('D', 'X', 'U', or 'U_model'; see
        Model.transform for how the spaces relate) and is mapped into
        'U_model' before scoring.  Predictions are MAP estimates under a
        uniform prior over categories (first sentence on p.535 of Ioffe
        2006).

        Returns a tuple (predictions, logps) where `logps` holds, per
        category, the log posterior predictive probabilities — normalized
        into classification certainties when `normalize_logps` is True.
        """
        if space != 'U_model':
            data = self.model.transform(data,
                                        from_space=space, to_space='U_model')
        logps, categories = self.calc_logp_pp_categories(data, normalize_logps)
        predictions = categories[np.argmax(logps, axis=-1)]
        return predictions, logps

    def calc_logp_pp_categories(self, data, normalize_logps):
        """Log posterior predictive probability of `data` under each category.

        Implements the posterior predictive from p.535 of Ioffe 2006:
        each category's predictive is the likelihood integrated against
        that category's posterior over its latent center.  When
        `normalize_logps` is True the per-category log probabilities are
        normalized across categories via logsumexp.

        Returns (logps, categories) with the category axis last.
        """
        assert type(normalize_logps) == bool
        categories = self.get_categories()
        stacked = np.stack(
            [self.model.calc_logp_posterior_predictive(data, category)
             for category in categories],
            axis=-1)
        if normalize_logps:
            stacked = stacked - logsumexp(stacked, axis=-1)[..., None]
        return stacked, np.asarray(categories)

    def get_categories(self):
        """Labels of the categories the model was fit on."""
        return list(self.model.posterior_params.keys())
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/setup.py | training/helpers/keras_compressor/setup.py | from setuptools import find_packages, setup
# Packaging configuration for the keras_compressor library.
setup(
    name='keras_compressor',
    version='0.0.1',
    # Ship every package found in the tree except the examples.
    packages=find_packages(
        exclude=['example'],
    ),
    url='',
    license='Apache License v2',
    author='Kosuke Kusano',
    author_email='kosuke_kusano@dwango.co.jp',
    description='',
    install_requires=[
        'numpy',
        'h5py',
        'keras>=2.0.0',
        'scipy',
        'scikit-learn',
    ],
    # Installed as a standalone command-line tool.
    scripts=['bin/keras-compressor.py'],
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/bin/keras-compressor.py | training/helpers/keras_compressor/bin/keras-compressor.py | #!/usr/bin/env python
import argparse
import logging
import keras
import keras.backend as K
import numpy
from keras.models import load_model
from keras_compressor.compressor import compress
def count_total_params(model):
    """Counts the number of parameters in a model.

    See:
    https://github.com/fchollet/keras/blob/172397ebf45d58ba256c10004c6fce8b40df286b/keras/utils/layer_utils.py#L114-L117

    :param model: Keras model instance
    :return: trainable_count, non_trainable_count
    :rtype: tuple of int
    """
    def _count(weights):
        # Deduplicate shared weight tensors before summing their sizes.
        return int(numpy.sum([K.count_params(w) for w in set(weights)]))

    return _count(model.trainable_weights), _count(model.non_trainable_weights)
def gen_argparser():
    """Build the command-line argument parser for the compressor script."""
    parser = argparse.ArgumentParser(description='compress keras model')
    parser.add_argument(
        'model', type=str, metavar='model.h5',
        help='target model, whose loss is specified by `model.compile()`.')
    parser.add_argument(
        'compressed', type=str, metavar='compressed.h5',
        help='compressed model path')
    parser.add_argument(
        '--error', type=float, default=0.1, metavar='0.1',
        help=('layer-wise acceptable error. '
              'If this value is larger, compressed model will be '
              'less accurate and achieve better compression rate. '
              'Default: 0.1'))
    parser.add_argument(
        '--log-level', type=str, default='INFO',
        choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'],
        help='log level. Default: INFO')
    return parser
def main():
parser = gen_argparser()
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level))
model = load_model(args.model) # type: keras.models.Model
total_params_before = sum(count_total_params(model))
model = compress(model, acceptable_error=args.error)
total_params_after = sum(count_total_params(model))
model.save(args.compressed)
print('\n'.join((
'Compressed model',
' before #params {:>20,d}',
' after #params {:>20,d} ({:.2%})',
)).format(
total_params_before, total_params_after, 1 - float(total_params_after) / total_params_before,
))
if __name__ == '__main__':
main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/cifar10/train.py | training/helpers/keras_compressor/example/cifar10/train.py | from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.datasets import cifar10
from keras.layers import BatchNormalization, Conv2D, Dense, Dropout, Flatten, Input, MaxPool2D
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
def preprocess(X):
return X.astype('float32') / 255 * 2 - 1
class_num = 10
batch_size = 128
epochs = 300
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols, channels = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], channels, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], channels, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channels)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channels)
def gen_model():
# refer to http://torch.ch/blog/2015/07/30/cifar.html
img_input = Input(shape=(img_rows, img_cols, channels))
h = img_input
h = Conv2D(64, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.3)(h)
h = Conv2D(64, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = MaxPool2D((2, 2), strides=(2, 2))(h)
h = Conv2D(128, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(128, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = MaxPool2D((2, 2), strides=(2, 2))(h)
h = Conv2D(256, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(256, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(256, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = MaxPool2D((2, 2), strides=(2, 2))(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = MaxPool2D((2, 2), strides=(2, 2))(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = Dropout(0.4)(h)
h = Conv2D(512, (3, 3), activation='relu', padding='same')(h)
h = BatchNormalization()(h)
h = MaxPool2D((2, 2), strides=(2, 2))(h)
h = Flatten()(h)
h = Dropout(0.5)(h)
h = Dense(512, activation='relu')(h)
h = BatchNormalization()(h)
h = Dropout(0.5)(h)
h = Dense(class_num, activation='softmax')(h)
model = Model(img_input, h)
return model
model = gen_model()
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
datagen = ImageDataGenerator(
zoom_range=0.05,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
)
datagen.fit(X_train)
model.fit_generator(
datagen.flow(X_train, y_train, batch_size=batch_size),
steps_per_epoch=X_train.shape[0] // batch_size,
epochs=epochs,
validation_data=(X_test, y_test),
callbacks=[
EarlyStopping(patience=20),
],
)
score = model.evaluate(X_test, y_test)
print('test accuracy: ', score[1])
# re-compile model
# not to save optimizer variables in model data
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.save('model_raw.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/cifar10/compress.py | training/helpers/keras_compressor/example/cifar10/compress.py | import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
model = load_model('./model_raw.h5')
model = compress(model, 3e-1)
model.save('./model_compressed.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/cifar10/finetune.py | training/helpers/keras_compressor/example/cifar10/finetune.py | import keras.backend as K
import keras.callbacks as C
from keras.datasets import cifar10
from keras.models import load_model
from keras.utils.np_utils import to_categorical
from keras_compressor import custom_objects
def preprocess(X):
return X.astype('float32') / 255 * 2 - 1
class_num = 10
batch_size = 128
epochs = 12
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols, channel = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], channel, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], channel, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channel)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channel)
model = load_model('model_compressed.h5', custom_objects)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.fit(
X_train, y_train,
batch_size=128,
epochs=epochs,
validation_data=(X_test, y_test),
callbacks=[
C.EarlyStopping(patience=20),
],
)
score = model.evaluate(X_test, y_test)
print('test accuracy: ', score[1])
# re-compile model
# not to save optimizer variables in model data
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.save('model_finetuned.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/cifar10/evaluate.py | training/helpers/keras_compressor/example/cifar10/evaluate.py | import sys
from keras import backend as K
from keras.datasets import cifar10
from keras.models import load_model
from keras.utils import to_categorical
from keras_compressor import custom_objects
def preprocess(X):
return X.astype('float32') / 255 * 2 - 1
def usage():
print('{} model.h5'.format(sys.argv[0]))
def load_cifar10():
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols, channel = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], channel, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], channel, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, channel)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, channel)
return (X_train, y_train), (X_test, y_test)
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
model_path = sys.argv[1]
_, (X_test, y_test) = load_cifar10()
model = load_model(model_path, custom_objects)
result = model.evaluate(X_test, y_test, verbose=0)
model.summary()
print(result)
if __name__ == '__main__':
main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/mnist/train.py | training/helpers/keras_compressor/example/mnist/train.py | from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Dropout, Flatten, Input, MaxPool2D
from keras.models import Model
from keras.utils.np_utils import to_categorical
def preprocess(X):
return X.astype('float32') / 255
class_num = 10
batch_size = 128
epochs = 100
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
def gen_model():
# from keras mnist tutorial
img_input = Input(shape=(img_rows, img_cols, 1))
h = img_input
h = Conv2D(32, (3, 3), activation='relu')(h)
h = Dropout(0.25)(h)
h = Conv2D(64, (3, 3), activation='relu')(h)
h = MaxPool2D((2, 2))(h)
h = Dropout(0.25)(h)
h = Flatten()(h)
h = Dense(128, activation='relu')(h)
h = Dropout(0.5)(h)
h = Dense(class_num, activation='softmax')(h)
model = Model(img_input, h)
return model
model = gen_model()
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.fit(
X_train, y_train,
batch_size=128,
epochs=epochs,
validation_data=(X_test, y_test),
callbacks=[
EarlyStopping(patience=3),
],
)
score = model.evaluate(X_test, y_test)
print('test accuracy: ', score[1])
# re-compile model
# not to save optimizer variables in model data
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.save('model_raw.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/mnist/compress.py | training/helpers/keras_compressor/example/mnist/compress.py | import logging
from keras.models import load_model
from keras_compressor.compressor import compress
logging.basicConfig(
level=logging.INFO,
)
model = load_model('./model_raw.h5')
model = compress(model, 7e-1)
model.save('./model_compressed.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/mnist/finetune.py | training/helpers/keras_compressor/example/mnist/finetune.py | from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.datasets import mnist
from keras.models import load_model
from keras.utils.np_utils import to_categorical
from keras_compressor.layers import custom_layers
def preprocess(X):
return X.astype('float32') / 255
class_num = 10
batch_size = 128
epochs = 100
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
model = load_model('model_compressed.h5', custom_layers)
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.fit(
X_train, y_train,
batch_size=128,
epochs=epochs,
validation_data=(X_test, y_test),
callbacks=[
EarlyStopping(patience=3),
],
)
score = model.evaluate(X_test, y_test)
print('test accuracy: ', score[1])
# re-compile model
# not to save optimizer variables in model data
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'],
)
model.save('model_finetuned.h5')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/example/mnist/evaluate.py | training/helpers/keras_compressor/example/mnist/evaluate.py | import sys
from keras import backend as K
from keras.datasets import mnist
from keras.models import load_model
from keras.utils import to_categorical
from keras_compressor import custom_objects
def preprocess(X):
return X.astype('float32') / 255
def usage():
print('{} model.h5'.format(sys.argv[0]))
def load_mnist():
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_test = preprocess(X_train), preprocess(X_test)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
_, img_rows, img_cols = X_train.shape
if K.image_data_format() == 'channels_first':
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
return (X_train, y_train), (X_test, y_test)
def main():
if len(sys.argv) != 2:
usage()
sys.exit(1)
model_path = sys.argv[1]
_, (X_test, y_test) = load_mnist()
model = load_model(model_path, custom_objects)
result = model.evaluate(X_test, y_test, verbose=0)
model.summary()
print(result)
if __name__ == '__main__':
main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/utils.py | training/helpers/keras_compressor/keras_compressor/utils.py | from typing import Any, Dict, List
from keras.engine.topology import Layer, Node
def swap_layer_connection(old_layer: Layer, new_layer: Layer) -> None:
'''connect nodes of calc graph for new_layer and disconnect ones for old_layers
Keras manages calculation graph by nodes which hold connection between
layres. To swap old layer and new layer, it is required to delete nodes
of old layer and to create new nodes of new layer.
:arg old_layer: Old layer. The connection to/from this layer will be removed.
:arg new_layer: New layer. The connection to/from old layer will be connected to/from
this layer.
:return: None
'''
# the set of inbound layer which have old outbound_node
inbound_layers = set()
# create new inbound nodes
for node in old_layer._inbound_nodes: # type: Node
Node(
new_layer, node.inbound_layers,
node.node_indices, node.tensor_indices,
node.input_tensors, node.output_tensors,
node.input_masks, node.output_masks,
node.input_shapes, node.output_shapes,
)
inbound_layers.union(set(node.inbound_layers))
# remove old outbound node of inbound layers
for layer in inbound_layers: # type: Layer
old_nodes = filter(
lambda n: n.outbound_layer == old_layer,
layer._outbound_nodes,
)
for n in old_nodes: # type: Node
layer._outbound_nodes.remove(n)
# the set of outbound layer which have old inbound_nodes
outbound_layers = set()
# create new outbound nodes
for node in old_layer._outbound_nodes: # type: Node
layers = list(node.inbound_layers)
while old_layer in layers:
idx = layers.index(old_layer)
layers[idx] = new_layer
Node(
node.outbound_layer, layers,
node.node_indices, node.tensor_indices,
node.input_tensors, node.output_tensors,
node.input_masks, node.output_masks,
node.input_shapes, node.output_shapes,
)
outbound_layers.add(node.outbound_layer)
# remove old inbound_node of outbound layers
for layer in outbound_layers: # type: Layer
old_nodes = filter(
lambda n: old_layer in n.inbound_layers,
layer._inbound_nodes,
)
for n in old_nodes:
layer._inbound_nodes.remove(n)
def convert_config(
base_config: Dict[str, Any],
ignore_args: List[str],
converts: Dict[str, List[str]],
new_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
'''convert old layer's config to new layer's config.
:param base_config: Base config. Generally a config of old layer.
:param ignore_args: Ignore arg names. Not required arg names in new layer,
though them is required in old layer.
:param converts: Ignore name conversion dictionary, whose key is old layer's
arg name in base_config, and whose value is new layer's arg names(list).
:param new_kwargs: The new kwargs.
:return: Converted config.
'''
kwargs = {}
for k, v in base_config.items():
if k in ignore_args:
continue
elif k in converts:
for new_k in converts[k]:
kwargs[new_k] = v
else:
kwargs[k] = v
kwargs.update(new_kwargs)
return kwargs
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/__init__.py | training/helpers/keras_compressor/keras_compressor/__init__.py | from .layers import custom_layers
__all__ = ['custom_objects']
custom_objects = dict(custom_layers.items()) # shallow copy
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/compressor.py | training/helpers/keras_compressor/keras_compressor/compressor.py | import logging
from collections import defaultdict
from typing import Dict, List, Type
from keras.engine import Layer, Model
from .factorizer import Factorizer
from .factorizers.svd import SVDFactorizer
from .factorizers.tucker import TuckerFactorizer
from .utils import swap_layer_connection
logger = logging.getLogger(__name__)
def compress(model: Model, acceptable_error: float,
factorizers=None) -> Model:
"""compress model under acceptable error
compress each model's layer by using given factorizers.
If the factorizer compress the layer, swap the layer and compressed layer by re-creating
node on computational graph.
:param model: Target model
:param acceptable_error: Layer-wize acceptable output error. If this value is smaller
the compressed model will be more accurate. The calculation process of this error is
depend on each factorizer. So see the implementation.
:param factorizers: Applicable factorizers. Factorizer factorize each layer if factorizer
can factorize the layer.
:return: Compressed model
"""
if factorizers is None:
factorizers = [SVDFactorizer, TuckerFactorizer]
layer2factorizers = defaultdict(list) # type: Dict[Type[Layer], List[Type[Factorizer]]]
for fact in factorizers:
for layer in fact.factorize_target_layers:
layer2factorizers[layer].append(fact)
for layer_idx, layer in enumerate(model.layers):
layer_class = type(layer)
if layer_class not in layer2factorizers:
logger.info(
'factorizer not found layer:{!r}'.format(layer)
)
continue
new_layer = None
for factorizer in layer2factorizers[layer_class]: # type: Factorizer
logger.info(
'factorizer found layer:{!r} factorizer:{!r}'.format(
layer, factorizer,
)
)
new_layer = factorizer.compress(layer, acceptable_error)
if new_layer is None: # failed factorization
logger.info(
'factorization failed layer:{!r} factorizer:{!r}'.format(
layer, factorizer,
)
)
continue
else: # succeeded factorization
break
if new_layer is not None:
logger.info(
'swap old/new layer old_layer:{!r} new_layer{!r}'.format(
layer, new_layer,
)
)
swap_layer_connection(layer, new_layer)
model.layers[layer_idx] = new_layer
new_model = Model(model.inputs, model.outputs)
new_model.compile(
optimizer=model.optimizer.__class__.__name__, # TODO: improve here
# model.optimizer is instance of Optimizer and hold some variables for target model.
# Optimizer must be re-initialized, because compress function changes model structure.
loss=model.loss,
metrics=model.metrics,
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
)
return new_model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/factorizer.py | training/helpers/keras_compressor/keras_compressor/factorizer.py | from typing import List, Optional, Type
from keras.layers import Layer
class Factorizer:
factorize_target_layers = [] # type: List[Type[Layer]]
@classmethod
def compress(cls, layer: Layer, acceptable_error: float) -> Optional[Layer]:
"""try to compress the layer under acceptable_error.
Outputs compressed layer if compression succeeded. If not, return None.
:param layer:
:param acceptable_error:
:return:
"""
raise NotImplementedError
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/layers.py | training/helpers/keras_compressor/keras_compressor/layers.py | from keras import backend as K
from keras import activations, constraints, initializers, regularizers
from keras.engine import InputSpec, Layer
from keras.layers import Dense
from keras.utils import conv_utils
class FactorizedDense(Layer):
"""Just your regular densely-connected NN layer.
This layer based on `keras.layers.core.Dense` and behave like it.
`FactorizedDense` implements the operation:
`output = activation(dot(dot(input, pre_kernel), post_kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `pre_kernel` and `post_kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `pre_kernel`.
# Arguments
units: Positive integer, dimensionality of the output space.
components: Positive integer or None, the size of internal components.
If given None, the output is calculated as the same manner as `Dense` layer.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
pre_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
post_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
pre_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
post_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
kernel_constraint: Constraint function applied to
the `kernel` weights matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
# Output shape
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
target_layer_types = [Dense]
def __init__(self, units, components,
activation=None,
use_bias=True,
pre_kernel_initializer='glorot_uniform',
post_kernel_initializer='glorot_uniform',
bias_initializer='zeros',
pre_kernel_regularizer=None,
post_kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
pre_kernel_constraint=None,
post_kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(FactorizedDense, self).__init__(**kwargs)
self.units = units
self.components = components
self.activation = activations.get(activation)
self.use_bias = use_bias
self.pre_kernel_initializer = initializers.get(pre_kernel_initializer)
self.post_kernel_initializer = initializers.get(post_kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.pre_kernel_regularizer = regularizers.get(pre_kernel_regularizer)
self.post_kernel_regularizer = regularizers.get(post_kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.pre_kernel_constraint = constraints.get(pre_kernel_constraint)
self.post_kernel_constraint = constraints.get(post_kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(min_ndim=2)
self.supports_masking = True
def build(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[-1]
is_factorized = self.components is not None
if is_factorized:
shape = (input_dim, self.components)
else:
shape = (input_dim, self.units)
self.pre_kernel = self.add_weight(shape,
initializer=self.pre_kernel_initializer,
name='pre_kernel',
regularizer=self.pre_kernel_regularizer,
constraint=self.pre_kernel_constraint)
if not is_factorized:
self.post_kernel = None
else:
self.post_kernel = self.add_weight((self.components, self.units),
initializer=self.post_kernel_initializer,
name='kernel',
regularizer=self.post_kernel_regularizer,
constraint=self.post_kernel_constraint)
if self.use_bias:
self.bias = self.add_weight((self.units,),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
self.built = True
def call(self, inputs):
h = K.dot(inputs, self.pre_kernel)
if self.post_kernel is not None:
h = K.dot(h, self.post_kernel)
if self.use_bias:
h = K.bias_add(h, self.bias)
if self.activation is not None:
h = self.activation(h)
return h
def compute_output_shape(self, input_shape):
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'components': self.components,
'use_bias': self.use_bias,
'pre_kernel_initializer': initializers.serialize(self.pre_kernel_initializer),
'post_kernel_initializer': initializers.serialize(self.post_kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'pre_kernel_regularizer': regularizers.serialize(self.pre_kernel_regularizer),
'post_kernel_regularizer': regularizers.serialize(self.post_kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'pre_kernel_constraint': constraints.serialize(self.pre_kernel_constraint),
'post_kernel_constraint': constraints.serialize(self.post_kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(FactorizedDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FactorizedConv2DTucker(Layer):
"""2D convolution layer with tucker decomposition.
This layer is based on `keras.layers.convolution.Conv2D` and behave like it.
The difference is the kernel is factorized by tucker decomposition.
If `use_bias` is True, a bias vector is created and added to the outputs.
Finally, if `activation` is not `None`, it is applied to the outputs as well.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
# Arguments
filters: Integer, the dimensionality of the output space
(i.e. the number output of filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
input_components: Integer or None, the number of components
of kernel for the input channel axis. If given None, the
factorization of input side is skipped.
output_components: Integer or None, the number of components
of kernel for the output channel axis. If given None, the
factorization of output side is skipped.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, width, height, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, width, height)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use
(see [activations](../activations.md)).
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
pre_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
post_kernel_initializer: Initializer for the `kernel` weights matrix
(see [initializers](../initializers.md)).
bias_initializer: Initializer for the bias vector
(see [initializers](../initializers.md)).
pre_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
post_kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix
(see [regularizer](../regularizers.md)).
bias_regularizer: Regularizer function applied to the bias vector
(see [regularizer](../regularizers.md)).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
(see [regularizer](../regularizers.md)).
pre_kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
post_kernel_constraint: Constraint function applied to the kernel matrix
(see [constraints](../constraints.md)).
bias_constraint: Constraint function applied to the bias vector
(see [constraints](../constraints.md)).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
# Output shape
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
             filters,
             kernel_size,
             input_components=None,
             output_components=None,
             strides=(1, 1),
             padding='valid',
             data_format=None,
             dilation_rate=(1, 1),
             activation=None,
             use_bias=True,
             pre_kernel_initializer='glorot_uniform',
             kernel_initializer='glorot_uniform',
             post_kernel_initializer='glorot_uniform',
             bias_initializer='zeros',
             pre_kernel_regularizer=None,
             kernel_regularizer=None,
             post_kernel_regularizer=None,
             bias_regularizer=None,
             activity_regularizer=None,
             pre_kernel_constraint=None,
             kernel_constraint=None,
             post_kernel_constraint=None,
             bias_constraint=None,
             **kwargs):
    """Create a Tucker-factorized 2D convolution layer.

    The full conv kernel is factorized into an optional 1x1 input
    projection (``input_components``), a core conv kernel and an optional
    1x1 output projection (``output_components``).  ``None`` for either
    component count disables that projection (no compression on that side).
    """
    super(FactorizedConv2DTucker, self).__init__(**kwargs)
    rank = 2  # spatial rank: this is a 2D convolution
    self.rank = rank
    self.input_components = input_components
    self.output_components = output_components
    self.filters = filters
    # (the original code assigned self.output_components a second,
    # redundant time here; that duplicate assignment was removed)
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = K.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank, 'dilation_rate')
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.pre_kernel_initializer = initializers.get(pre_kernel_initializer)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.post_kernel_initializer = initializers.get(post_kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.pre_kernel_regularizer = regularizers.get(pre_kernel_regularizer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.post_kernel_regularizer = regularizers.get(post_kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.pre_kernel_constraint = constraints.get(pre_kernel_constraint)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.post_kernel_constraint = constraints.get(post_kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.input_spec = InputSpec(ndim=rank + 2)  # batch, H, W, C
def build(self, input_shape):
    """Create the optional pre/post 1x1 projection kernels, the core
    kernel and the bias, then mark the layer as built."""
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis]
    # None means "no compression" on that side: fall back to the full size.
    if self.input_components is None:
        input_components = input_dim
    else:
        input_components = self.input_components
    if self.output_components is None:
        output_components = self.filters
    else:
        output_components = self.output_components
    # core kernel shape: (kh, kw, input_components, output_components)
    kernel_shape = self.kernel_size + (input_components, output_components)
    if self.input_components is None:
        self.pre_kernel = None
    else:
        # 1x1 conv projecting input_dim -> input_components
        pre_kernel_shape = (1, 1) + (input_dim, self.input_components)
        self.pre_kernel = self.add_weight(pre_kernel_shape,
                                          initializer=self.pre_kernel_initializer,
                                          name='pre_kernel',
                                          regularizer=self.pre_kernel_regularizer,
                                          constraint=self.pre_kernel_constraint)
    self.kernel = self.add_weight(kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    if self.output_components is None:
        self.post_kernel = None
    else:
        # 1x1 conv projecting output_components -> filters
        post_kernel_shape = (1, 1) + (self.output_components, self.filters)
        self.post_kernel = self.add_weight(post_kernel_shape,
                                           initializer=self.post_kernel_initializer,
                                           name='post_kernel',
                                           regularizer=self.post_kernel_regularizer,
                                           constraint=self.post_kernel_constraint)
    if self.use_bias:
        self.bias = self.add_weight((self.filters,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.bias = None
    # Set input spec.
    self.input_spec = InputSpec(ndim=self.rank + 2,
                                axes={channel_axis: input_dim})
    self.built = True
def call(self, inputs):
    """Apply (optional) 1x1 input projection, the core convolution, the
    (optional) 1x1 output projection, then bias and activation."""
    h = inputs
    if self.pre_kernel is not None:
        # project input channels down to input_components
        h = K.conv2d(
            h,
            self.pre_kernel,
            strides=(1, 1),
            padding='valid',
            data_format=self.data_format,
            dilation_rate=(1, 1),
        )
    # core convolution in the compressed channel space
    h = K.conv2d(
        h,
        self.kernel,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate,
    )
    if self.post_kernel is not None:
        # project output_components back up to filters
        h = K.conv2d(
            h,
            self.post_kernel,
            strides=(1, 1),
            padding='valid',
            data_format=self.data_format,
            dilation_rate=(1, 1),
        )
    outputs = h
    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
def compute_output_shape(self, input_shape):
    """Return the output shape for either data format.

    Spatial dimensions are transformed by the core convolution; the 1x1
    projections never change the spatial extent.
    """
    def _spatial_dims(spatial):
        # map each spatial dim through the conv arithmetic
        return tuple(
            conv_utils.conv_output_length(
                dim,
                self.kernel_size[axis],
                padding=self.padding,
                stride=self.strides[axis],
                dilation=self.dilation_rate[axis])
            for axis, dim in enumerate(spatial))

    if self.data_format == 'channels_last':
        return (input_shape[0],) + _spatial_dims(input_shape[1:-1]) + (self.filters,)
    if self.data_format == 'channels_first':
        return (input_shape[0], self.filters) + _spatial_dims(input_shape[2:])
def get_config(self):
    """Return the serializable layer configuration.

    Fix: ``bias_initializer`` was previously serialized from
    ``self.kernel_initializer``, so a saved/reloaded model silently lost
    the configured bias initializer.
    """
    config = {
        'input_components': self.input_components,
        'output_components': self.output_components,
        'filters': self.filters,
        'kernel_size': self.kernel_size,
        'strides': self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate,
        'activation': activations.serialize(self.activation),
        'use_bias': self.use_bias,
        'pre_kernel_initializer': initializers.serialize(self.pre_kernel_initializer),
        'kernel_initializer': initializers.serialize(self.kernel_initializer),
        'post_kernel_initializer': initializers.serialize(self.post_kernel_initializer),
        'bias_initializer': initializers.serialize(self.bias_initializer),
        'pre_kernel_regularizer': regularizers.serialize(self.pre_kernel_regularizer),
        'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
        'post_kernel_regularizer': regularizers.serialize(self.post_kernel_regularizer),
        'bias_regularizer': regularizers.serialize(self.bias_regularizer),
        'activity_regularizer': regularizers.serialize(self.activity_regularizer),
        'pre_kernel_constraint': constraints.serialize(self.pre_kernel_constraint),
        'kernel_constraint': constraints.serialize(self.kernel_constraint),
        'post_kernel_constraint': constraints.serialize(self.post_kernel_constraint),
        'bias_constraint': constraints.serialize(self.bias_constraint)
    }
    base_config = super(FactorizedConv2DTucker, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
# Registry used to resolve these custom layer classes when a compressed
# model is re-loaded (pass as `custom_objects=custom_layers`).
custom_layers = {
    'FactorizedConv2DTucker': FactorizedConv2DTucker,
    'FactorizedDense': FactorizedDense,
}
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/factorizers/tucker.py | training/helpers/keras_compressor/keras_compressor/factorizers/tucker.py | import itertools
import logging
import math
from queue import PriorityQueue
from typing import Optional, Tuple
import numpy as np
from keras import backend as K
from keras.layers import Conv2D
from keras_compressor.factorizer import Factorizer
from keras_compressor.layers import FactorizedConv2DTucker
from keras_compressor.utils import convert_config
from sklearn.utils.extmath import randomized_svd
logger = logging.getLogger(__name__)
__all__ = ['TuckerFactorizer']
class ProblemData:
    '''A rectangular (x_range, y_range) region of the Tucker parameter
    search space.  Instances are ordered by diagonal size so they can be
    stored in a PriorityQueue (ties on priority fall back to __lt__).
    '''

    def __init__(self, x_range: range, y_range: range):
        self.x_range = x_range
        self.y_range = y_range

    def __str__(self):
        return '<Problem x_range={} y_range={}>'.format(self.x_range, self.y_range)

    def __lt__(self, other: 'ProblemData'):
        # smaller diagonal == smaller remaining search region
        return self.diag_length < other.diag_length

    def __eq__(self, other: 'ProblemData'):
        return (self.x_range, self.y_range) == (other.x_range, other.y_range)

    @property
    def diag_length(self):
        # Euclidean length of the region's diagonal
        width, height = len(self.x_range), len(self.y_range)
        return math.sqrt(width ** 2 + height ** 2)
class Tucker:
    '''Pure Tucker-2 decomposition helpers for 4D convolution kernels.
    '''

    @classmethod
    def factorize(cls, W: np.ndarray, in_comps: Optional[int], out_comps: Optional[int]) \
            -> Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray]]:
        """pure tucker decomposition
        :param W: W x H x I x O
        :param in_comps: N (None disables input-side compression)
        :param out_comps: M (None disables output-side compression)
        :return:
            C: W x H x N x M,
            U_in: I x N
            U_out: O x M
        """
        if in_comps is None:
            U_in = None
        else:
            # leading singular vectors of the mode-2 (input channel) unfolding
            U_in, _, _ = randomized_svd(cls._flatten(W, 2), in_comps)
            U_in = U_in.astype(W.dtype)
        if out_comps is None:
            U_out = None
        else:
            # leading singular vectors of the mode-3 (output channel) unfolding
            U_out, _, _ = randomized_svd(cls._flatten(W, 3), out_comps)
            U_out = U_out.astype(W.dtype)
        C = W.copy()
        if U_in is not None:
            C = np.einsum('whio,in->whno', C, U_in)
        if U_out is not None:
            C = np.einsum('whno,om->whnm', C, U_out)
        C = C.astype(W.dtype)
        return C, U_in, U_out

    @staticmethod
    def _get_matrix(W: np.ndarray, i: int, axis: int) -> np.ndarray:
        '''Return the slice of W at index *i* along *axis*.
        :param W: array of any rank
        :param i: index along *axis*
        :param axis: axis to slice
        :return: W with *axis* removed
        '''
        sli = [slice(None) for _ in range(W.ndim)]
        sli[axis] = i
        # Fix: numpy requires a tuple index here; indexing with a *list*
        # of slices was deprecated in numpy 1.15 and later removed, so
        # the original `W[sli]` raises on modern numpy.
        return W[tuple(sli)]

    @classmethod
    def _flatten(cls, W: np.ndarray, axis: int) -> np.ndarray:
        '''Unfold W along *axis* into a (W.shape[axis], prod(other dims)) matrix.
        :param W: array of any rank
        :param axis: mode along which to unfold
        :return: 2D unfolding of W
        '''
        dim = 1
        dims = []
        for i, v in enumerate(W.shape):
            if i != axis:
                dim *= v
                dims.append(v)
        res = np.zeros((W.shape[axis], dim))
        for i in range(W.shape[axis]):
            res[i] = cls._get_matrix(W, i, axis).ravel()
        return res
class TuckerParamSearcher:
    '''Search (input, output) component counts for Tucker decomposition.

    Best-first divide-and-conquer search over the rectangle
    [1, in_dim] x [1, out_dim], assuming reconstruction error decreases
    monotonically as either component count grows.  An empty range such
    as range(n, n) encodes the single fixed value n.
    '''

    def __init__(self, W: np.ndarray):
        width, height, in_dim, out_dim = W.shape
        self.width = width
        self.height = height
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.W = W
        self.best_point = None
        # start from the uncompressed parameter count
        self.best_param_num = width * height * in_dim * out_dim
        self.prob_queue = PriorityQueue()

    def add_problem(self, prob: 'ProblemData'):
        '''Enqueue *prob*, prioritized by its best-case parameter count.'''
        param_num = self.calc_min_param_num_by(prob)
        self.prob_queue.put((param_num, prob))

    def calc_min_param_num_by(self, prob: 'ProblemData'):
        '''Lower bound of the parameter count reachable inside *prob*
        (the minimum over its four corners).'''
        res = []
        for in_comp, out_comp in itertools.product(
                [prob.x_range.start, prob.x_range.stop],
                [prob.y_range.start, prob.y_range.stop],
        ):
            res.append(self.calc_param_num(in_comp, out_comp))
        return min(res)

    def calc_param_num(self, in_comp: int, out_comp: int):
        '''Total parameter count of a factorization with the given
        component counts (core kernel plus any projection matrices).'''
        params = self.width * self.height * in_comp * out_comp
        if in_comp != self.in_dim:  # compression in input channel
            params += self.in_dim * in_comp
        if out_comp != self.out_dim:  # compression in output channel
            params += self.out_dim * out_comp
        return params

    def update_best_point_if_needed(self, in_comp, out_comp):
        '''Record (in_comp, out_comp) as the best solution if it uses
        fewer parameters than the current best.'''
        current_param_num = self.calc_param_num(in_comp, out_comp)
        if current_param_num < self.best_param_num:
            self.best_point = (in_comp, out_comp)
            self.best_param_num = current_param_num
            logger.debug('update best_point={} best_param_num={}'.format(
                self.best_point, self.best_param_num,
            ))

    def factorize_in_acceptable_error(self, acceptable_error: float) \
            -> Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray]]:
        """Search the smallest-parameter factorization under the error budget.

        :param acceptable_error: relative reconstruction-error threshold
        :return:
            C: W x H x N x M
            U_in: I x N (or None when no input-side compression helps)
            U_out: O x M (or None when no output-side compression helps)
        """
        # Search N and M whose the number of parameter is the smallest
        # under acceptable error.
        # This algorithm is based on divide and conquer algorithm and
        # based on the assumption that `error` monotonically decrease by increasing N or M.
        width, height, in_dim, out_dim = self.width, self.height, self.in_dim, self.out_dim
        # seed problems: the uncompressed point, the full interior
        # rectangle, and the two one-sided edges
        self.add_problem(ProblemData(range(in_dim, in_dim), range(out_dim, out_dim)))
        if 2 <= in_dim and 2 <= out_dim:
            self.add_problem(ProblemData(range(1, in_dim - 1), range(1, out_dim - 1)))
        if 1 <= in_dim:
            self.add_problem(ProblemData(range(in_dim, in_dim), range(1, out_dim - 1)))
        if 1 <= out_dim:
            self.add_problem(ProblemData(range(1, in_dim - 1), range(out_dim, out_dim)))

        while not self.prob_queue.empty():
            _, current_prob = self.prob_queue.get()  # type: ProblemData
            if self.best_param_num < self.calc_min_param_num_by(current_prob):
                # no region in the queue can beat the current best: stop
                logger.debug('no more best_param_num:{} prob:{}'.format(self.best_param_num, current_prob))
                break
            if len(current_prob.x_range) == 0 and len(current_prob.y_range) == 0:
                # degenerate region: a single fixed point
                self.update_best_point_if_needed(current_prob.x_range.start, current_prob.y_range.start)
                continue
            logger.debug('current queue.size:{} prob:{}'.format(
                self.prob_queue.qsize(), current_prob,
            ))
            result = self._find_edge_point(
                acceptable_error, current_prob,
            )  # type: Optional[Tuple[int,int]]
            logger.debug('result={} prob={}'.format(
                result, current_prob
            ))
            if result is None:
                # nothing acceptable inside this region
                continue
            x, y = result  # type: Tuple[int, int]
            self.update_best_point_if_needed(x, y)

            # divide current problem to sub-problems
            if len(current_prob.x_range) == 0 or len(current_prob.y_range) == 0:
                # X.
                # Y
                logger.debug('no sub-problem:{}'.format(current_prob))
                continue
            if len(current_prob.x_range) == 1 and len(current_prob.y_range) == 1:
                # X# -> (| and _) or .
                # Y
                if x == current_prob.x_range.stop and y == current_prob.y_range.stop:
                    # right top point
                    #           _
                    # X# -> X and |
                    # Y     Y   Y
                    sub_prob1 = ProblemData(
                        x_range=range(current_prob.x_range.start, x),
                        y_range=range(y, y),
                    )
                    sub_prob2 = ProblemData(
                        x_range=range(x, x),
                        y_range=range(current_prob.y_range.start, y),
                    )
                    self.add_problem(sub_prob1)
                    self.add_problem(sub_prob2)
                    logger.debug('two sub-problems:{}, (x,y)=({},{}) -> {},{}'.format(
                        current_prob, x, y,
                        sub_prob1, sub_prob2
                    ))
                    continue
                else:  # x == current_prob.x_range.start and y == current_prob.y_range.start
                    logger.debug('no sub-problem:{}'.format(current_prob))
                    continue
            if len(current_prob.x_range) == 1 and len(current_prob.y_range) > 1:
                # X####### -> X |
                # Y           Y
                sub_prob = ProblemData(
                    x_range=current_prob.x_range,
                    y_range=range(y, y)
                )
                # Fix: the sub-problem was previously created and logged
                # but never enqueued, silently dropping part of the
                # search space.
                self.add_problem(sub_prob)
                logger.debug('one row space, one sub-problem:{}, (x,y)=({},{}) -> {}'.format(
                    current_prob, x, y,
                    sub_prob,
                ))
                continue
            if len(current_prob.x_range) > 1 and len(current_prob.y_range) == 1:
                # #     _
                # X# -> X
                # #
                # Y     Y
                sub_prob = ProblemData(
                    x_range=range(x, x),
                    y_range=current_prob.y_range,
                )
                # Fix: same as above - this sub-problem was never enqueued.
                self.add_problem(sub_prob)
                logger.debug('one column space, one sub-problem:{}, (x,y)=({},{}) -> {}'.format(
                    current_prob, x, y,
                    sub_prob,
                ))
                continue
            if len(current_prob.x_range) >= 2 and len(current_prob.y_range) >= 2:
                # ###     ##
                # X### -> X## and X
                # ###              #
                # Y       Y       Y
                sub_prob1 = ProblemData(
                    x_range=range(current_prob.x_range.start, x),
                    y_range=range(y, current_prob.y_range.stop),
                )
                sub_prob2 = ProblemData(
                    x_range=range(x, current_prob.x_range.stop),
                    y_range=range(current_prob.y_range.start, y),
                )
                self.add_problem(sub_prob1)
                self.add_problem(sub_prob2)
                logger.debug('two sub-problems:{}, (x,y)=({},{}) -> {},{}'.format(
                    current_prob, x, y,
                    sub_prob1, sub_prob2
                ))
                continue

        if self.best_point is None:
            logger.debug('no factorization is best')
            return self.W, None, None
        in_comp, out_comp = self.best_point
        # full-size components mean "no compression" on that side
        if in_comp >= self.in_dim:
            in_comp = None
        if out_comp >= self.out_dim:
            out_comp = None
        C, U_in, U_out = Tucker.factorize(self.W, in_comp, out_comp)
        return C, U_in, U_out

    def _find_edge_point(self, acceptable_error: float, current_prob: 'ProblemData') -> Optional[Tuple[int, int]]:
        '''Binary-search the region diagonal for the smallest acceptable
        point; returns None when nothing in the region is acceptable.'''
        x_range = current_prob.x_range
        y_range = current_prob.y_range
        acceptable_points = []
        # consider that acceptable point doesn't exist in the current_prob space.
        while len(x_range) > 0 or len(y_range) > 0:
            if len(x_range) in [0, 1]:
                x = x_range.start
            else:
                x = round((x_range.start + x_range.stop) / 2)
            if len(y_range) in [0, 1]:
                y = y_range.start
            else:
                y = round((y_range.start + y_range.stop) / 2)
            logger.debug('binary search (x,y)=({}, {}) x_range={} y_range={} prob={}'.format(
                x, y, x_range, y_range, current_prob
            ))
            C, U_in, U_out = Tucker.factorize(self.W, x, y)
            error = self.calc_error(self.W, C, U_in, U_out)
            if error < acceptable_error:
                logger.debug('binary search: under threshold={} error={}'.format(
                    acceptable_error, error,
                ))
                acceptable_points.append((x, y))
                # update ranges: shrink towards smaller components
                x_range = range(x_range.start, x)
                y_range = range(y_range.start, y)
            else:
                logger.debug('binary search: over threshold={} error={}'.format(
                    acceptable_error, error,
                ))
                # update ranges: shrink towards larger components
                if x + 1 <= x_range.stop:
                    new_x_start = x + 1
                else:
                    new_x_start = x
                x_range = range(new_x_start, x_range.stop)
                if y + 1 <= y_range.stop:
                    new_y_start = y + 1
                else:
                    new_y_start = y
                y_range = range(new_y_start, y_range.stop)
        if len(acceptable_points) == 0:
            return None
        else:
            # the last acceptable point found is the smallest one
            return acceptable_points[-1]

    @staticmethod
    def calc_error(W: np.ndarray, C: np.ndarray, U_in: np.ndarray, U_out: np.ndarray) -> float:
        """calculate expected bound of error of output of layer
        :param W: W x H x I x O
        :param C: W x H x N x M
        :param U_in: I x N
        :param U_out: O x M
        :return: mean |W - W_hat| normalized by mean |W|
        """
        W_hat = np.einsum('whnm,in,om->whio', C, U_in, U_out)
        elemental_error = np.abs(W - W_hat)
        error_bound = np.mean(elemental_error) / np.mean(np.abs(W))
        return error_bound
class TuckerFactorizer(Factorizer):
    # Compresses Conv2D layers via Tucker-2 decomposition of the kernel.
    factorize_target_layers = [Conv2D]

    @classmethod
    def compress(cls, old_layer: Conv2D, acceptable_error: float) -> Optional[FactorizedConv2DTucker]:
        '''Compress layer's kernel 4D tensor using tucker decomposition under acceptable_error.
        If it can't reduce the number of parameters, returns `None`.
        :param old_layer: the Conv2D layer to compress
        :param acceptable_error: relative reconstruction-error threshold
        :return: a FactorizedConv2DTucker replacement layer, or None
        '''
        # kernel tensor; assumes channels-last kernel layout (kh, kw, in, out)
        # -- TODO confirm for channels_first models
        W = K.get_value(old_layer.kernel)
        searcher = TuckerParamSearcher(W)
        C, U_in, U_out = searcher.factorize_in_acceptable_error(acceptable_error)
        kernel = C
        if U_in is None and U_out is None:  # compression failed
            return None
        if U_in is None:
            input_components = None
            pre_kernel = None
        else:
            input_components = U_in.shape[1]
            # reshape the (in, n) factor into a 1x1 conv kernel
            pre_kernel = U_in[np.newaxis, np.newaxis, :, :]
        if U_out is None:
            output_components = None
            post_kernel = None
        else:
            output_components = U_out.shape[1]
            # transpose so the 1x1 conv maps components -> original filters
            post_kernel = U_out.T[np.newaxis, np.newaxis, :, :]
        base_config = old_layer.get_config()
        # map the Conv2D config onto the factorized layer's richer config:
        # the single kernel initializer/regularizer fans out to pre/core/post
        new_config = convert_config(
            base_config,
            ignore_args=[
                'kernel_constraint',
            ],
            converts={
                'kernel_regularizer': [
                    'pre_kernel_regularizer',
                    'kernel_regularizer',
                    'post_kernel_regularizer',
                ],
                'kernel_initializer': [
                    'pre_kernel_initializer',
                    'kernel_initializer',
                    'post_kernel_initializer',
                ],
            },
            new_kwargs={
                'input_components': input_components,
                'output_components': output_components,
            }
        )
        new_layer = FactorizedConv2DTucker(**new_config)
        new_layer.build(old_layer.get_input_shape_at(0))  # to initialize weight variables
        K.set_value(new_layer.kernel, kernel)
        if pre_kernel is not None:
            K.set_value(new_layer.pre_kernel, pre_kernel)
        if post_kernel is not None:
            K.set_value(new_layer.post_kernel, post_kernel)
        return new_layer
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/factorizers/svd.py | training/helpers/keras_compressor/keras_compressor/factorizers/svd.py | import logging
import math
from typing import Optional, Tuple
import numpy as np
from keras import backend as K
from keras.engine import Layer
from keras.layers import Dense
from sklearn.utils.extmath import randomized_svd
from ..factorizer import Factorizer
from ..layers import FactorizedDense
from ..utils import convert_config
logger = logging.getLogger(__name__)
class SVDFactorizer(Factorizer):
    # Compresses Dense layers via truncated SVD of the weight matrix.
    factorize_target_layers = [Dense]

    @staticmethod
    def _factorize(W: np.ndarray, components: int):
        """Truncated-SVD factorization of W.
        :param W: I x O
        :param components: K
        :return:
            U: I x K
            V: K x O
        """
        u, s, v = randomized_svd(W, components)
        # split the singular values evenly between the two factors
        scale = np.diag(np.sqrt(s))
        U, V = u.dot(scale).astype(W.dtype), scale.dot(v).astype(W.dtype)
        return U, V

    @staticmethod
    def _calc_error(W: np.ndarray, U: np.ndarray, V: np.ndarray):
        '''Relative mean-absolute reconstruction error of U.dot(V) vs W.
        :param W: I x O
        :param U: I x K
        :param V: K x O
        :return: mean |W - UV| normalized by mean |W|
        '''
        elemental_error = np.abs(W - U.dot(V))
        error_bound = np.mean(elemental_error) / np.mean(np.abs(W))
        return error_bound

    @classmethod
    def compress(cls, old_layer: Dense, acceptable_error: float) -> Optional[Layer]:
        '''compress old_layer under acceptable error using SVD.
        If it can't reduce the number of parameters, returns None,
        :param old_layer: the Dense layer to compress
        :param acceptable_error: relative reconstruction-error threshold
        :return: a FactorizedDense replacement layer, or None
        '''
        W = K.get_value(old_layer.kernel)
        logger.debug('factorization start W.shape:{}'.format(W.shape))
        # largest K that still reduces the parameter count: I*O / (I + O)
        max_comps = math.floor(np.size(W) / sum(W.shape))
        U, V = cls._factorize(W, max_comps)
        if cls._calc_error(W, U, V) >= acceptable_error:
            # Factorizer can't reduce the number of parameters in acceptable error by SVD.
            # So, this factorizer failed compression.
            return None
        U, V = cls._compress_in_acceptable_error(
            W, acceptable_error,
            start_param_range=range(1, max_comps),
        )
        components = U.shape[-1]
        base_config = old_layer.get_config()
        # map the Dense config onto the factorized layer's config:
        # the single kernel initializer/regularizer fans out to pre/post
        new_config = convert_config(
            base_config,
            ignore_args=[
                'kernel_constraint',
            ],
            converts={
                'kernel_regularizer': [
                    'pre_kernel_regularizer',
                    'post_kernel_regularizer',
                ],
                'kernel_initializer': [
                    'pre_kernel_initializer',
                    'post_kernel_initializer',
                ]
            },
            new_kwargs={
                'components': components,
            },
        )
        new_layer = FactorizedDense(**new_config)
        new_layer.build(old_layer.get_input_shape_at(0))  # to initialize weight variables
        K.set_value(new_layer.pre_kernel, U)
        K.set_value(new_layer.post_kernel, V)
        return new_layer

    @classmethod
    def _compress_in_acceptable_error(cls, W, acceptable_error: float, start_param_range: range) \
            -> Tuple[np.ndarray, np.ndarray]:
        '''Binary-search the smallest component count whose error is acceptable.

        Invariant: the answer lies in [param_range.start, param_range.stop];
        the stop value is known-acceptable from the caller's max_comps check,
        so the search converges to it when nothing smaller is acceptable.
        '''
        param_range = start_param_range
        while len(param_range) > 0:  # while not (param_range.start == param_range.stop)
            logger.debug('current param_range:{}'.format(param_range))
            if len(param_range) == 1:
                ncomp = param_range.start
            else:
                ncomp = round((param_range.start + param_range.stop) / 2)
            U, V = cls._factorize(W, ncomp)
            error = cls._calc_error(W, U, V)
            if error <= acceptable_error:  # smallest ncomp is equal to or smaller than ncomp
                # On the assumption that `error` monotonically decreasing by increasing ncomp
                logger.debug('under acceptable error ncomp:{} threshold:{} error:{}'.format(
                    ncomp, acceptable_error, error,
                ))
                param_range = range(param_range.start, ncomp)
            else:  # the best is larger than ncomp
                logger.debug('over acceptable error ncomp:{} threshold:{} error:{}'.format(
                    ncomp, acceptable_error, error,
                ))
                param_range = range(ncomp + 1, param_range.stop)
        # param_range.start == param_range.stop
        smallest_ncomp = param_range.start
        logger.debug('smallest_ncomp:{}, W.shape:{} compress_rate:{}'.format(
            smallest_ncomp, W.shape, sum(W.shape) * smallest_ncomp / np.size(W),
        ))
        U, V = cls._factorize(W, smallest_ncomp)
        return U, V
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/keras_compressor/keras_compressor/factorizers/__init__.py | training/helpers/keras_compressor/keras_compressor/factorizers/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/create_csv.py | annotation/create_csv.py | '''
annotate.py
Annotate audio, text, image, or video files for use with regression modeling in Allie.
All you need is a folder, which identifies the type of file within it, and then it goes
through each file to annotate (as .JSON)
'''
import os, sys, datetime, json, time
import pandas as pd
from optparse import OptionParser
def prev_dir(directory):
    '''Return *directory* with its last '/'-separated component removed
    (e.g. 'a/b/c' -> 'a/b'; a single component yields '').'''
    parts = directory.split('/')
    return '/'.join(parts[:-1])
def most_common(lst):
    '''get most common item in a list (ties broken arbitrarily)'''
    candidates = set(lst)
    return max(candidates, key=lambda item: lst.count(item))
# get all the options from the terminal
parser = OptionParser()
parser.add_option("-d", "--directory", dest="annotate_dir",
                  help="the directory to annotate", metavar="annotate_DIR")
parser.add_option("-s", "--sampletype", dest="sampletype",
                  help="specify the type of model to make predictions (e.g. audio, text, image, video, csv)", metavar="SAMPLETYPE")
parser.add_option("-c", "--classtype", dest="classtype",
                  help="specify the class type (e.g. stress level", metavar="CLASSTYPE")
parser.add_option("-p", "--problemtype", dest="problemtype",
                  help="specify the problem type (-c classification or -r regression", metavar="PROBLEMTYPE")
(options, args) = parser.parse_args()

curdir = os.getcwd()
# make ../features importable so make_features() can be loaded
prevdir = prev_dir(os.getcwd()) + '/features/'
sys.path.append(prevdir)
from standard_array import make_features


def classifyfolder(listdir):
    '''Guess the dominant sample type in a directory listing by extension.

    Fix: this helper was called below but never defined in this script
    (NameError whenever --sampletype was omitted); replicated here from
    annotate.py for consistency.
    '''
    filetypes = list()
    for name in listdir:
        if name.endswith(('.mp3', '.wav')):
            filetypes.append('audio')
        elif name.endswith(('.png', '.jpg')):
            filetypes.append('image')
        elif name.endswith('.txt'):
            filetypes.append('text')
        elif name.endswith(('.mp4', '.avi')):
            filetypes.append('video')
        elif name.endswith('.csv'):
            filetypes.append('csv')
    return most_common(filetypes)


# get annotate directory and sampletype
class_ = options.classtype
problemtype = options.problemtype
annotate_dir = options.annotate_dir
os.chdir(annotate_dir)
sampletype = options.sampletype
listdir = os.listdir()
if sampletype is None:
    sampletype = classifyfolder(listdir)
listdir = os.listdir()

# collect (value, filepath) pairs for every annotation matching the
# requested class and problem type
data = dict()
classes = list()
filepaths = list()
for i in range(len(listdir)):
    if listdir[i].endswith('.json'):
        g = json.load(open(listdir[i]))
        labels = g['labels']
        for j in range(len(labels)):
            try:
                if list(labels[j]) == [class_] and labels[j][class_]['problemtype'] == problemtype:
                    value = labels[j][class_]['value']
                    filepath = labels[j][class_]['annotate_dir'] + labels[j][class_]['file']
                    print(value)
                    print(filepath)
                    classes.append(value)
                    filepaths.append(filepath)
            except:
                # label entries with an unexpected shape are skipped (best-effort)
                pass

data[class_] = classes
data['paths'] = filepaths

os.chdir(curdir)
df = pd.DataFrame(data)
df.to_csv('%s_data.csv' % (class_), index=False)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/annotate.py | annotation/annotate.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
___ _ _ _ ___ ______ _____
/ _ \ | | | | (_) / _ \ | ___ \_ _|
/ /_\ \_ __ _ __ ___ | |_ __ _| |_ _ ___ _ __ / /_\ \| |_/ / | |
| _ | '_ \| '_ \ / _ \| __/ _` | __| |/ _ \| '_ \ | _ || __/ | |
| | | | | | | | | | (_) | || (_| | |_| | (_) | | | | | | | || | _| |_
\_| |_/_| |_|_| |_|\___/ \__\__,_|\__|_|\___/|_| |_| \_| |_/\_| \___/
Annotate audio, text, image, or video files for use with regression modeling in Allie.
All you need is a folder, which identifies the type of file within it, and then it goes
through each file to annotate (as .JSON)
Read more about file annotation @ https://github.com/jim-schwoebel/allie/tree/master/annotation
'''
import os, sys, datetime, json, time
from optparse import OptionParser
from tqdm import tqdm
def prev_dir(directory):
    '''Return *directory* with its last '/'-separated component removed
    (e.g. 'a/b/c' -> 'a/b'; a single component yields '').'''
    components = directory.split('/')
    return '/'.join(components[:-1])
def most_common(lst):
    '''get most common item in a list (ties broken arbitrarily)'''
    unique_items = set(lst)
    return max(unique_items, key=lst.count)
def classifyfolder(listdir):
    '''Guess the dominant sample type in a directory listing by extension.'''
    # extension groups checked in order; first match wins per file
    kind_by_exts = {
        ('.mp3', '.wav'): 'audio',
        ('.png', '.jpg'): 'image',
        ('.txt',): 'text',
        ('.mp4', '.avi'): 'video',
        ('.csv',): 'csv',
    }
    filetypes = []
    for name in listdir:
        for exts, kind in kind_by_exts.items():
            if name.endswith(exts):
                filetypes.append(kind)
                break
    # most frequent type wins (ties broken arbitrarily, as before)
    filetype = max(set(filetypes), key=filetypes.count)
    return filetype
def annotate_file(class_, filetype, file, problemtype):
    '''Play/open *file* and prompt the user for a numeric annotation.

    :param class_: name of the class being annotated
    :param filetype: 'audio' | 'image' | 'video' | 'text' (others are skipped)
    :param file: file name inside the annotation directory
    :param problemtype: 'r'/'regression' for free values, else binary 0/1
    :return: one-element list [{class_: {...annotation metadata...}}]
    '''
    # now go through and annotate each file
    if filetype == 'audio':
        print('playing file... %s' % (file.upper()))
        os.system('play "%s"' % (file))
    elif filetype == 'image':
        print('opening file... %s' % (file.upper()))
        os.system('open "%s"' % (file))
    elif filetype == 'video':
        print('playing file... %s' % (file.upper()))
        os.system('open "%s"' % (file))
    elif filetype == 'text':
        print('opening file... %s' % (file.upper()))
        os.system('open "%s"' % (file))
    else:
        print('file type not supported for annotation')

    if problemtype in ['r', 'regression']:
        prompt = '%s value?\n' % (class_.upper())
    else:
        prompt = '%s label 1 (yes) or 0 (no)?\n' % (class_.upper())

    # Re-prompt until the user enters a valid number.  Fix: the original
    # retry path recursed with annotate_file(class_, filetype, file) -
    # missing the problemtype argument (TypeError) - and then called
    # float() on the returned list, so any invalid entry crashed.
    while True:
        try:
            annotation = float(input(prompt))
            break
        except ValueError:
            print('error annotating, annotating again...')

    label = dict()
    label[class_] = {'value': annotation,
                     'datetime': str(datetime.datetime.now()),
                     'filetype': filetype,
                     'file': file,
                     'problemtype': problemtype,
                     'annotate_dir': annotate_dir}
    annotation = [label]
    print(annotation)
    return annotation
# get all the options from the terminal
parser = OptionParser()
parser.add_option("-d", "--directory", dest="annotate_dir",
                  help="the directory to annotate", metavar="annotate_dir")
parser.add_option("-s", "--sampletype", dest="sampletype",
                  help="specify the type of model to make predictions (e.g. audio, text, image, video, csv)", metavar="sampletype")
parser.add_option("-c", "--classtype", dest="classtype",
                  help="specify the class type (e.g. stress level", metavar="classtype")
parser.add_option("-p", "--problemtype", dest="problemtype",
                  help="specify the problem type (-c classification or -r regression", metavar="problemtype")
(options, args) = parser.parse_args()

# make ../features importable so make_features() can be loaded
prevdir = prev_dir(os.getcwd()) + '/features/'
sys.path.append(prevdir)
from standard_array import make_features

# get annotate directory and sampletype
class_ = options.classtype
problemtype = options.problemtype
annotate_dir = options.annotate_dir
os.chdir(annotate_dir)
sampletype = options.sampletype
listdir = os.listdir()
if sampletype is None:
    # infer the sample type from the dominant file extension
    sampletype = classifyfolder(listdir)
listdir = os.listdir()

for i in tqdm(range(len(listdir))):
    try:
        if listdir[i].endswith('.json'):
            pass
        else:
            jsonfilename = listdir[i][0:-4] + '.json'
            if jsonfilename not in listdir:
                # first annotation for this file: create a fresh feature JSON
                annotation = annotate_file(class_, sampletype, listdir[i], problemtype)
                basearray = make_features(sampletype)
                basearray['labels'] = annotation
                jsonfile = open(jsonfilename, 'w')
                json.dump(basearray, jsonfile)
                jsonfile.close()
            elif jsonfilename in listdir:
                g = json.load(open(jsonfilename))
                labels = g['labels']
                classin = False
                for j in range(len(labels)):
                    try:
                        print(list(labels[j]))
                        print(labels[j][class_]['problemtype'])
                        if list(labels[j]) == [class_] and labels[j][class_]['problemtype'] == problemtype:
                            classin = True
                    except:
                        pass
                if classin == True:
                    print('skipping %s, already annotated' % (listdir[i]))
                else:
                    annotation = annotate_file(class_, sampletype, listdir[i], problemtype)
                    # Fix: annotate_file() returns a LIST of label dicts;
                    # the original `labels.append(annotation)` nested that
                    # list inside `labels`, producing entries create_csv.py
                    # cannot read.  extend() keeps labels a flat dict list.
                    labels.extend(annotation)
                    g['labels'] = labels
                    jsonfile = open(jsonfilename, 'w')
                    json.dump(g, jsonfile)
                    jsonfile.close()
    except:
        print('error - file %s not recognized' % (listdir[i]))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/annotate_audio.py | annotation/helpers/annotate_audio.py | import os, time, shutil
from tqdm import tqdm
listdir = os.listdir()

# create the two sorting folders if they don't already exist
try:
    os.mkdir('coconut')
except:
    pass
try:
    os.mkdir('other')
except:
    pass

# collect all .wav files in the current directory
wavfiles = list()
for name in listdir:
    if name.endswith('.wav'):
        wavfiles.append(name)

# play each file and sort it based on the user's answer
for i in tqdm(range(len(wavfiles))):
    wavfile = wavfiles[i]
    # Fix: quote the filename so paths containing spaces or shell
    # metacharacters do not break (or get interpreted by) the shell.
    os.system('play "%s" \n' % (wavfile))
    yesorno = input('coconut? -y or -n \n')
    if yesorno == 'y':
        shutil.move(os.getcwd() + '/' + wavfile, os.getcwd() + '/coconut/' + wavfile)
    else:
        shutil.move(os.getcwd() + '/' + wavfile, os.getcwd() + '/other/' + wavfile)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/balancedelete.py | annotation/helpers/helpers/balancedelete.py | import os, random, shutil
## helper functions
def get_wav():
    """Return how many audio files (.wav / .mp3) live in the current directory.

    The test mirrors the original convention: the last four characters of the
    filename must be exactly '.wav' or '.mp3' (lowercase only).
    """
    return sum(1 for entry in os.listdir() if entry[-4:] in ('.wav', '.mp3'))
def random_remove(remove_num):
    """Delete `remove_num` randomly chosen .wav/.mp3 files from the current directory.

    Used to balance class sizes: the surplus class has `remove_num` of its
    audio files removed at random.
    """
    entries = os.listdir()
    random.shuffle(entries)
    audio = [name for name in entries if name[-4:] in ('.wav', '.mp3')]
    for victim in audio[:remove_num]:
        os.remove(victim)
    print('removed %s .wav or .mp3 files' % (remove_num))
# Main script: balance the classes by trimming every sibling folder down to
# the size of the smallest one (counting .wav/.mp3 files only).
root = os.getcwd()

# Anything without a '.' in its name is treated as a class folder.
folders = [entry for entry in os.listdir() if entry.find('.') < 0]

# First pass: count the audio files inside each class folder.
counts = []
for folder in folders:
    os.chdir(root)
    os.chdir(folder)
    counts.append(get_wav())

# Second pass: delete surplus files so every folder matches the minimum.
smallest = min(counts)
for folder in folders:
    os.chdir(root)
    os.chdir(folder)
    surplus = get_wav() - smallest
    if surplus > 0:
        random_remove(surplus)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/data-preprocess.py | annotation/helpers/helpers/data-preprocess.py | '''
Data pre-processing for keras and sklearn
(see the scikit-learn LabelEncoder/OneHotEncoder docs and the Keras
to_categorical docs for background)
'''
import numpy as np
from numpy import argmax, array
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from keras.utils import to_categorical

# --- scikit-learn route: string labels -> integer ids -> one-hot matrix ---
data = ['cold', 'cold', 'warm', 'cold', 'hot', 'hot', 'warm', 'cold', 'warm', 'hot']
values = np.array(data)
print(values)

# Map each distinct label to an integer id.
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
print(integer_encoded)

# Expand the integer ids into dense one-hot rows (needs a 2-D column input).
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
print(onehot_encoded)

# Round-trip: recover the original string label of the first sample.
inverted = label_encoder.inverse_transform([argmax(onehot_encoded[0, :])])
print(inverted)

# --- Keras route: integer labels -> one-hot matrix ---
data = [1, 3, 2, 0, 3, 2, 2, 1, 0, 1]
data = array(data)
print(data)

encoded = to_categorical(data)
print(encoded)

# Invert a single one-hot row back to its integer label...
inverted = argmax(encoded[0])
print(inverted)

# ...and invert every row to rebuild the original integer sequence.
inverted = array([argmax(row) for row in encoded])
print(inverted)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/musicgenre_download.py | annotation/helpers/helpers/musicgenre_download.py | import os
import json
import pafy
import json
import time
import wave
import random
import ffmpy
import soundfile as sf
import getpass
os.chdir("/Users/"+getpass.getuser()+"/Desktop/genres")
optionlist=list()
one="'Feel it Still' by Portugal. The Man. https://www.youtube.com/watch?v=pBkHHoOIIn8"
#tone=duration of track one in seconds
two="'Feels like Summer' by Wheezer. https://www.youtube.com/watch?v=efPWrIvzGgc"
#ttwo=duration of track two in seconds
three="'The Man' by the Killers. https://www.youtube.com/watch?v=w3xcybdis1k"
four="'Thunder' by Imagine Dragons. https://www.youtube.com/watch?v=fKopy74weus"
five="'Wish I knew You' by the Rivavlists. https://www.youtube.com/watch?v=o0Pt7M0weUI"
six="'Believer' by Imagine Dragons. https://www.youtube.com/watch?v=7wtfhZwyrcc"
seven="'Suit and Jacket' by Judah & the Lion. https://www.youtube.com/watch?v=AigOUsOEhSY"
eight="'High' by Sir Sly. https://www.youtube.com/watch?v=qIOaU7Sm-ZE"
nine="'Dig Down' by Muse. https://www.youtube.com/watch?v=b4ozdiGys5g"
ten="'Lay It On Me' by Vance Joy. https://www.youtube.com/watch?v=VXXD1Qxpisw"
eleven="'Whole Wide World' by Cage The Elephant. https://www.youtube.com/watch?v=cYGakznEllM"
twelve="'Everything Now' by Arcade Fire. https://www.youtube.com/watch?v=zC30BYR3CUk"
thirteen="'Ahead of Myself' by X Ambassadors. https://www.youtube.com/watch?v=z-Jsc8TmcAU"
fourteen="'The Way You Used To Do' by Queens of the Stnoe Age. https://www.youtube.com/watch?v=GvyNyFXHj4k"
fifteen="'Angela' by the Lumineers. https://www.youtube.com/watch?v=_II0fc7hgNY"
sixteen="'Walk On Water' by Thirty Seconds to Mars. https://www.youtube.com/watch?v=FA2w-PMKspo"
seventeen="'Perfect Places' by Lorde. https://www.youtube.com/watch?v=J0DjcsK_-HY"
eighteen="'Champion' by Fall Out Boy. https://www.youtube.com/watch?v=JJJpRl2cTJc"
nineteen="'Vacation' by Thr Dirty Heads. https://www.youtube.com/watch?v=7zok9co_8E4"
twenty="'Lights Out' by Royal Blood. https://www.youtube.com/watch?v=ZSznpyG9CHY"
twentyone="'Revolution Radio' by Green Day. https://www.youtube.com/watch?v=B4zc-f0TIZ4"
twentytwo="'Less Than' by Nine Inch Nails. https://www.youtube.com/watch?v=gDV-dOvqKzQ"
twentythree="'So Close' by Andrew McMahon in the Wilderness. https://www.youtube.com/watch?v=e5ZUfzJoG1E"
twentyfour="'The Violence' by Rise Against. https://www.youtube.com/watch?v=Y7bvMlfmTm4"
twentyfive="'The Sky Is a Neighborhood' by Foo Fighters. https://www.youtube.com/watch?v=TRqiFPpw2fY"
twentysix="'Golden Dandelions' by Barns Courtney. https://www.youtube.com/watch?v=u8ymzuwrS6M"
twentyseven="'Its a Trip!' by Joywave. https://www.youtube.com/watch?v=0xfXcZSb8LE"
twentyeight="'Run' by Foo Fighters. https://www.youtube.com/watch?v=ifwc5xgI3QM"
twentynine="'Little One' by Highly Suspect. https://www.youtube.com/watch?v=eKcIedFBiVU"
thirty="'The Wanting' by J Roddy Walston & The Business. https://www.youtube.com/watch?v=S6VhNqKaKj0"
option1=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option1)):
optionlist.append(option1[i])
one="Edvard Grieg – Peer Gynt Suite No. 1, Op. 46: Morning Mood. https://www.youtube.com/watch?v=kzTQ9fjforY"
two="Ludwig van Beethoven – Symphony No. 5 In C Minor, Op. 67, Fate: I. Allegro Con Brio. https://www.youtube.com/watch?v=tV6CpVfU7ig"
three="Antonio Vivaldi – The Four Seasons, Op. 8, Spring: Allegro. https://www.youtube.com/watch?v=ygpf6mxTUeY"
four="Samuel Barber – Adagio for Strings. https://www.youtube.com/watch?v=izQsgE0L450"
five="Richard Wagner – The Valkyrie: Ride of the Valkyries. https://www.youtube.com/watch?v=GGU1P6lBW6Q"
six="Frédéric Chopin – Nocturne No. 2 In E-Flat Major, Op. 9. https://www.youtube.com/watch?v=9E6b3swbnWg"
seven="Johann Pachelbel – Canon in D major. https://www.youtube.com/watch?v=NlprozGcs80"
eight="Carl Orff – Carmina Burana: O Fortuna. https://www.youtube.com/watch?v=GXFSK0ogeg4"
nine="Johann Sebastian Bach – Orchestral Suite No. 3 in D major, BWV 1068: Air. https://www.youtube.com/watch?v=GkWjO8ZJcpc"
ten="Gustav Holst – The Planets, Op. 32: Jupiter, the Bringer of Jollity. https://www.youtube.com/watch?v=Nz0b4STz1lo"
eleven="Claude Debussy – Suite bergamasque, L 75: Clair de Lune. https://www.youtube.com/watch?v=vG-vmVrHOGE"
twelve="Giuseppe Verdi – Nabucco: Chorus of the Hebrew Slaves (Va', Pensiero, Sull'ali Dorate). https://www.youtube.com/watch?v=-DIcS5-8RD8"
thirteen="Wolfgang Amadeus Mozart – Piano Concerto No. 21 in C major, K. 467: II. Andante. https://www.youtube.com/watch?v=LA_BYwlzDVQ"
fourteen="Johann Sebastian Bach – Brandenburg Concerto No. 3 in G major, BWV 1048: Allegro. https://www.youtube.com/watch?v=Xq2WTXtKurk"
fifteen="Jules Massenet – Thaïs: Meditation. https://www.youtube.com/watch?v=luL1T1WQC2k"
sixteen="Antonín Dvořák – Symphony No. 9 In E Minor, Op. 95, From the New World: II. Largo. https://www.youtube.com/watch?v=Aa-aD0SdxTY"
seventeen="Johann Strauss II – On the Beautiful Blue Danube, Op. 314. https://www.youtube.com/watch?v=EHt2tW_nvp8"
eighteen="Johannes Brahms – Hungarian Dance No. 5 In G Minor. https://www.youtube.com/watch?v=3X9LvC9WkkQ"
nineteen="Pyotr Ilyich Tchaikovsky – Swan Lake Suite, Op. 20: Scene. https://www.youtube.com/watch?v=wEgOM9iYETg"
twenty="Erik Satie – Gymnopédie No. 1. https://www.youtube.com/watch?v=S-Xm7s9eGxU"
twentyone="Wolfgang Amadeus Mozart – Requiem, K. 626: Lacrimosa Dies Illa. https://www.youtube.com/watch?v=5IFtGSaFzbM"
twentytwo="Ludwig van Beethoven – Bagatelle In A Minor, WoO 59, Für Elise. https://www.youtube.com/watch?v=NShTqdkCrXQ"
twentythree="Edward Elgar – Pomp and Circumstance, Op. 39: Land of Hope and Glory. https://www.youtube.com/watch?v=0bknTe9nM8A"
twentyfour="Georges Bizet – Carmen Suite No. 2: Habanera. https://www.youtube.com/watch?v=EcFJTc28soQ"
twentyfive="Ludwig van Beethoven – Symphony No. 9 In D Minor, Op. 125, Choral: Ode an Die Freude"
twentysix="Jacques Offenbach – The Tales of Hoffmann: Barcarolle. https://www.youtube.com/watch?v=g7czptgEvvU"
twentyseven="Remo Giazotto – Adagio In G Minor for Strings and Organ (after T. Albinoni). https://www.youtube.com/watch?v=fzOLoIu4Gw8"
twentyeight="Wolfgang Amadeus Mozart – Serenade No. 13 In G Major, K. 525, Eine Kleine Nachtmusik: I. Allegro. https://www.youtube.com/watch?v=z4Hfv00eqoI"
twentynine="Gioachino Rossini – The Barber of Seville: Overture. https://www.youtube.com/watch?v=OloXRhesab0"
thirty="W. A. Mozart - Die Zauberflöte (The Magic Flute), K. 620 - Overture. https://www.youtube.com/watch?v=_kgcu8yhojc"
option2=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option2)):
optionlist.append(option2[i])
one="'Body Like A Back Road' by Sam Hunt. https://www.youtube.com/watch?v=Mdh2p03cRfw"
two="'Small Town Boy' by Dustin Lynch. https://www.youtube.com/watch?v=pz9yRC-LWhU"
three="'What Ifs' by Kane Brown. https://www.youtube.com/watch?v=fM8V1XOI-14"
four="'No Such Thing As A Broken Heart' by Old Dominion. https://www.youtube.com/watch?v=MZdpppFRLqw"
five="'In Case You Didnt Know' by Brett Young. https://www.youtube.com/watch?v=7qaHdHpSjX8"
six="'When It Rains It Pours' by Luke Combs. https://www.youtube.com/watch?v=uXyxFMbqKYA"
seven="'Heartache On the Dance Floor' by Jon Pardi. https://www.youtube.com/watch?v=Cg4Eui4sGlk"
eight="'Unforgettable' by Thomas Rhett. https://www.youtube.com/watch?v=84hwjCnAVjU"
nine="'Drinkin' Problem' by Midland. https://www.youtube.com/watch?v=g7f6HiQ2LuU"
ten="'All The Pretty Girls' by Kenny Chesney. https://www.youtube.com/watch?v=embINtOC_i0"
eleven="'Light It Up' by Lukey Bryan. https://www.youtube.com/watch?v=8wpUv4I_fQk"
twelve="'Craving You' by Thomas Rhett. https://www.youtube.com/watch?v=zruDce8zbEo"
thirteen="'They Don't Know' by Jason Aldean. https://www.youtube.com/watch?v=SQZPIsGoR1M"
fourteen="'Every Little Thing' by Carly Pearce. https://www.youtube.com/watch?v=pm6DHCpmIWg"
fifteen="'More Girls Like You' by Kip Moore. https://www.youtube.com/watch?v=6-UHrVz1pR8"
sixteen="'It Ain't my Fault' by Brothers Osborne. https://www.youtube.com/watch?v=MyOGVk_ypnM"
seventeen="'For Her' by Chris Lane. https://www.youtube.com/watch?v=tyFY2I2Qlyk"
eighteen="'Greatest Love Story' by LANCO. https://www.youtube.com/watch?v=aHl0tlUYDBI"
nineteen="'I Could Use A Love Song' by Maren Morris. https://www.youtube.com/watch?v=ErdZ_W35xRs"
twenty="'Fix A Drink' by Chris Janson. https://www.youtube.com/watch?v=-_Op0bQfMoo"
twentyone="'Round Here Buzz' by Eric Church. https://www.youtube.com/watch?v=Z3Zj2RgNyj0"
twentytwo="'Ring on Every Finger' by LOCASH. https://www.youtube.com/watch?v=LMDDdj_M0js"
twentythree="'Ask Me How I Know' by Garth Brooks. https://www.youtube.com/watch?v=Dsxiw-d9Qmg"
twentyfour="'Losing Sleep' by Chris Young. https://www.youtube.com/watch?v=NShYb80xnjw"
twentyfive="'You Broke Up With Me' by Walker Hayes. https://www.youtube.com/watch?v=z6jfEH7D5bg"
twentysix="'Yours' by Russell Dickerson. https://www.youtube.com/watch?v=gFccdvKehQI"
twentyseven="'Smooth' by Florida Georgia Line. https://www.youtube.com/watch?v=QZD5TCFpNXg"
twentyeight="'Last Time For Everything' by Brad Paisley. https://www.youtube.com/watch?v=LWkoquUvD98"
twentynine="'Like I Loved You' by Brett Young. https://www.youtube.com/watch?v=PG2azZM4w4o"
thirty="'All On Me' by Devin Dawson. https://www.youtube.com/watch?v=ntCMoh-0ogo"
option3=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option3)):
optionlist.append(option3[i])
one="'Feels' by Calvin Harris. https://www.youtube.com/watch?v=ozv4q2ov3Mk"
two="'Something Just Like This' by the Chainsmokers & Coldplay. https://www.youtube.com/watch?v=FM7MFYoylVs"
three="'Stay' by Zedd & Alessia Cara. https://www.youtube.com/watch?v=h--P8HzYZ74"
four="'No Promises' by Cheat Codes. https://www.youtube.com/watch?v=jn40gqhxoSY"
five="'It Ain't Me' by Kygo x Selena Gomez. https://www.youtube.com/watch?v=D5drYkLiLI8"
six="'Swish Swish' by Katy Perry Featuring Nicki Minaj. https://www.youtube.com/watch?v=iGk5fR-t5AU"
seven="'Silence' by Marshmello, featuring Khalid. https://www.youtube.com/watch?v=tk36ovCMsU8"
eight="'Rockabye' by Clean Bandit featuring Sean Paul & Anne-Marie. https://www.youtube.com/watch?v=papuvlVeZg8"
nine="'2U' by David Guetta featuring Justin Bieber. https://www.youtube.com/watch?v=RqcjBLMaWCg"
ten="'Slide' by Calvin Harris. https://www.youtube.com/watch?v=8Ee4QjCEHHc"
eleven="'Mama' by Jonas Blue. https://www.youtube.com/watch?v=qPTfXwPf_HM"
twelve="'Know No Better' by Major Lazer. https://www.youtube.com/watch?v=Sgp0WDMH88g"
thirteen="'Honest' by the Chainsmokers. https://www.youtube.com/watch?v=Lsv5IeI8bA8"
fourteen="'Get Low' by Zedd & Liam Payne. https://www.youtube.com/watch?v=cSX0-MP6tjw"
fifteen="'Rollin' by Calvin Harris. https://www.youtube.com/watch?v=5f_JiibvQAM"
sixteen="'Symphony' by Clean Bandit. https://www.youtube.com/watch?v=aatr_2MstrI"
seventeen="'Would You Ever' by Skrillex. https://www.youtube.com/watch?v=r-SurvChGFk"
eighteen="'More Than You Know' by Axwell & Ingrosso. https://www.youtube.com/watch?v=GsF05B8TFWg"
nineteen="'Lonely Together' by Avicii. https://www.youtube.com/watch?v=ruDrVMBCLaw"
twenty="'Without You' by Avicii. https://www.youtube.com/watch?v=jUe8uoKdHao"
twentyone="'Rich Love' by OneRepublic. https://www.youtube.com/watch?v=sJ6hAQjW9Aw"
twentytwo="'Instruction' by Jax Jones. https://www.youtube.com/watch?v=MQXLpSl26q4"
twentythree="'There For You' by Martin Garrix x Troye Sivan. https://www.youtube.com/watch?v=pNNMr5glICM"
twentyfour="'All My Love' by Cash Cash. https://www.youtube.com/watch?v=HYrvDBgKAPo"
twentyfive="'First Time' by Kygo & Ellie Goulding. https://www.youtube.com/watch?v=OlH1RCs96JA"
twentysix="'OK' by Robin Schultz. https://www.youtube.com/watch?v=P9-4xHVc7uk"
twentyseven="'Moving On' by Marshmello. https://www.youtube.com/watch?v=yU0tnrEk8H4"
twentyeight="'Tired' by Alan Walker. https://www.youtube.com/watch?v=g4hGRvs6HHU"
twentynine="'Find Me' by Marshmello. https://www.youtube.com/watch?v=ymq1WdGUcw8"
thirty="'Pizza' by Martin Garrix. https://www.youtube.com/watch?v=JsKIAO11q1Y"
option4=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option4)):
optionlist.append(option4[i])
one="'Caminando' by David Benoit and Marc Antoine. https://www.youtube.com/watch?v=zo_RELAS7V0"
two="'Frankie B' by Gerald Albright. https://www.youtube.com/watch?v=3GemcfAYuvw"
three="'Girl Talk' by Cindy Bradley. https://www.youtube.com/watch?v=oHrm8dvPDWs"
four="'Next To Me' by Lindsey Webster. https://www.youtube.com/watch?v=-zdMwQ2cWnY"
five="'Let's Take A Ride' by Brian Culbertson. https://www.youtube.com/watch?v=dTmsemPB9TY"
six="'Between You and I' by Riley Richard. https://www.youtube.com/watch?v=OBHHy2LJFZc"
seven="'Uncle Nick' by Nick Colionne. https://www.youtube.com/watch?v=_ozECYHNaJk"
eight="'Piccadilly Circus' by Paul Brown. https://www.youtube.com/watch?v=zJosV9Bw3DE"
nine="'Trininty' by Jackiem Joyner. https://www.youtube.com/watch?v=EHG_43UczAo"
ten="'Let It Go' by Jonathan Fritzen. https://www.youtube.com/watch?v=FJ9x-CZGq1k"
eleven="'Down The Road' by Paul Jackson, Jr. https://www.youtube.com/watch?v=MnlFINIQpJI"
twelve="'Vivid' by Blake Aron. https://www.youtube.com/watch?v=qgBJI8KEeC8"
thirteen="'Happy Hour' by Chuck Loeb. https://www.youtube.com/watch?v=Zom2UbDm52Q"
fourteen="'I Don't Mind' by Adam Hawley. https://www.youtube.com/watch?v=P4MlGsVh24c"
fifteen="'Tick Tock' by Boney James. https://www.youtube.com/watch?v=UnC8bkwPo2w"
sixteen="'Deixa' by Marc Antoine. https://www.youtube.com/watch?v=via8CmbNA8Q"
seventeen="'Here to Stay' by Darryl Williams. https://www.youtube.com/watch?v=ejRX6KaG2e4"
eighteen="'Now What' by Walter Beasley. https://www.youtube.com/watch?v=JyOBHB8NQTc"
nineteen="'Road Trip' by Andre Cavor. https://www.youtube.com/watch?v=eU_-xKtutdI"
twenty="'Going Out' by Julian Vaughn. https://www.youtube.com/watch?v=dBaIaZy1-I4"
twentyone="'Baby Coffee' by Michael J. Thomas. https://www.youtube.com/watch?v=cM4Q0v01wQU"
twentytwo="'Early Arrival' by Ragan Whiteside. https://www.youtube.com/watch?v=GSqHRblW9UI"
twentythree="'Carmanology' by the Allen Carman Project. https://www.youtube.com/watch?v=3BVDhAN41DE"
twentyfour="'Where I Left Off' by Oli Silk. https://www.youtube.com/watch?v=eC6ikAWRq9E"
twentyfive="'Let's Take It Back' by Najee. https://www.youtube.com/watch?v=N_UqrRjs9r0"
twentysix="'Lay Lady Lay' by Jack DeJohnette, Larry Grenadier, John Medeski, & John Scofield. https://www.youtube.com/watch?v=BjZC9G4p4Hw"
twentyseven="'The Edge of Twilight' by Keiko Matsui. https://www.youtube.com/watch?v=csg-7f539CI&list=PL6-ukl_LI7_TM1msq2UW9XpbHYsyMImeq&index=15"
twentyeight="'Water Lily' by Keiko Matsui. https://www.youtube.com/watch?v=g2ia7ogeEwE&list=PL6-ukl_LI7_TM1msq2UW9XpbHYsyMImeq&index=22"
twentynine="'Angel of the South' by Acoustic Alchemy. https://www.youtube.com/watch?v=qKgRrqgAvho&index=2&list=PLvBIYgM9CkyQqvJJqVKRJdbu_w5VG6dS4"
thirty="'Passion Play' by Acoustic Alchemy. https://www.youtube.com/watch?v=vkXpS8oBt2A&index=16&list=PLvBIYgM9CkyQqvJJqVKRJdbu_w5VG6dS4"
option5=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option5)):
optionlist.append(option5[i])
one="'Bodak Yellow (Money Moves)' by Cardi B. https://www.youtube.com/watch?v=PEGccV-NOm8"
two="'Unforgettable' by French Montana. https://www.youtube.com/watch?v=CTFtOOh47oo"
three="'1-800-273-8255' by Logic. https://www.youtube.com/watch?v=Kb24RrHIbFk"
four="'Bank Account' by 21 Savage. https://www.youtube.com/watch?v=eCK772REqw0"
five="'Rake It Up' by Yo. https://www.youtube.com/watch?v=OrSadmwmmAs"
six="'Humble' by Kendrick Lamar. https://www.youtube.com/watch?v=tvTRZJ-4EyI"
seven="'XO Tour Llif3' by Lil Uzi Vert. https://www.youtube.com/watch?v=WrsFXgQk5UI"
eight="'Congratulations' by Post Malone. https://www.youtube.com/watch?v=SC4xMk98Pdc"
nine="'I'm The One' by DJ Khaled. https://www.youtube.com/watch?v=weeI1G46q0o"
ten="'Jocelyn Flores' by XXXTENTACION. https://www.youtube.com/watch?v=C1D3G2VGQ_8"
eleven="'Loyalty' by Kendrick Lamar. https://www.youtube.com/watch?v=Dlh-dzB2U4Y"
twelve="'The Way Life Goes' by Lil Uzi Vert. https://www.youtube.com/watch?v=oKu2FVy0oUo"
thirteen="'I Get The Bag' by Gucci Mane. https://www.youtube.com/watch?v=uo14xGYwWd4"
fourteen="'It's A Vibe' by 2 Chainz. https://www.youtube.com/watch?v=tU3p6mz-uxU"
fifteen="'Sauce It Up' by Lil Uzi Vert. https://www.youtube.com/watch?v=BnRNXWAGENE"
sixteen="'Crew' by Goldlink. https://www.youtube.com/watch?v=nhNqbe6QENY"
seventeen="'Everyday We Lit' by YFN Lucci. https://www.youtube.com/watch?v=44Vk5KyQbiA"
eighteen="'Drowning' by A Boogie Wit da Hoodie. https://www.youtube.com/watch?v=rvaJ7QlhH0g"
nineteen="'Magnolia' by Playboi Carti. https://www.youtube.com/watch?v=oCveByMXd_0"
twenty="'Everybody Dies in Their Nightmares' by XXXTENTACION. https://www.youtube.com/watch?v=Tg6HGcj7pGo"
twentyone="'Transportin' by Kodk Black. https://www.youtube.com/watch?v=Ns167_llTiA"
twentytwo="'Butterfly Effect' by Travis Scott. https://www.youtube.com/watch?v=_EyZUTDAH0U"
twentythree="'444+222' by Lil Uzi Vert. https://www.youtube.com/watch?v=9qfUYeMfl2Q"
twentyfour="'Roll In Peace' by Kodak Black. https://www.youtube.com/watch?v=hwCLVBCNIt0"
twentyfive="'DNA' by Kendrick Lamar. https://www.youtube.com/watch?v=NLZRYQMLDW4"
twentysix="'Versace On The Floor' by Bruno Mars. https://www.youtube.com/watch?v=-FyjEnoIgTM"
twentyseven="'Revenge' by XXXTENTACION. https://www.youtube.com/watch?v=3CJsqGa1ltM"
twentyeight="'B.E.D. by Jacquees. https://www.youtube.com/watch?v=ul1H_p_FeaA"
twentynine="'For Real' by Lil Uzi Vert. https://www.youtube.com/watch?v=xnnfQ0inQp8"
thirty="'Questions' by Chris Brown. https://www.youtube.com/watch?v=mH76VvWkNwA"
option6=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option6)):
optionlist.append(option6[i])
one="'All I Want For Christmas Is You' by Mariah Carey. https://www.youtube.com/watch?v=yXQViqx6GMY"
two="'Hallelujah' by Pentatonix. https://www.youtube.com/watch?v=LRP8d7hhpoQ"
three="'Rockin' Around the Christmas Tree.' by Brenda Lee. https://www.youtube.com/watch?v=_6xNuUEnh2g"
four="'Jingle Bell Rock' by Bobby Helms. https://www.youtube.com/watch?v=itcMLwMEeMQ"
five="'Feliz Navidad' by Jose Feliciano. https://www.youtube.com/watch?v=xMtuVP8Mj4o"
six="'Mary, Did You Know?' by Pentatonix. https://www.youtube.com/watch?v=ifCWN5pJGIE"
seven="'A Holly Jolly Christmas' by Burl Ives. https://www.youtube.com/watch?v=nVMCUtsmWmQ"
eight="'The Christmas Song' by Nat King Cole. https://www.youtube.com/watch?v=hwacxSnc4tI"
nine="'It's the Most Wonderful Time Of The Year' by Andy Williams. https://www.youtube.com/watch?v=gFtb3EtjEic"
ten="'Last Christmas' by Wham! https://www.youtube.com/watch?v=4hhew1QKi-U"
eleven="'Rudolph The Red-Nosed Reindeer' by Gene Autry. https://www.youtube.com/watch?v=7ara3-hDH6I"
twelve="'White Christmas' by Bing Crosby. https://www.youtube.com/watch?v=GJSUT8Inl14"
thirteen="'Christmas Eve' by Trans-Siberian Orchestra. https://www.youtube.com/watch?v=xJ_OnN5F4Yw"
fourteen="'You're One Mean One', Mr. Grinch' https://www.youtube.com/watch?v=WxVqliZCNw0"
fifteen="'Let It Snow, Let It Snow, Let It Snow' by Dean Martin. https://www.youtube.com/watch?v=mN7LW0Y00kE"
sixteen="'Blue Chrismas' by Elvis Presley. https://www.youtube.com/watch?v=Uwfz5mMLSDM"
seventeen="'Christmas Time Is Here' by Vince Guaraldi Trio. https://www.youtube.com/watch?v=YvI_FNrczzQ"
eighteen="'It's Beginning To Look A Lot Like Christmas' by Michael Buble. https://www.youtube.com/watch?v=EyKMPXqKlFk"
nineteen="'Wonderful Christmastime' by Paul McCartney. https://www.youtube.com/watch?v=V9BZDpni56Y"
twenty="'Sleigh Ride' by Ronettes. https://www.youtube.com/watch?v=Y6rDA2Czz0E"
twentyone="'Happy Xmas' by John Lennon & Yoko Ono. https://www.youtube.com/watch?v=z8Vfp48laS8"
twentytwo="'Christmas Soon' by the Trans-Siberian Orhcestra. https://www.youtube.com/watch?v=4cP26ndrmtg"
twentythree="'Santa Tell Me' by Ariana Grande. https://www.youtube.com/watch?v=nlR0MkrRklg"
twentyfour="'Mistletoe' by Justin Bieber. https://www.youtube.com/watch?v=LUjn3RpkcKY"
twentyfive="'Have Yourself A Merry Little Christmas' by Frank Sinatra. https://www.youtube.com/watch?v=nZ6yQgBvuoI"
twentysix="'Merry Christmas Darling' by Carpenters. https://www.youtube.com/watch?v=YR1ujXx2p-I"
twentyseven="'Linus & Lucy' and Vince Guaraldi Trio. https://www.youtube.com/watch?v=x6zypc_LhnM"
twentyeight="'St. Brick Intro' by Guci Mane. https://www.youtube.com/watch?v=ywaX-veaB40"
twentynine="'I Want A Hippopotamus For Christmas' by Gayla Peevey. https://www.youtube.com/watch?v=7oOzszFIBcE"
thirty="'Little Saint Nick' by the Beach Boys. https://www.youtube.com/watch?v=aSynDh_K0EE"
option7=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option7)):
optionlist.append(option7[i])
one="'Feel It Still' by Portugal. The Man. https://www.youtube.com/watch?v=pBkHHoOIIn8"
two="'Do I Wanna Know' by Artic Monkeys. https://www.youtube.com/watch?v=bpOSxM0rNPM"
three="'Beezeblocks' by atl-J. https://www.youtube.com/watch?v=rVeMiVU77wo"
four="'Sweater Weather' by The Neighbourhood. https://www.youtube.com/watch?v=GCdwKhTtNNw"
five="'Take a Walk' by Passion Pit. https://www.youtube.com/watch?v=dZX6Q-Bj_xg"
six="'Chocolate' by The 1975. https://www.youtube.com/watch?v=CHk5SWVO4p8"
seven="'Angels' by The xx. https://www.youtube.com/watch?v=_nW5AF0m9Zw"
eight="'My Number' by Foals. https://www.youtube.com/watch?v=bAsGFnLl2u0"
nine="'Entertainment' by Phoenix. https://www.youtube.com/watch?v=qaMyr36uIv8"
ten="'Falling' by HAIM. https://www.youtube.com/watch?v=AIjVpRAXK18"
eleven="'Sun' by Two Door Cinema Club. https://www.youtube.com/watch?v=sKyK1Mme9Sc"
twelve="'Feels Like We Only Go Backwards' by Tame Impala."
thirteen="'Recovery' by Frank Turner. https://www.youtube.com/watch?v=F1L5zJ2afLs"
fourteen="'Don't Save Me' by HAIM. https://www.youtube.com/watch?v=kiqIush2nTA"
fifteen="'Global Concepts' by Robert DeLong. https://www.youtube.com/watch?v=JND-sxlg7YU"
sixteen="'Miracle Mile' by Cold War Kids. https://www.youtube.com/watch?v=1F6gAN6MOII"
seventeen="'Song For Zula' by Phosphorescent. https://www.youtube.com/watch?v=ZPxQYhGpdvg"
eighteen="'Tessellate' by alt-J. https://www.youtube.com/watch?v=Qg6BwvDcANg"
nineteen="'Oblivion' by M83. https://www.youtube.com/watch?v=822P87a773c"
twenty="'Tap Out' by The Strokes. https://www.youtube.com/watch?v=-7PINAYE4z4"
twentyone="'Matilda' by alt-J. https://www.youtube.com/watch?v=Q06wFUi5OM8"
twentytwo="'Carried Away' by Passion Pit. https://www.youtube.com/watch?v=Evz03lIJ3f0"
twentythree="'Heavy Feet' by Local Natives. https://www.youtube.com/watch?v=h2zWfxW60z0"
twentyfour="'Trying To Be Cool' by Phoenix. https://www.youtube.com/watch?v=OePvsCfKHJg"
twentyfive="'Trembling Hands' by The Temper Trap. https://www.youtube.com/watch?v=iW0uYfq3VLU"
twentysix="'Kemosabe' by Everything Everything. https://www.youtube.com/watch?v=TKKMfJ8cZoQ"
twentyseven="'The Way I Tend To Be' by Frank Turner. https://www.youtube.com/watch?v=Cf5O2M5GaEA"
twentyeight="'Sunset' by The xx. https://www.youtube.com/watch?v=M2JrAhmZmpM"
twentynine="'The Bay' by Metronomy. https://www.youtube.com/watch?v=9PnOG67flRA"
thirty="'Time To Run' by Lord Huron. https://www.youtube.com/watch?v=5_e8RRTT0r8"
option8=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option8)):
optionlist.append(option8[i])
#toby mac
one="'Real Love' by Blanca. https://www.youtube.com/watch?v=AKG_3u_kdJ8"
two="'Old Church Choir' by Zach Williams. https://www.youtube.com/watch?v=yOEviTLJOqo"
three="'O Come to the Alatar' by Elevation Worship. https://www.youtube.com/watch?v=ycWDFd0yCHA"
four="'Love Broke Thru' by Toby Mac. https://www.youtube.com/watch?v=44l9PRI4c2M"
five="'Full of Faith' by Cody Carnes. https://www.youtube.com/watch?v=IPPGvudBaiM"
six="'Gracefully Broken' by Matt Redman. https://www.youtube.com/watch?v=__haUJns_b8"
seven="'I'll Find You' by Lecrae. https://www.youtube.com/watch?v=Jv8IqJm6q7w"
eight="'So Will I (100 Billion X)' Hillsong United. https://www.youtube.com/watch?v=EuYOnYL6G0Y"
nine="'Broken Things' by Matthew West. https://www.youtube.com/watch?v=WdUu6ZsdVfM&list=RDWdUu6ZsdVfM"
ten="'The Gospel' by Ryan Stevenson. https://www.youtube.com/watch?v=NTdFEZhjiko"
eleven="'Hard Love' by NEEDTOBREATHE. https://www.youtube.com/watch?v=tE3Fp8C_ufg"
twelve="'Hills and Valleys' by Tauren Wells. https://www.youtube.com/watch?v=p4rRCjrAyCs"
thirteen="'Bleed the Same' by Mandisa. https://www.youtube.com/watch?v=UEzCQBwQkdA"
fourteen="'The Answer' by Jeremy Camp. https://www.youtube.com/watch?v=rQHXJi1EhDM"
fifteen="'A Million Lights' by Michael W. Smith. https://www.youtube.com/watch?v=DaTcRrINSXo"
sixteen="'Death Was Arrested' by North Point InsideOut. https://www.youtube.com/watch?v=uMsMiluCUUI"
seventeen="'Hold You Down' by Deraj. https://www.youtube.com/watch?v=ZXgiea7hW90"
eighteen="'What A Beautiful Name' by Hillsong Worship. https://www.youtube.com/watch?v=nQWFzMvCfLE"
nineteen="'Jesus & You' by Matthew West. https://www.youtube.com/watch?v=kJhVA8YW5yY"
twenty="'Details' by Sarah Reeves. https://www.youtube.com/watch?v=gTICoQC8PRw"
twentyone="'Bulletproof' by Citizen Way. https://www.youtube.com/watch?v=RzqpK7ZaH6o"
twentytwo="'Rescuer (Good News)' by Rend Collective. https://www.youtube.com/watch?v=sAg7rn7fH3Q"
twentythree="'You Belong' by Jasmine Murray. https://www.youtube.com/watch?v=4-GIcCOHoMw"
twentyfour="'Even If' by MercyMe. https://www.youtube.com/watch?v=B6fA35Ved-Y"
twentyfive="'Build My Life' by Christy Nockels. https://www.youtube.com/watch?v=QJCxe0cd15A"
twentysix="'Mountain - Radio Version' by Bryan & Katie Torwalt. https://www.youtube.com/watch?v=IUAOF5LLXDc"
twentyseven="'Control (Somehow You Want Me)' by Tenth Avenue North. https://www.youtube.com/watch?v=kFfztu8-bBQ"
twentyeight="'Different' by Micah Tyler. https://www.youtube.com/watch?v=3MtfLap4qcc"
twentynine="'I know' by Kim Walker Smith. https://www.youtube.com/watch?v=mAq-74wRSFQ"
thirty="'Home' by Chris Tomlin. https://www.youtube.com/watch?v=BCiBQqfHSvQ"
option9=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option9)):
optionlist.append(option9[i])
one="'Bella y Sensual' by Romeo Santos. https://www.youtube.com/watch?v=ybzDgGCL1Xk"
two="'Felices los 4' by Maluma. https://www.youtube.com/watch?v=n6WUcjyagN8"
three="'Mayores' by Becky G'Krippy Kush' by Farruko. https://www.youtube.com/watch?v=GMFewiplIbw"
four="'Krippy Kush' by Farruko. https://www.youtube.com/watch?v=j1_JW7An2l0"
five="'Se Preparo' by Ozuna. https://www.youtube.com/watch?v=KWGrPNqz4uc"
six="'Escapate Conmigo' by Wisin. https://www.youtube.com/watch?v=3X9wEwulYhk"
seven="Una Lady Como Tu' by Mnaual Turizo. https://www.youtube.com/watch?v=T3pstB1gWyo"
eight="'Robarte un Beso' by Carlos Vives. https://www.youtube.com/watch?v=Mtau4v6foHA"
nine="'Mi Gente' by J. Balvin. https://www.youtube.com/watch?v=wnJ6LuUFpMo"
ten="'Explicale' by Yandel. https://www.youtube.com/watch?v=U516oP9nt2o"
eleven="'Ni Tu Ni Yo' by Jennifer Lopez. https://www.youtube.com/watch?v=V5_tnpdnNz4"
twelve="Hey DJ' by CNCO. https://www.youtube.com/watch?v=X6wQOW9ihDA"
thirteen="Doble Personalidad' by Noriel. https://www.youtube.com/watch?v=1qpKJJgBXmw"
fourteen="'Perro Fiel' by Shakira. https://www.youtube.com/watch?v=o5U5ivOnJjs"
fifteen="'Ganas Locas' by Prince Royce. https://www.youtube.com/watch?v=Ztf7QEetikY"
sixteen="Imitadora' by Romeo Santos. https://www.youtube.com/watch?v=mhHqonzsuoA"
seventeen="'Si Tu La Ves' by Nicky Jam. https://www.youtube.com/watch?v=mcGBVy3-W4s"
eighteen="'Internacionales' by Bomba Estereo. https://www.youtube.com/watch?v=tWwWoDFoubw"
nineteen="'Loco Enamorado' by Abrham Mateo. https://www.youtube.com/watch?v=cmIKUyUrKl4"
twenty="'Unforgettable' by French Montana. https://www.youtube.com/watch?v=CTFtOOh47oo"
twentyone="'Sastre de Tu Amor' by Orishas. https://www.youtube.com/watch?v=n_CLTHgFF4c"
twentytwo="'Me Rehuso' by Danny Ocean. https://www.youtube.com/watch?v=aDCcLQto5BM"
twentythree="'Hey Guapo' by Play-N-Skillz. https://www.youtube.com/watch?v=Vzs9JgtW_lI"
twentyfour="'No Le Hablen de Amor' by CD9. https://www.youtube.com/watch?v=_ixn-FRppEk"
twentyfive="'Criminal' by Natti Natasha. https://www.youtube.com/watch?v=VqEbCxg2bNI"
twentysix="'Muevete' by MIX5. https://www.youtube.com/watch?v=KfaU_RY14HQ"
twentyseven="'Que Me Has Hecho' by Chayanne. https://www.youtube.com/watch?v=q_OGqmx3DNQ"
twentyeight="'Just As I am' by Spiff TV. https://www.youtube.com/watch?v=i1dVohtr4Uk"
twentynine="'SUBEME LA RADIO' by Enrique Iglesias. https://www.youtube.com/watch?v=9sg-A-eS6Ig"
thirty="'El Amante' by Nicky Jam. https://www.youtube.com/watch?v=YG2p6XBuSKA"
option10=[one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen,seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive,twentysix,twentyseven,twentyeight,twentynine,thirty]
for i in range(len(option10)):
optionlist.append(option10[i])
#phillip wesley
#ludivico einvaldi
# Relaxing-piano playlist entries; note that one..twentyone are rebound
# here, overwriting the previous playlist's values.
one="'River Flows In You' by Yuruma. https://www.youtube.com/watch?v=XsTjI75uEUQ"
two="'Comptine d'un autre ete, l'apres-midi' by Yann Tierson. https://www.youtube.com/watch?v=NvryolGa19A"
three="'Watermark' by Enya. https://www.youtube.com/watch?v=NO5tb20qQnA"
four="'Song For Sienna' by Brian Crain. https://www.youtube.com/watch?v=2MYXZi2q02A"
five="'Nuvole bianche' by Ludovico Einaudi. https://www.youtube.com/watch?v=kcihcYEOeic"
six="'One Man's Dream' by Yanni. https://www.youtube.com/watch?v=STSzCX36U6o"
seven="'Sundial Dreams' by Kevin Kern. https://www.youtube.com/watch?v=ERGGPB_ok18"
eight="'Chrisofori's Dream' by David Lanz. https://www.youtube.com/watch?v=9wxrB41PMhw"
nine="'Near light' by Olafur Arnalds. https://www.youtube.com/watch?v=0kYc55bXJFI"
ten="'Waterfall' by Jon Schmidt. https://www.youtube.com/watch?v=8P9hAN-teOU"
eleven="'Opus 28' by Dustin O'Halloran. https://www.youtube.com/watch?v=iQTWbS2SlVY"
twelve="'Angel Eyes' by Jim Brickman. https://www.youtube.com/watch?v=3jcN20Efpq0"
thirteen="'The Approaching Night.' by Philip Wesley. https://www.youtube.com/watch?v=MsTQjB1f4-A"
fourteen="'A Beautiful Distraction' by Michele McLaughlin. https://www.youtube.com/watch?v=erbuUytTB44"
fifteen="'Breathe' by Greg Maroney. https://www.youtube.com/watch?v=9eqyir5JGpA"
sixteen="'Winter Walk' by David Nevue. https://www.youtube.com/watch?v=g9J4GPURT0s"
seventeen="'Rococo' by Brique a Braq. https://www.youtube.com/watch?v=XpZn_8Nx9w4"
eighteen="'Love's River' by Laura Sullivan. https://www.youtube.com/watch?v=bzHgyYj2-8I"
nineteen="'Simply Satie' by Michael Dulin. https://www.youtube.com/watch?v=xcArvm3yCOI"
twenty="'Walk With Me' by Joe Bongiorno. https://www.youtube.com/watch?v=IY7btFAPl1M"
twentyone="'Surrender' by Solomon Keal. https://www.youtube.com/watch?v=y2u0IE_adDM"
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/deletejsonfolders.py | annotation/helpers/helpers/deletejsonfolders.py | import os
from tqdm import tqdm
# Delete every .json file that sits one level down inside the sub-folders
# of ~/desktop/deletejson.  Entries whose names contain no '.' are assumed
# to be directories.
os.chdir('/Users/jimschwoebel/desktop/deletejson')
hostdir=os.getcwd()
listdir=os.listdir()
folders=list()
for i in range(len(listdir)):
    # no '.' anywhere in the name -> treat as a folder
    if listdir[i].find('.') < 0:
        folders.append(listdir[i])
for i in tqdm(range(len(folders))):
    os.chdir(folders[i])
    listdir=os.listdir()
    for j in range(len(listdir)):
        if listdir[j][-5:]=='.json':
            os.remove(listdir[j])
    # return to the host directory before visiting the next folder
    os.chdir(hostdir)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/random20secsplice.py | annotation/helpers/helpers/random20secsplice.py | import soundfile as sf
import os
import ffmpy
import random
import getpass
# Cut a random 20-second window out of every audio file in
# ~/Desktop/genres/<genre> and write it to ~/Desktop/genres/<genre>_snipped.
genre=input('what folder do you want to create 20 sec splices for?')
dir1='/Users/'+getpass.getuser()+'/Desktop/genres/'+genre
dir2='/Users/'+getpass.getuser()+'/Desktop/genres/'+genre+'_snipped'
os.chdir(dir1)
os.mkdir(dir2)
listdir=os.listdir()
for i in range(len(listdir)):
    try:
        os.chdir(dir1)
        file=listdir[i]
        data, samplerate = sf.read(file)
        totalframes=len(data)
        totalseconds=int(totalframes/samplerate)
        # random start second; clips shorter than 21 s make randint raise,
        # which the bare except below turns into a skip
        startsec=random.randint(0,totalseconds-21)
        endsec=startsec+20
        startframe=samplerate*startsec
        endframe=samplerate*endsec
        #write file to resave wave file at those frames
        os.chdir(dir2)
        sf.write('snipped_'+file, data[int(startframe):int(endframe)], samplerate)
    except:
        print('error, skipping...')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/make_controls.py | annotation/helpers/helpers/make_controls.py | '''
Make_controls.py
Generate control data from a list of folders filled with .wav files.
'''
import soundfile as sf
import os, ffmpy, random, shutil
# CONVERT FILE
def convert_file(filename):
    """Convert an audio file (e.g. .ogg) to .wav with ffmpeg.

    The source file is deleted after a successful conversion and the
    new .wav filename is returned.
    """
    wav_name = filename[:-4] + '.wav'
    converter = ffmpy.FFmpeg(inputs={filename: None},
                             outputs={wav_name: None})
    converter.run()
    os.remove(filename)
    return wav_name
##############################################################################
# Build a <class>_controls folder containing (roughly) as many files as the
# chosen class, sampled evenly from every other class folder.
default_dir=os.getcwd()+'/'
os.chdir(default_dir)
g=os.listdir()
h=0
##############################################################################
# CONVERT ALL FILES TO WAV (COMMENTED OUT)
# convertfiles is hard-wired to 'n', so this conversion pass never runs.
convertfiles='n'
if convertfiles in ['y','yes']:
    #check first if all the files are .wav files and if not convert them and delete the other file type
    for i in range(len(g)):
        try:
            if g[i] not in ['.DS_Store']:
                os.chdir(default_dir+g[i])
                h=os.listdir()
                for j in range(len(h)):
                    try:
                        if h[j][-4:]!='.wav':
                            print('converting %s'%(h[j]))
                            new_file=convert_file(h[j])
                    except:
                        print('error')
        except:
            print('error')
else:
    pass
##############################################################################
class_default=input('what is the class that will not be used as a control?')
control_dir=class_default+'_controls'
os.mkdir(control_dir)
os.chdir(default_dir+class_default)
q=os.listdir()
filenum=len(q)
movedlist=list()
# seed with '' so the while-loops below always sample at least once
movedlist.append('')
# NOTE(review): list.index raises ValueError when '.DS_Store' is absent,
# so the ind>=0 guard below can never catch the "not found" case.
ind=g.index('.DS_Store')
if ind>=0:
    del g[ind]
else:
    pass
#need equal amount of controls
#loop over total number of files in class over the number of classes -1 (not including class)
count=0
for i in range(int(len(q)/(len(g)-1))):
    for j in range(len(g)):
        print(g[j])
        if g[j] not in [class_default]:
            try:
                print('changing to %s directory'%(default_dir+g[j]))
                os.chdir(default_dir+g[j])
                h=os.listdir()
                file_num=len(h)
                cur_file=''
                count=0
                # draw random files until one not yet copied is found
                while cur_file in movedlist:
                    if count>file_num:
                        break
                    randint=random.randint(0,file_num-1)
                    cur_file=h[randint]
                    count=count+1
                print('copying file: %s'%(cur_file))
                shutil.copy(default_dir+g[j]+'/'+cur_file,default_dir+control_dir+'/'+cur_file)
                movedlist.append(cur_file)
                count=count+1
            except:
                print('error')
# NOTE(review): this fallback duplicates the loop body above verbatim;
# it only runs when the last iteration's count ended at 0.
if count==0:
    for j in range(len(g)):
        print(g[j])
        if g[j] not in [class_default]:
            try:
                print('changing to %s directory'%(default_dir+g[j]))
                os.chdir(default_dir+g[j])
                h=os.listdir()
                file_num=len(h)
                cur_file=''
                count=0
                while cur_file in movedlist:
                    if count>file_num:
                        break
                    randint=random.randint(0,file_num-1)
                    cur_file=h[randint]
                    count=count+1
                print('copying file: %s'%(cur_file))
                shutil.copy(default_dir+g[j]+'/'+cur_file,default_dir+control_dir+'/'+cur_file)
                movedlist.append(cur_file)
                count=count+1
            except:
                print('error')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/audio-network.py | annotation/helpers/helpers/audio-network.py | '''
Draw graphs to visualize audio data with network theory.
Network theory - https://github.com/networkx/networkx
Documentation - https://networkx.github.io/documentation/networkx-1.10/tutorial/tutorial.html#drawing-graphs
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edge('A', 'B', weight=4)
>>> G.add_edge('B', 'D', weight=2)
>>> G.add_edge('A', 'C', weight=3)
>>> G.add_edge('C', 'D', weight=4)
>>> nx.shortest_path(G, 'A', 'D', weight='weight')
['A', 'B', 'D']
'''
import networkx as nx
import numpy.linalg
import matplotlib.pyplot as plt
# Build a random G(n, m) graph and plot the eigenvalue spectrum of its
# normalized Laplacian (values lie in [0, 2] for a normalized Laplacian).
n = 1000 # 1000 nodes
m = 5000 # 5000 edges
G = nx.gnm_random_graph(n,m)
L = nx.normalized_laplacian_matrix(G)
e = numpy.linalg.eigvals(L.A)
print("Largest eigenvalue:", max(e))
print("Smallest eigenvalue:", min(e))
plt.hist(e,bins=100) # histogram with 100 bins
plt.xlim(0,2) # eigenvalues between 0 and 2
plt.show()
# mlab.show() # interactive window
#TUTORIAL
# can do this for many audio features (mfcc coefficients)
##
##G = nx.Graph(day="Friday")
##G.add_nodes_from(range(100,110))
##G.add_node("spam")
##G.remove_nodes_from("spam")
##
##G.add_path(range(100,110))
##
##G.add_edge('A', 'B', weight=1)
##G.add_edge('A', 'C', weight=1)
##G.add_edge('C', 'D', weight=1)
##G.add_edge('B', 'D', weight=1)
##
##G.remove_edge('B','D')
##
##nx.draw(G)
##plt.savefig("path.png")
##
##
##print(G.edges(data='weight'))
##print(G.number_of_nodes())
##print(G.number_of_edges())
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/yscrape.py | annotation/helpers/helpers/yscrape.py | import os
import json
import pafy
import json
import time
import wave
import ffmpy
import pandas as pd
import soundfile as sf
import shutil
# Read an .xlsx sheet of YouTube links with M:SS-M:SS time ranges, download
# each video's best audio stream, convert it to .wav, and keep only the
# requested snippet.
filename=input('what is the file name? \n')
desktop="/Users/jim/Desktop/"
os.chdir(desktop)
# strip the '.xlsx' extension to name the working folder
foldername=filename[0:-5]
destfolder=desktop+foldername+'/'
try:
    os.mkdir(foldername)
    os.chdir(destfolder)
except:
    # folder already exists -- just enter it
    os.chdir(destfolder)
#move file to destfolder
shutil.move(desktop+filename,destfolder+filename)
#load xls sheet
loadfile=pd.read_excel(filename)
link=loadfile.iloc[:,0]
length=loadfile.iloc[:,1]
times=loadfile.iloc[:,2]
label=loadfile.iloc[:,3]
#initialize lists
links=list()
lengths=list()
start_times=list()
end_times=list()
labels=list()
#only make links that are in youtube processable
for i in range(len(link)):
    if str(link[i]).find('youtube.com/watch') != -1:
        links.append(str(link[i]))
        lengths.append(str(length[i]))
        #find the dash for start/stop times
        time=str(times[i])
        index=time.find('-')
        start_time=time[0:index]
        #get start time in seconds
        # NOTE(review): taking only the first character assumes
        # single-digit minutes (e.g. '3:45'); '12:00' would be mis-parsed.
        start_minutes=int(start_time[0])
        start_seconds=int(start_time[-2:])
        start_total=start_minutes*60+start_seconds
        #get end time in seconds
        end_time=time[index+1:]
        end_minutes=int(end_time[0])
        end_seconds=int(end_time[-2:])
        end_total=end_minutes*60+end_seconds
        #update lists
        start_times.append(start_total)
        end_times.append(end_total)
        #labels
        labels.append(str(label[i]))
files=list()
for i in range(len(links)):
    try:
        video=pafy.new(links[i])
        bestaudio=video.getbestaudio()
        filename=bestaudio.download()
        start=start_times[i]
        end=end_times[i]
        extension=bestaudio.extension
        #get file extension and convert to .wav for processing later
        os.rename(filename,'%s_start_%s_end_%s%s'%(str(i),start,end,extension))
        filename='%s_start_%s_end_%s%s'%(str(i),start,end,extension)
        if extension not in ['.wav']:
            xindex=filename.find(extension)
            filename=filename[0:xindex]
            ff=ffmpy.FFmpeg(
                inputs={filename+extension:None},
                outputs={filename+'.wav':None}
                )
            ff.run()
            os.remove(filename+extension)
        file=filename+'.wav'
        data,samplerate=sf.read(file)
        totalframes=len(data)
        totalseconds=totalframes/samplerate
        startsec=int(start_times[i])
        startframe=samplerate*startsec
        endsec=int(end_times[i])
        endframe=samplerate*endsec
        # keep only the requested window, then drop the full download
        sf.write('snipped'+file, data[startframe:endframe], samplerate)
        os.remove(file)
        #can write json too
    except:
        print('no urls')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/remove_json.py | annotation/helpers/helpers/remove_json.py | '''
Remove_json.py
Remove all json files in sub-directories.
Useful when you are cloning directories that have already been featurized
to get new feature embeddings with nlx-model repo.
'''
import os
def removejson(listdir):
    """Delete every file in *listdir* whose name ends in '.json'.

    Names are interpreted relative to the current working directory.
    """
    for name in listdir:
        if name.endswith('.json'):
            os.remove(name)
# Visit every entry in the current directory and, when it is a directory,
# delete all .json files inside it; non-directories fail os.chdir and are
# silently skipped by the except.
listdir=os.listdir()
hostdir=os.getcwd()
for i in range(len(listdir)):
    try:
        os.chdir(hostdir+'/'+listdir[i])
        listdir2=os.listdir()
        removejson(listdir2)
    except:
        pass
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/pickclass_byprobability.py | annotation/helpers/helpers/pickclass_byprobability.py | import pickle, os, json
import numpy as np
def pick_class(classlist):
    """Return the label in *classlist* with the highest population prior.

    Each candidate label is looked up in a fixed table of age-decade
    priors; labels not in the table get probability 0.  The label with
    the largest prior wins (ties resolve to the first occurrence).
    """
    names=['teens','twenties','thirties','fourties','fifties','sixties','seventies']
    probabilities=[.0666,.48888,.2296296,.08888,.08888,.0296,.0074]
    freqs=list()
    for label in classlist:
        try:
            # prior probability for a known label
            # (bug fix: the original did freqs.append(freqs), appending the
            # list itself instead of the scalar, which broke np.amax below)
            freqs.append(probabilities[names.index(label)])
        except ValueError:
            # unknown label: zero weight
            freqs.append(0)
    #now pick the maximum frequency
    maxfreq=np.amax(freqs)
    index=freqs.index(maxfreq)
    return classlist[index]
def classify(features):
    """Run every age-decade model on *features* and return the winning class.

    Each pickled model in ``model_list`` predicts one label; predictions
    containing 'controls' are discarded.  When several classes remain,
    ``pick_class`` arbitrates by population prior.  Returns 'n/a' when no
    model produced a usable class.

    Side effects: reads '<class>.pickle' models and the winner's
    '<class>.json' metadata from the current working directory.
    """
    model_list=['teens.pickle','twenties.pickle','thirties.pickle','fourties.pickle',
                'fifties.pickle','sixties.pickle','seventies.pickle']
    classlist=list()
    model_acc=list()
    deviations=list()
    modeltypes=list()
    modelslist=list()
    for modelname in model_list:
        # load the serialized model and predict a single label
        with open(modelname,'rb') as loadmodel:
            model=pickle.load(loadmodel)
        classname=str(model.predict(features)[0])
        # drop 'controls' predictions; keep everything else
        if classname.count('controls')==0:
            classlist.append(classname)
    if len(classlist)>=1:
        # pick_class arbitrates when several candidates survive
        if len(classlist)>1:
            winclass=pick_class(classlist)
        else:
            winclass=classlist[0]
        modelslist.append(winclass+'.pickle')
        g=json.load(open(winclass+'.json'))
        model_acc.append(g['accuracy'])
        deviations.append(g['deviation'])
        modeltypes.append(g['modeltype'])
    else:
        # bug fix: the original also tried json.load(open('n/a.json')) here,
        # which cannot exist and crashed; just record placeholder metadata
        winclass='n/a'
        model_acc.append(0)
        deviations.append(0)
        modeltypes.append('n/a')
    return winclass
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/automata.py | annotation/helpers/helpers/automata.py | ''' Cellular automata
Could be useful for audio applications.
REFERENCES
https://faingezicht.com/articles/2017/01/23/wolfram/
http://mathworld.wolfram.com/Rule30.html
'''
def window(iterable, stride=3):
    """Yield every contiguous slice of length *stride* from *iterable*."""
    last_start = len(iterable) - stride
    start = 0
    while start <= last_start:
        yield iterable[start:start + stride]
        start += 1
def generate_pattern(state, rule, MAX_TIME):
    """Print MAX_TIME generations of a 1-D cellular automaton.

    Each step maps every 3-cell neighborhood of *state* through *rule*
    and pads the result with '0' on both ends; the final state is
    printed once more after the loop.
    """
    for _ in range(MAX_TIME):
        print(state)
        next_cells = (rule[neigh] for neigh in window(state))
        state = '0' + ''.join(next_cells) + '0'
    print(state)
'''
window function creates list of all possible states
list(window('footbar'))
['foo', 'oot', 'otb', 'tba', 'bar']'''
#rule 30, 90, 110, 184
# Lookup tables mapping each 3-cell neighborhood to the next cell value
# for Wolfram's elementary cellular automaton rules 30, 90, 110 and 184.
RULES = {30: {"111": '0', "110": '0', "101": '0', "000": '0',
              "100": '1', "011": '1', "010": '1', "001": '1'},
         90: {"111": "0", "110": "1", "101": "0", "100": "1",
              "011": "1", "010": "0", "001": "1", "000": "0"},
         110: {"111": '0', "110": '1', "101": '1', "100": '0',
               "011": '1', "010": '1', "001": '1', "000": '0'},
         184: {"111": "1", "110": "0", "101": "1", "100": "1",
               "011": "1", "010": "0", "001": "0", "000": "0"}
         }
# single live cell in the middle of a 41-cell row
initial_state = '00000000000000000000100000000000000000000'
# NOTE(review): the result of this call is discarded -- looks like
# leftover REPL exploration.
list(window(initial_state))
generate_pattern(initial_state, RULES[30], 30)
# figure out the rules of the system in speech propagation....
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/githubtable.py | annotation/helpers/helpers/githubtable.py | '''
Load all model accuracies, names, and standard deviations
and output them in a spreadsheet.
This is intended for any model file directory using the nlx-model repository.'''
import json, os, xlsxwriter, getpass
def sort_list(list1, list2):
    """Return *list1* reordered by the ascending sort order of *list2*."""
    paired = sorted(zip(list2, list1))
    result = []
    for _key, value in paired:
        result.append(value)
    return result
# Collect accuracy/deviation/modeltype from every model .json in the
# nlx-audiomodel/models folder, sort all columns by accuracy, and write a
# GitHub-flavored markdown table to table.txt.
os.chdir(os.getcwd()[0:-(len('nlx-datalabeling'))]+'nlx-audiomodel/models')
listdir=os.listdir()
names=list()
accs=list()
stds=list()
modeltypes=list()
for i in range(len(listdir)):
    if listdir[i][-5:]=='.json':
        try:
            g=json.load(open(listdir[i]))
            acc=g['accuracy']
            name=g['model']
            std=g['deviation']
            modeltype=g['modeltype']
            names.append(name)
            accs.append(acc)
            stds.append(std)
            modeltypes.append(modeltype)
        except:
            print('error %s'%(listdir[i]))
# sort every column by the accuracy column; accs itself must be sorted last
# so the other calls still see the unsorted key list
names=sort_list(names, accs)
stds=sort_list(stds, accs)
modeltypes=sort_list(modeltypes, accs)
accs=sort_list(accs, accs)
file = open('table.txt','w')
file.write('| '+'Model Name' + ' |')
file.write(' Accuracy'+' |')
file.write(' Standard Deviation' + ' |')
file.write(' Modeltype'+ ' |')
file.write('\n')
file.write('|-----|-----|-----|-----|')
file.write('\n')
print(names)
for j in range(len(names)):
    file.write('| '+str(names[j])+' |')
    file.write(' '+str(accs[j])+' |')
    file.write(' '+str(stds[j])+' |')
    file.write(' '+str(modeltypes[j])+' |')
    file.write('\n')
file.close()
os.system('open %s'%(os.getcwd()+'/table.txt'))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/facedetect.py | annotation/helpers/helpers/facedetect.py | import numpy as np
import cv2
#put these files on the desktop
# Detect faces and eyes in face.png with OpenCV Haar cascades, save each
# detected face crop to its own file, and draw rectangles on the image.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
img = cv2.imread('face.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
increment=0
for (x,y,w,h) in faces:
    # NOTE(review): the rectangle is drawn before the crop below, so the
    # saved face image includes the blue border along its edges.
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
    newimg=img[y:y+h,x:x+w]
    #save only the face
    cv2.imwrite('only_face' + str(increment) + '.jpg', newimg)
    roi_gray = gray[y:y+h, x:x+w]
    roi_color = img[y:y+h, x:x+w]
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex,ey,ew,eh) in eyes:
        cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
    increment=increment+1
cv2.imshow('img',img)
cv2.imwrite('faces.png',img)
#detect smile throughout all images (and get timestamp annotated)
#detect sadness throughout all images (and get timestamp annotated)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/excel2json.py | annotation/helpers/helpers/excel2json.py | import librosa
import os
import soundfile as sf
import xlsxwriter
import pandas as pd
import time
import json
# Read per-voicemail annotation rows out of voicemails.xlsx and write one
# JSON file of labels per voicemail into the jsonfiles-excel directory.
exceldirectory='/Users/jim/Desktop/neurolex/voicemails/'
jsondirectory='/Users/jim/Desktop/neurolex/voicemails/jsonfiles/'
jsonexceldirectory='/Users/jim/Desktop/neurolex/voicemails/jsonfiles-excel/'
os.chdir(exceldirectory)
ls=pd.read_excel('voicemails.xlsx')
filename=ls.iloc[:,0]
#name of file to align with json files
gender=ls.iloc[:,2]
#0=music, 1=male, 2=female, 3=male/female, 4=child, 5=multi-child
age=ls.iloc[:,3]
#0=music, 1=adult, 2=child
sadness=ls.iloc[:,4]
#0=music, 1=least, 10=most
happiness=ls.iloc[:,5]
#0=music, 1=least, 10=most
# bug fix: the six columns below used ls.iloc[N] (selecting row N) instead
# of ls.iloc[:,N] (selecting column N) like every column above
stress=ls.iloc[:,6]
#0=music, 1=least, 10=most
dialect=ls.iloc[:,7]
#0=music, 1=american dialect, 2=foreign dialect
voicemusic=ls.iloc[:,8]
#1=voice, 2=music, 3=multi-music and voice
fatigue=ls.iloc[:,9]
#0=music, 1=least, 10=most
audioquality=ls.iloc[:,10]
#0=nothing, 1=lowest, 10=highest
sickness=ls.iloc[:,11]
#1=natural, 2=non-natural, 3=music, 4=sick
os.chdir(jsonexceldirectory)
for g in range(len(filename)):
    # one JSON annotation record per spreadsheet row
    newdata={
        'filename':filename[g],
        'gender': int(gender[g]),
        'age': int(age[g]),
        'sadness': int(sadness[g]),
        'happiness': int(happiness[g]),
        'stress': int(stress[g]),
        'dialect': int(dialect[g]),
        'voicemusic': int(voicemusic[g]),
        'fatigue': int(fatigue[g]),
        'audioquality': int(audioquality[g]),
        'sickness': int(sickness[g]),
    }
    # bug fix: the original called json.dump(jsonfile) with an undefined
    # variable and no file handle; write the record to <filename>.json
    with open(str(filename[g])+'.json','w') as out:
        json.dump(newdata, out)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/markov-chain.py | annotation/helpers/helpers/markov-chain.py | '''
Make markov chain.
Following tutorial on Datacamp
https://www.datacamp.com/community/tutorials/markov-chains-python-tutorial
'''
import numpy as np
import random
# the statespace
states = ["Sleep","Icecream","Run"]
# Possible sequences of events
transitionName = [["SS","SR","SI"],["RS","RR","RI"],["IS","IR","II"]]
# Probabilities matrix (transition matrix); rows follow the order of
# transitionName: from Sleep, from Run, from Icecream
transitionMatrix = [[0.2,0.6,0.2],[0.1,0.6,0.3],[0.2,0.7,0.1]]
# Sanity check: every row must sum to 1 (so the three rows sum to 3).
# Bug fix: the original summed row 1 twice and never checked row 2, and
# used an exact float != comparison, which fails on rounding error
# (0.1 + 0.6 + 0.3 is not exactly 1.0 in binary floating point).
if abs(sum(transitionMatrix[0])+sum(transitionMatrix[1])+sum(transitionMatrix[2]) - 3) > 1e-9:
    print("Somewhere, something went wrong. Transition matrix, perhaps?")
else:
    print("All is gonna be okay, you should move on!! ;)")
# A function that implements the Markov model to forecast the state/mood.
def activity_forecast(days):
    """Simulate *days* steps of the Markov chain starting from 'Sleep'.

    Returns the list of visited states (length days + 1).  The running
    path probability ``prob`` is tracked for illustration but is never
    returned or printed.
    """
    # Choose the starting state
    activityToday = "Sleep"
    activityList = [activityToday]
    i = 0
    prob = 1
    while i != days:
        if activityToday == "Sleep":
            change = np.random.choice(transitionName[0],replace=True,p=transitionMatrix[0])
            if change == "SS":
                prob = prob * 0.2
                activityList.append("Sleep")
            elif change == "SR":
                prob = prob * 0.6
                activityToday = "Run"
                activityList.append("Run")
            else:
                prob = prob * 0.2
                activityToday = "Icecream"
                activityList.append("Icecream")
        elif activityToday == "Run":
            change = np.random.choice(transitionName[1],replace=True,p=transitionMatrix[1])
            if change == "RR":
                # bug fix: was 0.5, but transitionMatrix[1] gives P(RR) = 0.6
                prob = prob * 0.6
                activityList.append("Run")
            elif change == "RS":
                # bug fix: was 0.2, but transitionMatrix[1] gives P(RS) = 0.1
                prob = prob * 0.1
                activityToday = "Sleep"
                activityList.append("Sleep")
            else:
                prob = prob * 0.3
                activityToday = "Icecream"
                activityList.append("Icecream")
        elif activityToday == "Icecream":
            change = np.random.choice(transitionName[2],replace=True,p=transitionMatrix[2])
            if change == "II":
                prob = prob * 0.1
                activityList.append("Icecream")
            elif change == "IS":
                prob = prob * 0.2
                activityToday = "Sleep"
                activityList.append("Sleep")
            else:
                prob = prob * 0.7
                activityToday = "Run"
                activityList.append("Run")
        i += 1
    return activityList
# To save every activityList
list_activity = []
count = 0
# `Range` starts from the first count up until but excluding the last count
# NOTE(review): range(1, 10000) produces 9999 runs, but the percentage
# below divides by 10000 -- off by one run.
for iterations in range(1,10000):
    list_activity.append(activity_forecast(2))
# Check out all the `activityList` we collected
#print(list_activity)
# Iterate through the list to get a count of all activities ending in state:'Run'
for smaller_list in list_activity:
    if(smaller_list[2] == "Run"):
        count += 1
# Calculate the probability of starting from state:'Sleep' and ending at state:'Run'
percentage = (count/10000) * 100
print("The probability of starting at state:'Sleep' and ending at state:'Run'= " + str(percentage) + "%")
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/visualizemodels.py | annotation/helpers/helpers/visualizemodels.py | '''
Load all model accuracies, names, and standard deviations
and output them in a spreadsheet.
This is intended for any model file directory using the nlx-model repository.'''
import json, os, xlsxwriter, getpass
# Collect accuracy/deviation/modeltype from every model .json in the
# nlx-audiomodel/models folder and write them to summary.xlsx, then open it.
os.chdir('/Users/'+getpass.getuser()+'/nlx-model/nlx-audiomodel/models')
listdir=os.listdir()
names=list()
accs=list()
stds=list()
modeltypes=list()
for i in range(len(listdir)):
    if listdir[i][-5:]=='.json':
        try:
            g=json.load(open(listdir[i]))
            acc=g['accuracy']
            name=g['model']
            std=g['deviation']
            modeltype=g['modeltype']
            names.append(name)
            accs.append(acc)
            stds.append(std)
            modeltypes.append(modeltype)
        except:
            # malformed metadata file or missing key
            print('error %s'%(listdir[i]))
workbook = xlsxwriter.Workbook('summary.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write('A1', 'Model Name')
worksheet.write('B1', 'Accuracy')
worksheet.write('C1', 'Standard Deviation')
worksheet.write('D1', 'Modeltype')
# data rows start at spreadsheet row 2, under the headers
for j in range(len(names)):
    worksheet.write('A%s'%(str(j+2)), names[j])
    worksheet.write('B%s'%(str(j+2)), accs[j])
    worksheet.write('C%s'%(str(j+2)), stds[j])
    worksheet.write('D%s'%(str(j+2)), modeltypes[j])
workbook.close()
os.system('open %s'%(os.getcwd()+'/summary.xlsx'))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/helpers/extract_noise.py | annotation/helpers/helpers/extract_noise.py | import shutil, os, random
from pydub import AudioSegment
# Ensure an empty ./noise output directory exists (recreate it if present).
try:
    os.mkdir('noise')
except:
    shutil.rmtree('noise')
    os.mkdir('noise')
def extract_noise(filename, length):
    """Export a *length*-ms clip (starting at 100 ms) of an mp3 into ./noise/.

    The clip is written next to the source as <name>_noise.mp3 and then
    moved into the noise sub-folder of the current directory.
    """
    clip_name = filename[0:-4] + '_noise.mp3'
    audio = AudioSegment.from_mp3(filename)
    snippet = audio[100:100 + length]
    snippet.export(clip_name)
    here = os.getcwd()
    shutil.move(here + '/' + clip_name, here + '/noise/' + clip_name)
# Clip 300 ms noise samples from up to 101 random mp3s in the current
# directory, then play each clip and interactively delete unwanted ones.
listdir=os.listdir()
mp3files=list()
for i in range(len(listdir)):
    if listdir[i][-4:]=='.mp3':
        mp3files.append(listdir[i])
random.shuffle(mp3files)
for i in range(len(mp3files)):
    extract_noise(mp3files[i],300)
    if i == 100:
        break
os.chdir('noise')
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i][-4:]=='.mp3':
        # 'play' is the SoX command-line player
        os.system('play %s'%(listdir[i]))
        remove=input('should remove? type y to remove')
        if remove=='y':
            os.remove(listdir[i])
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/youtube_scrape/download_playlist.py | annotation/helpers/youtube_scrape/download_playlist.py | '''
================================================
YOUTUBE_SCRAPE REPOSITORY
================================================
repository name: youtube_scrape
repository version: 1.0
repository link: https://github.com/jim-schwoebel/youtube_scrape
author: Jim Schwoebel
author contact: js@neurolex.co
description: Library for scraping youtube videos. Alternative to pafy, pytube, and youtube-dl.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-07-23
This code (youtube_scrape) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
LICENSE TERMS
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
SERVICE STATEMENT
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in kafka distributed architectures, microservices
built on top of Node.JS / python / docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
NOTE
================================================
Download a playlist from the URLS previously generated
with make_playlist.py script.
Make sure you have at least around 10GB of disk space
before bulk downloading videos, as they can take up a lot of space.
'''
import requests, json, os, shutil
from bs4 import BeautifulSoup
from pytube import YouTube
# Load a playlist JSON (made by make_playlist.py) from ./playlists/,
# download each video with pytube into a fresh folder, convert .webm to
# .mp4, and extract a .wav audio track from every .mp4.
playlist_name=input('what is the name of the playlist to download?')
hostdir=os.getcwd()
os.chdir(os.getcwd()+'/playlists/')
try:
    # accept the playlist name with or without the .json extension
    if playlist_name[-5:] != '.json':
        g=json.load(open(playlist_name+'.json'))
        entries=g['entries']
        links=list()
    elif playlist_name[-5:] == '.json':
        g=json.load(open(playlist_name))
        entries=g['entries']
        links=list()
except:
    print('error loading playlist. Please make sure it is in the playlists folder and you type in the name properly. \n\n For example yc_podcast.json ==> yc_podcast or yc_podcast.json')
if playlist_name[-5:]=='.json':
    foldername=playlist_name[0:-5]
else:
    foldername=playlist_name
# NOTE(review): the bare expression below is a no-op (leftover debugging?)
foldername
try:
    os.mkdir(foldername)
    os.chdir(foldername)
except:
    # folder already exists: wipe it and start over
    shutil.rmtree(foldername)
    os.mkdir(foldername)
    os.chdir(foldername)
for i in range(len(entries)):
    link=entries[i]['link']
    links.append(link)
    print(link)
# download files
for i in range(len(links)):
    try:
        link=links[i]
        print('downloading %s'%(link))
        YouTube(link).streams.first().download()
    except:
        print('error')
# rename videos in order
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i][-5:] in ['.webm']:
        os.rename(listdir[i],str(i)+'.webm')
        os.system('ffmpeg -i %s %s'%(str(i)+'.webm',str(i)+'.mp4'))
        os.remove(str(i)+'.webm')
    elif listdir[i][-4:] in ['.mp4']:
        os.rename(listdir[i],str(i)+'.mp4')
# now make audio for each .mp4 file
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i][-4:]=='.mp4':
        os.system('ffmpeg -i %s %s'%(listdir[i],listdir[i][0:-4]+'.wav'))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/youtube_scrape/make_playlist.py | annotation/helpers/youtube_scrape/make_playlist.py | '''
================================================
YOUTUBE_SCRAPE REPOSITORY
================================================
repository name: youtube_scrape
repository version: 1.0
repository link: https://github.com/jim-schwoebel/youtube_scrape
author: Jim Schwoebel
author contact: js@neurolex.co
description: Library for scraping youtube videos. Alternative to pafy, pytube, and youtube-dl.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-07-23
This code (youtube_scrape) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
LICENSE TERMS
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
SERVICE STATEMENT
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in kafka distributed architectures, microservices
built on top of Node.JS / python / docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
'''
###########################################################################
# IMPORT STATEMENTS ##
###########################################################################
import requests, json, os
from bs4 import BeautifulSoup
from pytube import YouTube
###########################################################################
# HELPER FUNCTIONS ##
###########################################################################
def scrapelinks(playlist, links):
#https://www.youtube.com/playlist?list=PL1v-PVIZFDsqbzPIsEPZPnvcgIQ8bNTKS
page=requests.get(playlist)
base='https://www.youtube.com/watch?v='
soup=BeautifulSoup(page.content, 'lxml')
g=soup.find_all('tr',class_='pl-video yt-uix-tile ')
entries=list()
totaltime=0
for i in range(len(g)):
try:
h=str(g[i])
# get titles
h1=h.find('data-title="')+len('data-title="')
h2=h[h1:].find('"')
title=h[h1:h1+h2]
# get links
h3=h.find('data-video-id="')+len('data-video-id="')
h4=h[h3:].find('"')
link=base+h[h3:h3+h4]
# get duration (in seconds)
h5=h.find('<div class="timestamp"><span aria-label="')
h6=h[h5:]
hsoup=BeautifulSoup(h6,'lxml')
htext=hsoup.text.replace('\n','').replace(' ','')
hmin=htext.split(':')
duration=int(hmin[0])*60+int(hmin[1])
totaltime=totaltime+duration
if link not in links:
# avoids duplicate links
links.append(link)
entry={
'title':title,
'link':link,
'duration':duration
}
entries.append(entry)
except:
print('error')
return entries, len(entries), totaltime, links
###########################################################################
## MAIN CODE BASE ##
###########################################################################
playlists=list()
entries=list()
t=1
totalnum=0
totaltime=0
links=list()
playlist_name=input('what do you want to name this playlist (e.g. angry)?')
while t>0:
#try:
playlist=input('what is the playlist id or URL?')
if playlist.find('playlist?list=')>0:
playlists.append(playlist)
entry, enum, nowtime, link=scrapelinks(playlist, links)
links=links+link
totalnum=totalnum+enum
totaltime=totaltime+nowtime
entries=entries+entry
elif playlist not in ['', 'n']:
playlist='https://www.youtube.com/playlist?list='+playlist
playlists.append(playlist)
entry, enum, nowtime, link=scrapelinks(playlist, links)
links=links+link
totalnum=totalnum+enum
totaltime=totaltime+nowtime
entries=entries+entry
else:
break
#except:
#print('error')
os.chdir(os.getcwd()+'/playlists')
data={
'entrynum':totalnum,
'total time':totaltime,
'playlist url':playlists,
'entries':entries,
}
jsonfile=open(playlist_name+'.json','w')
json.dump(data,jsonfile)
jsonfile.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/youtube_scrape/archived/get_audio.py | annotation/helpers/youtube_scrape/archived/get_audio.py | '''
get audio from each file (for processing)
'''
import os
folder=input('what playlist do you want audio?')
os.chdir(folder)
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-4:]=='.mp4':
os.system('ffmpeg -i %s %s'%(listdir[i],listdir[i][0:-4]+'.wav'))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/youtube_scrape/archived/download_playlist_url.py | annotation/helpers/youtube_scrape/archived/download_playlist_url.py | '''
Extract playlist URLs
(for further processing)
'''
import requests, json, os
from bs4 import BeautifulSoup
from pytube import YouTube
base='https://www.youtube.com/watch?v='
playlist_name=input('what do you want to name this playlist (e.g. angry)?')
#angry
playlist=input('what is the playlist url?')
#https://www.youtube.com/playlist?list=PL1v-PVIZFDsqbzPIsEPZPnvcgIQ8bNTKS
page=requests.get(playlist)
soup=BeautifulSoup(page.content, 'lxml')
g=soup.find_all('tr',class_='pl-video yt-uix-tile ')
entries=list()
links=list()
totaltime=0
for i in range(len(g)):
try:
h=str(g[i])
# get titles
h1=h.find('data-title="')+len('data-title="')
h2=h[h1:].find('"')
title=h[h1:h1+h2]
# get links
h3=h.find('data-video-id="')+len('data-video-id="')
h4=h[h3:].find('"')
link=base+h[h3:h3+h4]
# get duration (in seconds)
h5=h.find('<div class="timestamp"><span aria-label="')
h6=h[h5:]
hsoup=BeautifulSoup(h6,'lxml')
htext=hsoup.text.replace('\n','').replace(' ','')
hmin=htext.split(':')
duration=int(hmin[0])*60+int(hmin[1])
totaltime=totaltime+duration
if link not in links:
# avoids duplicate links
links.append(link)
entry={
'title':title,
'link':link,
'duration':duration
}
entries.append(entry)
except:
print('error')
os.mkdir(playlist_name)
os.chdir(os.getcwd()+'/'+playlist_name)
data={
'entrynum':len(entries),
'total time':totaltime,
'playlist url':playlist,
'entries':entries,
}
jsonfile=open('entries.json','w')
json.dump(data,jsonfile)
jsonfile.close()
for i in range(len(entries)):
try:
link=entries[i]['link']
print('downloading %s'%(link))
YouTube(link).streams.first().download()
except:
print('error')
# rename videos in order
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-5:] in ['.webm']:
os.rename(listdir[i],str(i)+'.webm')
os.system('ffmpeg -i %s %s'%(str(i)+'.webm',str(i)+'.mp4'))
os.remove(str(i)+'.webm')
elif listdir[i][-4:] in ['.mp4']:
os.rename(listdir[i],str(i)+'.mp4')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/annotation/helpers/youtube_scrape/archived/download_playlist_id.py | annotation/helpers/youtube_scrape/archived/download_playlist_id.py | '''
Extract playlist URLs
(for further processing)
'''
import requests, json, os
from bs4 import BeautifulSoup
from pytube import YouTube
base='https://www.youtube.com/watch?v='
playlist_name=input('what do you want to name this playlist (e.g. angry)?')
#angry
playlist=input('what is the playlist id?')
playlist='https://www.youtube.com/playlist?list='+playlist
#https://www.youtube.com/playlist?list=PL1v-PVIZFDsqbzPIsEPZPnvcgIQ8bNTKS
page=requests.get(playlist)
soup=BeautifulSoup(page.content, 'lxml')
g=soup.find_all('tr',class_='pl-video yt-uix-tile ')
entries=list()
links=list()
totaltime=0
for i in range(len(g)):
try:
h=str(g[i])
# get titles
h1=h.find('data-title="')+len('data-title="')
h2=h[h1:].find('"')
title=h[h1:h1+h2]
# get links
h3=h.find('data-video-id="')+len('data-video-id="')
h4=h[h3:].find('"')
link=base+h[h3:h3+h4]
# get duration (in seconds)
h5=h.find('<div class="timestamp"><span aria-label="')
h6=h[h5:]
hsoup=BeautifulSoup(h6,'lxml')
htext=hsoup.text.replace('\n','').replace(' ','')
hmin=htext.split(':')
duration=int(hmin[0])*60+int(hmin[1])
totaltime=totaltime+duration
if link not in links:
# avoids duplicate links
links.append(link)
entry={
'title':title,
'link':link,
'duration':duration
}
entries.append(entry)
except:
print('error')
os.mkdir(playlist_name)
os.chdir(os.getcwd()+'/'+playlist_name)
data={
'entrynum':len(entries),
'total time':totaltime,
'playlist url':playlist,
'entries':entries,
}
jsonfile=open('entries.json','w')
json.dump(data,jsonfile)
jsonfile.close()
for i in range(len(entries)):
try:
link=entries[i]['link']
print('downloading %s'%(link))
YouTube(link).streams.first().download()
except:
print('error')
# rename videos in order
listdir=os.listdir()
for i in range(len(listdir)):
if listdir[i][-5:] in ['.webm']:
os.rename(listdir[i],str(i)+'.webm')
os.system('ffmpeg -i %s %s'%(str(i)+'.webm',str(i)+'.mp4'))
os.remove(str(i)+'.webm')
elif listdir[i][-4:] in ['.mp4']:
os.rename(listdir[i],str(i)+'.mp4')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/visualize/visualize.py | visualize/visualize.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| | | (_) | (_) / _ \ | ___ \_ _|
| | | |_ ___ _ _ __ _| |_ _______ / /_\ \| |_/ / | |
| | | | / __| | | |/ _` | | |_ / _ \ | _ || __/ | |
\ \_/ / \__ \ |_| | (_| | | |/ / __/ | | | || | _| |_
\___/|_|___/\__,_|\__,_|_|_/___\___| \_| |_/\_| \___/
Takes in a folder or set of folders of featurized files and outputs
visualizations to look deeper at the data.
This is often useful as a precursor before building machine learning
models to uncover relationships in the data.
Note that this automatically happens as part of the modeling process
if visualize==True in settings.
This is also restricted to classification problems for version 1.0 of Allie.
Usage: python3 visualize.py [problemtype] [folder A] [folder B] ... [folder N]
Example: python3 visualize.py audio males females
'''
import os, sys, json, time, shutil
os.system('pip3 install yellowbrick==1.1 scikit-plot==0.3.7 umap==0.1.1 umap-learn==0.4.4')
from tqdm import tqdm
from yellowbrick.features import Rank1D, Rank2D, Manifold, FeatureImportances
from yellowbrick.features.pca import PCADecomposition
from sklearn.ensemble import ExtraTreesClassifier
import matplotlib.pyplot as plt
from sklearn import preprocessing
import numpy as np
from yellowbrick.text import UMAPVisualizer
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from yellowbrick.classifier import precision_recall_curve, discrimination_threshold
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from yellowbrick.regressor import residuals_plot
from yellowbrick.regressor import prediction_error
from sklearn.cluster import KMeans
from yellowbrick.cluster import silhouette_visualizer
from sklearn.cluster import MiniBatchKMeans
from yellowbrick.cluster import intercluster_distance
from sklearn.metrics import auc, roc_curve
from yellowbrick.classifier.rocauc import roc_auc
from yellowbrick.regressor import cooks_distance
from yellowbrick.features import RadViz
from yellowbrick.target.feature_correlation import feature_correlation
import seaborn as sns
import umap
from sklearn.model_selection import train_test_split
# feature selection
from sklearn.feature_selection import SelectPercentile, chi2
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.preprocessing import MinMaxScaler
# other things in scikitlearn
import scikitplot as skplt
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_digits
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_curve
from sklearn.cluster import KMeans
from sklearn import metrics
from itertools import cycle
import random
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_classes():
count=2
classes=list()
while True:
try:
class_=sys.argv[count]
classes.append(class_)
print(classes)
count=count+1
except:
break
# added this in for the CLI
if classes == []:
classnum=input('how many classes do you want to model? (e.g. 2)\n')
classnum = int(classnum)
for i in range(classnum):
classes.append(input('what is class #%s\n'%(str(i+1))))
return classes
def get_features(classes, problem_type, default_features, balance_data):
features=list()
feature_labels=list()
class_labels=list()
curdir=os.getcwd()
lengths=list()
minlength=0
if balance_data == True:
for i in range(len(classes)):
os.chdir(curdir+'/'+classes[i])
listdir=os.listdir()
jsonfiles=list()
for j in range(len(listdir)):
if listdir[j].endswith('.json'):
jsonfiles.append(listdir[j])
lengths.append(len(jsonfiles))
minlength=np.amin(lengths)
print('minimum length is...')
print(minlength)
time.sleep(2)
for i in range(len(classes)):
print('----------LOADING %s----------'%(classes[i].upper()))
os.chdir(curdir+'/'+classes[i])
listdir=os.listdir()
jsonfiles=list()
for j in range(len(listdir)):
if listdir[j].endswith('.json'):
jsonfiles.append(listdir[j])
g=json.load(open(jsonfiles[0]))
feature_list=list(g['features'][problem_type])
for j in tqdm(range(len(jsonfiles))):
if balance_data==True:
if class_labels.count(classes[i]) > minlength:
break
else:
try:
g=json.load(open(jsonfiles[j]))
feature_=list()
label_=list()
for k in range(len(feature_list)):
if feature_list[k] in default_features:
feature_=feature_+g['features'][problem_type][feature_list[k]]['features']
label_=label_+g['features'][problem_type][feature_list[k]]['labels']
# quick quality check to only add to list if the feature_labels match in length the features_
if len(feature_) == len(label_):
features.append(feature_)
feature_labels.append(label_)
class_labels.append(classes[i])
except:
print('error loading feature embedding: %s'%(feature_list[k].upper()))
else:
try:
g=json.load(open(jsonfiles[j]))
feature_=list()
label_=list()
for k in range(len(feature_list)):
if feature_list[k] in default_features:
feature_=feature_+g['features'][problem_type][feature_list[k]]['features']
label_=label_+g['features'][problem_type][feature_list[k]]['labels']
# quick quality check to only add to list if the feature_labels match in length the features_
if len(feature_) == len(label_):
features.append(feature_)
feature_labels.append(label_)
class_labels.append(classes[i])
except:
print('error loading feature embedding: %s'%(feature_list[k].upper()))
return features, feature_labels, class_labels
def plot_roc_curve(y_test, probs, clf_names):
cycol = cycle('bgrcmyk')
for i in range(len(probs)):
try:
fper, tper, thresholds = roc_curve(y_test, probs[i])
plt.plot(fper, tper, color=next(cycol), label=clf_names[i]+' = %s'%(str(round(metrics.auc(fper, tper), 2))))
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
except:
print('passing %s'%(clf_names[i]))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.savefig('roc_curve.png')
plt.close()
def get_indices(selectedlabels, labels):
'''
takes in an list of labels and gets back indices for these labels; useful to restructure
arrays around the right features for heatmaps.
selectedfeatures = string of selected features from univariate feature selection
labels = string of all potential labels in right order
'''
indices=list()
for i in range(len(selectedlabels)):
indices.append(labels.index(selectedlabels[i]))
return indices
def restructure_features(selectedlabels, features, labels):
# get top 20 features instead of all features for the heatmaps
# get top 20 feature indices
indices=get_indices(selectedlabels, labels)
# now that we have the indices, only select these indices in all numpy array features
newfeatures=list()
for i in range(len(features)):
feature=dict()
for j in range(len(indices)):
feature[selectedlabels[j]]=features[i][indices[j]]
newfeatures.append(feature)
newfeatures=pd.DataFrame(newfeatures)
newfeatures.to_csv('data.csv',index=False)
return newfeatures, selectedlabels
def visualize_features(classes, problem_type, curdir, default_features, balance_data, test_size):
# make features into label encoder here
features, feature_labels, class_labels = get_features(classes, problem_type, default_features, balance_data)
# now preprocess features for all the other plots
os.chdir(curdir)
le = preprocessing.LabelEncoder()
le.fit(class_labels)
tclass_labels = le.transform(class_labels)
# process features to help with clustering
se=preprocessing.StandardScaler()
t_features=se.fit_transform(features)
X_train, X_test, y_train, y_test = train_test_split(features, tclass_labels, test_size=test_size, random_state=42)
# print(len(features))
# print(len(feature_labels))
# print(len(class_labels))
# print(class_labels)
# GET TRAINING DATA DURING MODELING PROCESS
##################################
# get filename
# csvfile=''
# print(classes)
# for i in range(len(classes)):
# csvfile=csvfile+classes[i]+'_'
# get training and testing data for later
# try:
# print('loading training files...')
# X_train=pd.read_csv(prev_dir(curdir)+'/models/'+csvfile+'train.csv')
# y_train=X_train['class_']
# X_train.drop(['class_'], axis=1)
# X_test=pd.read_csv(prev_dir(curdir)+'/models/'+csvfile+'test.csv')
# y_test=X_test['class_']
# X_test.drop(['class_'], axis=1)
# y_train=le.inverse_transform(y_train)
# y_test=le.inverse_transform(y_test)
# except:
# print('error loading in training files, making new test data')
# Visualize each class (quick plot)
##################################
visualization_dir='visualization_session'
try:
os.mkdir(visualization_dir)
os.chdir(visualization_dir)
except:
shutil.rmtree(visualization_dir)
os.mkdir(visualization_dir)
os.chdir(visualization_dir)
objects = tuple(set(class_labels))
y_pos = np.arange(len(objects))
performance=list()
for i in range(len(objects)):
performance.append(class_labels.count(objects[i]))
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.xticks(rotation=90)
plt.title('Counts per class')
plt.ylabel('Count')
plt.xlabel('Class')
plt.tight_layout()
plt.savefig('classes.png')
plt.close()
# set current directory
curdir=os.getcwd()
# ##################################
# # CLUSTERING!!!
# ##################################
##################################
# Manifold type options
##################################
'''
"lle"
Locally Linear Embedding (LLE) uses many local linear decompositions to preserve globally non-linear structures.
"ltsa"
LTSA LLE: local tangent space alignment is similar to LLE in that it uses locality to preserve neighborhood distances.
"hessian"
Hessian LLE an LLE regularization method that applies a hessian-based quadratic form at each neighborhood
"modified"
Modified LLE applies a regularization parameter to LLE.
"isomap"
Isomap seeks a lower dimensional embedding that maintains geometric distances between each instance.
"mds"
MDS: multi-dimensional scaling uses similarity to plot points that are near to each other close in the embedding.
"spectral"
Spectral Embedding a discrete approximation of the low dimensional manifold using a graph representation.
"tsne" (default)
t-SNE: converts the similarity of points into probabilities then uses those probabilities to create an embedding.
'''
os.mkdir('clustering')
os.chdir('clustering')
# tSNE
plt.figure()
viz = Manifold(manifold="tsne", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="tsne.png")
plt.close()
# os.system('open tsne.png')
# viz.show()
# PCA
plt.figure()
visualizer = PCADecomposition(scale=True, classes=set(classes))
visualizer.fit_transform(np.array(features), tclass_labels)
visualizer.poof(outpath="pca.png")
plt.close()
# os.system('open pca.png')
# spectral embedding
plt.figure()
viz = Manifold(manifold="spectral", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="spectral.png")
plt.close()
# lle embedding
plt.figure()
viz = Manifold(manifold="lle", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="lle.png")
plt.close()
# ltsa
# plt.figure()
# viz = Manifold(manifold="ltsa", classes=set(classes))
# viz.fit_transform(np.array(features), tclass_labels)
# viz.poof(outpath="ltsa.png")
# plt.close()
# hessian
# plt.figure()
# viz = Manifold(manifold="hessian", method='dense', classes=set(classes))
# viz.fit_transform(np.array(features), tclass_labels)
# viz.poof(outpath="hessian.png")
# plt.close()
# modified
plt.figure()
viz = Manifold(manifold="modified", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="modified.png")
plt.close()
# isomap
plt.figure()
viz = Manifold(manifold="isomap", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="isomap.png")
plt.close()
# mds
plt.figure()
viz = Manifold(manifold="mds", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="mds.png")
plt.close()
# spectral
plt.figure()
viz = Manifold(manifold="spectral", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="spectral.png")
plt.close()
# UMAP embedding
plt.figure()
umap = UMAPVisualizer(metric='cosine', classes=set(classes), title="UMAP embedding")
umap.fit_transform(np.array(features), class_labels)
umap.poof(outpath="umap.png")
plt.close()
# alternative UMAP
# import umap.plot
# plt.figure()
# mapper = umap.UMAP().fit(np.array(features))
# fig=umap.plot.points(mapper, labels=np.array(tclass_labels))
# fig = fig.get_figure()
# fig.tight_layout()
# fig.savefig('umap2.png')
# plt.close(fig)
#################################
# FEATURE RANKING!!
#################################
os.chdir(curdir)
os.mkdir('feature_ranking')
os.chdir('feature_ranking')
# You can get the feature importance of each feature of your dataset
# by using the feature importance property of the model.
plt.figure(figsize=(12,12))
model = ExtraTreesClassifier()
model.fit(np.array(features),tclass_labels)
# print(model.feature_importances_)
feat_importances = pd.Series(model.feature_importances_, index=feature_labels[0])
feat_importances.nlargest(20).plot(kind='barh')
plt.title('Feature importances (ExtraTrees)', size=16)
plt.title('Feature importances with %s features'%(str(len(features[0]))))
plt.tight_layout()
plt.savefig('feature_importance.png')
plt.close()
# os.system('open feature_importance.png')
# get selected labels for top 20 features
selectedlabels=list(dict(feat_importances.nlargest(20)))
new_features, new_labels = restructure_features(selectedlabels, t_features, feature_labels[0])
new_features_, new_labels_ = restructure_features(selectedlabels, features, feature_labels[0])
# Shapiro rank algorithm (1D)
plt.figure(figsize=(28,12))
visualizer = Rank1D(algorithm='shapiro', classes=set(classes), features=new_labels)
visualizer.fit(np.array(new_features), tclass_labels)
visualizer.transform(np.array(new_features))
# plt.tight_layout()
visualizer.poof(outpath="shapiro.png")
plt.title('Shapiro plot (top 20 features)', size=16)
plt.close()
# os.system('open shapiro.png')
# visualizer.show()
# pearson ranking algorithm (2D)
plt.figure(figsize=(12,12))
visualizer = Rank2D(algorithm='pearson', classes=set(classes), features=new_labels)
visualizer.fit(np.array(new_features), tclass_labels)
visualizer.transform(np.array(new_features))
plt.tight_layout()
visualizer.poof(outpath="pearson.png")
plt.title('Pearson ranking plot (top 20 features)', size=16)
plt.close()
# os.system('open pearson.png')
# visualizer.show()
# feature importances with top 20 features for Lasso
plt.figure(figsize=(12,12))
viz = FeatureImportances(Lasso(), labels=new_labels_)
viz.fit(np.array(new_features_), tclass_labels)
plt.tight_layout()
viz.poof(outpath="lasso.png")
plt.close()
# correlation plots with feature removal if corr > 0.90
# https://towardsdatascience.com/feature-selection-correlation-and-p-value-da8921bfb3cf
# now remove correlated features
# --> p values
# --> https://towardsdatascience.com/the-next-level-of-data-visualization-in-python-dd6e99039d5e / https://github.com/WillKoehrsen/Data-Analysis/blob/master/plotly/Plotly%20Whirlwind%20Introduction.ipynb- plotly for correlation heatmap and scatterplot matrix
# --> https://seaborn.pydata.org/tutorial/distributions.html
data=new_features
corr = data.corr()
plt.figure(figsize=(12,12))
fig=sns.heatmap(corr)
fig = fig.get_figure()
plt.title('Heatmap with correlated features (top 20 features)', size=16)
fig.tight_layout()
fig.savefig('heatmap.png')
plt.close(fig)
columns = np.full((corr.shape[0],), True, dtype=bool)
for i in range(corr.shape[0]):
for j in range(i+1, corr.shape[0]):
if corr.iloc[i,j] >= 0.9:
if columns[j]:
columns[j] = False
selected_columns = data.columns[columns]
data = data[selected_columns]
corr=data.corr()
plt.figure(figsize=(12,12))
fig=sns.heatmap(corr)
fig = fig.get_figure()
plt.title('Heatmap without correlated features (top 20 features)', size=16)
fig.tight_layout()
fig.savefig('heatmap_clean.png')
plt.close(fig)
# radviz
# Instantiate the visualizer
plt.figure(figsize=(12,12))
visualizer = RadViz(classes=classes, features=new_labels)
visualizer.fit(np.array(new_features), tclass_labels)
visualizer.transform(np.array(new_features))
visualizer.poof(outpath="radviz.png")
visualizer.show()
plt.close()
# feature correlation plot
plt.figure(figsize=(28,12))
visualizer = feature_correlation(np.array(new_features), tclass_labels, labels=new_labels)
visualizer.poof(outpath="correlation.png")
visualizer.show()
plt.tight_layout()
plt.close()
os.mkdir('feature_plots')
os.chdir('feature_plots')
newdata=new_features_
newdata['classes']=class_labels
for j in range(len(new_labels_)):
fig=sns.violinplot(x=newdata['classes'], y=newdata[new_labels_[j]])
fig = fig.get_figure()
fig.tight_layout()
fig.savefig('%s_%s.png'%(str(j), new_labels_[j]))
plt.close(fig)
os.mkdir('feature_plots_transformed')
os.chdir('feature_plots_transformed')
newdata=new_features
newdata['classes']=class_labels
for j in range(len(new_labels)):
fig=sns.violinplot(x=newdata['classes'], y=newdata[new_labels[j]])
fig = fig.get_figure()
fig.tight_layout()
fig.savefig('%s_%s.png'%(str(j), new_labels[j]))
plt.close(fig)
##################################################
# PRECISION-RECALL CURVES
##################################################
os.chdir(curdir)
os.mkdir('model_selection')
os.chdir('model_selection')
plt.figure()
visualizer = precision_recall_curve(GaussianNB(), np.array(features), tclass_labels)
visualizer.poof(outpath="precision-recall.png")
plt.close()
plt.figure()
visualizer = roc_auc(LogisticRegression(), np.array(features), tclass_labels)
visualizer.poof(outpath="roc_curve_train.png")
plt.close()
plt.figure()
visualizer = discrimination_threshold(
LogisticRegression(multi_class="auto", solver="liblinear"), np.array(features), tclass_labels)
visualizer.poof(outpath="thresholds.png")
plt.close()
plt.figure()
visualizer = residuals_plot(
Ridge(), np.array(features), tclass_labels, train_color="maroon", test_color="gold"
)
visualizer.poof(outpath="residuals.png")
plt.close()
plt.figure()
visualizer = prediction_error(Lasso(), np.array(features), tclass_labels)
visualizer.poof(outpath='prediction_error.png')
plt.close()
# outlier detection
plt.figure()
visualizer = cooks_distance(np.array(features), tclass_labels, draw_threshold=True, linefmt="C0-", markerfmt=",")
visualizer.poof(outpath='outliers.png')
plt.close()
# cluster numbers
plt.figure()
visualizer = silhouette_visualizer(KMeans(len(set(tclass_labels)), random_state=42), np.array(features))
visualizer.poof(outpath='siloutte.png')
plt.close()
# cluster distance
plt.figure()
visualizer = intercluster_distance(KMeans(len(set(tclass_labels)), random_state=777), np.array(features))
visualizer.poof(outpath='cluster_distance.png')
plt.close()
# plot percentile of features plot with SVM to see which percentile for features is optimal
features=preprocessing.MinMaxScaler().fit_transform(features)
clf = Pipeline([('anova', SelectPercentile(chi2)),
('scaler', StandardScaler()),
('logr', LogisticRegression())])
score_means = list()
score_stds = list()
percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 50, 60, 70, 80, 90, 100)
for percentile in percentiles:
clf.set_params(anova__percentile=percentile)
this_scores = cross_val_score(clf, np.array(features), class_labels)
score_means.append(this_scores.mean())
score_stds.append(this_scores.std())
plt.errorbar(percentiles, score_means, np.array(score_stds))
plt.title('Performance of the LogisticRegression-Anova varying the percent features selected')
plt.xticks(np.linspace(0, 100, 11, endpoint=True))
plt.xlabel('Percentile')
plt.ylabel('Accuracy Score')
plt.axis('tight')
plt.savefig('logr_percentile_plot.png')
plt.close()
# get PCA
pca = PCA(random_state=1)
pca.fit(X_train)
skplt.decomposition.plot_pca_component_variance(pca)
plt.savefig('pca_explained_variance.png')
plt.close()
# estimators
rf = RandomForestClassifier()
skplt.estimators.plot_learning_curve(rf, X_train, y_train)
plt.title('Learning Curve (Random Forest)')
plt.savefig('learning_curve.png')
plt.close()
# elbow plot
kmeans = KMeans(random_state=1)
skplt.cluster.plot_elbow_curve(kmeans, X_train, cluster_ranges=range(1, 30), title='Elbow plot (KMeans clustering)')
plt.savefig('elbow.png')
plt.close()
# KS statistic (only if 2 classes)
lr = LogisticRegression()
lr = lr.fit(X_train, y_train)
y_probas = lr.predict_proba(X_test)
skplt.metrics.plot_ks_statistic(y_test, y_probas)
plt.savefig('ks.png')
plt.close()
# precision-recall
nb = GaussianNB()
nb.fit(X_train, y_train)
y_probas = nb.predict_proba(X_test)
skplt.metrics.plot_precision_recall(y_test, y_probas)
plt.tight_layout()
plt.savefig('precision-recall.png')
plt.close()
## plot calibration curve
rf = RandomForestClassifier()
lr = LogisticRegression()
nb = GaussianNB()
svm = LinearSVC()
dt = DecisionTreeClassifier(random_state=0)
ab = AdaBoostClassifier(n_estimators=100)
gb = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
knn = KNeighborsClassifier(n_neighbors=7)
rf_probas = rf.fit(X_train, y_train).predict_proba(X_test)
lr_probas = lr.fit(X_train, y_train).predict_proba(X_test)
nb_probas = nb.fit(X_train, y_train).predict_proba(X_test)
# svm_scores = svm.fit(X_train, y_train).predict_proba(X_test)
dt_scores= dt.fit(X_train, y_train).predict_proba(X_test)
ab_scores= ab.fit(X_train, y_train).predict_proba(X_test)
gb_scores= gb.fit(X_train, y_train).predict_proba(X_test)
knn_scores= knn.fit(X_train, y_train).predict_proba(X_test)
probas_list = [rf_probas, lr_probas, nb_probas, # svm_scores,
dt_scores, ab_scores, gb_scores, knn_scores]
clf_names = ['Random Forest', 'Logistic Regression', 'Gaussian NB', # 'SVM',
'Decision Tree', 'Adaboost', 'Gradient Boost', 'KNN']
skplt.metrics.plot_calibration_curve(y_test,probas_list, clf_names)
plt.savefig('calibration.png')
plt.tight_layout()
plt.close()
# pick classifier type by ROC (without optimization)
probs = [rf_probas[:, 1], lr_probas[:, 1], nb_probas[:, 1], # svm_scores[:, 1],
dt_scores[:, 1], ab_scores[:, 1], gb_scores[:, 1], knn_scores[:, 1]]
plot_roc_curve(y_test, probs, clf_names)
# more elaborate ROC example with CV = 5 fold
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html#sphx-glr-auto-examples-model-selection-plot-roc-crossval-py
os.chdir(curdir)
return ''
def visualize_features_csv(csvdata, target, classes, problem_type, curdir, balance_data, test_size):
# make features into label encoder here
g=csvdata
data=dict()
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
feature_labels=[csv_feature_labels]
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
g=data
lengths=list()
alldata=list()
labels=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance_data==True:
if len(class_) > minlength:
print('%s greater than minlength (%s) by %s, equalizing...'%(classes[i], str(minlength), str(len(class_)-minlength)))
class_=class_[0:minlength]
for j in range(len(class_)):
alldata.append(class_[j])
labels.append(i)
# process features to help with clustering
se=preprocessing.StandardScaler()
t_features=se.fit_transform(alldata)
# make regular features
features=alldata
tclass_labels=labels
class_labels=labels
X_train, X_test, y_train, y_test = train_test_split(alldata, labels, test_size=test_size, random_state=42)
# print(len(features))
# print(len(feature_labels))
# print(len(class_labels))
# print(class_labels)
# GET TRAINING DATA DURING MODELING PROCESS
##################################
# get filename
# csvfile=''
# print(classes)
# for i in range(len(classes)):
# csvfile=csvfile+classes[i]+'_'
# get training and testing data for later
# try:
# print('loading training files...')
# X_train=pd.read_csv(prev_dir(curdir)+'/models/'+csvfile+'train.csv')
# y_train=X_train['class_']
# X_train.drop(['class_'], axis=1)
# X_test=pd.read_csv(prev_dir(curdir)+'/models/'+csvfile+'test.csv')
# y_test=X_test['class_']
# X_test.drop(['class_'], axis=1)
# y_train=le.inverse_transform(y_train)
# y_test=le.inverse_transform(y_test)
# except:
# print('error loading in training files, making new test data')
# Visualize each class (quick plot)
##################################
visualization_dir='visualization_session'
try:
os.mkdir(visualization_dir)
os.chdir(visualization_dir)
except:
shutil.rmtree(visualization_dir)
os.mkdir(visualization_dir)
os.chdir(visualization_dir)
objects = tuple(set(class_labels))
y_pos = np.arange(len(objects))
performance=list()
for i in range(len(objects)):
performance.append(class_labels.count(objects[i]))
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.xticks(rotation=90)
plt.title('Counts per class')
plt.ylabel('Count')
plt.xlabel('Class')
plt.tight_layout()
plt.savefig('classes.png')
plt.close()
# set current directory
curdir=os.getcwd()
# ##################################
# # CLUSTERING!!!
# ##################################
##################################
# Manifold type options
##################################
'''
"lle"
Locally Linear Embedding (LLE) uses many local linear decompositions to preserve globally non-linear structures.
"ltsa"
LTSA LLE: local tangent space alignment is similar to LLE in that it uses locality to preserve neighborhood distances.
"hessian"
Hessian LLE an LLE regularization method that applies a hessian-based quadratic form at each neighborhood
"modified"
Modified LLE applies a regularization parameter to LLE.
"isomap"
Isomap seeks a lower dimensional embedding that maintains geometric distances between each instance.
"mds"
MDS: multi-dimensional scaling uses similarity to plot points that are near to each other close in the embedding.
"spectral"
Spectral Embedding a discrete approximation of the low dimensional manifold using a graph representation.
"tsne" (default)
t-SNE: converts the similarity of points into probabilities then uses those probabilities to create an embedding.
'''
os.mkdir('clustering')
os.chdir('clustering')
# tSNE
plt.figure()
viz = Manifold(manifold="tsne", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="tsne.png")
plt.close()
# os.system('open tsne.png')
# viz.show()
# PCA
plt.figure()
visualizer = PCADecomposition(scale=True, classes=set(classes))
visualizer.fit_transform(np.array(features), tclass_labels)
visualizer.poof(outpath="pca.png")
plt.close()
# os.system('open pca.png')
# spectral embedding
plt.figure()
viz = Manifold(manifold="spectral", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="spectral.png")
plt.close()
# lle embedding
plt.figure()
viz = Manifold(manifold="lle", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="lle.png")
plt.close()
# ltsa
# plt.figure()
# viz = Manifold(manifold="ltsa", classes=set(classes))
# viz.fit_transform(np.array(features), tclass_labels)
# viz.poof(outpath="ltsa.png")
# plt.close()
# hessian
# plt.figure()
# viz = Manifold(manifold="hessian", method='dense', classes=set(classes))
# viz.fit_transform(np.array(features), tclass_labels)
# viz.poof(outpath="hessian.png")
# plt.close()
# modified
plt.figure()
viz = Manifold(manifold="modified", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="modified.png")
plt.close()
# isomap
plt.figure()
viz = Manifold(manifold="isomap", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="isomap.png")
plt.close()
# mds
plt.figure()
viz = Manifold(manifold="mds", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="mds.png")
plt.close()
# spectral
plt.figure()
viz = Manifold(manifold="spectral", classes=set(classes))
viz.fit_transform(np.array(features), tclass_labels)
viz.poof(outpath="spectral.png")
plt.close()
# UMAP embedding
plt.figure()
umap = UMAPVisualizer(metric='cosine', classes=set(classes), title="UMAP embedding")
umap.fit_transform(np.array(features), class_labels)
umap.poof(outpath="umap.png")
plt.close()
# alternative UMAP
# import umap.plot
# plt.figure()
# mapper = umap.UMAP().fit(np.array(features))
# fig=umap.plot.points(mapper, labels=np.array(tclass_labels))
# fig = fig.get_figure()
# fig.tight_layout()
# fig.savefig('umap2.png')
# plt.close(fig)
#################################
# FEATURE RANKING!!
#################################
os.chdir(curdir)
os.mkdir('feature_ranking')
os.chdir('feature_ranking')
# You can get the feature importance of each feature of your dataset
# by using the feature importance property of the model.
plt.figure(figsize=(12,12))
model = ExtraTreesClassifier()
model.fit(np.array(features),tclass_labels)
# print(model.feature_importances_)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/augment_vidaug.py | augmentation/video_augmentation/augment_vidaug.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _ _ _ _
/ _ \ | ___ \_ _| _ | | | (_) | |
/ /_\ \| |_/ / | | (_) | | | |_ __| | ___ ___
| _ || __/ | | | | | | |/ _` |/ _ \/ _ \
| | | || | _| |_ _ \ \_/ / | (_| | __/ (_) |
\_| |_/\_| \___/ (_) \___/|_|\__,_|\___|\___/
'''
import helpers.vidaug.vidaug.augmentors as va
from PIL import Image, ImageSequence
import os
import moviepy.editor as mp
def augment_vidaug(videofile, basedir):
def gif_loader(path, modality="RGB"):
frames = []
with open(path, 'rb') as f:
with Image.open(f) as video:
index = 1
for frame in ImageSequence.Iterator(video):
frames.append(frame.convert(modality))
index += 1
return frames
file = videofile
# convert file to gif
if file[-4:] != '.gif':
# only take first 10 seconds.
os.system('ffmpeg -i %s %s'%(file, file[0:-4]+'.gif'))
file=file[0:-4]+'.gif'
frames = gif_loader(os.getcwd()+"/%s"%(file))
sometimes = lambda aug: va.Sometimes(0.75, aug) # Used to apply augmentor with 75% probability
seq = va.Sequential([
va.RandomCrop(size=(240, 180)), # randomly crop video with a size of (240 x 180)
va.RandomRotate(degrees=10), # randomly rotates the video with a degree randomly choosen from [-10, 10]
sometimes(va.HorizontalFlip()) # horizontally flip the video with 50% probability
])
#augment the frames
video_aug = seq(frames)
# save augmentad frames as gif
video_aug[0].save(file[0:-4]+'.gif', save_all=True, append_images=video_aug[1:], duration=100, loop=0)
clip = mp.VideoFileClip(file[0:-4]+'.gif')
clip.write_videofile('augmented_'+file[0:-4]+'.mp4')
os.remove(file[0:-4]+'.gif')
return [videofile, 'augmented_'+file[0:-4]+'.mp4'] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/augment.py | augmentation/video_augmentation/augment.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _ _ _ _
/ _ \ | ___ \_ _| _ | | | (_) | |
/ /_\ \| |_/ / | | (_) | | | |_ __| | ___ ___
| _ || __/ | | | | | | |/ _` |/ _ \/ _ \
| | | || | _| |_ _ \ \_/ / | (_| | __/ (_) |
\_| |_/\_| \___/ (_) \___/|_|\__,_|\___|\___/
This section of Allie's API augments folders of video files using
the default_video_augmenters.
Usage: python3 augment.py [folder] [augment_type]
All augment_type options include:
["augment_vidaug"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/augmentation/video_augmentation
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
################################################
## Helper functions ##
################################################
def video_augment(augmentation_set, videofile, basedir):
# only load the relevant featuresets for featurization to save memory
if augmentation_set=='augment_vidaug':
augment_vidaug.augment_vidaug(videofile, basedir)
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
video_transcribe=settings['transcribe_video']
default_video_transcribers=settings['default_video_transcriber']
try:
# assume 1 type of feature_set
augmentation_sets=[sys.argv[2]]
except:
# if none provided in command line, then load deafult features
augmentation_sets=settings['default_video_augmenters']
################################################
## Import According to settings ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'augment_vidaug' in augmentation_sets:
import augment_vidaug
################################################
## Get featurization folder ##
################################################
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
################################################
## NOW AUGMENT!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
if listdir[i][-4:] in ['.mp4']:
filename=[listdir[i]]
for j in range(len(augmentation_sets)):
augmentation_set=augmentation_sets[j]
for k in range(len(filename)):
filename=video_augment(augmentation_set, filename[k], basedir) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/setup.py | augmentation/video_augmentation/helpers/vidaug/setup.py | import setuptools
setuptools.setup(name='vidaug',
version='0.1',
description='Video Augmentation Library',
url='https://github.com/okankop/vidaug',
author='Okan Kopuklu',
author_email='okankopuklu@gmail.com',
license='MIT',
packages=setuptools.find_packages(),
zip_safe=False)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/__init__.py | augmentation/video_augmentation/helpers/vidaug/vidaug/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/flip.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/flip.py | """
Augmenters that apply video flipping horizontally and
vertically.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.HorizontalFlip(),
va.VerticalFlip() ])
List of augmenters:
* HorizontalFlip
* VerticalFlip
"""
import numpy as np
import PIL
class HorizontalFlip(object):
"""
Horizontally flip the video.
"""
def __call__(self, clip):
if isinstance(clip[0], np.ndarray):
return [np.fliplr(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
class VerticalFlip(object):
"""
Vertically flip the video.
"""
def __call__(self, clip):
if isinstance(clip[0], np.ndarray):
return [np.flipud(img) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.transpose(PIL.Image.FLIP_TOP_BOTTOM) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
' but got list of {0}'.format(type(clip[0])))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/crop.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/crop.py | """
Augmenters that apply video flipping horizontally and
vertically.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.HorizontalFlip(),
va.VerticalFlip() ])
List of augmenters:
* CenterCrop
* CornerCrop
* RandomCrop
"""
import numpy as np
import PIL
import numbers
import random
class CenterCrop(object):
"""
Extract center crop of thevideo.
Args:
size (sequence or int): Desired output size for the crop in format (h, w).
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
if size < 0:
raise ValueError('If size is a single number, it must be positive')
size = (size, size)
else:
if len(size) != 2:
raise ValueError('If size is a sequence, it must be of len 2.')
self.size = size
def __call__(self, clip):
crop_h, crop_w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if crop_w > im_w or crop_h > im_h:
error_msg = ('Initial image size should be larger then' +
'cropped size but got cropped sizes : ' +
'({w}, {h}) while initial image is ({im_w}, ' +
'{im_h})'.format(im_w=im_w, im_h=im_h, w=crop_w,
h=crop_h))
raise ValueError(error_msg)
w1 = int(round((im_w - crop_w) / 2.))
h1 = int(round((im_h - crop_h) / 2.))
if isinstance(clip[0], np.ndarray):
return [img[h1:h1 + crop_h, w1:w1 + crop_w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.crop((w1, h1, w1 + crop_w, h1 + crop_h)) for img in clip]
class CornerCrop(object):
"""
Extract corner crop of the video.
Args:
size (sequence or int): Desired output size for the crop in format (h, w).
crop_position (str): Selected corner (or center) position from the
list ['c', 'tl', 'tr', 'bl', 'br']. If it is non, crop position is
selected randomly at each call.
"""
def __init__(self, size, crop_position=None):
if isinstance(size, numbers.Number):
if size < 0:
raise ValueError('If size is a single number, it must be positive')
size = (size, size)
else:
if len(size) != 2:
raise ValueError('If size is a sequence, it must be of len 2.')
self.size = size
if crop_position is None:
self.randomize = True
else:
if crop_position not in ['c', 'tl', 'tr', 'bl', 'br']:
raise ValueError("crop_position should be one of " +
"['c', 'tl', 'tr', 'bl', 'br']")
self.randomize = False
self.crop_position = crop_position
self.crop_positions = ['c', 'tl', 'tr', 'bl', 'br']
def __call__(self, clip):
crop_h, crop_w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if self.randomize:
self.crop_position = self.crop_positions[random.randint(0,len(self.crop_positions) - 1)]
if self.crop_position == 'c':
th, tw = (self.size, self.size)
x1 = int(round((im_w - crop_w) / 2.))
y1 = int(round((im_h - crop_h) / 2.))
x2 = x1 + crop_w
y2 = y1 + crop_h
elif self.crop_position == 'tl':
x1 = 0
y1 = 0
x2 = crop_w
y2 = crop_h
elif self.crop_position == 'tr':
x1 = im_w - crop_w
y1 = 0
x2 = im_w
y2 = crop_h
elif self.crop_position == 'bl':
x1 = 0
y1 = im_h - crop_h
x2 = crop_w
y2 = im_h
elif self.crop_position == 'br':
x1 = im_w - crop_w
y1 = im_h - crop_h
x2 = im_w
y2 = im_h
if isinstance(clip[0], np.ndarray):
return [img[y1:y2, x1:x2, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.crop((x1, y1, x2, y2)) for img in clip]
class RandomCrop(object):
"""
Extract random crop of the video.
Args:
size (sequence or int): Desired output size for the crop in format (h, w).
crop_position (str): Selected corner (or center) position from the
list ['c', 'tl', 'tr', 'bl', 'br']. If it is non, crop position is
selected randomly at each call.
"""
def __init__(self, size):
if isinstance(size, numbers.Number):
if size < 0:
raise ValueError('If size is a single number, it must be positive')
size = (size, size)
else:
if len(size) != 2:
raise ValueError('If size is a sequence, it must be of len 2.')
self.size = size
def __call__(self, clip):
crop_h, crop_w = self.size
if isinstance(clip[0], np.ndarray):
im_h, im_w, im_c = clip[0].shape
elif isinstance(clip[0], PIL.Image.Image):
im_w, im_h = clip[0].size
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
if crop_w > im_w or crop_h > im_h:
error_msg = ('Initial image size should be larger then' +
'cropped size but got cropped sizes : ' +
'({w}, {h}) while initial image is ({im_w}, ' +
'{im_h})'.format(im_w=im_w, im_h=im_h, w=crop_w,
h=crop_h))
raise ValueError(error_msg)
w1 = random.randint(0, im_w - crop_w)
h1 = random.randint(0, im_h - crop_h)
if isinstance(clip[0], np.ndarray):
return [img[h1:h1 + crop_h, w1:w1 + crop_w, :] for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.crop((w1, h1, w1 + crop_w, h1 + crop_h)) for img in clip]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/geometric.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/geometric.py | """
Augmenters that apply geometric transformations.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.RandomRotate(30),
va.RandomResize(0.2) ])
List of augmenters:
* GaussianBlur
* ElasticTransformation
* PiecewiseAffineTransform
* Superpixel
"""
from skimage import segmentation, measure
import numpy as np
import random
import numbers
import scipy
import PIL
import cv2
class GaussianBlur(object):
"""
Augmenter to blur images using gaussian kernels.
Args:
sigma (float): Standard deviation of the gaussian kernel.
"""
def __init__(self, sigma):
self.sigma = sigma
def __call__(self, clip):
if isinstance(clip[0], np.ndarray):
return [scipy.ndimage.gaussian_filter(img, sigma=self.sigma, order=0) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [img.filter(PIL.ImageFilter.GaussianBlur(radius=self.sigma)) for img in clip]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
class ElasticTransformation(object):
"""
Augmenter to transform images by moving pixels locally around using
displacement fields.
See
Simard, Steinkraus and Platt
Best Practices for Convolutional Neural Networks applied to Visual
Document Analysis
in Proc. of the International Conference on Document Analysis and
Recognition, 2003
for a detailed explanation.
Args:
alpha (float): Strength of the distortion field. Higher values mean
more "movement" of pixels.
sigma (float): Standard deviation of the gaussian kernel used to
smooth the distortion fields.
order (int): Interpolation order to use. Same meaning as in
`scipy.ndimage.map_coordinates` and may take any integer value in
the range 0 to 5, where orders close to 0 are faster.
cval (int): The constant intensity value used to fill in new pixels.
This value is only used if `mode` is set to "constant".
For standard uint8 images (value range 0-255), this value may also
come from the range 0-255. It may be a float value, even for
integer image dtypes.
mode : Parameter that defines the handling of newly created pixels.
May take the same values as in `scipy.ndimage.map_coordinates`,
i.e. "constant", "nearest", "reflect" or "wrap".
"""
def __init__(self, alpha=0, sigma=0, order=3, cval=0, mode="constant",
name=None, deterministic=False):
self.alpha = alpha
self.sigma = sigma
self.order = order
self.cval = cval
self.mode = mode
def __call__(self, clip):
is_PIL = isinstance(clip[0], PIL.Image.Image)
if is_PIL:
clip = [np.asarray(img) for img in clip]
result = []
nb_images = len(clip)
for i in range(nb_images):
image = clip[i]
image_first_channel = np.squeeze(image[..., 0])
indices_x, indices_y = self._generate_indices(image_first_channel.shape, alpha=self.alpha, sigma=self.sigma)
result.append(self._map_coordinates(
clip[i],
indices_x,
indices_y,
order=self.order,
cval=self.cval,
mode=self.mode))
if is_PIL:
return [PIL.Image.fromarray(img) for img in result]
else:
return result
def _generate_indices(self, shape, alpha, sigma):
assert (len(shape) == 2),"shape: Should be of size 2!"
dx = scipy.ndimage.gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
dy = scipy.ndimage.gaussian_filter((np.random.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
return np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))
def _map_coordinates(self, image, indices_x, indices_y, order=1, cval=0, mode="constant"):
assert (len(image.shape) == 3),"image.shape: Should be of size 3!"
result = np.copy(image)
height, width = image.shape[0:2]
for c in range(image.shape[2]):
remapped_flat = scipy.ndimage.interpolation.map_coordinates(
image[..., c],
(indices_x, indices_y),
order=order,
cval=cval,
mode=mode
)
remapped = remapped_flat.reshape((height, width))
result[..., c] = remapped
return result
class PiecewiseAffineTransform(object):
"""
Augmenter that places a regular grid of points on an image and randomly
moves the neighbourhood of these point around via affine transformations.
Args:
displacement (init): gives distorted image depending on the valuse of displacement_magnification and displacement_kernel
displacement_kernel (init): gives the blury effect
displacement_magnification (float): it magnify the image
"""
def __init__(self, displacement=0, displacement_kernel=0, displacement_magnification=0):
self.displacement = displacement
self.displacement_kernel = displacement_kernel
self.displacement_magnification = displacement_magnification
def __call__(self, clip):
ret_img_group = clip
if isinstance(clip[0], np.ndarray):
im_size = clip[0].shape
image_w, image_h = im_size[1], im_size[0]
elif isinstance(clip[0], PIL.Image.Image):
im_size = clip[0].size
image_w, image_h = im_size[0], im_size[1]
else:
raise TypeError('Expected numpy.ndarray or PIL.Image' +
'but got list of {0}'.format(type(clip[0])))
displacement_map = np.random.rand(image_h, image_w, 2) * 2 * self.displacement - self.displacement
displacement_map = cv2.GaussianBlur(displacement_map, None,
self.displacement_kernel)
displacement_map *= self.displacement_magnification * self.displacement_kernel
displacement_map = np.floor(displacement_map).astype('int32')
displacement_map_rows = displacement_map[..., 0] + np.tile(np.arange(image_h), (image_w, 1)).T.astype('int32')
displacement_map_rows = np.clip(displacement_map_rows, 0, image_h - 1)
displacement_map_cols = displacement_map[..., 1] + np.tile(np.arange(image_w), (image_h, 1)).astype('int32')
displacement_map_cols = np.clip(displacement_map_cols, 0, image_w - 1)
if isinstance(clip[0], np.ndarray):
return [img[(displacement_map_rows.flatten(), displacement_map_cols.flatten())].reshape(img.shape) for img in clip]
elif isinstance(clip[0], PIL.Image.Image):
return [PIL.Image.fromarray(np.asarray(img)[(displacement_map_rows.flatten(), displacement_map_cols.flatten())].reshape(np.asarray(img).shape)) for img in clip]
class Superpixel(object):
"""
Completely or partially transform images to their superpixel representation.
Args:
p_replace (int) : Defines the probability of any superpixel area being
replaced by the superpixel.
n_segments (int): Target number of superpixels to generate.
Lower numbers are faster.
interpolation (str): Interpolation to use. Can be one of 'nearest',
'bilinear' defaults to nearest
"""
def __init__(self, p_replace=0, n_segments=0, max_size=360,
interpolation="bilinear"):
self.p_replace = p_replace
self.n_segments = n_segments
self.interpolation = interpolation
def __call__(self, clip):
is_PIL = isinstance(clip[0], PIL.Image.Image)
if is_PIL:
clip = [np.asarray(img) for img in clip]
# TODO this results in an error when n_segments is 0
replace_samples = np.tile(np.array([self.p_replace]), self.n_segments)
avg_image = np.mean(clip, axis=0)
segments = segmentation.slic(avg_image, n_segments=self.n_segments,
compactness=10)
if not np.max(replace_samples) == 0:
print("Converting")
clip = [self._apply_segmentation(img, replace_samples, segments) for img in clip]
if is_PIL:
return [PIL.Image.fromarray(img) for img in clip]
else:
return clip
def _apply_segmentation(self, image, replace_samples, segments):
nb_channels = image.shape[2]
image_sp = np.copy(image)
for c in range(nb_channels):
# segments+1 here because otherwise regionprops always misses
# the last label
regions = measure.regionprops(segments + 1,
intensity_image=image[..., c])
for ridx, region in enumerate(regions):
# with mod here, because slic can sometimes create more
# superpixel than requested. replace_samples then does
# not have enough values, so we just start over with the
# first one again.
if replace_samples[ridx % len(replace_samples)] == 1:
mean_intensity = region.mean_intensity
image_sp_c = image_sp[..., c]
image_sp_c[segments == ridx] = mean_intensity
return image_sp
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/temporal.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/temporal.py | """
Augmenters that apply temporal transformations.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.RandomRotate(30),
va.RandomResize(0.2) ])
List of augmenters:
* TemporalBeginCrop
* TemporalCenterCrop
* TemporalRandomCrop
* InverseOrder
* Downsample
* Upsample
* TemporalFit
* TemporalElasticTransformation
"""
import numpy as np
import PIL
import random
import math
class TemporalBeginCrop(object):
"""
Temporally crop the given frame indices at a beginning.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, clip):
out = clip[:self.size]
for img in out:
if len(out) >= self.size:
break
out.append(img)
return out
class TemporalCenterCrop(object):
    """
    Temporally crop the given frame indices at a center.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, clip):
        # Centre a window of `size` frames on the clip midpoint,
        # clamped to the clip boundaries.
        start = max(0, len(clip) // 2 - self.size // 2)
        stop = min(start + self.size, len(clip))
        cropped = clip[start:stop]
        # Cycle frames from the start of the window until the requested
        # length is reached (empty clips stay empty).
        pos = 0
        while cropped and len(cropped) < self.size:
            cropped.append(cropped[pos])
            pos += 1
        return cropped
class TemporalRandomCrop(object):
    """
    Temporally crop the given frame indices at a random location.

    If the number of frames is less than the size,
    loop the indices as many times as necessary to satisfy the size.

    Args:
        size (int): Desired output size of the crop.
    """

    def __init__(self, size):
        self.size = size

    def __call__(self, clip):
        # Pick a random window start; the upper bound mirrors the
        # original implementation (len - size - 1, floored at 0).
        upper = max(0, len(clip) - self.size - 1)
        start = random.randint(0, upper)
        stop = min(start + self.size, len(clip))
        cropped = clip[start:stop]
        # Cycle frames from the start of the window until the requested
        # length is reached (empty clips stay empty).
        pos = 0
        while cropped and len(cropped) < self.size:
            cropped.append(cropped[pos])
            pos += 1
        return cropped
class InverseOrder(object):
    """
    Inverts the order of clip frames.
    """

    def __call__(self, clip):
        """Return the frames of `clip` in reversed order.

        The previous implementation iterated over
        ``reversed(range(1, len(clip)))``, which silently dropped the
        first frame (index 0) and also ran a pointless outer loop; all
        frames are kept here.
        """
        return list(reversed(clip))
class Downsample(object):
    """
    Temporally downsample a video by deleting some of its frames.

    Args:
        ratio (float): Downsampling ratio in [0.0 <= ratio <= 1.0].

    Raises:
        TypeError: If ratio is outside [0.0, 1.0].
    """

    def __init__(self, ratio=1.0):
        if ratio < 0.0 or ratio > 1.0:
            raise TypeError('ratio should be in [0.0 <= ratio <= 1.0]. ' +
                            'Please use upsampling for ratio > 1.0')
        self.ratio = ratio

    def __call__(self, clip):
        # np.floor returns a float, but np.linspace's `num` must be an
        # integer (a float raises TypeError on modern NumPy) -- cast it.
        nb_return_frame = int(np.floor(self.ratio * len(clip)))
        # Evenly spaced 1-based frame positions, converted to 0-based below.
        return_ind = [int(i) for i in np.linspace(1, len(clip), num=nb_return_frame)]
        return [clip[i - 1] for i in return_ind]
class Upsample(object):
    """
    Temporally upsampling a video by duplicating some of its frames.

    Args:
        ratio (float): Upsampling ratio in [1.0 < ratio < infinity].

    Raises:
        TypeError: If ratio is below 1.0.
    """

    def __init__(self, ratio=1.0):
        if ratio < 1.0:
            raise TypeError('ratio should be 1.0 < ratio. ' +
                            'Please use downsampling for ratio <= 1.0')
        self.ratio = ratio

    def __call__(self, clip):
        # np.floor returns a float, but np.linspace's `num` must be an
        # integer (a float raises TypeError on modern NumPy) -- cast it.
        nb_return_frame = int(np.floor(self.ratio * len(clip)))
        # Evenly spaced 1-based frame positions, converted to 0-based below;
        # repeated positions duplicate frames.
        return_ind = [int(i) for i in np.linspace(1, len(clip), num=nb_return_frame)]
        return [clip[i - 1] for i in return_ind]
class TemporalFit(object):
    """
    Temporally fits a video to a given frame size by
    downsampling or upsampling.

    Args:
        size (int): Frame size to fit the video.

    Raises:
        TypeError: If size is negative.
    """

    def __init__(self, size):
        if size < 0:
            raise TypeError('size should be positive')
        self.size = size

    def __call__(self, clip):
        # Evenly spaced 1-based positions across the clip; duplicates
        # appear when upsampling, gaps when downsampling.
        positions = np.linspace(1, len(clip), num=self.size)
        return [clip[int(p) - 1] for p in positions]
class TemporalElasticTransformation(object):
    """
    Stretches or shrinks a video at the beginning, end or middle parts.

    In normal operation, the augmenter stretches the beginning and end and
    shrinks the center.
    In inverse operation, the augmenter shrinks the beginning and end and
    stretches the center.
    """

    def __call__(self, clip):
        remapped = self._get_distorted_indices(len(clip))
        return [clip[idx] for idx in remapped]

    def _get_distorted_indices(self, nb_images):
        # Randomly choose normal (tanh) or inverse (atanh) warping.
        inverse = random.randint(0, 1)
        if inverse:
            # atanh diverges near +/-1, so keep the scale in [0.6, 0.81).
            scale = random.random() * 0.21 + 0.6
            warp = math.atanh
        else:
            # tanh warping uses the wider scale range [0.8, 1.4).
            scale = random.random() * 0.6 + 0.8
            warp = math.tanh
        # Warp an even grid over [-scale, scale], normalise so the last
        # value is 1, then map [-1, 1] onto valid frame indices.
        grid = np.linspace(-scale, scale, nb_images).tolist()
        warped = [warp(x) for x in grid]
        normalised = [v / warped[-1] for v in warped]
        return [int(round(((v + 1) / 2) * (nb_images - 1), 0)) for v in normalised]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/group.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/group.py | """
Augmenters that apply to a group of augmentations, like selecting
an augmentation from a list, or applying all the augmentations in
a list sequentially
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.HorizontalFlip(),
va.VerticalFlip() ])
List of augmenters:
* Sequential
* OneOf
* SomeOf
* Sometimes
"""
import numpy as np
import PIL
import random
class Sequential(object):
    """
    Composes several augmentations together.

    Args:
        transforms (list of "Augmentor" objects): The list of augmentations to compose.
        random_order (bool): Whether to apply the augmentations in random order.
    """

    def __init__(self, transforms, random_order=False):
        self.transforms = transforms
        self.rand = random_order

    def __call__(self, clip):
        ordering = self.transforms
        if self.rand:
            # Shuffle a copy so the stored transform list is untouched.
            ordering = self.transforms[:]
            random.shuffle(ordering)
        for transform in ordering:
            clip = transform(clip)
        return clip
class OneOf(object):
    """
    Selects one augmentation from a list and applies it.

    Args:
        transforms (list of "Augmentor" objects): The list of augmentations to compose.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, clip):
        # Apply exactly one uniformly-chosen transform.
        return random.choice(self.transforms)(clip)
class SomeOf(object):
    """
    Selects a given number of augmentation from a list.

    Args:
        transforms (list of "Augmentor" objects): The list of augmentations.
        N (int): The number of augmentations to select from the list.
        random_order (bool): Whether to apply the augmentations in random order.

    Raises:
        TypeError: If N exceeds the number of given transforms.
    """

    def __init__(self, transforms, N, random_order=True):
        self.transforms = transforms
        self.rand = random_order
        if N > len(transforms):
            raise TypeError('The number of applied augmentors should be smaller than the given augmentation number')
        self.N = N

    def __call__(self, clip):
        if self.rand:
            # Draw N transforms without replacement, applied in draw order.
            pool = self.transforms[:]
            chosen = [pool.pop(random.randrange(len(pool))) for _ in range(self.N)]
        else:
            # Draw N indices without replacement, but apply the transforms
            # in their original list order.
            slots = list(range(len(self.transforms)))
            picked = sorted(slots.pop(random.randrange(len(slots))) for _ in range(self.N))
            chosen = [self.transforms[i] for i in picked]
        for transform in chosen:
            clip = transform(clip)
        return clip
class Sometimes(object):
    """
    Applies an augmentation with a given probability.

    Args:
        p (float): The probability to apply the augmentation.
        transform (an "Augmentor" object): The augmentation to apply.

    Raises:
        TypeError: If p is outside [0.0, 1.0].

    Example: Use this this transform as follows:
        sometimes = lambda aug: va.Sometimes(0.5, aug)
        sometimes(va.HorizontalFlip)
    """

    def __init__(self, p, transform):
        self.transform = transform
        # Use short-circuiting boolean `or` instead of the bitwise `|`
        # that was previously applied to the two comparisons.
        if p > 1.0 or p < 0.0:
            raise TypeError('Expected p to be in [0.0 <= 1.0], ' +
                            'but got p = {0}'.format(p))
        self.p = p

    def __call__(self, clip):
        # random.random() is in [0, 1), so p=1.0 always applies and
        # p=0.0 never does.
        if random.random() < self.p:
            clip = self.transform(clip)
        return clip
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/__init__.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/__init__.py | from __future__ import absolute_import
from .affine import *
from .crop import *
from .flip import *
from .group import *
from .temporal import *
from .intensity import *
from .geometric import *
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/affine.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/affine.py | """
Augmenters that apply affine transformations.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.RandomRotate(30),
va.RandomResize(0.2) ])
List of augmenters:
* RandomRotate
* RandomResize
* RandomTranslate
* RandomShear
"""
import numpy as np
import numbers
import random
import scipy
import PIL
import cv2
class RandomRotate(object):
    """
    Rotate video randomly by a random angle within given bounds.

    Args:
        degrees (sequence or int): Range of degrees to randomly
        select from. If degrees is a number instead of sequence
        like (min, max), the range of degrees, will be
        (-degrees, +degrees).
    """

    def __init__(self, degrees):
        # Normalise a scalar bound into a symmetric (min, max) pair.
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number,'
                                 'must be positive')
            degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence,'
                                 'it must be of len 2.')

        self.degrees = degrees

    def __call__(self, clip):
        # One angle is drawn per call and applied to every frame, so the
        # whole clip rotates coherently.
        angle = random.uniform(self.degrees[0], self.degrees[1])
        if isinstance(clip[0], np.ndarray):
            # NOTE(review): scipy.misc.imrotate was removed from SciPy in
            # 1.2/1.3 -- confirm the pinned SciPy version still provides it.
            rotated = [scipy.misc.imrotate(img, angle) for img in clip]
        elif isinstance(clip[0], PIL.Image.Image):
            rotated = [img.rotate(angle) for img in clip]
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            'but got list of {0}'.format(type(clip[0])))

        return rotated
class RandomResize(object):
    """
    Resize video by zooming in and out.

    Args:
        rate (float): Video is scaled uniformly between
            [1 - rate, 1 + rate].
        interp (string): Interpolation to use for re-sizing
            ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic').
    """

    def __init__(self, rate=0.0, interp='bilinear'):
        self.rate = rate
        self.interpolation = interp

    def __call__(self, clip):
        scaling_factor = random.uniform(1 - self.rate, 1 + self.rate)

        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            # Reject unsupported frame types up front; previously this fell
            # through to a NameError on `im_w` before the intended TypeError
            # could ever be raised.
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            'but got list of {0}'.format(type(clip[0])))

        new_w = int(im_w * scaling_factor)
        new_h = int(im_h * scaling_factor)

        if isinstance(clip[0], np.ndarray):
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 --
            # confirm the pinned SciPy version still provides it.
            return [scipy.misc.imresize(img, size=(new_h, new_w), interp=self.interpolation) for img in clip]
        return [img.resize(size=(new_w, new_h), resample=self._get_PIL_interp(self.interpolation)) for img in clip]

    def _get_PIL_interp(self, interp):
        # Map an interpolation name onto the corresponding PIL constant.
        if interp == 'nearest':
            return PIL.Image.NEAREST
        elif interp == 'lanczos':
            return PIL.Image.LANCZOS
        elif interp == 'bilinear':
            return PIL.Image.BILINEAR
        elif interp == 'bicubic':
            return PIL.Image.BICUBIC
        elif interp == 'cubic':
            return PIL.Image.CUBIC
        # Previously unknown names silently returned None; fail loudly instead.
        raise ValueError('Unknown interpolation: {0}'.format(interp))
class RandomTranslate(object):
    """
    Shifting video in X and Y coordinates.

    Args:
        x (int) : Translate in x direction, selected
            randomly from [-x, +x] pixels.
        y (int) : Translate in y direction, selected
            randomly from [-y, +y] pixels.
    """

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __call__(self, clip):
        # One offset pair per call, applied to every frame so the whole
        # clip shifts coherently.
        dx = random.randint(-self.x, +self.x)
        dy = random.randint(-self.y, +self.y)

        first = clip[0]
        if isinstance(first, np.ndarray):
            rows, cols, ch = first.shape
            shift = np.float32([[1, 0, dx], [0, 1, dy]])
            return [cv2.warpAffine(frame, shift, (cols, rows)) for frame in clip]
        if isinstance(first, PIL.Image.Image):
            return [frame.transform(frame.size, PIL.Image.AFFINE, (1, 0, dx, 0, 1, dy)) for frame in clip]
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        'but got list of {0}'.format(type(first)))
class RandomShear(object):
    """
    Shearing video in X and Y directions.

    Args:
        x (int) : Shear in x direction, selected randomly from
            [-x, +x].
        y (int) : Shear in y direction, selected randomly from
            [-y, +y].
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __call__(self, clip):
        # One shear pair per call, applied to every frame so the whole
        # clip shears coherently.
        sx = random.uniform(-self.x, self.x)
        sy = random.uniform(-self.y, self.y)

        first = clip[0]
        if isinstance(first, np.ndarray):
            rows, cols, ch = first.shape
            shear = np.float32([[1, sx, 0], [sy, 1, 0]])
            return [cv2.warpAffine(frame, shear, (cols, rows)) for frame in clip]
        if isinstance(first, PIL.Image.Image):
            return [frame.transform(frame.size, PIL.Image.AFFINE, (1, sx, 0, sy, 1, 0)) for frame in clip]
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        'but got list of {0}'.format(type(first)))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/intensity.py | augmentation/video_augmentation/helpers/vidaug/vidaug/augmentors/intensity.py | """
Augmenters that apply transformations on the pixel intensities.
To use the augmenters, clone the complete repo and use
`from vidaug import augmenters as va`
and then e.g. :
seq = va.Sequential([ va.RandomRotate(30),
va.RandomResize(0.2) ])
List of augmenters:
* InvertColor
* Add
* Multiply
* Pepper
* Salt
"""
import numpy as np
import random
import PIL
from PIL import ImageOps
class InvertColor(object):
    """
    Inverts the color of the video.
    """

    def __call__(self, clip):
        first = clip[0]
        if isinstance(first, np.ndarray):
            return [np.invert(frame) for frame in clip]
        if isinstance(first, PIL.Image.Image):
            return [ImageOps.invert(frame) for frame in clip]
        raise TypeError('Expected numpy.ndarray or PIL.Image' +
                        'but got list of {0}'.format(type(first)))
class Add(object):
    """
    Add a value to all pixel intesities in an video.

    Args:
        value (int): The value to be added to pixel intesities.

    Raises:
        TypeError: If value is outside [-255, 255].
    """

    def __init__(self, value=0):
        if value > 255 or value < -255:
            raise TypeError('The video is blacked or whitened out since ' +
                            'value > 255 or value < -255.')
        self.value = value

    def __call__(self, clip):
        # The ndarray check short-circuits the PIL test, so arrays never
        # touch the PIL branch; PIL frames are converted to arrays first.
        was_pil = not isinstance(clip[0], np.ndarray) and isinstance(clip[0], PIL.Image.Image)
        frames = [np.asarray(img) for img in clip] if was_pil else clip

        shifted = []
        for frame in frames:
            # Widen to int32 so the addition cannot wrap, clamp to the
            # valid byte range, then narrow back to uint8.
            arr = frame.astype(np.int32) + self.value
            shifted.append(np.clip(arr, 0, 255).astype(np.uint8))

        if was_pil:
            return [PIL.Image.fromarray(arr) for arr in shifted]
        return shifted
class Multiply(object):
    """
    Multiply all pixel intensities with given value.
    This augmenter can be used to make images lighter or darker.

    Args:
        value (float): The value with which to multiply the pixel intensities
            of video.

    Raises:
        TypeError: If value is negative.
    """

    def __init__(self, value=1.0):
        if value < 0.0:
            raise TypeError('The video is blacked out since for value < 0.0')
        self.value = value

    def __call__(self, clip):
        # The ndarray check short-circuits the PIL test, so arrays never
        # touch the PIL branch; PIL frames are converted to arrays first.
        was_pil = not isinstance(clip[0], np.ndarray) and isinstance(clip[0], PIL.Image.Image)
        frames = [np.asarray(img) for img in clip] if was_pil else clip

        scaled = []
        for frame in frames:
            # Scale in float64 to avoid overflow, clamp to the valid byte
            # range, then truncate back to uint8.
            arr = frame.astype(np.float64) * self.value
            scaled.append(np.clip(arr, 0, 255).astype(np.uint8))

        if was_pil:
            return [PIL.Image.fromarray(arr) for arr in scaled]
        return scaled
class Pepper(object):
    """
    Augmenter that sets a certain fraction of pixel intensities to 0, hence
    they become black.

    Args:
        ratio (int): Determines number of black pixels on each frame of video.
            Smaller the ratio, higher the number of black pixels.
    """

    def __init__(self, ratio=100):
        self.ratio = ratio

    def __call__(self, clip):
        # The ndarray check short-circuits the PIL test, so arrays never
        # touch the PIL branch; PIL frames are converted to arrays first.
        is_PIL = not isinstance(clip[0], np.ndarray) and isinstance(clip[0], PIL.Image.Image)
        if is_PIL:
            clip = [np.asarray(img) for img in clip]

        data_final = []
        for i in range(len(clip)):
            # `np.float` was removed in NumPy 1.24; use the explicit
            # float64 scalar type instead.
            img = clip[i].astype(np.float64)
            img_shape = img.shape
            # Each pixel is zeroed with probability 1/ratio.
            noise = np.random.randint(self.ratio, size=img_shape)
            img = np.where(noise == 0, 0, img)
            data_final.append(img.astype(np.uint8))

        if is_PIL:
            return [PIL.Image.fromarray(img) for img in data_final]
        else:
            return data_final
class Salt(object):
    """
    Augmenter that sets a certain fraction of pixel intesities to 255, hence
    they become white.

    Args:
        ratio (int): Determines number of white pixels on each frame of video.
            Smaller the ratio, higher the number of white pixels.
    """

    def __init__(self, ratio=100):
        self.ratio = ratio

    def __call__(self, clip):
        # The ndarray check short-circuits the PIL test, so arrays never
        # touch the PIL branch; PIL frames are converted to arrays first.
        is_PIL = not isinstance(clip[0], np.ndarray) and isinstance(clip[0], PIL.Image.Image)
        if is_PIL:
            clip = [np.asarray(img) for img in clip]

        data_final = []
        for i in range(len(clip)):
            # `np.float` was removed in NumPy 1.24; use the explicit
            # float64 scalar type instead.
            img = clip[i].astype(np.float64)
            img_shape = img.shape
            # Each pixel is whitened with probability 1/ratio.
            noise = np.random.randint(self.ratio, size=img_shape)
            img = np.where(noise == 0, 255, img)
            data_final.append(img.astype(np.uint8))

        if is_PIL:
            return [PIL.Image.fromarray(img) for img in data_final]
        else:
            return data_final
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/augment_textacy.py | augmentation/text_augmentation/augment_textacy.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____ _
/ _ \ | ___ \_ _| _ |_ _| | |
/ /_\ \| |_/ / | | (_) | | _____ _| |_
| _ || __/ | | | |/ _ \ \/ / __|
| | | || | _| |_ _ | | __/> <| |_
\_| |_/\_| \___/ (_) \_/\___/_/\_\\__|
'''
import os, sys, shutil
try:
import textacy
except:
os.system('pip3 install textacy==0.8.0')
import textacy
import textacy.augmentation.transforms as transforms
def augment_textacy(textfile, basedir):
	'''Augment a .TXT file with textacy transforms.

	Applies word-synonym substitution, word deletion, character swap and
	character deletion (each with probability 0.5) to the contents of
	`textfile`, writing the result to 'augmented_<textfile>'.

	`basedir` is accepted for API parity with the other augmenters but
	is not used here.

	Returns [original_filename, augmented_filename].
	'''
	original_name = textfile
	raw_text = open(textfile).read()
	doc = textacy.make_spacy_doc(raw_text, lang="en")
	transform_list = [transforms.substitute_word_synonyms,
	                  transforms.delete_words,
	                  transforms.swap_chars,
	                  transforms.delete_chars]
	augmenter = textacy.augmentation.augmenter.Augmenter(transform_list, num=[0.5, 0.5, 0.5, 0.5])
	augmented_text = augmenter.apply_transforms(doc)
	augmented_name = 'augmented_' + original_name
	out = open(augmented_name, 'w')
	out.write(str(augmented_text))
	out.close()
	return [original_name, augmented_name]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/augment.py | augmentation/text_augmentation/augment.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ _ \ | | | | (_)
/ /_\ \_ _ __ _ _ __ ___ ___ _ __ | |_ __ _| |_ _ ___ _ __
| _ | | | |/ _` | '_ ` _ \ / _ \ '_ \| __/ _` | __| |/ _ \| '_ \
| | | | |_| | (_| | | | | | | __/ | | | || (_| | |_| | (_) | | | |
\_| |_/\__,_|\__, |_| |_| |_|\___|_| |_|\__\__,_|\__|_|\___/|_| |_|
__/ |
|___/
___ ______ _____ _____ _
/ _ \ | ___ \_ _| _ |_ _| | |
/ /_\ \| |_/ / | | (_) | | _____ _| |_
| _ || __/ | | | |/ _ \ \/ / __|
| | | || | _| |_ _ | | __/> <| |_
\_| |_/\_| \___/ (_) \_/\___/_/\_\\__|
This section of Allie's API augments folders of .TXT files using
the default_text_augmenters.
Usage: python3 augment.py [folder] [augment_type]
All augment_type options include:
["augment_textacy", "augment_summary"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/augmentation/text_augmentation
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
	'''Return the parent of a '/'-separated path by dropping its last
	component (e.g. 'a/b/c' -> 'a/b', '/x/y' -> '/x', 'a' -> '').'''
	return '/'.join(directory.split('/')[:-1])
################################################
## Helper functions ##
################################################
def text_augment(augmentation_set, textfile, basedir):
	'''Dispatch `textfile` to the augmentation backend named by
	`augmentation_set` ('augment_eda' or 'augment_textacy') and return
	the backend's result.

	The main loop re-assigns its filename list from this call's return
	value, so the backend result is now propagated instead of being
	dropped (previously this function always returned None, breaking
	that loop on the next iteration).

	Only the selected backend module is imported at startup, to save
	memory; unknown names fall through and return None.
	'''
	if augmentation_set == 'augment_eda':
		return augment_eda.augment_eda(textfile, basedir)
	elif augmentation_set == 'augment_textacy':
		return augment_textacy.augment_textacy(textfile, basedir)
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
# Locate Allie's root settings.json two directories above this script
# and load the text-augmentation configuration from it.
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
text_transcribe=settings['transcribe_text']
# NOTE(review): variable name says "image" but the key read is the text
# transcriber -- presumably copied from the image featurizer; confirm.
default_image_transcribers=settings['default_text_transcriber']

try:
	# assume 1 type of feature_set
	augmentation_sets=[sys.argv[2]]
except:
	# if none provided on the command line, then load default augmenters
	augmentation_sets=settings['default_text_augmenters']
################################################
## Import According to settings ##
################################################
# only load the relevant augmentation backends to save memory
if 'augment_eda' in augmentation_sets:
	import augment_eda
if 'augment_textacy' in augmentation_sets:
	import augment_textacy
################################################
## Get featurization folder ##
################################################
# The target folder of .TXT files is the first command-line argument.
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'

# get class label from folder name (handle a trailing '/' by falling
# back to the second-to-last path component)
labelname=foldername.split('/')
if labelname[-1]=='':
	labelname=labelname[-2]
else:
	labelname=labelname[-1]
################################################
## NOW AUGMENT!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)

# augment every .TXT file in the folder with each requested augmenter
for i in tqdm(range(len(listdir)), desc=labelname):
	if listdir[i][-4:] in ['.txt']:
		filename=[listdir[i]]
		for j in range(len(augmentation_sets)):
			augmentation_set=augmentation_sets[j]
			for k in range(len(filename)):
				# NOTE(review): text_augment returns None for the eda
				# backend, so re-assigning `filename` from it would break
				# the next k iteration -- verify against the backends.
				filename=text_augment(augmentation_set, filename[k], basedir)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/augment_eda.py | augmentation/text_augmentation/helpers/augment_eda.py | import os, sys, shutil
def augment_eda(textfile, basedir):
	'''Augment a .TXT file using the EDA (easy data augmentation) helper.

	Prepends a dummy "1<tab>" label (the eda_nlp script expects
	label<tab>text lines), copies the file into the eda_nlp data folder,
	runs the eda_nlp augment script over it, copies the augmented output
	back into the working directory, then removes the temporary copies.

	NOTE(review): the os.system call interpolates file paths straight
	into a shell string; paths with spaces or shell metacharacters will
	break or execute -- consider subprocess.run with an argument list.
	'''
	num_augmented = 1
	labeled_text = '1\t' + open(os.getcwd() + '/' + textfile).read()
	relabeled = open(textfile, 'w')
	relabeled.write(labeled_text)
	relabeled.close()
	shutil.copy(os.getcwd() + '/' + textfile, basedir + '/helpers/eda_nlp/data/' + textfile)
	newfile = 'augmented_' + textfile
	os.system('python3 %s/helpers/eda_nlp/code/augment.py --input=%s --output=%s --num_aug=%s --alpha=0.05' % (basedir, textfile, newfile, str(num_augmented)))
	shutil.copy(basedir + '/helpers/eda_nlp/data/' + newfile, os.getcwd() + '/' + newfile)
	os.remove(basedir + '/helpers/eda_nlp/data/' + textfile)
	os.remove(basedir + '/helpers/eda_nlp/data/' + newfile)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/augmentation/text_augmentation/helpers/eda_nlp/experiments/b_1_data_process.py | augmentation/text_augmentation/helpers/eda_nlp/experiments/b_1_data_process.py | from methods import *
from b_config import *
if __name__ == "__main__":

	#generate the augmented data sets
	for dataset_folder in dataset_folders:

		#pre-existing file locations
		train_orig = dataset_folder + '/train_orig.txt'

		#file to be created
		train_aug_st = dataset_folder + '/train_aug_st.txt'

		#standard augmentation
		gen_standard_aug(train_orig, train_aug_st)

		#generate the vocab dictionary for this dataset
		word2vec_pickle = dataset_folder + '/word2vec.p' # don't want to load the huge pickle every time, so just save the words that are actually used into a smaller dictionary
		gen_vocab_dicts(dataset_folder, word2vec_pickle, huge_word2vec)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.