repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/document_processor.py | features/text_features/helpers/blabla/blabla/document_processor.py | import stanza
import yaml
import os
import json
import blabla.utils.global_params as global_params
from os import environ
from stanza.server import CoreNLPClient
from blabla.sentence_processor.sentence_processing_engine import Sentence
from blabla.document_engine import Document
from blabla.utils.exceptions import *
import blabla.utils.global_params as global_params
import blabla.utils.settings as settings
class DocumentProcessor(object):
    """Processes a whole input document (raw text or a JSON transcript).

    Used as a context manager: ``__enter__`` boots a Stanza pipeline and a
    CoreNLP client; ``__exit__`` shuts the client down.
    """

    def __init__(self, config_path, lang):
        """Load the YAML configuration and remember the target language.

        Args:
            config_path (str): Path to the YAML configuration file.
            lang (str): Language code (e.g. 'en').
        """
        # Fix: close the config file handle instead of leaking it, and use
        # safe_load — yaml.load() without an explicit Loader is deprecated
        # and unsafe on untrusted input.
        with open(config_path, "r") as config_file:
            self.config = yaml.safe_load(config_file)
        self.client = None
        self.lang = lang

    def __enter__(self):
        """Start the Stanza pipeline and the CoreNLP client.

        Raises:
            EnvPathException: If CORENLP_HOME is not exported.
        """
        if environ.get("CORENLP_HOME") is None:
            raise EnvPathException(
                "The CORENLP_HOME path was not found. Please export it pointing to the directory that contains the CoreNLP resources"
            )
        my_path = os.path.abspath(os.path.dirname(__file__))
        settings.init()
        settings.LANGUAGE = self.lang
        stanza.download(self.lang, dir=self.config["stanza"]["dir"])
        self.nlp = stanza.Pipeline(**self.config["stanza"], lang=self.lang)
        # Per-language CoreNLP properties file shipped with the package.
        language_properties_fp = os.path.join(
            my_path, "language_resources", self.lang + "_properties.txt"
        )
        self.client = CoreNLPClient(
            properties=language_properties_fp, **self.config["corenlp"]
        )
        return self

    def break_json_into_chunks(self, doc_json):
        """Convert an input json to a list of sentences

        Args:
            doc_json (dict): The input json representing the input document

        Returns:
            list: The list of sentences with raw text
            list: The list of sentences as jsons
        """
        raw_sentences = []
        sentence_jsons = []
        try:
            for sent_json in doc_json:
                sentence_jsons.append(sent_json)
                sent_text = " ".join([word["word"] for word in sent_json["words"]])
                raw_sentences.append(sent_text)
        except Exception as e:
            # Chain the original error so the root cause stays visible.
            raise InavlidJSONFileException(
                "The input JSON file you provided could not be analysed. Please check the example format provided"
            ) from e
        return raw_sentences, sentence_jsons

    def break_text_into_sentences(self, text, force_split):
        """Break the input raw text string into sentences using Stanza

        Args:
            text (str): The raw input text.
            force_split (bool): If True, split sentences on newline, else use Stanza tokenization

        Returns:
            list: The list of sentences as raw text strings.
        """
        # Doc fix: the original docstring named the argument ``doc_json``
        # and claimed a second return value; only one list is returned.
        if force_split:
            return [s for s in text.split('\n') if s]
        sentences = []
        stanza_doc = self.nlp(text)
        for sentence in stanza_doc.sentences:
            sentences.append(sentence.text)
        return sentences

    def analyze(self, doc, input_format, force_split=False):
        """Analyze the input as either a json or a string and return a Document.

        Args:
            doc (json / string): The input that needs to be analyzed using Stanza
            input_format (str): Either 'string' or 'json'.
            force_split (bool): For string input only; if True, split
                sentences on newlines instead of using Stanza tokenization.

        Returns:
            Document: The Document object

        Raises:
            InavlidFormatException: if input_format is not 'string' or 'json'.
        """
        if input_format.lower() not in ["string", "json"]:
            raise InavlidFormatException(
                "Please provide the format as either 'string' or 'json'"
            )
        settings.INPUT_FORMAT = input_format.lower()
        doc_obj = Document(self.lang, self.nlp, self.client)
        if settings.INPUT_FORMAT == "json":  # the input format here is json
            doc = json.loads(doc)
            raw_sentences, sentence_jsons = self.break_json_into_chunks(doc)
            for raw_sent, sent_json in zip(raw_sentences, sentence_jsons):
                sentence = Sentence(
                    self.lang, self.nlp, self.client, raw_sent, sent_json
                )
                sentence.json = sent_json
                doc_obj.sentence_objs.append(sentence)
        else:  # the input format here is string
            raw_sentences = self.break_text_into_sentences(doc, force_split)
            for raw_sent in raw_sentences:
                sentence = Sentence(self.lang, self.nlp, self.client, raw_sent)
                doc_obj.sentence_objs.append(sentence)
        return doc_obj

    def __exit__(self, exc_type, exc_value, tb):
        """Stop the CoreNLP client (if it was started)."""
        if self.client is not None:
            self.client.stop()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/__init__.py | features/text_features/helpers/blabla/blabla/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/utils/exceptions.py | features/text_features/helpers/blabla/blabla/utils/exceptions.py | class EnvPathException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class EmptyStringException(Exception):
    """Raised when a string submitted for analysis is empty."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class InavlidFormatException(Exception):
    """Raised when input_format is neither 'string' nor 'json'.

    NOTE(review): the "Inavlid" typo is preserved — callers import this
    exact class name.
    """
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class InavlidYamlFileException(Exception):
    """Raised when a YAML configuration file cannot be used.

    NOTE(review): the "Inavlid" typo is preserved — callers import this
    exact class name.
    """
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class InavlidJSONFileException(Exception):
    """Raised when an input JSON document cannot be parsed into sentences.

    NOTE(review): the "Inavlid" typo is preserved — callers import this
    exact class name.
    """
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class InvalidFeatureException(Exception):
    """Raised when an unknown feature name is requested."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class FileError(Exception):
    """Raised for general file access/reading problems."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class FeatureExtractionException(Exception):
    """Raised when computing a linguistic feature fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class POSTagExtractionFailedException(Exception):
    """Raised when setting up the POS tag counter fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class DependencyParsingTreeException(Exception):
    """Raised when building the dependency parse fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class ConstituencyTreeParsingException(Exception):
    """Raised when building the constituency parse tree fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class YngveTreeConstructionException(Exception):
    """Raised when constructing the Yngve tree fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class LexicoSemanticParsingException(Exception):
    """Raised when a lexico-semantic feature computation fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class MorphoSyntacticParsingException(Exception):
    """Raised when a morpho-syntactic feature computation fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class SyntacticParsingException(Exception):
    """Raised when a syntactic feature computation fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
class DiscourseParsingException(Exception):
    """Raised when a discourse/pragmatic feature computation fails."""
    def __init__(self, message):
        # Human-readable description of the failure.
        self.message = message
    def __str__(self):
        return self.message
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/utils/settings.py | features/text_features/helpers/blabla/blabla/utils/settings.py | def init():
global INPUT_FORMAT
global LANGUAGE
global CONST_PT_FEATURES
INPUT_FORMAT = None
LANGUAGE = None
CONST_PT_FEATURES = [
"parse_tree_height",
"num_noun_phrases",
"noun_phrase_rate",
"num_verb_phrases",
"verb_phrase_rate",
"num_infinitive_phrases",
"infinitive_phrase_rate",
"num_prepositional_phrases",
"prepositional_phrase_rate",
"max_yngve_depth",
"mean_yngve_depth",
"total_yngve_depth",
"num_clauses",
"clause_rate",
"num_dependent_clauses",
"dependent_clause_rate"
]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/utils/global_params.py | features/text_features/helpers/blabla/blabla/utils/global_params.py | STRING = "string"
# Accepted input format identifiers (see DocumentProcessor.analyze).
JSON = "json"
# Universal Dependencies part-of-speech tags.
ADJECTIVE = "ADJ"
ADPOSITION = "ADP"
ADVERB = "ADV"
AUXILIARY = "AUX"
CONJUNCTION = "CCONJ"
DETERMINER = "DET"
INTERJECTION = "INTJ"
NOUN = "NOUN"
NUMERAL = "NUM"
PARTICLE = "PART"
PRONOUN = "PRON"
PROPER_NOUN = "PROPN"
PUNCTUATION = "PUNCT"
SUBORDINATING_CONJUNCTION = "SCONJ"
SYMBOL = "SYM"
VERB = "VERB"
OTHER = "X"
# Penn-Treebank-style constituency labels.
PREPOSITIONAL_PHRASE = "PP"
VERB_PHRASE = "VP"
NOUN_PHRASE = "NP"
S_CLAUSE = "S"
SBAR_CLAUSE = "SBAR"
SBARQ_CLAUSE = "SBARQ"
SINV_CLAUSE = "SINV"
SQ_CLAUSE = "SQ"
# Per-language label overrides; "default" applies to any language
# without a specific entry.
VERB_PHRASE_LANGUAGE_MAP = {"fr": "VN", "default": VERB_PHRASE}
# Bug fix: "fr" was the bare string "Ssub" while every other entry is a
# list; consumers iterate these values, so the string was iterated
# character by character ('S', 's', 'u', 'b') and 'S' wrongly matched
# top-level declarative clauses.
SUBORD_CLAUSE_LANGUAGE_MAP = {
    "fr": ["Ssub"],
    "zh-hant": ["CP", "CP-Q"],
    "default": [SBAR_CLAUSE],
}
CLAUSES_LANGUAGE_MAP = {
    "fr": ["VPinf", "VPpart", "Srel", "Ssub", "Sint"],
    "zh-hant": ["IP", "CP", "IP-Q", "CP-Q", "IP-IMP"],
    "default": [S_CLAUSE, SBAR_CLAUSE, SBARQ_CLAUSE, SINV_CLAUSE, SQ_CLAUSE],
}
# Sentinel returned when a feature cannot be computed (e.g. no nouns).
NOT_AVAILABLE = "NA"
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/utils/__init__.py | features/text_features/helpers/blabla/blabla/utils/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/lexico_semantic_feature_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/lexico_semantic_feature_engine.py | from collections import Counter
import math
from blabla.utils.global_params import *
from blabla.utils import *
def num_demonstratives(stanza_doc):
    """Count the demonstratives in the (single) parsed sentence.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        int: number of words whose morphological features carry PronType=Dem.
    """
    count = 0
    for word in stanza_doc.sentences[0].words:
        if word.feats is not None and 'PronType=Dem' in word.feats:
            count += 1
    return count
def num_unique_words(stanza_doc):
    """Return the number of distinct surface forms in the sentence.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        int: count of unique word texts.
    """
    distinct_forms = {word.text for word in stanza_doc.sentences[0].words}
    return len(distinct_forms)
def num_word_types(stanza_doc):
    """Return the number of word types (distinct lemmas) in the sentence.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        int: count of unique lemmas.
    """
    return len({word.lemma for word in stanza_doc.sentences[0].words})
def compute_mean_word_length(stanza_doc):
    """Return the mean character length of the words in the sentence.

    Bug fix: the original called ``np.mean`` but numpy is never imported
    in this module, so every call raised NameError; the mean is now
    computed with plain Python.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        float: mean word length in characters.
    """
    lengths = [len(word.text) for word in stanza_doc.sentences[0].words]
    # Stanza sentences always contain at least one word; a zero-length
    # sentence would raise ZeroDivisionError here (as np.mean would warn).
    return sum(lengths) / len(lengths)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/morpho_syntactic_feature_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/morpho_syntactic_feature_engine.py | from blabla.utils.global_params import *
def num_inflected_verbs(stanza_doc):
    """Return the number of inflected verbs in the sentence.

    A verb counts as inflected when its surface form differs from its
    lemma. Bug fix: the original iterated ``range(len(words) - 1)`` and
    therefore never examined the last word of the sentence.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        num_inflected (int): The number of inflected verbs
    """
    num_inflected = 0
    for word in stanza_doc.sentences[0].words:
        if word.pos == VERB and word.text != word.lemma:
            num_inflected += 1
    return num_inflected
def num_gerund_verbs(stanza_doc):
    """Return the number of gerund verbs (VerbForm=Ger) in the sentence.

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        total (int): the number of gerund verbs
    """
    total = 0
    for word in stanza_doc.sentences[0].words:
        feats = word.feats
        if feats is not None and word.pos == VERB and 'VerbForm=Ger' in feats:
            total += 1
    return total
def num_participle_verbs(stanza_doc):
    """Return the number of participle verbs (VerbForm=Part) in the sentence.

    NOTE: adjacent participle words are each counted individually
    (open question carried over from the original implementation).

    Args:
        stanza_doc: Stanza document holding one parsed sentence.

    Returns:
        total (int): the number of participle verbs
    """
    total = 0
    for word in stanza_doc.sentences[0].words:
        feats = word.feats
        if feats is not None and word.pos == VERB and 'VerbForm=Part' in feats:
            total += 1
    return total
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/pos_tag_counting_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/pos_tag_counting_engine.py | import stanza
class PosTagCounter(object):
    """Counts how many words of a parsed sentence carry a given POS tag."""

    def __init__(self, stanza_doc):
        """Store the parsed sentence to count tags over.

        Args:
            stanza_doc: Stanza document holding one parsed sentence.

        Returns:
            None
        """
        self.stanza_doc = stanza_doc

    def get_pos_tag_count(self, pos_tag):
        """Return the number of words tagged with ``pos_tag``.

        (Doc fix: the original docstring claimed this counted nouns; it
        counts whatever tag is passed in.)

        Args:
            pos_tag (str): Universal Dependencies POS tag, e.g. "NOUN".

        Returns:
            int: count of words whose ``pos`` equals ``pos_tag``.
        """
        return sum(
            1
            for word in self.stanza_doc.sentences[0].words
            if word.pos == pos_tag
        )
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/sentence_processing_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/sentence_processing_engine.py | from nltk.tree import Tree
from blabla.sentence_processor.yngve_tree import YngveNode
from blabla.sentence_processor.pos_tag_counting_engine import PosTagCounter
from blabla.utils.exceptions import *
import blabla.utils.settings as settings
from blabla.utils.global_params import *
import os
import stanza
import nltk
import numpy as np
import math
import json
## Ref: http://www.surdeanu.info/mihai/teaching/ista555-fall13/readings/PennTreebankConstituents.html
class Sentence(object):
    """The class that is responsible for processing a sentence and extracting the fundamental blocks such as
    the dependency parse tree, constituency parse tree and other low level features for aggregation later.
    """

    def __init__(self, language, nlp, client, raw_text, sent_json=None):
        """The initialization method for the Sentence class

        Args:
            language (str): The language of the raw_text
            nlp (obj): The Stanza NLP object.
            client (obj): The CoreNLP client object.
            raw_text (str): The raw text
            sent_json (JSON): The json of a sentence if the input is a JSON

        Returns:
            None:
        """
        self.lang = language
        self._pos_tag_counter = None
        self._sent = raw_text
        self._stanza_doc = None
        self._const_pt = None
        self._yngve_tree_root = None
        self.nlp = nlp
        self.client = client
        self.json = sent_json

    @property
    def pos_tag_counter(self):
        """A property method to return the part of speech counter object"""
        return self._pos_tag_counter

    @property
    def sent(self):
        """A property method to return the raw text of the current sentence object"""
        return self._sent

    @property
    def stanza_doc(self):
        """A property method to return the Stanza object"""
        return self._stanza_doc

    @property
    def yngve_tree_root(self):
        """A property method to return the root node of the Yngve tree"""
        return self._yngve_tree_root

    @property
    def const_pt(self):
        """A property method to return the constituency parse tree"""
        return self._const_pt

    @property
    def tot_num_characters(self):
        """Total number of characters over all words of the sentence.

        Bug fix: the original called the non-existent ``math.sum`` and
        measured ``len(word)`` on stanza Word objects; we sum the length
        of each word's surface text instead.
        """
        return sum(len(word.text) for word in self._stanza_doc.sentences[0].words)

    @property
    def speech_time(self):
        """Speech time of the sentence: the sum of all word durations."""
        return math.fsum([word['duration'] for word in self.json['words']])

    @property
    def start_time(self):
        """A property method to return the start time of the current sentence"""
        return self.json['words'][0]['start_time']

    @property
    def end_time(self):
        """A property method to return the end time of the current sentence"""
        return self.json['words'][-1]['start_time'] + self.json['words'][-1]['duration']

    @property
    def locution_time(self):
        """Locution time of the sentence: end time minus start time."""
        start_time = self.start_time
        end_time = self.end_time
        return end_time - start_time

    @property
    def words_per_min(self):
        """Words per minute for the current sentence.

        Bug fix: ``locution_time`` is a property, not a method; the
        original called it (``self.locution_time()``), which raised
        TypeError at runtime.
        """
        return (self.num_words() / self.locution_time) * 60.0

    def _get_gaps(self):
        """Return the silence gap before each word (relative to the
        previous word's end time), in the JSON's time units."""
        gaps = []
        for prev_word, next_word in zip(self.json['words'][:-1], self.json['words'][1:]):
            prev_end_time = prev_word['start_time'] + prev_word['duration']
            gaps.append(next_word['start_time'] - prev_end_time)
        return gaps

    def num_pauses(self, pause_duration):
        """A method to calculate the number of pauses in the current sentence

        Args:
            pause_duration (float): The value for the duration of the pause

        Returns:
            int: The number of pauses strictly longer than pause_duration
        """
        gaps = self._get_gaps()
        return sum(gap > pause_duration for gap in gaps)

    def tot_pause_time(self, pause_duration):
        """A method to calculate the total pause time in the current sentence

        Args:
            pause_duration (float): The value for the duration of the pause

        Returns:
            float: The total time spent in pauses longer than pause_duration
        """
        return sum([gap for gap in self._get_gaps() if gap > pause_duration])

    def analyze_text(self):
        """A method to get the Stanza document for the raw sentence text.

        Returns:
            doc (Stanza): The Stanza document object
        """
        doc = self.nlp(self._sent)
        return doc

    def const_parse_tree(self):
        """A method to get the constituency parse tree via CoreNLP.

        Returns:
            parseTree (nltk.Tree): The constituency parse tree
        """
        document = self.client.annotate(self._sent)
        document = json.loads(document.text)
        pt = document["sentences"][0]["parse"]
        parseTree = Tree.fromstring(pt)
        return parseTree

    def num_words(self):
        """A method to get the number of words

        Returns:
            int: The number of words in the current sentence
        """
        return len(self._stanza_doc.sentences[0].words)

    def _navigate_and_score_leaves(self, yngve_node, score_so_far):
        """Recursively assign accumulated Yngve scores to the leaf nodes.

        Args:
            yngve_node (YngveNode): current node
            score_so_far (int): accumulated score along the path from root
        """
        if len(yngve_node.children) == 0:
            yngve_node.score = score_so_far
        else:  # it has child nodes
            for child in yngve_node.children:
                self._navigate_and_score_leaves(child, score_so_far + child.score)

    def _traverse_and_build_yngve_tree(self, start_node, parent_node):
        """Recursively mirror the constituency tree as an Yngve tree.

        Children are visited right-to-left; each non-terminal child gets a
        score equal to its position from the right.
        """
        score = 0
        for child in start_node[::-1]:
            if isinstance(child, str):
                curr_node = YngveNode(child, 0, parent_node)
            elif isinstance(child, nltk.tree.Tree):
                curr_node = YngveNode(child.label(), score, parent_node)
                score += 1
                self._traverse_and_build_yngve_tree(child, curr_node)

    def yngve_tree(self):
        """Construct the Yngve tree and assign scores to all leaf nodes.

        Returns:
            yngve_tree_root_node (YngveNode): The root node of the yngve tree
        """
        sent_child = self._const_pt[0]
        yngve_tree_root_node = YngveNode("S", 0)
        self._traverse_and_build_yngve_tree(sent_child, yngve_tree_root_node)
        self._navigate_and_score_leaves(
            yngve_tree_root_node, yngve_tree_root_node.score
        )
        return yngve_tree_root_node

    def setup_dep_pt(self):
        """Construct the dependency parse and the POS tag counter.

        Raises:
            EmptyStringException: if the sentence text is empty.
            DependencyParsingTreeException: if Stanza parsing fails.
            POSTagExtractionFailedException: if the POS counter fails.
        """
        if len(self._sent) == 0:
            raise EmptyStringException('The input string is empty')
        try:
            self._stanza_doc = self.analyze_text()
        except Exception as e:
            raise DependencyParsingTreeException('Dependency parse tree set up failed') from e
        try:
            self._pos_tag_counter = PosTagCounter(self._stanza_doc)
        except Exception as e:
            raise POSTagExtractionFailedException('POS Tag counter failed') from e

    def setup_const_pt(self):
        """Construct the constituency parse tree.

        Raises:
            ConstituencyTreeParsingException: if CoreNLP parsing fails.
        """
        try:
            self._const_pt = self.const_parse_tree()
        except Exception as e:
            raise ConstituencyTreeParsingException(
                'Constituency parse tree set up failed. Please check if the input format (json/string) is mentioned correctly'
            ) from e

    def setup_yngve_tree(self):
        """Construct the Yngve tree.

        Raises:
            YngveTreeConstructionException: if construction fails.
        """
        try:
            self._yngve_tree_root = self.yngve_tree()
        except Exception as e:
            raise YngveTreeConstructionException('Yngve tree set up failed') from e
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/syntactic_feature_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/syntactic_feature_engine.py | from blabla.utils.global_params import *
import blabla.utils.settings as settings
import numpy as np
def const_pt_height(const_pt):
    """Return the height of the constituency parse tree.

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: the tree height as reported by NLTK
    """
    tree_height = const_pt.height()
    return tree_height
def _leaves(const_pt, tag):
"""Returns the leaves of the constituency parse tree with a label
Args:
const_pt (NLTK): The constituency parse tree
tag (str): The tag to be searched for in the parse tree
Returns:
leaves of the constituence parse trees (int): yields the leaves of the constituence parse tree matching a label
"""
for subtree in const_pt.subtrees(filter=lambda t: t.label() == tag):
yield subtree
def num_prepositional_phrases(const_pt):
    """Return the number of prepositional phrases (PP subtrees).

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: number of PP constituents
    """
    pp_chunks = [leaf.leaves() for leaf in _leaves(const_pt, PREPOSITIONAL_PHRASE)]
    return len(pp_chunks)
def num_verb_phrases(const_pt):
    """Return the number of verb phrases in the constituency parse tree.

    The VP label is treebank-specific: French uses "VN", every other
    language falls back to the default "VP" (see VERB_PHRASE_LANGUAGE_MAP).

    Ref: For Chinese, please see this manual - https://repository.upenn.edu/cgi/viewcontent.cgi?article=1040&context=ircs_reports
    Ref: For English, please see this manual - https://www.cis.upenn.edu/~bies/manuals/root.pdf

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: number of verb phrases
    """
    lang_key = settings.LANGUAGE if settings.LANGUAGE in ("fr",) else "default"
    vp_label = VERB_PHRASE_LANGUAGE_MAP[lang_key]
    vp_chunks = [leaf.leaves() for leaf in _leaves(const_pt, vp_label)]
    return len(vp_chunks)
def num_noun_phrases(const_pt):
    """Return the number of noun phrases (NP subtrees).

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: number of NP constituents
    """
    np_chunks = [leaf.leaves() for leaf in _leaves(const_pt, NOUN_PHRASE)]
    return len(np_chunks)
def num_clauses(const_pt):
    """Return the total number of clauses in the parse tree.

    Clause labels are language-dependent (see CLAUSES_LANGUAGE_MAP).

    Ref: The PennTreeBank clauses list for English - http://www.surdeanu.info/mihai/teaching/ista555-fall13/readings/PennTreebankConstituents.html
    Ref: https://www.softschools.com/examples/grammar/phrases_and_clauses_examples/416/
    Ref: Simple clauses in Chinese - http://prog3.com/sbdm/blog/cuixianpeng/article/details/16864785

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: the total number of clauses
    """
    if settings.LANGUAGE in ("fr", "zh-hant"):
        lang_key = settings.LANGUAGE
    else:
        lang_key = "default"
    clause_tags = CLAUSES_LANGUAGE_MAP[lang_key]
    clauses = [
        leaf.leaves()
        for clause_tag in clause_tags
        for leaf in _leaves(const_pt, clause_tag)
    ]
    return len(clauses)
def num_infinitive_phrases(stanza_doc):
    """Return the number of infinitive verb forms in the sentence.

    NOTE: This feature is available only for English for now.
    Ref: https://www.grammar-monster.com/glossary/infinitive_phrase.html

    Args:
        stanza_doc (Stanza): The Stanza document object

    Returns:
        int: count of words carrying VerbForm=Inf
    """
    return sum(
        1
        for word in stanza_doc.sentences[0].words
        if word.feats is not None and "VerbForm=Inf" in word.feats
    )
def num_dependent_clauses(const_pt):
    """Return the number of dependent (subordinate) clauses.

    Bug fix: SUBORD_CLAUSE_LANGUAGE_MAP maps "fr" to the bare string
    "Ssub"; iterating a string yields single characters ('S', 's', ...)
    and 'S' wrongly matched top-level declarative clauses. Normalize a
    string entry to a one-element list before iterating.

    Ref: The PennTreeBank clauses list for English - http://www.surdeanu.info/mihai/teaching/ista555-fall13/readings/PennTreebankConstituents.html
    Ref: Depedent Clauses explanations: https://examples.yourdictionary.com/examples-of-dependent-clauses.html

    Args:
        const_pt (nltk.Tree): The constituency parse tree

    Returns:
        int: the number of dependent clauses
    """
    if settings.LANGUAGE in ("zh-hant", "fr"):
        lang_key = settings.LANGUAGE
    else:
        lang_key = "default"
    clause_tags = SUBORD_CLAUSE_LANGUAGE_MAP[lang_key]
    if isinstance(clause_tags, str):
        clause_tags = [clause_tags]
    dep_clauses = []
    for clause_tag in clause_tags:
        for leaf in _leaves(const_pt, clause_tag):
            dep_clauses.append(leaf.leaves())
    return len(dep_clauses)
def get_pos(word_id, words):
    """Return the POS tag of the first word whose id equals ``word_id``.

    Args:
        word_id (int): target word id (word.id is stored as a string)
        words: iterable of stanza Word objects

    Returns:
        str or None: the matching word's POS tag, or None if absent
    """
    matches = (word.pos for word in words if int(word.id) == word_id)
    return next(matches, None)
def num_nouns_with_det(stanza_doc):
    """Return the number of determiners whose syntactic head is a noun.

    Args:
        stanza_doc (Stanza): The Stanza document object

    Returns:
        int: count of determiner-to-noun attachments
    """
    words = stanza_doc.sentences[0].words
    total = 0
    for word in words:
        if word.pos != DETERMINER:
            continue
        if get_pos(word.head, words) == NOUN:
            total += 1
    return total
def num_nouns_with_adj(stanza_doc):
    """Return the number of adjectives whose syntactic head is a noun.

    Args:
        stanza_doc (Stanza): The Stanza document object

    Returns:
        int: count of adjective-to-noun attachments
    """
    words = stanza_doc.sentences[0].words
    total = 0
    for word in words:
        if word.pos != ADJECTIVE:
            continue
        if get_pos(word.head, words) == NOUN:
            total += 1
    return total
def prop_nouns_with_det(stanza_doc, pos_tag_counter):
    """Return the proportion of nouns that have a determiner attached.

    Bug fix: the original assigned the result of ``num_nouns_with_det``
    to a local variable with the same name; that makes the name local to
    the whole function, so the call raised UnboundLocalError whenever
    the sentence contained any nouns.

    Args:
        stanza_doc (Stanza): The Stanza document object
        pos_tag_counter (obj): The POS tag counter object

    Returns:
        float or str: determiner-per-noun ratio, or NOT_AVAILABLE ("NA")
        when the sentence contains no nouns.
    """
    num_nouns = pos_tag_counter.get_pos_tag_count(NOUN)
    if num_nouns == 0:
        return NOT_AVAILABLE
    dets_on_nouns = num_nouns_with_det(stanza_doc)
    return dets_on_nouns / num_nouns
def max_yngve_depth(yngve_tree_root):
    """Return the maximum Yngve score over all leaves of the tree.

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        int: the maximum leaf score
    """
    return max(leaf.score for leaf in yngve_tree_root.leaves)
def mean_yngve_depth(yngve_tree_root):
    """Return the mean Yngve score over all leaves of the tree.

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        float: the mean leaf score
    """
    leaf_scores = [leaf.score for leaf in yngve_tree_root.leaves]
    return np.mean(leaf_scores)
def total_yngve_depth(yngve_tree_root):
    """Return the summed Yngve score over all leaves of the tree.

    Args:
        yngve_tree_root (obj): The root node

    Returns:
        int: the total of all leaf scores
    """
    return sum(leaf.score for leaf in yngve_tree_root.leaves)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/discourse_and_pragmatic_feature_engine.py | features/text_features/helpers/blabla/blabla/sentence_processor/discourse_and_pragmatic_feature_engine.py | def num_discourse_markers(stanza_doc):
# Ref: https://universaldependencies.org/docsv1/u/dep/all.html#al-u-dep/discourse
"""Returns the number of discourse markers
Args:
stanza_doc (obj): The stanza document object
Returns
(int): the number of discourse markers
"""
return len([1 for word in stanza_doc.sentences[0].words if word.deprel == 'discourse'])
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/yngve_tree.py | features/text_features/helpers/blabla/blabla/sentence_processor/yngve_tree.py | from anytree import NodeMixin, RenderTree
class YngveNode(NodeMixin):
    """A node in the Yngve tree of a sentence (an anytree NodeMixin)."""
    def __init__(self, name, score, parent=None, children=None):
        """Initialize a Yngve node with a name, score, parent and children.

        Args:
            name (str): the name of the node (constituent label or token)
            score (int): the yngve score of the node
            parent (YngveNode): the parent node of the current node, if any
            children (iterable of YngveNode): child nodes, if any

        Returns:
            None
        """
        super(YngveNode, self).__init__()
        self.name = name
        self.score = score
        # NOTE: ``parent`` and ``children`` are NodeMixin properties —
        # assigning them wires this node into the anytree structure, so
        # the assignment order matters.
        self.parent = parent
        if children:  # set children only if given
            self.children = children
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_processor/__init__.py | features/text_features/helpers/blabla/blabla/sentence_processor/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/language_resources/__init__.py | features/text_features/helpers/blabla/blabla/language_resources/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/syntactic_feature_aggregator.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/syntactic_feature_aggregator.py | from blabla.sentence_processor.syntactic_feature_engine import *
from blabla.utils.global_params import *
class Num_Noun_Phrases(object):
    """Aggregates the total number of noun phrases across sentences."""

    def __init__(self, sentence_objs):
        """Store the list of Sentence objects to aggregate over."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the total number of noun phrases over all sentences.

        Returns:
            int: sum of per-sentence noun-phrase counts.
        """
        return sum(num_noun_phrases(so.const_pt) for so in self.sentence_objs)
class Noun_Phrase_Rate(object):
    """Aggregates the average number of noun phrases per sentence."""

    def __init__(self, sentence_objs):
        """Store the list of Sentence objects to aggregate over."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the average number of noun phrases over all sentences.

        Returns:
            float: total noun-phrase count divided by the sentence count.
        """
        total = sum(num_noun_phrases(so.const_pt) for so in self.sentence_objs)
        return total / len(self.sentence_objs)
class Num_Verb_Phrases(object):
"""Class to calculate the number of verb phrases
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of verb phrases over all sentences
Args:
None
Returns:
The total number of verb phrases over all sentences
"""
tot_num_verb_phrases = 0
for so in self.sentence_objs:
tot_num_verb_phrases += num_verb_phrases(so.const_pt)
return tot_num_verb_phrases
class Verb_Phrase_Rate(object):
"""Class to calculate the average number of verb phrases over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of verb phrases over all sentences
Args:
None
Returns:
The average number of verb phrases over all sentences
"""
tot_num_verb_phrases = 0
for so in self.sentence_objs:
tot_num_verb_phrases += num_verb_phrases(so.const_pt)
return tot_num_verb_phrases / len(self.sentence_objs)
class Num_Clauses(object):
"""Class to calculate the total number of clauses over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of clauses over all sentences
Args:
None
Returns:
The total number of clauses over all sentences
"""
tot_num_clauses = 0
for so in self.sentence_objs:
tot_num_clauses += num_clauses(so.const_pt)
return tot_num_clauses
class Clause_Rate(object):
"""Class to calculate the number of clauses per sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average number of clauses over all sentences
Args:
None
Returns:
The average number of clauses over all sentences
"""
tot_num_clauses = 0
for so in self.sentence_objs:
tot_num_clauses += num_clauses(so.const_pt)
return tot_num_clauses / len(self.sentence_objs)
class Num_Infinitive_Phrases(object):
"""Class to calculate the total number of infinitive phrases
Note: This feature is available only for English
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of infinitive phrases
Args:
None
Returns:
The total number of infinitive phrases over all sentences
"""
tot_num_inf_phrases = 0
for so in self.sentence_objs:
tot_num_inf_phrases += num_infinitive_phrases(so.stanza_doc)
return tot_num_inf_phrases
class Infinitive_Phrase_Rate(object):
"""Class to calculate the number of infinitive phrases per sentence
Note: This feature is available only for English
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of infinitive phrases per sentence
Args:
None
Returns:
The number of infinitive phrases per sentences
"""
tot_num_inf_phrases = 0
for so in self.sentence_objs:
tot_num_inf_phrases += num_infinitive_phrases(so.stanza_doc)
return tot_num_inf_phrases / len(self.sentence_objs)
class Num_Dependent_Clauses(object):
"""Class to calculate the total number of dependent clauses
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of dependent clauses
Args:
None
Returns:
The total number of dependent clauses
"""
tot_num_dep_clauses = 0
for so in self.sentence_objs:
tot_num_dep_clauses += num_dependent_clauses(so.const_pt)
return tot_num_dep_clauses
class Dependent_Clause_Rate(object):
"""Class to calculate the number of dependent clauses per sentence
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of dependent clauses per sentence
Args:
None
Returns:
The number of dependent clauses per sentences
"""
tot_num_dep_clauses = 0
for so in self.sentence_objs:
tot_num_dep_clauses += num_dependent_clauses(so.const_pt)
return tot_num_dep_clauses / len(self.sentence_objs)
class Num_Prepositional_Phrases(object):
"""Class to calculate the total number of prepositional phrases
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of prepositional phrases
Args:
None
Returns:
The total number of prepositional phrases
"""
tot_num_prep_phrases = 0
for so in self.sentence_objs:
tot_num_prep_phrases += num_prepositional_phrases(so.const_pt)
return tot_num_prep_phrases
class Prepositional_Phrase_Rate(object):
"""Class to calculate the number of prepositional phrases per sentence
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of prepositional phrases per sentence
Args:
None
Returns:
The number of prepositional phrases per sentence
"""
tot_num_prep_phrases = 0
for so in self.sentence_objs:
tot_num_prep_phrases += num_prepositional_phrases(so.const_pt)
return tot_num_prep_phrases / len(self.sentence_objs)
class Prop_Nouns_With_Det(object):
"""Class to calculate the proportion of nouns with determiners
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of nouns that have a determiner as their dependency
Args:
None
Returns:
The number of nouns with determiners as their dependency
"""
num_nouns_with_determiners, num_nouns = 0, 0
for so in self.sentence_objs:
num_nouns_with_determiners += num_nouns_with_det(so.stanza_doc)
num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
if num_nouns != 0:
return num_nouns_with_determiners / num_nouns
return NOT_AVAILABLE
class Prop_Nouns_With_Adj(object):
"""Class to calculate the proportion of nouns with adjectives
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of nouns that have a adjective as their dependency
Args:
None
Returns:
The number of nouns with adjective as their dependency
"""
num_nouns_with_adjectives, num_nouns = 0, 0
for so in self.sentence_objs:
num_nouns_with_adjectives += num_nouns_with_adj(so.stanza_doc)
num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
if num_nouns != 0:
return num_nouns_with_adjectives / num_nouns
return NOT_AVAILABLE
class Max_Yngve_Depth(object):
"""Class to calculate the maximum Yngve depth averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the maximum Yngve depth averaged over all sentences
Args:
None
Returns:
The maximum Yngve depth averaged over all sentences
"""
total_max_yngve_depth = 0
for so in self.sentence_objs:
total_max_yngve_depth += max_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_max_yngve_depth / num_sentences
class Mean_Yngve_Depth(object):
"""Class to calculate the mean Yngve depth of each sentence, averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the mean Yngve depth of each sentence, averaged over all sentences
Args:
None
Returns:
The mean Yngve depth of each sentence, averaged over all sentences
"""
total_mean_yngve_depth = 0
for so in self.sentence_objs:
total_mean_yngve_depth += mean_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_mean_yngve_depth / num_sentences
class Total_Yngve_Depth(object):
"""Class to calculate the total Yngve depth of each sentence, averaged over all sentences
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total Yngve depth of each sentence, averaged over all sentences
Args:
None
Returns:
The total Yngve depth of each sentence, averaged over all sentences
"""
total_all_yngve_depth = 0
for so in self.sentence_objs:
total_all_yngve_depth += total_yngve_depth(so.yngve_tree_root)
num_sentences = len(self.sentence_objs)
return total_all_yngve_depth / num_sentences
class Parse_Tree_Height(object):
"""Class to calculate the constituency parse tree height
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the average height of the constituency parse tree over all sentences
Args:
None
Returns:
The constituency parse tree height averaged over all sentences
"""
tot_const_pt_height = 0
for so in self.sentence_objs:
tot_const_pt_height += const_pt_height(so.const_pt)
return tot_const_pt_height / len(self.sentence_objs)
def syntactic_feature_processor(sentence_objs, feature, **kwArgs):
"""This method Returns the syntactic features across all the sentences depending on the type of feature requested
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
Returns:
the feature value
"""
nr = globals()[feature.title()](sentence_objs)
return nr.handle()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/phonetic_and_phonological_feature_aggregator.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/phonetic_and_phonological_feature_aggregator.py | import blabla.sentence_aggregators.phonetic_features_settings as pfs
from blabla.utils.global_params import *
from blabla.utils.exceptions import *
import blabla.utils.settings as settings
import numpy as np
class Word(object):
"""Class to represent a word"""
def __init__(self, word):
self.start_time = word['start_time']
self.end_time = word['start_time'] + word['duration']
self.duration = word['duration']
class Maximum_Speech_Rate(object):
"""Class to calculate the maximum speech rate
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
self.num_rapid_sentences = kwArgs.get(
'num_rapid_sentences', pfs.NUMBER_OF_RAPID_SENTENCES
)
def handle(self):
"""Method to calculate the maximum speech rate
Args:
None
Returns:
(float): The maximum speech rate
"""
words = []
window_size = 10
num_speech_rates = 3
# store all words as a sequence in a list as objects
for so in self.sentence_objs:
for word_tok in so.json['words']:
words.append(Word(word_tok))
# If we have less than 12 words in total, then we cannot calculate this feature
num_words = len(words)
if num_words < (window_size + num_speech_rates - 1):
return NOT_AVAILABLE
# calculate the speech rates
speech_rates = []
for idx in range(0, num_words - window_size):
start_time, end_time = words[idx].start_time, words[idx+window_size].end_time
speech_rates.append((window_size / (end_time - start_time)) * 60.0)
# take the mean of the highest 3 speech rates
return np.mean(sorted(speech_rates, reverse=True)[:num_speech_rates])
class Num_Pauses(object):
"""Class to calculate the total number of pauses
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects and the optional pause duration
"""
self.sentence_objs = sentence_objs
self.pause_duration = kwArgs.get('pause_duration', pfs.PAUSE_DURATION)
def handle(self):
"""Method to calculate the total number of pauses
Args:
None:
Returns:
total_num_pauses (float): total number of pauses across all sentences between words
"""
tot_num_pauses = 0
for so in self.sentence_objs:
tot_num_pauses += so.num_pauses(self.pause_duration)
return tot_num_pauses
class Total_Pause_Time(object):
"""Class to calculate the total pause time
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects and the optional pause duration
"""
self.sentence_objs = sentence_objs
self.pause_duration = kwArgs.get('pause_duration', pfs.PAUSE_DURATION)
def handle(self):
"""Method to calculate the total pause time
Args:
None:
Returns:
total_pause_time (float): total pause time between words across all sentences
"""
total_pause_time = 0.0
for so in self.sentence_objs:
total_pause_time += so.tot_pause_time(self.pause_duration)
return total_pause_time
class Mean_Pause_Duration(object):
"""Class to calculate the mean pause duration
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects and the optional pause duration
"""
self.sentence_objs = sentence_objs
self.pause_duration = kwArgs.get('pause_duration', pfs.PAUSE_DURATION)
def handle(self):
"""Method to calculate the mean pause duration
Args:
None
Returns:
(float): The mean pause duration across all sentences between words
"""
tot_pause_duration = 0.0
tot_num_pauses = 0
for so in self.sentence_objs:
tot_pause_duration += so.tot_pause_time(self.pause_duration)
tot_num_pauses += so.num_pauses(self.pause_duration)
return tot_pause_duration / tot_num_pauses
class Between_Utterance_Pause_Duration(object):
"""Class to calculate the between utterance pause duration
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects and the optional pause between utterance duration
"""
self.sentence_objs = sentence_objs
self.pause_between_utterance_duration = kwArgs.get(
'pause_between_utterance_duration', pfs.PAUSE_BETWEEN_UTTERANCE_DURATION
)
def handle(self):
"""Method to calculate the average between utterance pause duration
Args:
None
Returns:
(float): The average between utterance pause duration
"""
pause_durations = []
for prev_sent, next_sent in zip(self.sentence_objs[:-1], self.sentence_objs[1:]):
duration = next_sent.start_time - prev_sent.end_time
if (duration >= self.pause_between_utterance_duration):
pause_durations.append(duration)
if len(pause_durations) == 0:
# in case there is no pause between sentences at all
return NOT_AVAILABLE
return np.mean(pause_durations)
class Hesitation_Ratio(object):
"""Class to calculate the total hesitation ratio
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects and the optional pause duration value for hesitation
"""
self.sentence_objs = sentence_objs
self.pause_duration = kwArgs.get(
'pause_duration_for_hesitation', pfs.PAUSE_DURATION_FOR_HESITATION
)
def handle(self):
"""Method to calculate the hesitation ratio
Args:
None
Returns:
(float): The ratio of the total duration of hesitation to the total speech time
"""
tot_hesitation_duration, tot_speech_time = 0.0, 0.0
for so in self.sentence_objs:
tot_speech_time += so.speech_time
for so in self.sentence_objs:
tot_hesitation_duration += so.tot_pause_time(self.pause_duration)
return tot_hesitation_duration / tot_speech_time
class Speech_Rate(object):
"""Class to calculate the number of words spoken per minute
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculate the number of words per minute
Args:
None
Returns:
(float): The number of words per minute
"""
tot_num_words, tot_locution_time = 0, 0
for so in self.sentence_objs:
tot_num_words += so.num_words()
tot_locution_time += so.locution_time
return (tot_num_words / tot_locution_time) * 60.0
class Total_Phonation_Time(object):
"""Class to calculate the total phonation time
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculate the total phonation time
Args:
None
Returns:
tot_speech_time (float): The total phonation time
"""
tot_speech_time = 0
for so in self.sentence_objs:
tot_speech_time += so.speech_time
return tot_speech_time
class Standardized_Phonation_Time(object):
"""Class to calculate the standardized phonation rate
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculate the standardized phonation rate
Args:
None
Returns:
(float): The standardized phonation rate
"""
tot_num_words = 0
for so in self.sentence_objs:
tot_num_words += so.num_words()
return tot_num_words / Total_Phonation_Time(self.sentence_objs).handle()
class Total_Locution_Time(object):
"""Class to calculate the total locution time
Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
"""
def __init__(self, sentence_objs, **kwArgs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculate the total locution time
Args:
None
Returns:
float: The amount of time in the sample containing both speech and pauses
"""
return self.sentence_objs[-1].end_time - self.sentence_objs[0].start_time
def phonetic_and_phonological_feature_processor(sentence_objs, feature, **kwArgs):
"""This method Returns the phonetic and phonological features across all the sentences depending on the type of feature requested
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
kwArgs (list): A list of optional arguments for different features
Returns:
the feature value
"""
if settings.INPUT_FORMAT != "json":
raise InvalidFeatureException(
'You have requested the feature {} that required time stamps for the words. Please check the input format'.format(
feature
)
)
nr = globals()[feature.title()](sentence_objs, **kwArgs)
return nr.handle()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/phonetic_features_settings.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/phonetic_features_settings.py | # The default value for the duration of pause between words in a sentence. Ref: https://www.sciencedaily.com/releases/2015/09/150930110555.htm
PAUSE_DURATION = 0.35
# The default value for hesitation duration. From Boschi's paper: https://www.ncbi.nlm.nih.gov/pubmed/28321196
PAUSE_DURATION_FOR_HESITATION = 0.030
# The default value for the duration of pause between utterances. Ref: https://www.sciencedaily.com/releases/2015/09/150930110555.htm
PAUSE_BETWEEN_UTTERANCE_DURATION = 0.035
# The default value for the number of rapid sentences to be considered to calculate the maximum speech rate
NUMBER_OF_RAPID_SENTENCES = 10
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/morpho_syntactic_feature_aggregator.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/morpho_syntactic_feature_aggregator.py | from blabla.sentence_processor.morpho_syntactic_feature_engine import (
num_inflected_verbs,
num_gerund_verbs,
num_participle_verbs,
)
from blabla.utils.global_params import *
class Prop_Inflected_Verbs(object):
"""Class to calculcate the proportion of inflected verbs
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of inflected verbs
Args:
None
Returns:
The proportion of inflected verbs
"""
tot_num_inflected_verbs, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
tot_num_inflected_verbs += num_inflected_verbs(so.stanza_doc)
if tot_num_verbs != 0:
return tot_num_inflected_verbs / tot_num_verbs
return NOT_AVAILABLE
class Prop_Auxiliary_Verbs(object):
"""Class to calculcate the proportion of auxiliary verbs
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculate the proportion of inflected verbs
Args:
None
Returns:
The proportion of auxiliary verbs
"""
tot_num_auxiliary_verbs, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_auxiliary_verbs += so.pos_tag_counter.get_pos_tag_count(AUXILIARY)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
if tot_num_verbs != 0:
return tot_num_auxiliary_verbs / tot_num_verbs
return NOT_AVAILABLE
class Prop_Gerund_Verbs(object):
"""Class to calculcate the proportion of gerund verbs
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of gerund verbs
Args:
None
Returns:
The proportion of gerund verbs
"""
tot_num_gerunds, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_gerunds += num_gerund_verbs(so.stanza_doc)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
if tot_num_verbs != 0:
return tot_num_gerunds / tot_num_verbs
return NOT_AVAILABLE
class Prop_Participles(object):
"""Class to calculcate the proportion of participle verbs
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the proportion of participle verbs
Args:
None
Returns:
The proportion of participle verbs
"""
tot_num_participle_verbs, tot_num_verbs = 0, 0
for so in self.sentence_objs:
tot_num_participle_verbs += num_participle_verbs(so.stanza_doc)
tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
if tot_num_verbs != 0:
return tot_num_participle_verbs / tot_num_verbs
return NOT_AVAILABLE
def morpho_syntactic_feature_processor(sentence_objs, feature, **kwArgs):
"""This method Returns the morpho syntactic features across all the sentences depending on the type of feature requested
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
Returns:
the feature value
"""
nr = globals()[feature.title()](sentence_objs)
return nr.handle()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/discourse_and_pragmatic_feature_aggregator.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/discourse_and_pragmatic_feature_aggregator.py | from blabla.sentence_processor.discourse_and_pragmatic_feature_engine import (
num_discourse_markers,
)
class Num_Discourse_Markers(object):
"""Class to calculate the total number of discourse markers
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the total number of discourse markers
Args:
None
Returns:
tot_num_discourse_markers (int): The total number of discourse markers
"""
tot_num_discourse_markers = 0
for so in self.sentence_objs:
tot_num_discourse_markers += num_discourse_markers(so.stanza_doc)
return tot_num_discourse_markers
class Discourse_Marker_Rate(object):
"""Class to calculate the number of discourse markers per sentence
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the number of discourse markers per sentence
Args:
None
Returns:
The number of discourse markers per sentence
"""
tot_num_discourse_markers = 0
for so in self.sentence_objs:
tot_num_discourse_markers += num_discourse_markers(so.stanza_doc)
return tot_num_discourse_markers / len(self.sentence_objs)
def discourse_and_pragmatic_feature_processor(sentence_objs, feature, **kwArgs):
"""Extract discourse and pragmatic features across all sentence objects
Args:
sentence_objs (list<Sentence>): a list of Sentence objects
feature (str): a string name for the requested feature
Returns:
the feature value
"""
nr = globals()[feature.title()](sentence_objs)
return nr.handle()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/__init__.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/__init__.py | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false | |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/text_features/helpers/blabla/blabla/sentence_aggregators/lexico_semantic_fearture_aggregator.py | features/text_features/helpers/blabla/blabla/sentence_aggregators/lexico_semantic_fearture_aggregator.py | from blabla.sentence_processor.lexico_semantic_feature_engine import num_demonstratives
from blabla.utils.global_params import *
from collections import Counter
import numpy as np
import math
import blabla.utils.settings as settings
from blabla.utils.global_params import *
class Adjective_Rate(object):
"""Class to calculate the adjective rate
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adjective rate
Args:
None
Returns:
The total number of adjectives to the total number of words
"""
tot_num_adjs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
tot_num_words += so.num_words()
return tot_num_adjs / tot_num_words
class Adposition_Rate(object):
"""Class to calculate the adposition rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun rate
Args:
None
Returns:
The total number of nouns to the total number of words
"""
tot_num_nouns, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(ADPOSITION)
tot_num_words += so.num_words()
return tot_num_nouns / tot_num_words
class Adverb_Rate(object):
"""Class to calculate the adverb rate
Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adverb rate
Args:
None
Returns:
The total number of adverbs to the total number of words
"""
tot_num_advs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(ADVERB)
tot_num_words += so.num_words()
return tot_num_advs / tot_num_words
class Auxiliary_Rate(object):
"""Class to calculate the auxiliary rate
Ref: https://www.cs.toronto.edu/~kfraser/Fraser15-JAD.pdf
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the adverb rate
Args:
None
Returns:
The total number of adverbs to the total number of words
"""
tot_num_advs, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_advs += so.pos_tag_counter.get_pos_tag_count(AUXILIARY)
tot_num_words += so.num_words()
return tot_num_advs / tot_num_words
class Conjunction_Rate(object):
"""Class to calculate the conjunctions rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(CONJUNCTION)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Determiner_Rate(object):
"""Class to calculate the determiner rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(DETERMINER)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Interjection_Rate(object):
"""Class to calculate the interjection rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the conjunctions rate
Args:
None
Returns:
The total number of conjunctions to the total number of words
"""
tot_num_cconj, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_cconj += so.pos_tag_counter.get_pos_tag_count(INTERJECTION)
tot_num_words += so.num_words()
return tot_num_cconj / tot_num_words
class Noun_Rate(object):
"""Class to calculate the noun rate
Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
"""
def __init__(self, sentence_objs):
"""The init method to initialize with an array of sentence objects
"""
self.sentence_objs = sentence_objs
def handle(self):
"""Method to calculcate the noun rate
Args:
None
Returns:
The total number of nouns to the total number of words
"""
tot_num_nouns, tot_num_words = 0, 0
for so in self.sentence_objs:
tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
tot_num_words += so.num_words()
return tot_num_nouns / tot_num_words
class Numeral_Rate(object):
    """Class to calculate the numeral rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the numeral rate

            Args:
                None

            Returns:
                The ratio of the total number of numerals to the total number of words
        """
        # fix: docs/variables previously said "conjunctions"; this class
        # counts numerals (logic unchanged)
        tot_num_num, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_num += so.pos_tag_counter.get_pos_tag_count(NUMERAL)
            tot_num_words += so.num_words()
        return tot_num_num / tot_num_words
class Particle_Rate(object):
    """Class to calculate the particle rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the particle rate

            Args:
                None

            Returns:
                The ratio of the total number of particles to the total number of words
        """
        # fix: docs/variables previously said "conjunctions"; this class
        # counts particles (logic unchanged)
        tot_num_part, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_part += so.pos_tag_counter.get_pos_tag_count(PARTICLE)
            tot_num_words += so.num_words()
        return tot_num_part / tot_num_words
class Pronoun_Rate(object):
    """Class to calculate the pronoun rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the ratio of pronouns to the total number of words.

            Args:
                None

            Returns:
                The total number of pronouns divided by the total number of words
        """
        pronoun_total = sum(
            so.pos_tag_counter.get_pos_tag_count(PRONOUN) for so in self.sentence_objs
        )
        word_total = sum(so.num_words() for so in self.sentence_objs)
        return pronoun_total / word_total
class Proper_Noun_Rate(object):
    """Class to calculate the proper noun rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the proper noun rate

            Args:
                None

            Returns:
                The ratio of the total number of proper nouns to the total number of words
        """
        # fix: docs/variables previously said "pronoun rate"; this class
        # counts proper nouns (logic unchanged)
        tot_num_propn, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_propn += so.pos_tag_counter.get_pos_tag_count(PROPER_NOUN)
            tot_num_words += so.num_words()
        return tot_num_propn / tot_num_words
class Punctuation_Rate(object):
    """Class to calculate the punctuation rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the punctuation rate

            Args:
                None

            Returns:
                The ratio of the total number of punctuation tokens to the total number of words
        """
        # fix: docs/variables previously said "pronoun rate"; this class
        # counts punctuation tokens (logic unchanged)
        tot_num_punct, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_punct += so.pos_tag_counter.get_pos_tag_count(PUNCTUATION)
            tot_num_words += so.num_words()
        return tot_num_punct / tot_num_words
class Subordinating_Conjunction_Rate(object):
    """Class to calculate the subordinating conjunction rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the subordinating conjunction rate

            Args:
                None

            Returns:
                The ratio of the total number of subordinating conjunctions to the total number of words
        """
        # fix: docs/variables previously said "pronoun rate"; this class
        # counts subordinating conjunctions (logic unchanged)
        tot_num_sconj, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_sconj += so.pos_tag_counter.get_pos_tag_count(SUBORDINATING_CONJUNCTION)
            tot_num_words += so.num_words()
        return tot_num_sconj / tot_num_words
class Symbol_Rate(object):
    """Class to calculate the symbol rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the symbol rate

            Args:
                None

            Returns:
                The ratio of the total number of symbols to the total number of words
        """
        # fix: docs/variables previously said "pronoun rate"; this class
        # counts symbols (logic unchanged)
        tot_num_sym, tot_num_words = 0, 0
        for so in self.sentence_objs:
            tot_num_sym += so.pos_tag_counter.get_pos_tag_count(SYMBOL)
            tot_num_words += so.num_words()
        return tot_num_sym / tot_num_words
class Verb_Rate(object):
    """Class to calculate the verb rate
        Ref: https://pubmed.ncbi.nlm.nih.gov/28321196/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the ratio of verbs to the total number of words.

            Args:
                None

            Returns:
                The total number of verbs divided by the total number of words
        """
        verb_total = sum(
            so.pos_tag_counter.get_pos_tag_count(VERB) for so in self.sentence_objs
        )
        word_total = sum(so.num_words() for so in self.sentence_objs)
        return verb_total / word_total
class Demonstrative_Rate(object):
    """Class to calculate the demonstratives rate
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the ratio of demonstratives to the total number of words.

            Args:
                None

            Returns:
                The total number of demonstratives divided by the total number of words
        """
        demo_total = sum(
            num_demonstratives(so.stanza_doc) for so in self.sentence_objs
        )
        word_total = sum(so.num_words() for so in self.sentence_objs)
        return demo_total / word_total
class Possessive_Rate(object):
    """Class to calculate the possessive rate
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3642700/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return (adjectives + pronouns) / total words.

            Args:
                None

            Returns:
                The combined count of adjectives and pronouns divided by the
                total number of words
        """
        adj_pron_total, word_total = 0, 0
        for sent in self.sentence_objs:
            counter = sent.pos_tag_counter
            adj_pron_total += counter.get_pos_tag_count(ADJECTIVE)
            adj_pron_total += counter.get_pos_tag_count(PRONOUN)
            word_total += sent.num_words()
        return adj_pron_total / word_total
class Noun_Verb_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the number of verbs
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return nouns / verbs, or NOT_AVAILABLE when there are no verbs.

            Args:
                None

            Returns:
                The total number of nouns divided by the total number of verbs
        """
        nouns = sum(
            so.pos_tag_counter.get_pos_tag_count(NOUN) for so in self.sentence_objs
        )
        verbs = sum(
            so.pos_tag_counter.get_pos_tag_count(VERB) for so in self.sentence_objs
        )
        if verbs == 0:
            return NOT_AVAILABLE
        return nouns / verbs
class Noun_Ratio(object):
    """Class to calculate the ratio of the number of nouns to the total number of nouns and verbs
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the noun ratio

            Args:
                None

            Returns:
                The total number of nouns divided by the total number of nouns
                and verbs, or NOT_AVAILABLE when both counts are zero
        """
        tot_num_nouns, tot_num_verbs = 0, 0
        for so in self.sentence_objs:
            tot_num_nouns += so.pos_tag_counter.get_pos_tag_count(NOUN)
            tot_num_verbs += so.pos_tag_counter.get_pos_tag_count(VERB)
        if (tot_num_nouns + tot_num_verbs) != 0:
            return tot_num_nouns / (tot_num_nouns + tot_num_verbs)
        # fix: previously fell off the end and implicitly returned None;
        # sibling ratio classes return NOT_AVAILABLE in this case
        return NOT_AVAILABLE
class Pronoun_Noun_Ratio(object):
    """Class to calculate the ratio of the number of pronouns to the total number of nouns
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return pronouns / nouns, or NOT_AVAILABLE when there are no nouns.

            Args:
                None

            Returns:
                The total number of pronouns divided by the total number of nouns
        """
        pronouns = sum(
            so.pos_tag_counter.get_pos_tag_count(PRONOUN) for so in self.sentence_objs
        )
        nouns = sum(
            so.pos_tag_counter.get_pos_tag_count(NOUN) for so in self.sentence_objs
        )
        if nouns == 0:
            return NOT_AVAILABLE
        return pronouns / nouns
class Total_Dependency_Distance(object):
    """Class to calculate the sum of dependency distances
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Sum |token id - head id| over every token of every sentence.

            Args:
                None

            Returns:
                the sum of dependency distances across all sentences
        """
        total = 0
        for sent in self.sentence_objs:
            tokens = sent.stanza_doc.to_dict()[0]
            distances = [abs(int(tok['id']) - tok['head']) for tok in tokens]
            total += np.sum(distances)
        return total
class Average_Dependency_Distance(object):
    """Class to calculate the mean per-sentence dependency distance
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Mean over sentences of the per-sentence total dependency distance.

            Args:
                None

            Returns:
                the average dependency distance, or NOT_AVAILABLE when there
                are no sentences
        """
        per_sentence = [
            sum(abs(int(tok['id']) - tok['head'])
                for tok in sent.stanza_doc.to_dict()[0])
            for sent in self.sentence_objs
        ]
        if not per_sentence:
            return NOT_AVAILABLE
        return np.mean(per_sentence)
class Total_Dependencies(object):
    """Class to calculate the number of unique syntactic dependencies
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Count the distinct dependency relation labels across all sentences.

            Args:
                None

            Returns:
                the total number of unique dependency relations
        """
        unique_relations = set()
        for sent in self.sentence_objs:
            tokens = sent.stanza_doc.to_dict()[0]
            unique_relations.update(tok['deprel'] for tok in tokens)
        return len(unique_relations)
class Average_Dependencies(object):
    """Class to calculate the average number of unique syntactic dependencies
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Mean over sentences of the per-sentence unique dependency count.

            Args:
                None

            Returns:
                the average number of unique dependency relations, or
                NOT_AVAILABLE when there are no sentences
        """
        counts = [
            len({tok['deprel'] for tok in sent.stanza_doc.to_dict()[0]})
            for sent in self.sentence_objs
        ]
        if not counts:
            return NOT_AVAILABLE
        return np.mean(counts)
class Closed_Class_Word_Rate(object):
    """Class to calculate the proportion of closed class words
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the closed-class-word proportion.

            Args:
                None

            Returns:
                (determiners + adpositions + pronouns + conjunctions) divided
                by the total number of words
        """
        closed_total, word_total = 0, 0
        for sent in self.sentence_objs:
            counter = sent.pos_tag_counter
            closed_total += (
                counter.get_pos_tag_count(DETERMINER)
                + counter.get_pos_tag_count(ADPOSITION)
                + counter.get_pos_tag_count(PRONOUN)
                + counter.get_pos_tag_count(CONJUNCTION)
            )
            word_total += sent.num_words()
        return closed_total / word_total
class Open_Class_Word_Rate(object):
    """Class to calculate the proportion of open class words
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the open-class-word proportion.

            Args:
                None

            Returns:
                (nouns + verbs + adjectives + adverbs) divided by the total
                number of words
        """
        open_total, word_total = 0, 0
        for sent in self.sentence_objs:
            counter = sent.pos_tag_counter
            open_total += (
                counter.get_pos_tag_count(NOUN)
                + counter.get_pos_tag_count(VERB)
                + counter.get_pos_tag_count(ADJECTIVE)
                + counter.get_pos_tag_count(ADVERB)
            )
            word_total += sent.num_words()
        return open_total / word_total
class Content_Density(object):
    """Class to calculate the content density of words
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """The init method to initialize with an array of sentence objects
        """
        self.sentence_objs = sentence_objs

    def handle(self):
        """Method to calculate the content density of words

            Args:
                None

            Returns:
                The ratio of the total number of open class words (nouns,
                verbs, adjectives, adverbs) to the total number of closed
                class words (determiners, adpositions, pronouns,
                conjunctions), or NOT_AVAILABLE when there are no closed
                class words
        """
        # fix: the original iterated self.sentence_objs twice with two
        # identical loops; one pass gathers both tallies
        open_class, closed_class = 0, 0
        for so in self.sentence_objs:
            ptc = so.pos_tag_counter
            open_class += (
                ptc.get_pos_tag_count(NOUN)
                + ptc.get_pos_tag_count(VERB)
                + ptc.get_pos_tag_count(ADJECTIVE)
                + ptc.get_pos_tag_count(ADVERB)
            )
            closed_class += (
                ptc.get_pos_tag_count(DETERMINER)
                + ptc.get_pos_tag_count(ADPOSITION)
                + ptc.get_pos_tag_count(PRONOUN)
                + ptc.get_pos_tag_count(CONJUNCTION)
            )
        if closed_class == 0:
            return NOT_AVAILABLE
        return open_class / closed_class
class Idea_Density(object):
    """Class to calculate the idea density of words
        Ref: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5337522/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the idea density.

            Args:
                None

            Returns:
                (verbs + adjectives + adverbs + adpositions + conjunctions)
                divided by the total number of words
        """
        proposition_total, word_total = 0, 0
        for sent in self.sentence_objs:
            counter = sent.pos_tag_counter
            proposition_total += (
                counter.get_pos_tag_count(VERB)
                + counter.get_pos_tag_count(ADJECTIVE)
                + counter.get_pos_tag_count(ADVERB)
                + counter.get_pos_tag_count(ADPOSITION)
                + counter.get_pos_tag_count(CONJUNCTION)
            )
            word_total += sent.num_words()
        return proposition_total / word_total
class Honore_Statistic(object):
    """Class to calculate the honore's statistic
        Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Compute Honore's statistic: 100*log(N) / (1 - V1/V).

            N = total tokens, V = distinct tokens, V1 = tokens used once.

            Args:
                None

            Returns:
                The honore's statistic, or NOT_AVAILABLE when it is undefined
                (V1 == V, V == 0, or N == 0)
        """
        tokens = []
        for sent in self.sentence_objs:
            tokens.extend(w.text for w in sent.stanza_doc.sentences[0].words)
        distinct = len(set(tokens))
        hapax = sum(1 for count in Counter(tokens).values() if count == 1)
        total = len(tokens)
        if hapax == distinct or distinct == 0 or total == 0:
            return NOT_AVAILABLE
        return (100 * math.log(total)) / (1 - hapax / distinct)
class Brunet_Index(object):
    """Class to calculate the brunet's statistic
        Ref: https://www.aclweb.org/anthology/W16-1902.pdf
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Compute Brunet's index: N ** (V ** -0.165).

            N = total tokens, V = distinct tokens.

            Args:
                None

            Returns:
                The brunet's statistic of the words
        """
        vocabulary = set()
        token_count = 0
        for sent in self.sentence_objs:
            texts = [w.text for w in sent.stanza_doc.sentences[0].words]
            token_count += len(texts)
            vocabulary.update(texts)
        return math.pow(token_count, math.pow(len(vocabulary), -0.165))
class Type_Token_Ratio(object):
    """Class to calculate the type-token ratio
        Ref: https://www.tandfonline.com/doi/abs/10.1080/02687038.2017.1303441
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return distinct lemma count divided by total token count.

            Args:
                None

            Returns:
                The ratio of the number of word types (unique lemmas) to the
                number of words
        """
        lemmas = []
        token_count = 0
        for sent in self.sentence_objs:
            words = sent.stanza_doc.sentences[0].words
            token_count += len(words)
            lemmas.extend(w.lemma for w in words)
        return len(set(lemmas)) / token_count
class Word_Length(object):
    """Class to calculate the mean word length
        Ref: https://pubmed.ncbi.nlm.nih.gov/26484921/
    """

    def __init__(self, sentence_objs):
        """Store the array of sentence objects to analyze."""
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return the mean character length of all words.

            Args:
                None

            Returns:
                The mean length of the words across all sentences
        """
        lengths = []
        for sent in self.sentence_objs:
            lengths.extend(len(w.text) for w in sent.stanza_doc.sentences[0].words)
        return np.mean(lengths)
def lexico_semantic_feature_processor(sentence_objs, feature, **kwArgs):
    """Dispatch to the lexico-semantic feature class named by `feature`.

        The feature name is title-cased (e.g. 'noun_rate' -> 'Noun_Rate') and
        looked up among this module's feature classes.

        Args:
            sentence_objs (list<Sentence>): a list of Sentence objects
            feature (str): a string name for the requested feature

        Returns:
            the feature value
    """
    feature_cls = globals()[feature.title()]
    return feature_cls(sentence_objs).handle()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/inception_features.py | features/image_features/inception_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['inception_features']
Read more about the inception model @ https://keras.io/api/applications/inceptionv3/
'''
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications import InceptionV3
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import numpy as np
def inception_featurize(file):
    """Featurize an image with InceptionV3 (ImageNet weights).

    Args:
        file (str): path to an image file

    Returns:
        features (np.ndarray): flattened prediction vector
        labels (list): one label string per feature
    """
    # bug fix: InceptionV3 requires inception_v3.preprocess_input ([-1, 1]
    # scaling); the module-level import is vgg16.preprocess_input (BGR mean
    # subtraction), which silently produces degraded features.
    from keras.applications.inception_v3 import preprocess_input as inception_preprocess
    # NOTE(review): the model is re-loaded on every call; hoist it to module
    # level if featurizing many files.
    model = InceptionV3(include_top=True, weights='imagenet')
    img = load_img(file, target_size=(299, 299))
    x = img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = inception_preprocess(x)
    features = model.predict(x)
    features = np.ndarray.flatten(features)
    labels = list()
    for i in range(len(features)):
        labels.append('inception_feature_%s' % (str(i + 1)))
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/image_features.py | features/image_features/image_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Featurizes folders of image files if default_image_features = ['image_features']
Note this uses OpenCV and the SIFT feature detector. SIFT was used here
as as scale-invariant feature selector, but note that this algorithm is patented,
which limits commercical use.
'''
from sklearn import preprocessing, svm, metrics
from textblob import TextBlob
from operator import itemgetter
import getpass, pickle, datetime, time
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
import cv2, os
def convert(file):
    """Convert a .jpeg/.jpg image to an RGB .png and delete the original.

    Args:
        file (str): path to the image file

    Returns:
        str: the path of the resulting .png file; the input path unchanged
        when the extension is neither .jpeg nor .jpg
    """
    if file.endswith('.jpeg'):
        stem = file[:-5]
    elif file.endswith('.jpg'):
        stem = file[:-4]
    else:
        # bug fix: any other extension previously fell through with
        # `filename` unbound and raised UnboundLocalError on return
        return file
    filename = stem + '.png'
    im = Image.open(file)
    rgb_im = im.convert('RGB')
    rgb_im.save(filename)
    os.remove(file)
    return filename
def haar_featurize(cur_dir, haar_dir, img):
    """Count detections from a fixed set of OpenCV Haar cascades.

    Args:
        cur_dir (str): directory to chdir back to when finished
        haar_dir (str): directory containing the haarcascade .xml files
        img (np.ndarray): BGR image as loaded by cv2.imread

    Returns:
        features (np.ndarray): one detection count per cascade
        labels (list): cascade name per feature, same order as features
    """
    # order matters: feature positions must stay aligned with these labels
    cascade_names = [
        'haarcascade_eye_tree_eyeglasses', 'haarcascade_eye',
        'haarcascade_frontalcatface_extended', 'haarcascade_frontalcatface',
        'haarcascade_frontalface_alt_tree', 'haarcascade_frontalface_alt',
        'haarcascade_frontalface_alt2', 'haarcascade_frontalface_default',
        'haarcascade_fullbody', 'haarcascade_lefteye_2splits',
        'haarcascade_licence_plate_rus_16stages', 'haarcascade_lowerbody',
        'haarcascade_profileface', 'haarcascade_righteye_2splits',
        'haarcascade_russian_plate_number', 'haarcascade_smile',
        'haarcascade_upperbody',
    ]
    os.chdir(haar_dir)
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # refactor: the original repeated the same three lines per cascade
        # 17 times; one data-driven loop produces identical counts
        counts = [
            len(cv2.CascadeClassifier(name + '.xml').detectMultiScale(gray, 1.3, 5))
            for name in cascade_names
        ]
    finally:
        # always restore the working directory, even if a cascade fails
        os.chdir(cur_dir)
    return np.array(counts), cascade_names
def image_featurize(cur_dir, haar_dir, file):
    """Extract basic, histogram, Haar-cascade and SIFT features from an image.

    Args:
        cur_dir (str): current working directory (restored by haar_featurize)
        haar_dir (str): directory containing the haarcascade .xml files
        file (str): path to a .png/.jpg/.jpeg image

    Returns:
        features (np.ndarray): concatenated feature vector
        labels (list): one label string per feature

    Raises:
        ValueError: if the file is not a supported image format (the file is
        deleted first, matching the original cleanup behavior)
    """
    labels = list()

    # only featurize .jpeg/.jpg/.png files (jpegs are converted to .png)
    if file.endswith('.jpeg') or file.endswith('.jpg'):
        filename = convert(file)
    else:
        filename = file

    if not filename.endswith('.png'):
        # bug fix: the original removed the file and then crashed with
        # UnboundLocalError on the return statement; fail loudly instead
        os.remove(file)
        raise ValueError('unsupported image format: %s' % file)

    # READ IMAGE
    ########################################################
    img = cv2.imread(filename, 1)

    # BASIC FEATURES (rows, columns, pixels)
    ########################################################
    # bug fix: cv2 images are shaped (rows, columns, channels); the original
    # read shape[1] as "rows" and shape[2] (the channel count, always 3)
    # as "columns"
    rows = img.shape[0]
    columns = img.shape[1]
    pixels = img.size
    features = np.array([rows, columns, pixels])
    labels = labels + ['rows', 'columns', 'pixels']

    # HISTOGRAM FEATURES (mean, std, min, max per BGR channel)
    ########################################################
    hist_features = []
    hist_labels = []
    for channel, color in enumerate(['blue', 'green', 'red']):
        hist = cv2.calcHist([img], [channel], None, [256], [0, 256])
        hist_features += [np.mean(hist), np.std(hist), np.amin(hist), np.amax(hist)]
        hist_labels += ['%s_mean' % color, '%s_std' % color,
                        '%s_min' % color, '%s_max' % color]
    features = np.append(features, np.array(hist_features))
    labels = labels + hist_labels

    # HAAR CASCADE FEATURES
    ########################################################
    haar_features, haar_labels = haar_featurize(cur_dir, haar_dir, img)
    features = np.append(features, haar_features)
    labels = labels + haar_labels

    # EDGE FEATURES: mean of the 128-d SIFT descriptors (scale invariant)
    ########################################################
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    (kps, des) = sift.detectAndCompute(gray, None)
    if des is None or len(des) == 0:
        # bug fix: images with no keypoints previously crashed on des[0];
        # SIFT descriptors are 128-dimensional
        edge_features = np.zeros(128)
    else:
        edge_features = np.mean(des, axis=0)
    edge_labels = list()
    for i in range(len(edge_features)):
        edge_labels.append('edge_feature_%s' % (str(i + 1)))
    features = np.append(features, edge_features)
    labels = labels + edge_labels

    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/vgg16_features.py | features/image_features/vgg16_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['vgg16_features']
Read more about the VGG16 model @ https://keras.io/api/applications/vgg/#vgg16-function
'''
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
from keras.applications.vgg16 import VGG16
import numpy as np
def vgg16_featurize(file):
# load model
model = VGG16(weights='imagenet', include_top=False)
img_path = file
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# print(features.shape)
features=np.ndarray.flatten(features)
# feature shape = (25088,)
labels=list()
for i in range(len(features)):
labels.append('vgg16_feature_%s'%(str(i+1)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/squeezenet_features.py | features/image_features/squeezenet_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['squeezenet_features']
The Squeezenet model reduces featurization time for large featurizations,
as it is a compressed model that uses less memory.
Read more about the SqueezeNet model @ https://github.com/rcmalli/keras-squeezenet
'''
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
def squeezenet_featurize(imagename, imagedir):
'''
This network model has AlexNet accuracy with small footprint (5.1 MB)
Pretrained models are converted from original Caffe network.
This may be useful for production-purposes if the accuracy is similar to other
types of featurizations.
See https://github.com/rcmalli/keras-squeezenet
'''
model = SqueezeNet()
img = image.load_img(imagedir+'/'+imagename, target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
features = preds[0]
labels=list()
for i in range(len(features)):
label='squeezenet_feature_%s'%(str(i))
labels.append(label)
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/vgg19_features.py | features/image_features/vgg19_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['vgg19_features']
Read more about the VGG19 model @ https://keras.io/api/applications/vgg/#vgg19-function
'''
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications import VGG19
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import numpy as np
def vgg19_featurize(file):
# load model
model = VGG19(include_top=True, weights='imagenet')
img_path = file
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# print(features.shape)
features=np.ndarray.flatten(features)
# feature shape = (25088,)
labels=list()
for i in range(len(features)):
labels.append('vgg19_feature_%s'%(str(i+1)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/tesseract_features.py | features/image_features/tesseract_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['tessearct_features']
Transcribes image files with OCR (pytesseract module) and then featurizes
these transcripts with nltk_features. Read more about nltk_features @
https://github.com/jim-schwoebel/allie/blob/master/features/text_features/nltk_features.py
'''
import os, sys
from PIL import Image
import pytesseract
def prev_dir(directory):
g=directory.split('/')
# print(g)
lastdir=g[len(g)-1]
i1=directory.find(lastdir)
directory=directory[0:i1]
return directory
directory=os.getcwd()
prev_dir=prev_dir(directory)
sys.path.append(prev_dir+'/text_features')
import nltk_features as nf
os.chdir(directory)
def transcribe_image(imgfile):
transcript=pytesseract.image_to_string(Image.open(imgfile))
return transcript
def tesseract_featurize(imgfile):
# can stitch across an entire length of video frames too
transcript=transcribe_image(imgfile)
features, labels = nf.nltk_featurize(transcript)
return transcript, features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/xception_features.py | features/image_features/xception_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['xception_features']
Read more about the Xception model @ https://keras.io/api/applications/xception/
'''
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications import Xception
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import numpy as np
def xception_featurize(file):
# load model
model = Xception(include_top=True, weights='imagenet')
img_path = file
img = load_img(img_path, target_size=(299, 299))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# print(features.shape)
features=np.ndarray.flatten(features)
# feature shape = (25088,)
labels=list()
for i in range(len(features)):
labels.append('xception_feature_%s'%(str(i+1)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/resnet_features.py | features/image_features/resnet_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Extracts image features if default_image_features = ['resnet_features']
Read more about the ResNet50 model @ https://keras.io/api/applications/resnet/#resnet50v2-function
'''
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications import ResNet50
from keras.applications.vgg16 import preprocess_input
from keras.applications.vgg16 import decode_predictions
import numpy as np
def resnet_featurize(file):
# load model
model = ResNet50(include_top=True, weights='imagenet')
img_path = file
img = load_img(img_path, target_size=(224, 224))
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# print(features.shape)
features=np.ndarray.flatten(features)
# feature shape = (25088,)
labels=list()
for i in range(len(features)):
labels.append('ResNet50_feature_%s'%(str(i+1)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/featurize.py | features/image_features/featurize.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
Featurize folders of images with the default_image_features.
Usage: python3 featurize.py [folder] [featuretype]
All featuretype options include:
["image_features", "inception_features", "resnet_features", "squeezenet_features",
"tesseract_features", "vgg16_features", "vgg19_features", "xception_features"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/features/image_features
'''
import helpers.audio_plot as ap
import numpy as np
import os, json, sys
from tqdm import tqdm
##################################################
## Helper functions. ##
##################################################
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def image_featurize(feature_set, imgfile, cur_dir, haar_dir):
if feature_set == 'image_features':
features, labels=imf.image_featurize(cur_dir, haar_dir, imgfile)
elif feature_set == 'vgg16_features':
features, labels=v16f.vgg16_featurize(imgfile)
elif feature_set == 'inception_features':
features, labels=incf.inception_featurize(imgfile)
elif feature_set == 'xception_features':
features, labels=xf.xception_featurize(imgfile)
elif feature_set == 'resnet_features':
features, labels=rf.resnet_featurize(imgfile)
elif feature_set == 'vgg19_features':
features, labels=v19f.vgg19_featurize(imgfile)
elif feature_set == 'tesseract_features':
transcript, features, labels = tf.tesseract_featurize(imgfile)
elif feature_set == 'squeezenet_features':
features, labels=sf.squeezenet_featurize(imgfile, cur_dir)
# make sure all the features do not have any infinity or NaN
features=np.nan_to_num(np.array(features))
features=features.tolist()
return features, labels
##################################################
## Main script ##
##################################################
# directory=sys.argv[1]
basedir=os.getcwd()
haar_dir=basedir+'/helpers/haarcascades'
foldername=sys.argv[1]
os.chdir(foldername)
cur_dir=os.getcwd()
listdir=os.listdir()
# settings directory
settingsdir=prev_dir(basedir)
sys.path.append(settingsdir)
from standard_array import make_features
settings=json.load(open(prev_dir(settingsdir)+'/settings.json'))
os.chdir(basedir)
image_transcribe=settings['transcribe_image']
default_image_transcriber=settings['default_image_transcriber']
try:
feature_sets=[sys.argv[2]]
except:
feature_sets=settings['default_image_features']
##################################################
## Only load relevant features ##
##################################################
if 'vgg16_features' in feature_sets:
import vgg16_features as v16f
if 'image_features' in feature_sets:
import image_features as imf
if 'inception_features' in feature_sets:
import inception_features as incf
if 'xception_features' in feature_sets:
import xception_features as xf
if 'resnet_features' in feature_sets:
import resnet_features as rf
if 'vgg19_features' in feature_sets:
import vgg19_features as v19f
if 'squeezenet_features' in feature_sets:
import squeezenet_features as sf
if image_transcribe == True or 'tesseract_features' in feature_sets:
import tesseract_features as tf
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
##################################################
## Main loop ##
##################################################
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
os.chdir(cur_dir)
if listdir[i][-4:] in ['.jpg', '.png']:
try:
imgfile=listdir[i]
sampletype='image'
# I think it's okay to assume audio less than a minute here...
if listdir[i][0:-4]+'.json' not in listdir:
# make new .JSON if it is not there with base array schema.
basearray=make_features(sampletype)
if image_transcribe==True:
for j in range(len(default_image_transcriber)):
transcript, features, labels = tf.tesseract_featurize(imgfile)
transcript_list=basearray['transcripts']
transcript_list['image'][default_image_transcriber[j]]=transcript
basearray['transcripts']=transcript_list
# featurize the image file with specified featurizers
for j in range(len(feature_sets)):
feature_set=feature_sets[j]
features, labels = image_featurize(feature_set, imgfile, cur_dir, haar_dir)
print(features)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
image_features=basearray['features']['image']
image_features[feature_set]=data
basearray['features']['image']=image_features
basearray['labels']=[labelname]
# write to .JSON
jsonfile=open(listdir[i][0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
elif listdir[i][0:-4]+'.json' in listdir:
# overwrite existing .JSON if it is there.
basearray=json.load(open(listdir[i][0:-4]+'.json'))
transcript_list=basearray['transcripts']
# only re-transcribe if necessary
if image_transcribe==True:
for j in range(len(default_image_transcriber)):
if default_image_transcriber[j] not in list(transcript_list['image']):
transcript, features, labels = tf.tesseract_featurize(imgfile)
transcript_list['image'][default_image_transcriber[j]]=transcript
basearray['transcripts']=transcript_list
# only re-featurize if necessary (checks if relevant feature embedding exists)
for j in range(len(feature_sets)):
# load feature set
feature_set=feature_sets[j]
# only add in if it is not in the image feature list array
if feature_set not in list(basearray['features']['image']):
features, labels = image_featurize(feature_set, imgfile, cur_dir, haar_dir)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
print(features)
basearray['features']['image'][feature_set]=data
# only add label if necessary
label_list=basearray['labels']
if labelname not in label_list:
label_list.append(labelname)
basearray['labels']=label_list
# overwrite .JSON file
jsonfile=open(listdir[i][0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
except:
print('error')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/image_features/helpers/audio_plot.py | features/image_features/helpers/audio_plot.py | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## AUDIO_PLOT.PY ##
================================================
A simple function to plot a spectrogram with librosa.
'''
import librosa, os
import matplotlib.pyplot as plt
import numpy as np
import librosa.display
from PIL import Image
# now begin plotting linear-frequency power spectrum
def plot_spectrogram(filename):
y, sr = librosa.load(filename)
plt.figure(figsize=(12, 8))
D = librosa.amplitude_to_db(librosa.stft(y), ref=np.max)
plt.subplot(4, 2, 1)
librosa.display.specshow(D, y_axis='linear')
plt.colorbar(format='%+2.0f dB')
plt.title('Linear-frequency power spectrogram')
# on logarithmic scale
plt.subplot(4, 2, 2)
librosa.display.specshow(D, y_axis='log')
plt.colorbar(format='%+2.0f dB')
plt.title('Log-frequency power spectrogram')
# Or use a CQT scale
CQT = librosa.amplitude_to_db(librosa.cqt(y, sr=sr), ref=np.max)
plt.subplot(4, 2, 3)
librosa.display.specshow(CQT, y_axis='cqt_note')
plt.colorbar(format='%+2.0f dB')
plt.title('Constant-Q power spectrogram (note)')
plt.subplot(4, 2, 4)
librosa.display.specshow(CQT, y_axis='cqt_hz')
plt.colorbar(format='%+2.0f dB')
plt.title('Constant-Q power spectrogram (Hz)')
# Draw a chromagram with pitch classes
C = librosa.feature.chroma_cqt(y=y, sr=sr)
plt.subplot(4, 2, 5)
librosa.display.specshow(C, y_axis='chroma')
plt.colorbar()
plt.title('Chromagram')
# Force a grayscale colormap (white -> black)
plt.subplot(4, 2, 6)
librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
plt.colorbar(format='%+2.0f dB')
plt.title('Linear power spectrogram (grayscale)')
# Draw time markers automatically
plt.subplot(4, 2, 7)
librosa.display.specshow(D, x_axis='time', y_axis='log')
plt.colorbar(format='%+2.0f dB')
plt.title('Log power spectrogram')
# Draw a tempogram with BPM markers
plt.subplot(4, 2, 8)
Tgram = librosa.feature.tempogram(y=y, sr=sr)
librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
plt.colorbar()
plt.title('Tempogram')
plt.tight_layout()
# image file save
imgfile=filename[0:-4]+'.png'
plt.savefig(imgfile)
img = Image.open(imgfile).convert('LA')
img.save(imgfile)
# os.system('open %s'%(imgfile))
return imgfile | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/csv_features/featurize_csv_regression.py | features/csv_features/featurize_csv_regression.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____ _____ _ _
/ __ \/ ___|| | | |
| / \/\ `--. | | | |
| | `--. \| | | |
| \__/\/\__/ /\ \_/ /
\____/\____/ \___/
Featurizes a master spreadsheet of files if default_csv_features = ['featurize_csv_regression']
This was inspired by the D3M schema by MIT Data lab. More info about this schema
can be found @ https://github.com/mitll/d3m-schema/blob/master/documentation/datasetSchema.md
'''
#########################################
## IMPORT STATEMENTS ##
#########################################
import pandas as pd
import os, json, uuid, shutil, time, random
from optparse import OptionParser
from sklearn import preprocessing
import pandas as pd
import numpy as np
#########################################
## HELPER FUNCTIONS ##
#########################################
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def prev_dir(directory):
'''
Get previous directory from a host directory.
'''
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def element_featurize(sampletype, default_features, filepaths, directory):
# make a temporary folder and copy all featurized files to it
folder='%s-features-'%(sampletype)+str(uuid.uuid1())
old_dir=directory
train_dir=basedir+'/train_dir'
directory=basedir+'/train_dir/'+folder
os.mkdir(basedir+'/train_dir/'+folder)
for i in range(len(filepaths)):
try:
shutil.copy(filepaths[i], directory+'/'+filepaths[i].split('/')[-1])
except:
pass
try:
shutil.copy(filepaths[i][0:-4]+'.json', directory+'/'+filepaths[i].split('/')[-1][0:-4]+'.json')
except:
# pass over json files if they exist to speed up featurizations
pass
# featurize the files in the folder
os.chdir(basedir+'/features/%s_features/'%(sampletype))
os.system('python3 featurize.py %s'%(basedir+'/train_dir/'+folder))
# get lists for outputting later
features=list()
labels=list()
# go through all featurized .JSON files and read them and establish a feature array
for i in range(len(filepaths)):
try:
jsonfile=filepaths[i].split('/')[-1][0:-4]+'.json'
g=json.load(open(directory+'/'+jsonfile))
feature=[]
label=[]
for j in range(len(default_features)):
array_=g['features'][sampletype][default_features[j]]
feature=feature+array_['features']
label=label+array_['labels']
features.append(feature)
labels.append(label)
except:
features.append(np.zeros(len(features[0])))
labels.append(random.choice(labels))
# remove the temporary directory
os.chdir(train_dir)
shutil.rmtree(folder)
directory=old_dir
os.chdir(directory)
return features, labels
def text_featurize_columns(filepaths, directory, settings, basedir):
'''
Get text features using default_text featurizer
'''
default_features=settings['default_text_features']
print(default_features)
features, labels = element_featurize('text', default_features, filepaths, directory)
return features, labels
def audio_featurize_columns(filepaths, directory, settings, basedir):
'''
get audio features using default_audio_featurizer
'''
features=list()
labels=list()
default_features=settings['default_audio_features']
features, labels = element_featurize('audio', default_features, filepaths, directory)
return features, labels
def image_featurize_columns(filepaths, directory, settings, basedir):
'''
get image features using default_image_featuerizer
'''
features=list()
labels=list()
default_features=settings['default_image_features']
features, labels = element_featurize('image', default_features, filepaths, directory)
return features, labels
def video_featurize_columns(filepaths, directory, settings, basedir):
'''
get video features using default_video_featurizer
'''
features=list()
labels=list()
default_features=settings['default_video_features']
features, labels = element_featurize('video', default_features, filepaths, directory)
return features, labels
# def csv_featurize_columns(filepaths, directory, settings, basedir):
# '''
# get csv features using default_csv_featurizer - likely this script.
# '''
# features=list()
# labels=list()
# default_features=settings['default_csv_features']
# features, labels = element_featurize('csv', default_features, filepaths, directory)
# return features, labels
def category_featurize_columns(columns, directory, settings, basedir):
'''
Create numerical representations of categorical features.
'''
default_features=['categorical_features']
print(default_features)
le = preprocessing.LabelEncoder()
le.fit(columns)
uniquevals=set(columns)
features_ = list(le.transform(columns))
labels_ = list(columns)
# feature and labels must be arrays of arrays
features=list()
labels=list()
for i in range(len(features_)):
features.append([features_[i]])
labels.append([labels_[i]])
return features, labels
def typedtext_featurize_columns(columns, directory, settings, basedir):
'''
Get text features from typed text responses
'''
features=list()
labels=list()
default_features=settings['default_text_features']
filepaths=list()
curdir=os.getcwd()
folder=str('temp-'+str(uuid.uuid1()))
os.mkdir(folder)
os.chdir(folder)
for i in range(len(columns)):
file=str(uuid.uuid1())+'.txt'
textfile=open(file,'w')
textfile.write(columns[i])
textfile.close()
filepaths.append(os.getcwd()+'/'+file)
os.chdir(curdir)
features, labels = element_featurize('text', default_features, filepaths, directory)
shutil.rmtree(folder)
return features, labels
def numerical_featurize_columns(columns, directory, settings, basedir):
'''
Get numerical features from responses
'''
features=list()
labels=list()
for i in range(len(columns)):
features.append([columns[i]])
labels.append(['numerical_'+str(i)])
return features, labels
# create all featurizers in a master class structure
class ColumnSample:
# base directory for moving around folders
basedir=prev_dir(os.getcwd())
def __init__(self, sampletype, column, directory, settings):
self.sampletype = sampletype
self.column = column
self.directory = directory
self.settings=settings
self.basedir = basedir
def featurize(self):
# if an audio file in a column, need to loop through
print(self.sampletype)
if self.sampletype == 'audio':
features_, labels = audio_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'text':
features_, labels = text_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'image':
features_, labels = image_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'video':
features_, labels = video_featurize_columns(self.column, self.directory, self.settings, self.basedir)
# elif self.sampletype == 'csv':
# features_, labels = csv_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'categorical':
features_, labels = category_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'typedtext':
features_, labels = typedtext_featurize_columns(self.column, self.directory, self.settings, self.basedir)
elif self.sampletype == 'numerical':
features_, labels = numerical_featurize_columns(self.column, self.directory, self.settings, self.basedir)
self.features = features_
self.labels = labels
def csv_featurize(csvfile, outfile, settings, target):
# look for each column header and classify it accordingly
if csvfile.endswith('.csv'):
data=pd.read_csv(csvfile)
columns=list(data)
coltypes=list()
datatype=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
if coltype == 'numerical':
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
coltype='numerical'
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# if less than 5 unique answers then we can interpret this as text input
coltype='typedtext'
# now append all the columsn together
coltypes.append(coltype)
# datatypes found
datatypes=list(set(coltypes))
print('Data types found: %s'%(str(datatypes)))
headers = dict(zip(columns, coltypes))
# now go through and featurize according to the headers
# featurize 'audio'
curdir=os.getcwd()
new_column_labels=list()
new_column_values=list()
lengths=list()
for i in range(len(columns)):
# get column types and featurize each sample
sample=ColumnSample(coltypes[i], data[columns[i]], curdir, settings)
# get back features and labels
sample.featurize()
features=sample.features
labels=sample.labels
lengths.append(len(features))
new_column_values.append(features)
new_column_labels.append(labels)
old_column_labels=columns
old_column_values=data
print('-------------')
labels=[]
features=[]
for i in range(len(old_column_labels)):
column=old_column_labels[i]
for j in range(len(new_column_labels[0])):
# print(column)
for k in range(len(new_column_labels[i][j])):
# print(new_column_labels[i][j][k])
newcolumn=new_column_labels[i][j][k]
if newcolumn not in columns:
if column != target:
# print(str(column)+'_'+str(new_column_labels[i][j][k]))
labels.append(str(column)+'_'+str(new_column_labels[i][j][k]))
else:
# print(str(column)+'_'+str(new_column_labels[i][j][k]))
labels.append(str(column))
else:
# print(str(column))
labels.append(str(column))
features_=list()
for l in range(len(new_column_labels[i])):
features_.append(new_column_values[i][l][k])
# print(features_)
features.append(features_)
break
newdict=dict(zip(labels, features))
# print(newdict)
df = pd.DataFrame(newdict)
df.to_csv(outfile,index=False)
return df, outfile
else:
print('file cannot be read, as it does not end with .CSV extension!')
headers=''
return headers
#########################################
## MAIN SCRIPT ##
#########################################
# get all the options from the terminal
parser = OptionParser()
parser.add_option("-i", "--input", dest="input",
help="the .CSV filename input to process", metavar="INPUT")
parser.add_option("-o", "--output", dest="output",
help="the .CSV filename output to process", metavar="OUTPUT")
parser.add_option("-t", "--target", dest="target",
help="the target class (e.g. age) - will not rename this column.", metavar="TARGET")
(options, args) = parser.parse_args()
curdir=os.getcwd()
basedir=prev_dir(prev_dir(os.getcwd()))
os.chdir(basedir)
settings=json.load(open('settings.json'))
os.chdir(curdir)
if options.output == None:
filename=str(uuid.uuid1())+'.csv'
df, filename=csv_featurize(options.input, filename, settings, options.target)
else:
df, filename=csv_featurize(options.input, options.output, settings, options.target)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/csv_features/featurize.py | features/csv_features/featurize.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
_____ _____ _ _
/ __ \/ ___|| | | |
| / \/\ `--. | | | |
| | `--. \| | | |
| \__/\/\__/ /\ \_/ /
\____/\____/ \___/
Note that this script is not used for Allie version 1.0 and will be
updated in future releases.
Usage: python3 featurize.py [folder] [featuretype]
All featuretype options include:
["csv_features_regression"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/features/csv_features
'''
import os, json, wget, sys
import os, wget, zipfile
import shutil
import numpy as np
from tqdm import tqdm
##################################################
## Helper functions. ##
##################################################
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def transcribe_csv(csv_file, csv_transcriber):
if csv_transcriber=='raw text':
transcript=open(csv_file).read()
else:
transcript=''
return transcript
def csv_featurize(features_dir, feature_set, csvfile, cur_dir):
if feature_set == 'csv_features_regression':
os.chdir(features_dir)
if len(csvfile.split('featurized')) == 2 or len(csvfile.split('predictions'))==2:
pass
else:
os.system('python3 featurize_csv_regression.py --input %s --output %s --target %s'%(cur_dir+'/'+csvfile, cur_dir+'/'+'featurized_'+csvfile, 'target'))
else:
print('-----------------------')
print('!! error !!')
print('-----------------------')
print('Feature set %s does not exist. Please reformat the desired featurizer properly in the settings.json.'%(feature_set))
print('Note that the featurizers are named accordingly with the Python scripts. csv_features.py --> csv_features in settings.json)')
print('-----------------------')
##################################################
## Main script ##
##################################################
basedir=os.getcwd()
help_dir=basedir+'/helpers'
prevdir=prev_dir(basedir)
features_dir=basedir
sys.path.append(prevdir)
from standard_array import make_features
foldername=sys.argv[1]
os.chdir(foldername)
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
listdir=os.listdir()
cur_dir=os.getcwd()
# settings
g=json.load(open(prev_dir(prevdir)+'/settings.json'))
csv_transcribe=g['transcribe_csv']
default_csv_transcriber=g['default_csv_transcriber']
try:
# assume 1 type of feature_set
feature_sets=[sys.argv[2]]
except:
# if none provided in command line, then load deafult features
feature_sets=g['default_csv_features']
if 'csv_features_regression' in feature_sets:
pass
###################################################
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
# make audio file into spectrogram and analyze those images if audio file
if listdir[i][-4:] in ['.csv']:
try:
csv_file=listdir[i]
sampletype='csv'
# I think it's okay to assume audio less than a minute here...
if 'featurized_'+listdir[i] not in listdir:
# featurize the csv file
for j in range(len(feature_sets)):
feature_set=feature_sets[j]
csv_featurize(features_dir, feature_set, csv_file, cur_dir)
elif 'featurized_'+listdir[i] in listdir:
pass
except:
print('error')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/csv_features/helpers/csv_features.py | features/csv_features/helpers/csv_features.py | import pandas as pd
import nltk, json, os
from nltk import word_tokenize
import numpy as np
from textblob import TextBlob
import itertools
def nltk_featurize(transcript):
#alphabetical features
a=transcript.count('a')
b=transcript.count('b')
c=transcript.count('c')
d=transcript.count('d')
e=transcript.count('e')
f=transcript.count('f')
g_=transcript.count('g')
h=transcript.count('h')
i=transcript.count('i')
j=transcript.count('j')
k=transcript.count('k')
l=transcript.count('l')
m=transcript.count('m')
n=transcript.count('n')
o=transcript.count('o')
p=transcript.count('p')
q=transcript.count('q')
r=transcript.count('r')
s=transcript.count('s')
t=transcript.count('t')
u=transcript.count('u')
v=transcript.count('v')
w=transcript.count('w')
x=transcript.count('x')
y=transcript.count('y')
z=transcript.count('z')
atsymbol=transcript.count('@')
space=transcript.count(' ')
#numerical features and capital letters
num1=transcript.count('0')+transcript.count('1')+transcript.count('2')+transcript.count('3')+transcript.count('4')+transcript.count('5')+transcript.count('6')+transcript.count('7')+transcript.count('8')+transcript.count('9')
num2=transcript.count('zero')+transcript.count('one')+transcript.count('two')+transcript.count('three')+transcript.count('four')+transcript.count('five')+transcript.count('six')+transcript.count('seven')+transcript.count('eight')+transcript.count('nine')+transcript.count('ten')
number=num1+num2
capletter=sum(1 for c in transcript if c.isupper())
#part of speech
text=word_tokenize(transcript)
g=nltk.pos_tag(transcript)
cc=0
cd=0
dt=0
ex=0
in_=0
jj=0
jjr=0
jjs=0
ls=0
md=0
nn=0
nnp=0
nns=0
pdt=0
pos=0
prp=0
prp2=0
rb=0
rbr=0
rbs=0
rp=0
to=0
uh=0
vb=0
vbd=0
vbg=0
vbn=0
vbp=0
vbp=0
vbz=0
wdt=0
wp=0
wrb=0
for i in range(len(g)):
if g[i][1] == 'CC':
cc=cc+1
elif g[i][1] == 'CD':
cd=cd+1
elif g[i][1] == 'DT':
dt=dt+1
elif g[i][1] == 'EX':
ex=ex+1
elif g[i][1] == 'IN':
in_=in_+1
elif g[i][1] == 'JJ':
jj=jj+1
elif g[i][1] == 'JJR':
jjr=jjr+1
elif g[i][1] == 'JJS':
jjs=jjs+1
elif g[i][1] == 'LS':
ls=ls+1
elif g[i][1] == 'MD':
md=md+1
elif g[i][1] == 'NN':
nn=nn+1
elif g[i][1] == 'NNP':
nnp=nnp+1
elif g[i][1] == 'NNS':
nns=nns+1
elif g[i][1] == 'PDT':
pdt=pdt+1
elif g[i][1] == 'POS':
pos=pos+1
elif g[i][1] == 'PRP':
prp=prp+1
elif g[i][1] == 'PRP$':
prp2=prp2+1
elif g[i][1] == 'RB':
rb=rb+1
elif g[i][1] == 'RBR':
rbr=rbr+1
elif g[i][1] == 'RBS':
rbs=rbs+1
elif g[i][1] == 'RP':
rp=rp+1
elif g[i][1] == 'TO':
to=to+1
elif g[i][1] == 'UH':
uh=uh+1
elif g[i][1] == 'VB':
vb=vb+1
elif g[i][1] == 'VBD':
vbd=vbd+1
elif g[i][1] == 'VBG':
vbg=vbg+1
elif g[i][1] == 'VBN':
vbn=vbn+1
elif g[i][1] == 'VBP':
vbp=vbp+1
elif g[i][1] == 'VBZ':
vbz=vbz+1
elif g[i][1] == 'WDT':
wdt=wdt+1
elif g[i][1] == 'WP':
wp=wp+1
elif g[i][1] == 'WRB':
wrb=wrb+1
#sentiment
tblob=TextBlob(transcript)
polarity=float(tblob.sentiment[0])
subjectivity=float(tblob.sentiment[1])
#word repeats
words=transcript.split()
newlist=transcript.split()
repeat=0
for i in range(len(words)):
newlist.remove(words[i])
if words[i] in newlist:
repeat=repeat+1
features=np.array([a,b,c,d,
e,f,g_,h,
i,j,k,l,
m,n,o,p,
q,r,s,t,
u,v,w,x,
y,z,atsymbol,space,number,
capletter,cc,cd,dt,
ex,in_,jj,jjr,
jjs,ls,md,nn,
nnp,nns,pdt,pos,
prp,prp2,rbr,rbs,
rp,to,uh,vb,
vbd,vbg,vbn,vbp,
vbz,wdt,wp,wrb,
polarity,subjectivity,repeat])
labels=['a', 'b', 'c', 'd',
'e','f','g','h',
'i', 'j', 'k', 'l',
'm','n','o', 'p',
'q','r','s','t',
'u','v','w','x',
'y','z','atsymbol','space', 'numbers',
'capletters','cc','cd','dt',
'ex','in','jj','jjr',
'jjs','ls','md','nn',
'nnp','nns','pdt','pos',
'prp','prp2','rbr','rbs',
'rp','to','uh','vb',
'vbd','vbg','vbn','vbp',
'vbz', 'wdt', 'wp','wrb',
'polarity', 'subjectivity','repeat']
return features, labels
def get_categories(sample_list):
tlist=list()
for i in range(len(sample_list)):
if sample_list[i] not in tlist:
tlist.append(sample_list[i])
# tdict=list()
# for i in range(len(tlist)):
# tdict[tlist[i]]=tlist[i]
return tlist
def csv_featurize(csv_file, cur_dir):
'''
Take in a .CSV file and output
numerical and categorical features
for analysis.
'''
os.chdir(cur_dir)
g=pd.read_csv(csv_file)
labels=list(g)
g_=list()
# only include features that are fully here with no missing values.
for i in range(len(g)):
entry=list(g.iloc[i,:])
skip=False
for j in range(len(entry)):
if str(entry[j]) == 'nan':
skip=True
if skip == True:
pass
else:
g_.append(entry)
print(len(g_))
# now we need to classify each as categorical or numerical data
output=g_[0]
types=list()
# determine uniqueness of each column (if <10, treat as categorical data, otherwise numeric data)
masterlist=list()
not_unique=list()
unique=list()
for i in range(len(g_)):
for j in range(len(g_[i])):
if g_[i][j] in masterlist:
not_unique.append(labels[j])
else:
masterlist.append(g_[i][j])
unique.append(labels[j])
# now figure out uniqueness level of each label
labeltypes=list()
for i in range(len(labels)):
if not_unique.count(labels[i]) >= unique.count(labels[i]):
# categorical
labeltype='categorical'
else:
labeltype='numerical'
labeltypes.append(labeltype)
#print(labels)
#print(labeltypes)
# Now we need to convert the .CSV file to .JSON labels
feature_list = list()
label_list = list()
for i in range(len(g_)):
# calculate features for entire CSV
features=np.array([])
labels_=list()
for j in range(len(g_[i])):
# if it's categorical, we need to create numbers for the categories and output the numbers here with a mapping
if labeltypes[j] == 'categorical':
tlist =get_categories(list(g.iloc[:,j]))
print(g_[i][j])
print(tlist)
features=np.append(features,np.array(tlist.index(g_[i][j])))
labels_=labels_+[labeltypes[j]+'_'+str(i)+'_'+str(j)]
# if it's a text string | numerical, we need to characterize the string with features, added NLTK feautrize here.
elif labeltypes[j] == 'numerical':
try:
# try to make into an integer or float if fail them assume string
value=float(g_[i][j])
tlabel=labels[j]
labels_=labels_+[tlabel+'_float']
except:
tfeatures, tlabels=nltk_featurize(labeltypes[j])
features=np.append(features, np.array(tfeatures))
tlabels2=list()
for k in range(len(tlabels)):
tlabels2.append(labels[j].replace(' ','_')+'_'+tlabels[k]+'_'+str(i)+'_'+str(j))
labels_=labels_+tlabels2
# feature array per sample (output as .JSON)
feature_list.append(features.tolist())
label_list.append(labels_)
# all CSV files must be same length for the ML models to work...
features=np.array(feature_list).flatten()
labels=list(itertools.chain(*label_list))
return features, labels
#feature_list, label_list = csv_featurize('test.csv', os.getcwd()+'/test') | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/text_cleaning/clean_summary.py | cleaning/text_cleaning/clean_summary.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____ _
|_ _| | |
| | _____ _| |_
| |/ _ \ \/ / __|
| | __/> <| |_
\_/\___/_/\_\\__|
This script takes a folder of .TXT files and extracts a 100 word summary from
each and replaces the original text file with the summary. This is useful if you
have a lot of very long text files that you want to analyze for machine learning
purposes (e.g. books).
This is enabled if the default_text_cleaners=['clean_summary']
'''
import os, uuid, sys
try:
import textrank
except:
os.system('pip3 install git+git://github.com/davidadamojr/TextRank.git')
def clean_summary(textfile):
summary='summary_'+textfile
os.system('textrank extract-summary %s > %s'%(textfile, summary))
summary=open(summary).read()
os.remove(textfile)
return [summary] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/text_cleaning/clean.py | cleaning/text_cleaning/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____ _
|_ _| | |
| | _____ _| |_
| |/ _ \ \/ / __|
| | __/> <| |_
\_/\___/_/\_\\__|
This section of Allie's API cleans folders of text files
using the default_text_cleaners.
Usage: python3 clean.py [folder] [cleantype]
All cleantype options include:
["clean_summary", "clean_textacy"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/cleaning/text_cleaning
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random, uuid
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
################################################
## Helper functions ##
################################################
def text_clean(cleaning_set, textfile, basedir):
# long conditional on all the types of features that can happen and featurizes accordingly.
if cleaning_set == 'clean_summary':
clean_summary.clean_summary(textfile)
elif cleaning_set == 'clean_textacy':
clean_textacy.clean_textacy(textfile)
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
text_transcribe=settings['transcribe_text']
default_text_transcribers=settings['default_text_transcriber']
try:
# assume 1 type of feature_set
cleaning_sets=[sys.argv[2]]
except:
# if none provided in command line, then load deafult features
cleaning_sets=settings['default_text_cleaners']
################################################
## Import According to settings ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'clean_summary' in cleaning_sets:
import clean_summary
if 'clean_textacy' in cleaning_sets:
import clean_textacy
################################################
## Get featurization folder ##
################################################
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
################################################
## REMOVE JSON AND DUPLICATES ##
################################################
deleted_files=list()
# rename files appropriately
for i in range(len(listdir)):
os.rename(listdir[i],listdir[i].replace(' ',''))
# remove duplicates / json files
for i in tqdm(range(len(listdir)), desc=labelname):
file=listdir[i]
listdir2=os.listdir()
#now sub-loop through all files in directory and remove duplicates
for j in range(len(listdir2)):
try:
if listdir2[j]==file:
pass
elif listdir2[j]=='.DS_Store':
pass
else:
if filecmp.cmp(file, listdir2[j])==True:
print('removing duplicate: %s ____ %s'%(file,listdir2[j]))
deleted_files.append(listdir2[j])
os.remove(listdir2[j])
else:
pass
except:
pass
print('deleted the files below')
print(deleted_files)
listdir=os.listdir()
for i in tqdm(range(len(listdir))):
# remove .JSON files
if listdir[i].endswith('.json'):
os.remove(listdir[i])
# now rename files with UUIDs
listdir=os.listdir()
for i in range(len(listdir)):
file=listdir[i]
os.rename(file, str(uuid.uuid4())+file[-4:])
################################################
## NOW CLEAN!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
if listdir[i][-4:] in ['.txt']:
filename=[listdir[i]]
for j in range(len(cleaning_sets)):
for k in range(len(filename)):
cleaning_set=cleaning_sets[j]
filename=text_clean(cleaning_set, filename[k], basedir) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/text_cleaning/clean_textacy.py | cleaning/text_cleaning/clean_textacy.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____ _
|_ _| | |
| | _____ _| |_
| |/ _ \ \/ / __|
| | __/> <| |_
\_/\___/_/\_\\__|
This script cleans folders of .TXT files with things like
removing whitespace, normalizing hyphenized words, and many
other things using the textacy module: https://pypi.org/project/textacy/
This is enabled if the default_text_cleaners=['clean_textacy']
'''
import os
import textacy.preprocessing as preprocessing
def clean_textacy(textfile):
text=open(textfile).read()
text=preprocessing.normalize_whitespace(text)
text=preprocessing.normalize.normalize_hyphenated_words(text)
text=preprocessing.normalize.normalize_quotation_marks(text)
text=preprocessing.normalize.normalize_unicode(text)
text=preprocessing.remove.remove_accents(text)
# text=preprocessing.remove.remove_punctuation(text)
text=preprocessing.replace.replace_currency_symbols(text)
text=preprocessing.replace.replace_emails(text)
text=preprocessing.replace.replace_hashtags(text)
# text=preprocessing.replace.replace_numbers(text)
text=preprocessing.replace.replace_phone_numbers(text)
text=preprocessing.replace.replace_urls(text)
text=preprocessing.replace.replace_user_handles(text)
print(text)
# now replace the original doc with cleaned version
newfile='cleaned_'+textfile
textfile2=open(newfile,'w')
textfile2.write(text)
textfile2.close()
os.remove(textfile)
return [newfile] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/csv_cleaning/clean_csv.py | cleaning/csv_cleaning/clean_csv.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____ _____ _ _
/ __ \/ ___|| | | |
| / \/\ `--. | | | |
| | `--. \| | | |
| \__/\/\__/ /\ \_/ /
\____/\____/ \___/
This section of Allie's API cleans folders of csv files
using the default_csv_cleaners.
'''
import os
import pandas as pd
try:
import datacleaner
except:
os.system('pip3 install datacleaner==0.1.5')
import datacleaner
def clean_csv(csvfile, basedir):
'''
https://github.com/rhiever/datacleaner
'''
input_dataframe=pd.read_csv(csvfile)
newframe=datacleaner.autoclean(input_dataframe, drop_nans=False, copy=False, ignore_update_check=False)
newfile='clean_'+csvfile
newframe.to_csv(newfile, index=False)
return [newfile]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/csv_cleaning/clean.py | cleaning/csv_cleaning/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____ _____ _ _
/ __ \/ ___|| | | |
| / \/\ `--. | | | |
| | `--. \| | | |
| \__/\/\__/ /\ \_/ /
\____/\____/ \___/
This is Allie's Cleaning API for CSV Files.
Usage: python3 clean.py [folder] [cleantype]
All cleantype options include:
["clean_csv"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/cleaning/csv_cleaning
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
################################################
## Helper functions ##
################################################
def csv_clean(cleaning_set, csvfile, basedir):
# long conditional on all the types of features that can happen and featurizes accordingly.
if cleaning_set == 'clean_csv':
clean_csv.clean_csv(csvfile, basedir)
################################################
##             Load main settings             ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
# settings.json lives two directories above this script's folder
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
# transcription settings are loaded here but only consumed by downstream cleaners
csv_transcribe=settings['transcribe_csv']
default_csv_transcribers=settings['default_csv_transcriber']
try:
	# a single cleaner name may be passed as the second CLI argument
	cleaning_sets=[sys.argv[2]]
except:
	# if none provided on the command line, fall back to the defaults in settings.json
	cleaning_sets=settings['default_csv_cleaners']
################################################
##         Import according to settings       ##
################################################
# only load the relevant cleaning modules to save memory
if 'clean_csv' in cleaning_sets:
	import clean_csv
################################################
##          Get featurization folder          ##
################################################
# folder of CSV files to clean is the first CLI argument
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name (handle a possible trailing slash)
labelname=foldername.split('/')
if labelname[-1]=='':
	labelname=labelname[-2]
else:
	labelname=labelname[-1]
################################################
##         REMOVE JSON AND DUPLICATES         ##
################################################
# filecmp was never imported, so every filecmp.cmp() call below raised a
# NameError that the bare except swallowed — duplicate removal silently
# did nothing. Import it here so the sweep actually runs.
import filecmp

deleted_files=list()
# rename files appropriately (strip spaces to avoid shell-quoting problems)
for i in range(len(listdir)):
	os.rename(listdir[i],listdir[i].replace(' ',''))
# refresh the listing so it reflects the space-stripped names; the stale
# list referenced filenames that no longer exist after the renames.
listdir=os.listdir()
# remove duplicates / json files
for i in tqdm(range(len(listdir)), desc=labelname):
	file=listdir[i]
	listdir2=os.listdir()
	# now sub-loop through all files in directory and remove byte-identical duplicates
	for j in range(len(listdir2)):
		try:
			if listdir2[j]==file:
				pass
			elif listdir2[j]=='.DS_Store':
				pass
			else:
				if filecmp.cmp(file, listdir2[j])==True:
					print('removing duplicate: %s ____ %s'%(file,listdir2[j]))
					deleted_files.append(listdir2[j])
					os.remove(listdir2[j])
		except:
			# the file may already have been deleted earlier in the sweep
			pass
print('deleted the files below')
print(deleted_files)
listdir=os.listdir()
for i in tqdm(range(len(listdir))):
	# remove .JSON files (featurization artifacts, not data)
	if listdir[i].endswith('.json'):
		os.remove(listdir[i])
################################################
##                 NOW CLEAN!!                ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# run every requested cleaner over every .csv file in the folder
for i in tqdm(range(len(listdir)), desc=labelname):
	if listdir[i][-4:] in ['.csv']:
		filename=[listdir[i]]
		for j in range(len(cleaning_sets)):
			for k in range(len(filename)):
				cleaning_set=cleaning_sets[j]
				# NOTE(review): this relies on csv_clean returning the resulting
				# filename list (the audio pipeline's audio_clean does); verify,
				# otherwise filename becomes None and the next pass crashes.
				filename=csv_clean(cleaning_set, filename[k], basedir)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_normalizevolume.py | cleaning/audio_cleaning/clean_normalizevolume.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script normalizes the volume using peak normalization and ffmpeg-normalize
for all audio files in a folder. Note this method could be bad in high-noise environment,
as noise will be amplified if the noise has high volume.
This cleaning script is enabled if default_audio_cleaners=['clean_normalizevolume']
'''
import sys, os, ffmpeg_normalize, uuid
def clean_normalizevolume(audiofile):
	"""Peak-normalize `audiofile` in place via the ffmpeg-normalize CLI.

	Writes the normalized audio to a temporary UUID-named .wav, then swaps
	it in over the original path. Returns the (unchanged) filename in a
	one-element list, matching the other cleaners' contract.
	"""
	normalized = str(uuid.uuid4()) + '.wav'
	# -nt peak -t 0: peak normalization to 0 dBFS
	os.system('ffmpeg-normalize %s -nt peak -t 0 -o %s'%(audiofile, normalized))
	os.remove(audiofile)
	os.rename(normalized, audiofile)
	return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_rename.py | cleaning/audio_cleaning/clean_rename.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script renames all audio files in a given folder with a unique idenfifier (UUID4).
This is useful to avoid naming conflicts with audio files with spaces or audio files that may have the
same name across folders of audio files.
This cleaning script is enabled if default_audio_cleaners=['clean_rename']
'''
import os, uuid
def clean_rename(audiofile):
	"""Rename `audiofile` to a collision-proof UUID4 name, keeping its
	4-character extension. Returns the new filename in a one-element list."""
	extension = audiofile[-4:]
	renamed = str(uuid.uuid4()) + extension
	os.rename(audiofile, renamed)
	return [renamed]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_opus.py | cleaning/audio_cleaning/clean_opus.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script converts a folder of .WAV audio files into .OPUS format,
then converts this .OPUS file back to .WAV, replacing the original file.
.OPUS is a lossy codec and format that narrows in on the human voice range,
so it could filter out other noises that are beyond the human voice range (20 Hz - 20 kHz).
This cleaning script is enabled if default_audio_cleaners=['clean_opus']
'''
import os, shutil
def clean_opus(filename, opusdir):
	"""Round-trip `filename` through the lossy .opus codec.

	The .wav is copied into `opusdir` (where the opusenc/opusdec binaries
	live), encoded to .opus, decoded back to '<name>_opus.wav', copied back
	next to the original, and the original .wav is deleted.

	Args:
		filename (str): .wav file in the current working directory.
		opusdir (str): directory containing the opus tools.

	Returns:
		list: the single new '<name>_opus.wav' filename.

	Note: the statement order matters — the function chdirs into `opusdir`
	and back, cleaning up its intermediates in each directory. The unused
	`filenames` local from the original was removed.
	"""
	#########################
	# lossy codec - .opus
	#########################
	curdir=os.getcwd()
	newfile=filename[0:-4]+'.opus'
	# copy file to opus encoding folder
	shutil.copy(curdir+'/'+filename, opusdir+'/'+filename)
	os.chdir(opusdir)
	print(os.getcwd())
	# encode with opus codec
	os.system('opusenc %s %s'%(filename,newfile))
	os.remove(filename)
	filename=filename[0:-4]+'_opus.wav'
	os.system('opusdec %s %s'%(newfile, filename))
	os.remove(newfile)
	# copy the decoded file back, then remove the opusdir working copy
	shutil.copy(opusdir+'/'+filename, curdir+'/'+filename)
	os.remove(filename)
	os.chdir(curdir)
	# delete the original .wav in the source directory
	os.remove(newfile[0:-5]+'.wav')
	return [filename]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_removenoise.py | cleaning/audio_cleaning/clean_removenoise.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script de-noises all audio files in a given folder using a SoX noise profile.
This is done by taking the first 500 milliseconds and using this as a basis to delete noise
out of the rest of the file. Note that this works well if the noise is linear but not well
if the noise is non-linear.
This cleaning script is enabled if default_audio_cleaners=['clean_removenoise']
'''
import os, uuid
def clean_removenoise(audiofile):
	"""Subtract a linear noise profile from `audiofile` in place using SoX.

	Follows https://stackoverflow.com/questions/44159621/how-to-denoise-audio-with-sox:
	the first 50 ms of the file are treated as a pure-noise sample, turned
	into a SoX noise profile, and subtracted (strength 0.21) from the whole
	file. Works for stationary noise only; alternatives would be bandpass
	or other low/high-frequency filtering. Returns [audiofile].
	"""
	noise_clip = str(uuid.uuid1())+'_noiseaudio.wav'
	profile = str(uuid.uuid1())+'_noise.prof'
	denoised = audiofile[0:-4]+'_.wav'
	# grab the leading 50 ms as the noise reference
	os.system('sox %s %s trim 0 0.050'%(audiofile, noise_clip))
	# build the noise profile, then subtract it from the full file
	os.system('sox %s -n noiseprof %s'%(noise_clip, profile))
	os.system('sox %s %s noisered %s 0.21'%(audiofile, denoised, profile))
	# swap the denoised file in over the original and clean up intermediates
	os.remove(audiofile)
	os.rename(denoised, audiofile)
	os.remove(noise_clip)
	os.remove(profile)
	return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean.py | cleaning/audio_cleaning/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This section of Allie's API cleans folders of audio files
using the default_audio_cleaners.
Usage: python3 clean.py [folder] [cleantype]
All cleantype options include:
["clean_getfirst3secs", "clean_keyword", "clean_mono16hz", "clean_towav",
"clean_multispeaker", "clean_normalizevolume", "clean_opus", "clean_randomsplice",
"clean_removenoise", "clean_removesilence", "clean_rename", "clean_utterances"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/cleaning/audio_cleaning
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random, uuid
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
	"""Drop the final '/'-separated component of `directory` and return the
	rest, e.g. '/a/b/c' -> '/a/b'. Purely string-based; no filesystem access."""
	components = directory.split('/')
	return '/'.join(components[:-1])
################################################
## Helper functions ##
################################################
def transcribe(file, default_audio_transcriber, settingsdir):
	'''
	Transcribe an audio file with the engine named by `default_audio_transcriber`.

	Supported engines: pocketsphinx, deepspeech_nodict, deepspeech_dict,
	google, wit, azure, bing, houndify, ibm. The cloud engines read their
	credentials from environment variables and return '' on any
	recognition/request error; an unknown engine name also returns ''.

	NOTE(review): the `speech_recognition` import (`sr`) is commented out in
	this script's import block, so calling this function as-is raises
	NameError — confirm before wiring it up.

	Args:
		file (str): path to the audio file to transcribe.
		default_audio_transcriber (str): engine name (see list above).
		settingsdir (str): Allie root directory; used to locate DeepSpeech
			models under features/audio_features/helpers.

	Returns:
		str: the transcript ('' on failure).
	'''
	# create all transcription methods here
	print('%s transcribing: %s'%(default_audio_transcriber, file))
	# use the audio file as the audio source
	r = sr.Recognizer()
	transcript_engine = default_audio_transcriber
	with sr.AudioFile(file) as source:
		audio = r.record(source) # read the entire audio file
	if transcript_engine == 'pocketsphinx':
		# recognize speech using Sphinx (offline)
		try:
			transcript= r.recognize_sphinx(audio)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'deepspeech_nodict':
		curdir=os.getcwd()
		os.chdir(settingsdir+'/features/audio_features/helpers')
		listdir=os.listdir()
		deepspeech_dir=os.getcwd()
		# download models if not in helper directory
		if 'deepspeech-0.7.0-models.pbmm' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
		# initialize filenames
		textfile=file[0:-4]+'.txt'
		newaudio=file[0:-4]+'_newaudio.wav'
		if deepspeech_dir.endswith('/'):
			deepspeech_dir=deepspeech_dir[0:-1]
		# go back to main directory
		os.chdir(curdir)
		# convert audio file to 16000 Hz mono audio (DeepSpeech's expected input)
		os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
		command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --audio "%s" >> "%s"'%(deepspeech_dir, newaudio, textfile)
		print(command)
		os.system(command)
		# get transcript (the deepspeech CLI appended it to the text file)
		transcript=open(textfile).read().replace('\n','')
		# remove temporary files
		os.remove(textfile)
		os.remove(newaudio)
	elif transcript_engine == 'deepspeech_dict':
		curdir=os.getcwd()
		os.chdir(settingsdir+'/features/audio_features/helpers')
		listdir=os.listdir()
		deepspeech_dir=os.getcwd()
		# download models (acoustic model + language-model scorer) if missing
		if 'deepspeech-0.7.0-models.pbmm' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
		if 'deepspeech-0.7.0-models.scorer' not in listdir:
			os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer')
		# initialize filenames
		textfile=file[0:-4]+'.txt'
		newaudio=file[0:-4]+'_newaudio.wav'
		if deepspeech_dir.endswith('/'):
			deepspeech_dir=deepspeech_dir[0:-1]
		# go back to main directory
		os.chdir(curdir)
		# convert audio file to 16000 Hz mono audio
		os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
		command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
		print(command)
		os.system(command)
		# get transcript
		transcript=open(textfile).read().replace('\n','')
		# remove temporary files
		os.remove(textfile)
		os.remove(newaudio)
	elif transcript_engine == 'google':
		# recognize speech using Google Cloud Speech; the credentials JSON
		# path comes from the GOOGLE_APPLICATION_CREDENTIALS env var
		GOOGLE_CLOUD_SPEECH_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
		print(GOOGLE_CLOUD_SPEECH_CREDENTIALS)
		try:
			transcript=r.recognize_google_cloud(audio, credentials_json=open(GOOGLE_CLOUD_SPEECH_CREDENTIALS).read())
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'wit':
		# recognize speech using Wit.ai
		WIT_AI_KEY = os.environ['WIT_AI_KEY']
		try:
			transcript=r.recognize_wit(audio, key=WIT_AI_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'azure':
		# recognize speech using Microsoft Azure Speech
		AZURE_SPEECH_KEY = os.environ['AZURE_SPEECH_KEY']
		print(AZURE_SPEECH_KEY)
		try:
			transcript=r.recognize_azure(audio, key=AZURE_SPEECH_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'bing':
		# recognize speech using Microsoft Bing Voice Recognition
		BING_KEY = os.environ['BING_KEY']
		try:
			transcript=r.recognize_bing(audio, key=BING_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'houndify':
		# recognize speech using Houndify
		HOUNDIFY_CLIENT_ID = os.environ['HOUNDIFY_CLIENT_ID']
		HOUNDIFY_CLIENT_KEY = os.environ['HOUNDIFY_CLIENT_KEY']
		try:
			transcript=r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID, client_key=HOUNDIFY_CLIENT_KEY)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	elif transcript_engine == 'ibm':
		# recognize speech using IBM Speech to Text
		IBM_USERNAME = os.environ['IBM_USERNAME']
		IBM_PASSWORD = os.environ['IBM_PASSWORD']
		try:
			transcript=r.recognize_ibm(audio, username=IBM_USERNAME, password=IBM_PASSWORD)
		except sr.UnknownValueError:
			transcript=''
		except sr.RequestError as e:
			transcript=''
	else:
		print('no transcription engine specified')
		transcript=''
	# show transcript
	print(transcript_engine.upper())
	print('--> '+ transcript)
	return transcript
def audio_clean(cleaning_set, audiofile, basedir):
	"""Route one audio file through the cleaning routine named by `cleaning_set`.

	Args:
		cleaning_set (str): cleaner name, e.g. 'clean_mono16hz'.
		audiofile (str): filename of the audio file (relative to cwd).
		basedir (str): this script's base directory (for helper/model paths).

	Returns:
		The cleaner's result (the cleaners return lists of output filenames);
		an unrecognized `cleaning_set` returns `audiofile` unchanged.
	"""
	# Lazily-evaluated dispatch table: the lambdas defer every module
	# reference, so cleaners whose modules were not imported (imports are
	# conditional on settings) are never touched unless selected.
	dispatch = {
		'clean_getfirst3secs': lambda: clean_getfirst3secs.clean_getfirst3secs(audiofile),
		'clean_mono16hz': lambda: clean_mono16hz.clean_mono16hz(audiofile),
		'clean_towav': lambda: clean_towav.clean_towav(audiofile),
		'clean_keyword': lambda: clean_keyword.clean_keyword(audiofile,'coconut'),
		'clean_multispeaker': lambda: clean_multispeaker.clean_multispeaker(audiofile,basedir+'/helpers'),
		'clean_normalizevolume': lambda: clean_normalizevolume.clean_normalizevolume(audiofile),
		'clean_opus': lambda: clean_opus.clean_opus(audiofile,basedir+'/helpers/opustools/'),
		'clean_randomsplice': lambda: clean_randomsplice.clean_randomsplice(audiofile,20),
		'clean_removenoise': lambda: clean_removenoise.clean_removenoise(audiofile),
		'clean_removesilence': lambda: clean_removesilence.clean_removesilence(audiofile),
		'clean_rename': lambda: clean_rename.clean_rename(audiofile),
		'clean_utterances': lambda: clean_utterances.clean_utterances(audiofile),
	}
	# transcripts = can look for hotwords and remove
	action = dispatch.get(cleaning_set)
	if action is not None:
		audiofile = action()
	return audiofile
################################################
##             Load main settings             ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
# settings.json lives two directories above this script's folder
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
# transcription settings are loaded here but only consumed by downstream code
audio_transcribe=settings['transcribe_audio']
default_audio_transcribers=settings['default_audio_transcriber']
try:
	# a single cleaner name may be passed as the second CLI argument
	cleaning_sets=[sys.argv[2]]
except:
	# if none provided on the command line, fall back to the defaults in settings.json
	cleaning_sets=settings['default_audio_cleaners']
################################################
##         Import according to settings       ##
################################################
# only load the relevant cleaning modules to save memory
if 'clean_getfirst3secs' in cleaning_sets:
	import clean_getfirst3secs
if 'clean_mono16hz' in cleaning_sets:
	import clean_mono16hz
if 'clean_towav' in cleaning_sets:
	import clean_towav
if 'clean_keyword' in cleaning_sets:
	import clean_keyword
if 'clean_multispeaker' in cleaning_sets:
	import clean_multispeaker
if 'clean_opus' in cleaning_sets:
	import clean_opus
if 'clean_normalizevolume' in cleaning_sets:
	import clean_normalizevolume
if 'clean_randomsplice' in cleaning_sets:
	import clean_randomsplice
if 'clean_rename' in cleaning_sets:
	import clean_rename
if 'clean_removenoise' in cleaning_sets:
	import clean_removenoise
if 'clean_removesilence' in cleaning_sets:
	import clean_removesilence
if 'clean_utterances' in cleaning_sets:
	import clean_utterances
################################################
##          Get featurization folder          ##
################################################
# folder of audio files to clean is the first CLI argument
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name (handle a possible trailing slash)
labelname=foldername.split('/')
if labelname[-1]=='':
	labelname=labelname[-2]
else:
	labelname=labelname[-1]
################################################
##         REMOVE JSON AND DUPLICATES         ##
################################################
# filecmp was never imported, so every filecmp.cmp() call below raised a
# NameError that the bare except swallowed — duplicate removal silently
# did nothing. Import it here so the sweep actually runs.
import filecmp

deleted_files=list()
# rename files appropriately (strip spaces to avoid shell-quoting problems)
for i in range(len(listdir)):
	os.rename(listdir[i],listdir[i].replace(' ',''))
# refresh the listing so it reflects the space-stripped names; the stale
# list referenced filenames that no longer exist after the renames.
listdir=os.listdir()
# remove duplicates / json files
for i in tqdm(range(len(listdir)), desc=labelname):
	file=listdir[i]
	listdir2=os.listdir()
	# now sub-loop through all files in directory and remove byte-identical duplicates
	for j in range(len(listdir2)):
		try:
			if listdir2[j]==file:
				pass
			elif listdir2[j]=='.DS_Store':
				pass
			else:
				if filecmp.cmp(file, listdir2[j])==True:
					print('removing duplicate: %s ____ %s'%(file,listdir2[j]))
					deleted_files.append(listdir2[j])
					os.remove(listdir2[j])
		except:
			# the file may already have been deleted earlier in the sweep
			pass
print('deleted the files below')
print(deleted_files)
listdir=os.listdir()
for i in tqdm(range(len(listdir))):
	# remove .JSON files (featurization artifacts, not data)
	if listdir[i].endswith('.json'):
		os.remove(listdir[i])
# now rename files with UUIDs to guarantee unique, conflict-free names
listdir=os.listdir()
for i in range(len(listdir)):
	file=listdir[i]
	os.rename(file, str(uuid.uuid4())+file[-4:])
################################################
##                 NOW CLEAN!!                ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# run every requested cleaner over every audio file in the folder
for i in tqdm(range(len(listdir)), desc=labelname):
	if listdir[i][-4:] in ['.wav', '.mp3', '.m4a']:
		filename=[listdir[i]]
		for j in range(len(cleaning_sets)):
			for k in range(len(filename)):
				cleaning_set=cleaning_sets[j]
				# NOTE(review): cleaners return a list of output filenames and
				# this reassigns `filename` to that list each pass, so only
				# filename[k] of the previous list is carried forward — confirm
				# this is the intended chaining behavior.
				filename=audio_clean(cleaning_set, filename[k], basedir)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_utterances.py | cleaning/audio_cleaning/clean_utterances.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This script takes in a folder of audio files and extracts out many
unique utterances from the audio files. Therefore, you get
1.wav --> many audio files with utterances named as UUIDs.
This is useful if you are looking to create a large dataset of voiced utterances.
This is enabled if the default_audio_cleaners=['clean_utterances']
'''
import sys, os, shutil, librosa, uuid
from pyvad import vad, trim, split
import matplotlib.pyplot as plt
import numpy as np
# make sure the right version of numba is installed
os.system('pip3 install numba==0.48')
def clean_utterances(audiofile):
	'''
	Split `audiofile` into voiced utterances using pyvad (VAD technique taken
	from https://github.com/F-Tag/python-vad/blob/master/example.ipynb).

	Each detected voiced span is written to a new UUID4-named .wav in the
	current directory; the source file is deleted.

	Args:
		audiofile (str): path to the source audio file.

	Returns:
		list: filenames of the utterance .wav files that were written.

	NOTE(review): librosa.output.write_wav was removed in librosa >= 0.8 —
	confirm the pinned librosa version supports it.
	'''
	show=False
	curdir=os.getcwd()
	data, fs = librosa.core.load(audiofile)
	time = np.linspace(0, len(data)/fs, len(data))
	# frame-level voice activity (1 = voiced) aligned to the samples
	vact = vad(data, fs, fs_vad = 16000, hop_length = 30, vad_mode=3)
	vact = list(vact)
	# pad the activity track to the full sample count
	while len(time) > len(vact):
		vact.append(0.0)
	utterances=list()
	for i in range(len(vact)):
		# NOTE(review): at i == 0 this compares against vact[-1] (Python
		# negative indexing), not a previous frame — confirm intended.
		try:
			if vact[i] != vact[i-1]:
				# voice shift
				if vact[i] == 1:
					start = i
				else:
					# this means it is end
					end = i
					utterances.append([start,end])
		except:
			pass
	print(utterances)
	vact=np.array(vact)
	files=list()
	# write each [start, end) sample span out as its own .wav
	for i in range(len(utterances)):
		trimmed = data[utterances[i][0]:utterances[i][1]]
		tempfile = str(uuid.uuid4())+'.wav'
		librosa.output.write_wav(tempfile, trimmed, fs)
		files.append(tempfile)
	os.remove(audiofile)
	return files
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_keyword.py | cleaning/audio_cleaning/clean_keyword.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script breaks up the source audio file into utterances and keeps
utterances with a certain keyword. Therefore, there can be multiple or no
audio files resulting after the cleaning process dependent on whether multiple
keywords were used.
Note that any utterance under 20000 bytes (20KB) is deleted.
This cleaning script is enabled if default_audio_cleaners=['clean_keyword']
'''
import sys, os, shutil, librosa, uuid
from pyvad import vad, trim, split
import matplotlib.pyplot as plt
import numpy as np
# make sure the right version of numba is installed
os.system('pip3 install numba==0.48')
def transcribe_audiofile(file):
	'''
	Transcribe `file` with the Mozilla DeepSpeech 0.7.0 CLI (dictionary/scorer
	variant), downloading the model files into the current directory if absent.

	The file is first converted to 16 kHz mono 16-bit PCM via ffmpeg, the
	deepspeech CLI writes its output to a temp .txt, and both temp files are
	removed before returning.

	Args:
		file (str): path to the audio file (extension assumed 4 chars, e.g. .wav).

	Returns:
		str: the transcript with newlines stripped.
	'''
	curdir=os.getcwd()
	listdir=os.listdir()
	deepspeech_dir=os.getcwd()
	# download models if not in helper directory
	if 'deepspeech-0.7.0-models.pbmm' not in listdir:
		os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm')
	if 'deepspeech-0.7.0-models.scorer' not in listdir:
		os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer')
	# initialize filenames
	textfile=file[0:-4]+'.txt'
	newaudio=file[0:-4]+'_newaudio.wav'
	if deepspeech_dir.endswith('/'):
		deepspeech_dir=deepspeech_dir[0:-1]
	# go back to main directory (no-op here: the cwd was never changed)
	os.chdir(curdir)
	# try:
	# convert audio file to 16000 Hz mono audio
	os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
	command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
	print(command)
	os.system(command)
	# get transcript (the deepspeech CLI appended it to the text file)
	transcript=open(textfile).read().replace('\n','')
	# remove temporary files
	os.remove(textfile)
	os.remove(newaudio)
	# except:
	# 	try:
	# 		# remove temporary files
	# 		os.remove(textfile)
	# 	except:
	# 		pass
	# 	try:
	# 		os.remove(newaudio)
	# 	except:
	# 		pass
	# 	transcript=''
	return transcript
def clean_keyword(audiofile,keyword):
	'''
	Split `audiofile` into voiced utterances (VAD technique taken from
	https://github.com/F-Tag/python-vad/blob/master/example.ipynb), transcribe
	each utterance with DeepSpeech, and keep only those whose transcript
	equals `keyword` exactly. Utterances under 20 KB are discarded without
	transcription. The source file is always deleted.

	Args:
		audiofile (str): path to the source audio file.
		keyword (str): exact transcript an utterance must match to be kept.

	Returns:
		list: filenames of the kept utterance .wav files (possibly empty).
	'''
	# keptfiles must be bound BEFORE the try block: the original defined it
	# inside, so an early VAD failure made the final `return keptfiles`
	# raise NameError instead of returning an empty list.
	keptfiles=list()
	data, fs = librosa.core.load(audiofile)
	time = np.linspace(0, len(data)/fs, len(data))
	try:
		vact = vad(data, fs, fs_vad = 16000, hop_length = 30, vad_mode=3)
		vact = list(vact)
		# pad the activity track to the full sample count
		while len(time) > len(vact):
			vact.append(0.0)
		utterances=list()
		for i in range(len(vact)):
			try:
				if vact[i] != vact[i-1]:
					# voice shift
					if vact[i] == 1:
						start = i
					else:
						# this means it is end
						end = i
						utterances.append([start,end])
			except:
				pass
		print(utterances)
		vact=np.array(vact)
		tempfiles=list()
		# write each utterance out as its own UUID-named .wav
		for i in range(len(utterances)):
			trimmed = data[utterances[i][0]:utterances[i][1]]
			tempfile = str(uuid.uuid4())+'.wav'
			librosa.output.write_wav(tempfile, trimmed, fs)
			tempfiles.append(tempfile)
		for i in range(len(tempfiles)):
			# utterances over 20 KB are long enough to transcribe reliably
			# (the stray `pass` before transcription was removed)
			if os.path.getsize(tempfiles[i]) > 20000:
				transcript=transcribe_audiofile(tempfiles[i])
				print('TRANSCRIPT --> %s'%(transcript))
				if transcript == keyword:
					keptfiles.append(tempfiles[i])
				else:
					os.remove(tempfiles[i])
			else:
				os.remove(tempfiles[i])
	except:
		print('ERROR - ValueError: When data.type is float, data must be -1.0 <= data <= 1.0.')
	os.remove(audiofile)
	return keptfiles
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_removesilence.py | cleaning/audio_cleaning/clean_removesilence.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script takes in a folder of audio files and removes the silence
from these audio files using a voice activity detector.
This cleaning script is enabled if default_audio_cleaners=['clean_removesilence']
'''
import collections, contextlib, sys, wave, webrtcvad, os, sys, uuid
def convert_mono(audiofile):
    """Re-encode an audio file in place as 16 kHz, mono, 16-bit PCM.

    Shells out to sox, writing to a uniquely-named temporary file and then
    replacing the original so the caller's path stays valid.

    Args:
        audiofile: path to the .wav file to convert in place.
    """
    tempfile = str(uuid.uuid1()) + '.wav'
    # Quote both paths so filenames containing spaces or shell metacharacters
    # survive the shell invocation (previously unquoted).
    os.system('sox "%s" -r 16000 -c 1 -b 16 "%s"' % (audiofile, tempfile))
    os.remove(audiofile)
    os.rename(tempfile, audiofile)
def read_wave(path):
    """Read a mono 16-bit PCM .wav file.

    Args:
        path: filesystem path to the .wav file.

    Returns:
        A ``(pcm_data, sample_rate)`` tuple: raw frame bytes and the rate in Hz.

    Raises:
        AssertionError: if the file is not mono 16-bit, or its sample rate is
            not one supported by webrtcvad (8/16/32/48 kHz).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav_reader:
        assert wav_reader.getnchannels() == 1
        assert wav_reader.getsampwidth() == 2
        rate = wav_reader.getframerate()
        assert rate in (8000, 16000, 32000, 48000)
        frames = wav_reader.readframes(wav_reader.getnframes())
    return frames, rate
def write_wave(path, audio, sample_rate):
    """Write raw PCM bytes out as a mono 16-bit .wav file.

    Args:
        path: destination filesystem path.
        audio: raw PCM frame bytes.
        sample_rate: sample rate in Hz.
    """
    with contextlib.closing(wave.open(path, 'wb')) as wav_writer:
        wav_writer.setnchannels(1)
        wav_writer.setsampwidth(2)
        wav_writer.setframerate(sample_rate)
        wav_writer.writeframes(audio)
class Frame(object):
    """Represents a "frame" of audio data."""
    def __init__(self, bytes, timestamp, duration):
        # raw PCM payload of this frame
        self.bytes = bytes
        # start offset of the frame within the clip, in seconds
        self.timestamp = timestamp
        # frame length in seconds
        self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield successive fixed-duration Frames sliced from raw PCM data.

    Args:
        frame_duration_ms: desired frame duration in milliseconds.
        audio: raw 16-bit PCM bytes (2 bytes per sample, mono).
        sample_rate: sample rate of the PCM data in Hz.

    Yields:
        Frame objects of the requested duration; a trailing partial frame
        shorter than the requested duration is dropped.
    """
    # 2 bytes per 16-bit sample
    bytes_per_frame = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    frame_seconds = (float(bytes_per_frame) / sample_rate) / 2.0
    start = 0
    clock = 0.0
    while start + bytes_per_frame < len(audio):
        yield Frame(audio[start:start + bytes_per_frame], clock, frame_seconds)
        clock += frame_seconds
        start += bytes_per_frame
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.
    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.
    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.
    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.
    Arguments:
    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).
    Returns: A generator that yields PCM audio data.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        # debug trace: '1' for a voiced frame, '0' for an unvoiced frame
        sys.stdout.write('1' if is_speech else '0')
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                # trace the timestamp where the voiced region starts
                sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
    sys.stdout.write('\n')
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames])
def clean_removesilence(audiofile):
    """Strip non-voiced audio from a .wav file in place using webrtcvad.

    The file is converted to 16 kHz mono 16-bit, split into voiced segments
    by the VAD, and the segments are concatenated back into the original
    path with sox.

    Args:
        audiofile: path to the .wav file to clean in place.

    Returns:
        A one-element list containing the (possibly rewritten) file path.
    """
    # the VAD requires 16 kHz mono 16-bit PCM input
    convert_mono(audiofile)
    audio, sample_rate = read_wave(audiofile)
    # aggressiveness 0-3; higher drops more borderline frames
    vad = webrtcvad.Vad(1)
    frames = list(frame_generator(30, audio, sample_rate))
    segments = vad_collector(sample_rate, 30, 300, vad, frames)
    framelist = []
    for i, segment in enumerate(segments):
        path = 'chunk-%002d.wav' % (i,)
        print(' Writing %s' % (path,))
        write_wave(path, segment, sample_rate)
        framelist.append(path)
    # Bug fix: if the VAD found no voiced segments, the original code still
    # renamed the source away and ran sox with no inputs, destroying the
    # file. Leave the audio untouched instead.
    if not framelist:
        return [audiofile]
    soxcmd = 'sox'
    for chunk in framelist:
        soxcmd = soxcmd + ' %s' % (chunk,)
    soxcmd = soxcmd + ' %s' % (audiofile,)
    print(soxcmd)
    # move the original out of the way so sox can write to its path
    os.rename(audiofile, audiofile[0:-4] + '_temp.wav')
    os.system(soxcmd)
    # remove temporary files and leave only cleaned files
    os.remove(audiofile[0:-4] + '_temp.wav')
    for chunk in framelist:
        os.remove(chunk)
    return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_getfirst3secs.py | cleaning/audio_cleaning/clean_getfirst3secs.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script gets the first three seconds of an audio file
and deletes the source audio.
It is enabled if default_audio_cleaners=['clean_getfirst3secs']
'''
import os
import soundfile as sf
def clean_getfirst3secs(audiofile):
    """Truncate an audio file in place to its first three seconds.

    Args:
        audiofile: path to the audio file to truncate.

    Returns:
        A one-element list containing the rewritten file path.
    """
    samples, rate = sf.read(audiofile)
    os.remove(audiofile)
    # slicing past the end is safe: files shorter than 3 s are kept whole
    sf.write(audiofile, samples[:rate * 3], rate)
    return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_removesmall.py | cleaning/audio_cleaning/clean_removesmall.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script removes audio files that are smaller than 100 bytes, since
such files are too small to contain any useful audio for featurization or modeling.
This cleaning script is enabled if default_audio_cleaners=['clean_removesmall']
'''
import os, uuid
def clean_removesmall(audiofile):
    """Delete an audio file that is too small to contain useful audio.

    Files under 100 bytes are treated as empty/corrupt and removed.

    Args:
        audiofile: path to the audio file to check.

    Returns:
        [audiofile] if the file was kept, or [] if it was deleted. (Bug
        fix: the original returned the deleted path, so later pipeline
        stages tried to open a nonexistent file.)
    """
    if os.path.getsize(audiofile) < 100:
        os.remove(audiofile)
        return []
    return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_towav.py | cleaning/audio_cleaning/clean_towav.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script converts all audio files in a folder from .MP3/.MP4 files to .WAV files.
This cleaning script is enabled if default_audio_cleaners=['clean_towav']
'''
import os
import soundfile as sf
def clean_towav(audiofile):
    """Convert an .mp3/.mp4 file to .wav via ffmpeg; leave other files alone.

    Args:
        audiofile: path to the audio file.

    Returns:
        A one-element list with the resulting file path (the new .wav for
        mp3/mp4 inputs, the unchanged input otherwise).
    """
    if audiofile.endswith('.mp3') or audiofile.endswith('.mp4'):
        newfile = audiofile[0:-4] + '.wav'
        os.system('ffmpeg -i %s %s' % (audiofile, newfile))
        os.remove(audiofile)
        return [newfile]
    # Bug fix: the original fell through here and raised NameError
    # (newfile unbound) for any file that was not .mp3/.mp4.
    return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_mono16hz.py | cleaning/audio_cleaning/clean_mono16hz.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script converts all audio files in a folder to mono 16000 Hz.
This is useful to keep all audio samples the same format for further processing,
as if you don't have this setting it can break things like transcribers and also
other audio featurization scripts.
This cleaning script is enabled if default_audio_cleaners=['clean_mono16hz'] (this is the default setting)
'''
import os
import soundfile as sf
def clean_mono16hz(audiofile):
    """Re-encode an audio file as 16 kHz mono via ffmpeg.

    Files already marked 'cleaned' are skipped so repeated passes are
    idempotent. .wav/.mp3/.m4a inputs are replaced by a '<name>_cleaned.wav'
    file; other extensions are left untouched.

    Args:
        audiofile: path to the audio file.

    Returns:
        A one-element list with the resulting file path. (Bug fixes: the
        mp3/m4a branch previously returned '<name>.wav' although the file
        actually written was '<name>_cleaned.wav', and unsupported
        extensions fell through returning None.)
    """
    if audiofile.find('cleaned') > 0:
        # already processed on a previous pass
        return [audiofile]
    cleaned = audiofile[0:-4] + '_cleaned.wav'
    if audiofile.endswith('.wav') or audiofile.endswith('.mp3') or audiofile.endswith('.m4a'):
        os.system('ffmpeg -i "%s" -ar 16000 -ac 1 "%s" -y' % (audiofile, cleaned))
        os.remove(audiofile)
        return [cleaned]
    # unsupported extension: pass through unchanged
    return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_multispeaker.py | cleaning/audio_cleaning/clean_multispeaker.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script deletes all audio files in a folder that have multiple
speakers as determined by a deep learning model. Note that this works well on small
files under 10 seconds in length but can be inaccurate for longer audio files.
This cleaning script is enabled if default_audio_cleaners=['clean_multispeaker']
'''
import numpy as np
import soundfile as sf
import argparse, os, keras, sklearn, librosa, sys
def get_speakernum(filename, model, mean_, scale_):
    '''
    Estimate the number of concurrent speakers in an audio file.

    taken from https://github.com/faroit/CountNet
    (research paper - https://arxiv.org/abs/1712.04555).
    Note this is the number of concurrent speakers (in parallel),
    and can be used to detect ambient noise.
    Note also that it may be better to break up speech into 5 second
    segments here for better accuracy, as the model is biased for this
    particular case.

    Args:
        filename: path to the audio file to analyze.
        model: loaded CountNet keras model.
        mean_, scale_: StandardScaler parameters saved alongside the model.

    Returns:
        The argmax speaker-count estimate as an integer.
    '''
    print(filename)
    # Bug fix: np.float was removed from NumPy (1.24+); the builtin float
    # yields the same float64 machine epsilon.
    eps = np.finfo(float).eps
    # load standardisation parameters
    scaler = sklearn.preprocessing.StandardScaler()
    scaler.mean_ = mean_
    scaler.scale_ = scale_
    # compute audio
    audio, rate = sf.read(filename, always_2d=True)
    # downmix to mono
    audio = np.mean(audio, axis=1)
    # compute STFT
    X = np.abs(librosa.stft(audio, n_fft=400, hop_length=160)).T
    # apply standardization
    X = scaler.transform(X)
    # cut to input shape length (500 frames x 201 STFT bins)
    X = X[:model.input_shape[1], :]
    # apply normalization
    Theta = np.linalg.norm(X, axis=1) + eps
    X /= np.mean(Theta)
    # add sample dimension
    Xs = X[np.newaxis, ...]
    # predict output
    ys = model.predict(Xs, verbose=0)
    print("Speaker Count Estimate: ", np.argmax(ys, axis=1)[0])
    return np.argmax(ys, axis=1)[0]
def get_wavfiles(listdir):
    """Return only the .wav entries from a directory listing.

    Args:
        listdir: list of filenames.

    Returns:
        A new list containing the names whose last four characters
        are '.wav', in the original order.
    """
    return [name for name in listdir if name[-4:] == '.wav']
def clean_multispeaker(audiofile, modeldir):
    """Delete an audio file if a CountNet model detects >1 concurrent speaker.

    Args:
        audiofile: path to the audio file to screen.
        modeldir: directory containing RNN_keras2.h5 and scaler.npz.

    Returns:
        [audiofile] if the file was kept, [] if it was deleted (multiple
        speakers detected, or analysis failed).
    """
    model = keras.models.load_model(modeldir + '/RNN_keras2.h5')
    with np.load(modeldir + '/scaler.npz') as data:
        mean_ = data['arr_0']
        scale_ = data['arr_1']
    try:
        speaker_number = get_speakernum(audiofile, model, mean_, scale_)
        print(speaker_number)
        if speaker_number > 1:
            # remove files with more than 1 concurrent speaker
            os.remove(audiofile)
            return []
        return [audiofile]
    except Exception as err:
        # Bug fix: the bare `except:` also caught KeyboardInterrupt/SystemExit
        # and hid the failure reason; log it before discarding the file.
        print('error: %s' % (err,))
        os.remove(audiofile)
        return []
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/audio_cleaning/clean_randomsplice.py | cleaning/audio_cleaning/clean_randomsplice.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This cleaning script converts all audio files in a given folder to 20 seconds in length.
Note you can change this to any arbitary length by changing the desiredlength function below.
This cleaning script is enabled if default_audio_cleaners=['clean_randomsplice']
'''
import soundfile as sf
import os, ffmpy, random, getpass
def clean_randomsplice(audiofile, desiredlength):
    """Replace an audio file with a random splice of ``desiredlength`` seconds.

    Picks a uniformly random start point, writes the splice to a new
    'snipped_'-prefixed file beside the original, and deletes the original.

    Args:
        audiofile: path to the source audio file.
        desiredlength: splice duration in seconds.

    Returns:
        [newfile] on success, or [audiofile] unchanged when the file is too
        short to splice or cannot be read.
    """
    try:
        data, samplerate = sf.read(audiofile)
        totalseconds = int(len(data) / samplerate)
        # Bug fix: files shorter than desiredlength+1 seconds previously fed
        # a negative upper bound to randint, raising ValueError that the
        # bare except silently ate. Keep such files whole instead.
        if totalseconds <= desiredlength + 1:
            return [audiofile]
        startsec = random.randint(0, totalseconds - (desiredlength + 1))
        startframe = samplerate * startsec
        endframe = samplerate * (startsec + desiredlength)
        # Bug fix: prefix only the basename — 'snipped_' + a full path like
        # '/a/b.wav' produced the invalid path 'snipped_/a/b.wav'.
        folder, name = os.path.split(audiofile)
        newfile = os.path.join(folder, 'snipped_' + name)
        sf.write(newfile, data[int(startframe):int(endframe)], samplerate)
        os.remove(audiofile)
        return [newfile]
    except Exception:
        print('error, skipping...')
        return [audiofile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/image_cleaning/clean.py | cleaning/image_cleaning/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
This section of Allie's API cleans folders of image files
using the default_image_cleaners.
Usage: python3 clean.py [folder] [cleantype]
All cleantype options include:
["clean_extractfaces", "clean_greyscale", "clean_jpg2png"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/cleaning/image_cleaning
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random, uuid
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
    """Return the parent of a '/'-separated path by dropping its last
    component.

    Args:
        directory: path string using '/' separators.

    Returns:
        The path with the final '/'-delimited component removed ('' when
        there is only one component).
    """
    return '/'.join(directory.split('/')[:-1])
################################################
## Helper functions ##
################################################
def image_clean(cleaning_set, imagefile, basedir):
    """Dispatch to the requested image cleaner and return its result.

    Args:
        cleaning_set: name of the cleaner to run ('clean_greyscale',
            'clean_extractfaces', or 'clean_jpg2png').
        imagefile: path of the image to clean.
        basedir: base directory passed to cleaners that need helper files.

    Returns:
        The list of resulting filenames from the cleaner. (Bug fix: the
        original discarded the cleaner's return value and returned None,
        which crashed the caller loop that iterates the returned list.)
    """
    if cleaning_set == 'clean_greyscale':
        return clean_greyscale.clean_greyscale(imagefile)
    elif cleaning_set == 'clean_extractfaces':
        return clean_extractfaces.clean_extractfaces(imagefile, basedir)
    elif cleaning_set == 'clean_jpg2png':
        return clean_jpg2png.clean_jpg2png(imagefile)
    # unknown cleaner name: pass the file through unchanged
    return [imagefile]
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
# Resolve the Allie repository root (two directory levels up from this
# script) and load the global settings.json that drives the pipeline.
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
# transcription settings are loaded here but not used further in this script
image_transcribe=settings['transcribe_image']
default_image_transcribers=settings['default_image_transcriber']
# NOTE(review): the bare `except:` below also masks IndexError typos;
# consider `except IndexError:` since only a missing argv[2] is expected.
try:
    # assume 1 type of feature_set
    cleaning_sets=[sys.argv[2]]
except:
    # if none provided in command line, then load deafult features
    cleaning_sets=settings['default_image_cleaners']

################################################
##          Import According to settings      ##
################################################

# only load the relevant featuresets for featurization to save memory
if 'clean_greyscale' in cleaning_sets:
    import clean_greyscale
if 'clean_extractfaces' in cleaning_sets:
    import clean_extractfaces
if 'clean_jpg2png' in cleaning_sets:
    import clean_jpg2png
################################################
## Get featurization folder ##
################################################
# the target folder of images to clean is the first CLI argument
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'

# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
    # path ended with a trailing slash; use the component before it
    labelname=labelname[-2]
else:
    labelname=labelname[-1]
################################################
## REMOVE JSON AND DUPLICATES ##
################################################
deleted_files=list()

# rename files appropriately (strip spaces from filenames)
# NOTE(review): `listdir` is not refreshed after these renames, so the
# dedup loop below may reference stale (pre-rename) names.
for i in range(len(listdir)):
    os.rename(listdir[i],listdir[i].replace(' ',''))

# remove duplicates / json files
for i in tqdm(range(len(listdir)), desc=labelname):
    file=listdir[i]
    listdir2=os.listdir()
    #now sub-loop through all files in directory and remove duplicates
    for j in range(len(listdir2)):
        try:
            if listdir2[j]==file:
                pass
            elif listdir2[j]=='.DS_Store':
                pass
            else:
                # NOTE(review): `filecmp` is never imported at the top of
                # this script, so this raises NameError, which the bare
                # except below swallows — duplicate removal silently does
                # nothing. Add `import filecmp` to the imports to enable it.
                if filecmp.cmp(file, listdir2[j])==True:
                    print('removing duplicate: %s ____ %s'%(file,listdir2[j]))
                    deleted_files.append(listdir2[j])
                    os.remove(listdir2[j])
                else:
                    pass
        except:
            pass

print('deleted the files below')
print(deleted_files)

# remove .JSON featurization artifacts from the folder
listdir=os.listdir()
for i in tqdm(range(len(listdir))):
    # remove .JSON files
    if listdir[i].endswith('.json'):
        os.remove(listdir[i])

# now rename files with UUIDs (avoids cross-folder name collisions)
listdir=os.listdir()
for i in range(len(listdir)):
    file=listdir[i]
    os.rename(file, str(uuid.uuid4())+file[-4:])
################################################
## NOW CLEAN!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)

# Run every requested cleaner over each image file in the folder.
# Bug fix: the original reassigned `filename` to image_clean's return value
# (None) inside the inner loop, so a second cleaner or a multi-file result
# crashed on len(None)/None[k]. Track the working set as a list instead.
for i in tqdm(range(len(listdir)), desc=labelname):
    if listdir[i][-4:] in ['.jpg', '.png']:
        filenames = [listdir[i]]
        for cleaning_set in cleaning_sets:
            nextfiles = []
            for file in filenames:
                result = image_clean(cleaning_set, file, basedir)
                # tolerate cleaners that return None: keep the input file
                nextfiles.extend(result if result else [file])
            filenames = nextfiles
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/image_cleaning/clean_greyscale.py | cleaning/image_cleaning/clean_greyscale.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
This script takes in a folder of images and makes them all greyscale images.
Doing this often increases machine learning accuracy by only focusing on
relevant features and not color distortions in images.
This is enabled if default_image_cleaners=['clean_greyscale']
'''
import numpy as np
from PIL import Image
import os, cv2
def rgb2gray(rgb):
    """Collapse the last (channel) axis of an RGB array to luma.

    Uses the ITU-R 601 weights (0.2989, 0.5870, 0.1140) over the first
    three channels; any extra channels (e.g. alpha) are ignored.
    """
    weights = np.array([0.2989, 0.5870, 0.1140])
    return rgb[..., :3] @ weights
def clean_greyscale(imagefile):
    """Overwrite an image file with a greyscale version of itself.

    Args:
        imagefile: path to the image to convert in place.

    Returns:
        A one-element list containing the rewritten file path.

    Bug fix: cv2.imread returns channels in BGR order, so passing the array
    through rgb2gray() applied the red luma weight to the blue channel and
    vice versa. cv2.cvtColor with COLOR_BGR2GRAY uses the correct
    coefficients for BGR input. The file is also no longer deleted before
    the rewrite, so a failed write cannot lose the original.
    """
    img = cv2.imread(imagefile)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.imwrite(imagefile, gray)
    return [imagefile]
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/image_cleaning/clean_extractfaces.py | cleaning/image_cleaning/clean_extractfaces.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
This script takes in a folder of images and extracts out the faces for these images
if they are in there and deletes the original image. This is useful if you are looking
to do a lot of facial machine learning work.
This is enabled if default_image_cleaners=['clean_extractfaces']
'''
# you only use these modules if you register, so put them here
import cv2, os, time, shutil, math
import skvideo.io, skvideo.motion, skvideo.measure
from moviepy.editor import VideoFileClip
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
def euclidean_distance(a, b):
    """Return the straight-line distance between 2-D points ``a`` and ``b``.

    Each point is an indexable pair (x, y).
    """
    dx = b[0] - a[0]
    dy = b[1] - a[1]
    return math.sqrt(dx * dx + dy * dy)
def detectFace(img, face_detector, eye_detector, nose_detector):
    """Crop an image to the first detected face, if any.

    Args:
        img: BGR image array.
        face_detector: cv2 cascade classifier used to find faces.
        eye_detector, nose_detector: accepted for interface compatibility
            with the caller but unused here.

    Returns:
        A ``(crop, crop_gray)`` pair — the (possibly cropped) image and its
        greyscale version. When no face is found, the full frame is used.
    """
    faces = face_detector.detectMultiScale(img, 1.3, 5)
    #print("found faces: ", len(faces))
    if len(faces) > 0:
        x, y, w, h = faces[0]
        img = img[int(y):int(y + h), int(x):int(x + w)]
    return img, cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def alignFace(img_path, face_detector, eye_detector, nose_detector):
    """Rotate an image so the detected eyes lie on a horizontal line.

    Detects the face and the two largest eyes, computes the angle of the
    line joining the eye centers via the law of cosines, and rotates the
    ORIGINAL (uncropped) image by that angle. If fewer than two eyes are
    found, the image is returned unrotated. Displays the input via
    matplotlib as a side effect.
    """
    img = cv2.imread(img_path)
    # side effect: show the image being processed (BGR -> RGB for display)
    plt.imshow(img[:, :, ::-1])
    plt.show()
    img_raw = img.copy()
    img, gray_img = detectFace(img,face_detector,eye_detector,nose_detector)
    eyes = eye_detector.detectMultiScale(gray_img)
    #print("found eyes: ",len(eyes))
    if len(eyes) >= 2:
        #find the largest 2 eye
        base_eyes = eyes[:, 2]
        #print(base_eyes)
        items = []
        for i in range(0, len(base_eyes)):
            item = (base_eyes[i], i)
            items.append(item)
        # sort detections by width and keep the two largest as the eyes
        df = pd.DataFrame(items, columns = ["length", "idx"]).sort_values(by=['length'], ascending=False)
        eyes = eyes[df.idx.values[0:2]]
        #--------------------
        #decide left and right eye
        eye_1 = eyes[0]; eye_2 = eyes[1]
        if eye_1[0] < eye_2[0]:
            left_eye = eye_1
            right_eye = eye_2
        else:
            left_eye = eye_2
            right_eye = eye_1
        #--------------------
        #center of eyes
        left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
        left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]
        right_eye_center = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
        right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]
        #center_of_eyes = (int((left_eye_x+right_eye_x)/2), int((left_eye_y+right_eye_y)/2))
        # draw debug markers on the cropped face image
        cv2.circle(img, left_eye_center, 2, (255, 0, 0) , 2)
        cv2.circle(img, right_eye_center, 2, (255, 0, 0) , 2)
        #cv2.circle(img, center_of_eyes, 2, (255, 0, 0) , 2)
        #----------------------
        #find rotation direction
        # the third point completes a right triangle with the eye centers
        if left_eye_y > right_eye_y:
            point_3rd = (right_eye_x, left_eye_y)
            direction = -1 #rotate same direction to clock
            print("rotate to clock direction")
        else:
            point_3rd = (left_eye_x, right_eye_y)
            direction = 1 #rotate inverse direction of clock
            print("rotate to inverse clock direction")
        #----------------------
        cv2.circle(img, point_3rd, 2, (255, 0, 0) , 2)
        cv2.line(img,right_eye_center, left_eye_center,(67,67,67),1)
        cv2.line(img,left_eye_center, point_3rd,(67,67,67),1)
        cv2.line(img,right_eye_center, point_3rd,(67,67,67),1)
        a = euclidean_distance(left_eye_center, point_3rd)
        b = euclidean_distance(right_eye_center, point_3rd)
        c = euclidean_distance(right_eye_center, left_eye_center)
        #print("left eye: ", left_eye_center)
        #print("right eye: ", right_eye_center)
        #print("additional point: ", point_3rd)
        #print("triangle lengths: ",a, b, c)
        # law of cosines gives the angle between the eye line and horizontal
        cos_a = (b*b + c*c - a*a)/(2*b*c)
        #print("cos(a) = ", cos_a)
        angle = np.arccos(cos_a)
        #print("angle: ", angle," in radian")
        angle = (angle * 180) / math.pi
        print("angle: ", angle," in degree")
        if direction == -1:
            angle = 90 - angle
        print("angle: ", angle," in degree")
        #--------------------
        #rotate image
        new_img = Image.fromarray(img_raw)
        new_img = np.array(new_img.rotate(direction * angle))
    else:
        #find the largest 2 ey
        new_img = img_raw
    return new_img
def capture_video(filename, timesplit):
video=cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_width = int(video.get(3))
frame_height = int(video.get(4))
out = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
a=0
start=time.time()
while True:
a=a+1
check, frame=video.read()
#print(check)
#print(frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
out.write(frame)
#cv2.imshow("frame",gray)
end=time.time()
if end-start>timesplit:
break
#print(end-start)
print(a)
video.release()
out.release()
cv2.destroyAllWindows()
return filename
def clean_extractfaces(filename,basedir):
# paths
opencv_home = cv2.__file__
folders = opencv_home.split(os.path.sep)[0:-1]
path = folders[0]
for folder in folders[1:]:
path = path + "/" + folder
# other stuff
face_detector_path = path+"/data/haarcascade_frontalface_default.xml"
eye_detector_path = path+"/data/haarcascade_eye.xml"
nose_detector_path = path+"/data/haarcascade_mcs_nose.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",detector_path," violated.")
face_detector = cv2.CascadeClassifier(face_detector_path)
eye_detector = cv2.CascadeClassifier(eye_detector_path)
nose_detector = cv2.CascadeClassifier(nose_detector_path)
# load image file
image_file = filename
alignedFace = alignFace(image_file, face_detector, eye_detector, nose_detector)
gray = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY)
faces = face_detector.detectMultiScale(gray, 1.3, 5)
increment=0
facenums=0
print(len(faces))
filenames=list()
if len(faces) == 0:
pass
else:
for (x,y,w,h) in faces:
img=alignedFace
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
newimg=img[y:y+h,x:x+w]
new_image_file=image_file[0:-4] + '_face_' + str(increment) + '.png'
newimg=cv2.resize(newimg, (100, 100), interpolation=cv2.INTER_LINEAR)
norm_img = np.zeros((100, 100))
norm_img = cv2.normalize(newimg, norm_img, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite(new_image_file, newimg)
filenames.append(new_image_file)
facenums=facenums+1
os.remove(filename)
return filenames | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/image_cleaning/clean_jpg2png.py | cleaning/image_cleaning/clean_jpg2png.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_____
|_ _|
| | _ __ ___ __ _ __ _ ___
| || '_ ` _ \ / _` |/ _` |/ _ \
_| || | | | | | (_| | (_| | __/
\___/_| |_| |_|\__,_|\__, |\___|
__/ |
|___/
This script takes in a folder of images and simply converts all .jpg files to .pngs.
This is enabled if default_image_cleaners=['clean_jpg2png']
'''
from PIL import Image
import os
def clean_jpg2png(imagefile):
if imagefile.endswith('.jpg'):
im1 = Image.open(imagefile)
newfile=imagefile[0:-4]+'.png'
im1.save(newfile)
os.remove(imagefile)
return [newfile] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/image_cleaning/helpers/tesseract_features.py | cleaning/image_cleaning/helpers/tesseract_features.py | import os, sys
from PIL import Image
import pytesseract
def prev_dir(directory):
g=directory.split('/')
# print(g)
lastdir=g[len(g)-1]
i1=directory.find(lastdir)
directory=directory[0:i1]
return directory
directory=os.getcwd()
prev_dir=prev_dir(directory)
sys.path.append(prev_dir+'/text_features')
import nltk_features as nf
os.chdir(directory)
def transcribe_image(imgfile):
transcript=pytesseract.image_to_string(Image.open(imgfile))
return transcript
def tesseract_featurize(imgfile):
# can stitch across an entire length of video frames too
transcript=transcribe_image(imgfile)
features, labels = nf.nltk_featurize(transcript)
return transcript, features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/video_cleaning/clean_alignfaces.py | cleaning/video_cleaning/clean_alignfaces.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
_____ _ _ ___ ______ _____
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_ _ _ _
| | | (_) | |
| | | |_ __| | ___ ___
| | | | |/ _` |/ _ \/ _ \
\ \_/ / | (_| | __/ (_) |
\___/|_|\__,_|\___|\___/
This script takes a folder of .mp4 video files and converts
them to a folder of .png files full of faces (centered and aligned).
The original video files are also placed in these folders.
This is enabled if default_video_cleaners=['clean_alignfaces']
'''
# you only use these modules if you register, so put them here
import cv2, os, time, shutil, math
import skvideo.io, skvideo.motion, skvideo.measure
from moviepy.editor import VideoFileClip
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
def clean_alignfaces(videofile, basedir):
def euclidean_distance(a, b):
x1 = a[0]; y1 = a[1]
x2 = b[0]; y2 = b[1]
return math.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
def detectFace(img):
faces = face_detector.detectMultiScale(img, 1.3, 5)
#print("found faces: ", len(faces))
if len(faces) > 0:
face = faces[0]
face_x, face_y, face_w, face_h = face
img = img[int(face_y):int(face_y+face_h), int(face_x):int(face_x+face_w)]
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img, img_gray
else:
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img, img_gray
#raise ValueError("No face found in the passed image ")
def alignFace(img_path):
img = cv2.imread(img_path)
# plt.imshow(img[:, :, ::-1])
# plt.show()
img_raw = img.copy()
img, gray_img = detectFace(img)
eyes = eye_detector.detectMultiScale(gray_img)
#print("found eyes: ",len(eyes))
if len(eyes) >= 2:
#find the largest 2 eye
base_eyes = eyes[:, 2]
#print(base_eyes)
items = []
for i in range(0, len(base_eyes)):
item = (base_eyes[i], i)
items.append(item)
df = pd.DataFrame(items, columns = ["length", "idx"]).sort_values(by=['length'], ascending=False)
eyes = eyes[df.idx.values[0:2]]
#--------------------
#decide left and right eye
eye_1 = eyes[0]; eye_2 = eyes[1]
if eye_1[0] < eye_2[0]:
left_eye = eye_1
right_eye = eye_2
else:
left_eye = eye_2
right_eye = eye_1
#--------------------
#center of eyes
left_eye_center = (int(left_eye[0] + (left_eye[2] / 2)), int(left_eye[1] + (left_eye[3] / 2)))
left_eye_x = left_eye_center[0]; left_eye_y = left_eye_center[1]
right_eye_center = (int(right_eye[0] + (right_eye[2]/2)), int(right_eye[1] + (right_eye[3]/2)))
right_eye_x = right_eye_center[0]; right_eye_y = right_eye_center[1]
#center_of_eyes = (int((left_eye_x+right_eye_x)/2), int((left_eye_y+right_eye_y)/2))
cv2.circle(img, left_eye_center, 2, (255, 0, 0) , 2)
cv2.circle(img, right_eye_center, 2, (255, 0, 0) , 2)
#cv2.circle(img, center_of_eyes, 2, (255, 0, 0) , 2)
#----------------------
#find rotation direction
if left_eye_y > right_eye_y:
point_3rd = (right_eye_x, left_eye_y)
direction = -1 #rotate same direction to clock
print("rotate to clock direction")
else:
point_3rd = (left_eye_x, right_eye_y)
direction = 1 #rotate inverse direction of clock
print("rotate to inverse clock direction")
#----------------------
cv2.circle(img, point_3rd, 2, (255, 0, 0) , 2)
cv2.line(img,right_eye_center, left_eye_center,(67,67,67),1)
cv2.line(img,left_eye_center, point_3rd,(67,67,67),1)
cv2.line(img,right_eye_center, point_3rd,(67,67,67),1)
a = euclidean_distance(left_eye_center, point_3rd)
b = euclidean_distance(right_eye_center, point_3rd)
c = euclidean_distance(right_eye_center, left_eye_center)
#print("left eye: ", left_eye_center)
#print("right eye: ", right_eye_center)
#print("additional point: ", point_3rd)
#print("triangle lengths: ",a, b, c)
cos_a = (b*b + c*c - a*a)/(2*b*c)
#print("cos(a) = ", cos_a)
angle = np.arccos(cos_a)
#print("angle: ", angle," in radian")
angle = (angle * 180) / math.pi
print("angle: ", angle," in degree")
if direction == -1:
angle = 90 - angle
print("angle: ", angle," in degree")
#--------------------
#rotate image
new_img = Image.fromarray(img_raw)
new_img = np.array(new_img.rotate(direction * angle))
else:
#find the largest 2 ey
new_img = img_raw
return new_img
def capture_video(filename, timesplit):
video=cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
frame_width = int(video.get(3))
frame_height = int(video.get(4))
out = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))
a=0
start=time.time()
while True:
a=a+1
check, frame=video.read()
#print(check)
#print(frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
out.write(frame)
#cv2.imshow("frame",gray)
end=time.time()
if end-start>timesplit:
break
#print(end-start)
print(a)
video.release()
out.release()
cv2.destroyAllWindows()
return filename
def cut_faces(modeldir,filename):
# import data later
hostdir=os.getcwd()
# capture_video(filename, 5)
face_cascade = cv2.CascadeClassifier(os.getcwd()+'/helpers/haarcascade_frontalface_default.xml')
foldername=filename[0:-4]+'_faces'
try:
os.mkdir(foldername)
except:
shutil.rmtree(foldername)
os.mkdir(foldername)
shutil.copy(hostdir+'/'+filename, hostdir+'/'+foldername+'/'+filename)
os.chdir(foldername)
videodata=skvideo.io.vread(filename)
frames, rows, cols, channels = videodata.shape
metadata=skvideo.io.ffprobe(filename)
frame=videodata[0]
r,c,ch=frame.shape
for i in range(0,len(videodata),25):
#row, col, channels
skvideo.io.vwrite("output"+str(i)+".png", videodata[i])
listdir=os.listdir()
facenums=0
for i in range(len(listdir)):
if listdir[i][-4:]=='.png':
try:
image_file = listdir[i]
alignedFace = alignFace(image_file)
gray = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
increment=0
print(len(faces))
files=list()
if len(faces) == 0:
pass
else:
for (x,y,w,h) in faces:
img=alignedFace
os.chdir(hostdir+'/'+foldername)
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
newimg=img[y:y+h,x:x+w]
new_image_file=image_file[0:-4] + '_face_' + str(increment) + '.png'
newimg=cv2.resize(newimg, (100, 100), interpolation=cv2.INTER_LINEAR)
norm_img = np.zeros((100, 100))
norm_img = cv2.normalize(newimg, norm_img, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite(new_image_file, newimg)
files.append(new_image_file)
facenums=facenums+1
except:
print('error')
os.chdir(hostdir+'/'+foldername)
listdir=os.listdir()
print(listdir)
for i in range(len(listdir)):
if listdir[i][-4:]=='.png':
if listdir[i].find('face') < 0:
os.remove(listdir[i])
return facenums, files
# paths
opencv_home = cv2.__file__
folders = opencv_home.split(os.path.sep)[0:-1]
path = folders[0]
for folder in folders[1:]:
path = path + "/" + folder
# other stuff
face_detector_path = basedir+"/helpers/haarcascade_frontalface_default.xml"
eye_detector_path = basedir+"/helpers/haarcascade_eye.xml"
nose_detector_path = basedir+"/helpers/haarcascade_mcs_nose.xml"
if os.path.isfile(face_detector_path) != True:
raise ValueError("Confirm that opencv is installed on your environment! Expected path ",detector_path," violated.")
face_detector = cv2.CascadeClassifier(face_detector_path)
eye_detector = cv2.CascadeClassifier(eye_detector_path)
nose_detector = cv2.CascadeClassifier(nose_detector_path)
facenums, files=cut_faces(os.getcwd(), videofile)
return [filename] | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/video_cleaning/clean.py | cleaning/video_cleaning/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
_____ _ _ ___ ______ _____
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_ _ _ _
| | | (_) | |
| | | |_ __| | ___ ___
| | | | |/ _` |/ _ \/ _ \
\ \_/ / | (_| | __/ (_) |
\___/|_|\__,_|\___|\___/
This section of Allie's API cleans folders of video files
using the default_video_cleaners.
Usage: python3 clean.py [folder] [cleantype]
All cleantype options include:
["clean_alignfaces", "clean_videostabilize"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/cleaning/video_cleaning
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random, uuid
import numpy as np
# import helpers.transcribe as ts
# import speech_recognition as sr
from tqdm import tqdm
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
################################################
## Helper functions ##
################################################
def video_clean(cleaning_set, videofile, basedir):
# long conditional on all the types of features that can happen and featurizes accordingly.
if cleaning_set == 'clean_alignfaces':
clean_alignfaces.clean_alignfaces(videofile, basedir)
elif cleaning_set == 'clean_videostabilize':
clean_videostabilize.clean_videostabilize(videofile)
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
video_transcribe=settings['transcribe_video']
default_video_transcribers=settings['default_video_transcriber']
try:
# assume 1 type of feature_set
cleaning_sets=[sys.argv[2]]
except:
# if none provided in command line, then load deafult features
cleaning_sets=settings['default_video_cleaners']
################################################
## Import According to settings ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'clean_alignfaces' in cleaning_sets:
import clean_alignfaces
if 'clean_videostabilize' in cleaning_sets:
import clean_videostabilize
################################################
## Get featurization folder ##
################################################
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
################################################
## REMOVE JSON AND DUPLICATES ##
################################################
deleted_files=list()
# rename files appropriately
for i in range(len(listdir)):
os.rename(listdir[i],listdir[i].replace(' ',''))
# remove duplicates / json files
for i in tqdm(range(len(listdir)), desc=labelname):
file=listdir[i]
listdir2=os.listdir()
#now sub-loop through all files in directory and remove duplicates
for j in range(len(listdir2)):
try:
if listdir2[j]==file:
pass
elif listdir2[j]=='.DS_Store':
pass
else:
if filecmp.cmp(file, listdir2[j])==True:
print('removing duplicate: %s ____ %s'%(file,listdir2[j]))
deleted_files.append(listdir2[j])
os.remove(listdir2[j])
else:
pass
except:
pass
print('deleted the files below')
print(deleted_files)
listdir=os.listdir()
for i in tqdm(range(len(listdir))):
# remove .JSON files
if listdir[i].endswith('.json'):
os.remove(listdir[i])
# now rename files with UUIDs
listdir=os.listdir()
for i in range(len(listdir)):
file=listdir[i]
os.rename(file, str(uuid.uuid4())+file[-4:])
################################################
## NOW CLEAN!! ##
################################################
listdir=os.listdir()
random.shuffle(listdir)
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
if listdir[i][-4:] in ['.mp4']:
filename=[listdir[i]]
for j in range(len(cleaning_sets)):
for k in range(len(filename)):
cleaning_set=cleaning_sets[j]
filename=video_clean(cleaning_set, filename[k], basedir) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/cleaning/video_cleaning/clean_videostabilize.py | cleaning/video_cleaning/clean_videostabilize.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
_____ _ _ ___ ______ _____
/ __ \ | (_) / _ \ | ___ \_ _| _
| / \/ | ___ __ _ _ __ _ _ __ __ _ / /_\ \| |_/ / | | (_)
| | | |/ _ \/ _` | '_ \| | '_ \ / _` | | _ || __/ | |
| \__/\ | __/ (_| | | | | | | | | (_| | | | | || | _| |_ _
\____/_|\___|\__,_|_| |_|_|_| |_|\__, | \_| |_/\_| \___/ (_)
__/ |
|___/
_ _ _ _
| | | (_) | |
| | | |_ __| | ___ ___
| | | | |/ _` |/ _ \/ _ \
\ \_/ / | (_| | __/ (_) |
\___/|_|\__,_|\___|\___/
This script takes a folder of videos (as .mp4s) and converts them
to stabilized .mp4 videos. This is useful if the videos are wobbly
for focusing on video features.
This is enabled if default_video_features = ['clean_videostabilize']
'''
# import required libraries
try:
from vidgear.gears import VideoGear
except:
import os
os.system('pip3 install vidgear==0.1.8')
import numpy as np
import cv2
def clean_videostabilize(videofile):
# open any valid video stream with stabilization enabled(`stabilize = True`)
stream_stab = VideoGear(videofile, stabilize = True).start()
# open same stream without stabilization for comparison
stream_org = VideoGear(source = "test.mp4").start()
# loop over
while True:
# read stabilized frames
frame_stab = stream_stab.read()
# check for stabilized frame if Nonetype
if frame_stab is None:
break
# read un-stabilized frame
frame_org = stream_org.read()
# concatenate both frames
output_frame = np.concatenate((frame_org, frame_stab), axis=1)
# put text over concatenated frame
cv2.putText(
output_frame, "Before", (10, output_frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.6, (0, 255, 0), 2,
)
cv2.putText(
output_frame, "After", (output_frame.shape[1] // 2 + 10, output_frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.6, (0, 255, 0), 2,
)
# Show output window
cv2.imshow("Stabilized Frame", output_frame)
# check for 'q' key if pressed
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# close output window
cv2.destroyAllWindows()
# safely close both video streams
stream_org.stop()
stream_stab.stop()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_neuraxle.py | training/train_neuraxle.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using neuraxle: https://github.com/Neuraxio/Neuraxle
This is enabled if the default_training_script = ['neuraxle']
Note that only regression is supported with this trainer.
'''
import time
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
import pickle, json, os, shutil
import numpy as np
from sklearn.cluster import KMeans
from sklearn.datasets import load_boston
from sklearn.decomposition import PCA, FastICA
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
# install library
os.system('pip3 install neuraxle==0.4.0')
from neuraxle.pipeline import Pipeline
from neuraxle.steps.numpy import NumpyShapePrinter
from neuraxle.steps.sklearn import RidgeModelStacking
from neuraxle.union import AddFeatures
from neuraxle.checkpoints import DefaultCheckpoint
from neuraxle.hyperparams.distributions import RandInt
from neuraxle.hyperparams.space import HyperparameterSpace
from neuraxle.metaopt.auto_ml import RandomSearchHyperparameterSelectionStrategy
from neuraxle.metaopt.callbacks import MetricCallback, ScoringCallback
from neuraxle.pipeline import ResumablePipeline, DEFAULT_CACHE_FOLDER, Pipeline
from neuraxle.steps.flow import ExpandDim
from neuraxle.steps.loop import ForEachDataInput
from neuraxle.steps.misc import Sleep
from neuraxle.steps.numpy import MultiplyByN
from neuraxle.steps.numpy import NumpyShapePrinter
from neuraxle.union import AddFeatures
def train_neuraxle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# get train and test data
model_name=common_name_model+'.pickle'
files=list()
if mtype in ['classification', 'c']:
print('neuraxle currently does not support classsification...')
elif mtype in ['regression', 'r']:
p = Pipeline([
NumpyShapePrinter(),
AddFeatures([
PCA(n_components=2),
FastICA(n_components=2),
]),
NumpyShapePrinter(),
RidgeModelStacking([
GradientBoostingRegressor(),
GradientBoostingRegressor(n_estimators=500),
GradientBoostingRegressor(max_depth=5),
KMeans(),
]),
NumpyShapePrinter(),
])
# Fitting and evaluating the pipeline.
# X_train data shape: (batch, different_lengths, n_feature_columns)
# y_train data shape: (batch, different_lengths)
pipeline = p.fit(X_train, y_train)
# export pickle file
print('saving model - %s'%(model_name))
f=open(model_name,'wb')
pickle.dump(pipeline, f)
f.close()
files.append(model_name)
model_dir=os.getcwd()
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_gama.py | training/train_gama.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using gama: https://github.com/PGijsbers/gama
This is enabled if the default_training_script = ['gama']
'''
import os, sys, shutil, pickle, json
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, accuracy_score, mean_squared_error
# install library
print('installing library')
os.system('pip3 install gama==20.1.0')
from gama import GamaClassifier, GamaRegressor
def train_gama(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
model_name=common_name_model+'.pickle'
files=list()
if mtype in ['c']:
automl = GamaClassifier(max_total_time=180, keep_analysis_log=None)
print("Starting GAMA `fit` - usually takes around 3 minutes but can take longer for large datasets")
automl.fit(X_train, y_train)
label_predictions = automl.predict(X_test)
probability_predictions = automl.predict_proba(X_test)
accuracy=accuracy_score(y_test, label_predictions)
log_loss_pred=log_loss(y_test, probability_predictions)
log_loss_score=automl.score(X_test, y_test)
print('accuracy:', accuracy)
print('log loss pred:', log_loss_pred)
print('log_loss_score', log_loss_score)
elif mtype in ['regression', 'r']:
automl = GamaRegressor(max_total_time=180, keep_analysis_log=None, n_jobs=1)
print("Starting GAMA `fit` - usually takes around 3 minutes but can take longer for large datasets")
automl.fit(X_train, y_train)
predictions = automl.predict(X_test)
mse_error=mean_squared_error(y_test, predictions)
print("MSE:", mse_error)
# SAVE ML MODEL
modelfile=open(model_name,'wb')
pickle.dump(automl, modelfile)
modelfile.close()
files.append(model_name)
model_dir=os.getcwd()
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_autogluon.py | training/train_autogluon.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autogluon: https://github.com/awslabs/autogluon
This is enabled if the default_training_script = ['autogluon']
'''
import os
# install dependencies
os.system('pip3 install autogluon==0.0.6')
os.system('pip3 install pillow==7.0.0')
os.system('pip3 install numpy==1.18.4')
from autogluon import TabularPrediction as task
import pandas as pd
import os, sys, pickle, json, random, shutil, time
import numpy as np
def convert_gluon(X_train, y_train):
    """Convert a 2-D feature array plus labels into an autogluon tabular Dataset.

    Columns are named feature_0 .. feature_{n-1} (one per feature index),
    with the labels in a final 'class' column.

    Args:
        X_train: sequence of equal-length feature rows.
        y_train: sequence of labels, one per row of X_train.

    Returns:
        task.Dataset wrapping the assembled pandas DataFrame.
    """
    num_features = len(X_train[0])
    # Build each column in a single pass.  The previous implementation grew
    # every column cell-by-cell inside a try/except that silently dropped
    # data on KeyError, and printed the whole dict to stdout on every call.
    data = {
        'feature_' + str(j): [row[j] for row in X_train]
        for j in range(num_features)
    }
    data['class'] = y_train
    data = pd.DataFrame(data, columns=list(data))
    return task.Dataset(data)
def train_autogluon(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Fit an autogluon TabularPrediction model and pickle it to disk.

    Returns:
        (model_name, model_dir, files, test_data) where `files` lists the
        pickle plus the auxiliary directories autogluon/catboost/dask create
        during training, and `test_data` is the converted test Dataset.
    """
    # convert the raw arrays into autogluon tabular Datasets
    train_data = convert_gluon(X_train, y_train)
    test_data = convert_gluon(X_test, y_test)

    # fit on the 'class' label and emit a verbose training summary
    predictor = task.fit(train_data=train_data, label='class')
    results = predictor.fit_summary(verbosity=3)

    # serialize the trained predictor
    model_name = common_name_model + '.pickle'
    with open(model_name, 'wb') as model_file:
        pickle.dump(predictor, model_file)

    # artifacts produced during training that belong alongside the model
    files = [
        model_name,
        'AutogluonModels',
        'catboost_info',
        'dask-worker-space',
    ]

    model_dir = os.getcwd()
    return model_name, model_dir, files, test_data
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_scsr.py | training/train_scsr.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using SCSR: https://github.com/jim-schwoebel/voicebook/blob/master/chapter_4_modeling/train_audioregression.py
This is enabled if the default_training_script = ['scsr']
'''
import os
# Install pinned dependencies at import time (legacy behavior of this repo).
os.system('pip3 install scikit-learn==0.22.2.post1')
# Bug fix: the package name was misspelled 'xslxwriter', so the xlsxwriter
# dependency imported below was never actually installed.
os.system('pip3 install xlsxwriter==1.2.8')
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn import linear_model
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Perceptron
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn import preprocessing
from sklearn import svm
from sklearn import metrics
from textblob import TextBlob
from operator import itemgetter
import json, pickle, datetime, time, shutil, xlsxwriter
import numpy as np
from beautifultable import BeautifulTable
import warnings
# ignore a lot of the warnings.
warnings.filterwarnings("ignore")
# INITIAL FUNCTIONS
#############################################################
def update_list(y_test, predictions, explained_variances, mean_absolute_errors, mean_squared_errors, median_absolute_errors, r2_scores):
    """Append one model's regression metrics onto the running metric lists.

    Each metric is computed from (y_test, predictions); when a metric cannot
    be computed for this model, the string 'n/a' is appended instead so every
    list stays aligned with the list of model names.

    Returns:
        The same (mutated) lists, in the order they were passed in.
    """
    try:
        explained_variances.append(metrics.explained_variance_score(y_test, predictions))
    except Exception:
        explained_variances.append('n/a')
    try:
        mean_absolute_errors.append(metrics.mean_absolute_error(y_test, predictions))
    except Exception:
        mean_absolute_errors.append('n/a')
    # Bug fix: mean_squared_errors was accepted and returned but never
    # appended to, leaving it permanently out of sync with the other lists.
    try:
        mean_squared_errors.append(metrics.mean_squared_error(y_test, predictions))
    except Exception:
        mean_squared_errors.append('n/a')
    try:
        median_absolute_errors.append(metrics.median_absolute_error(y_test, predictions))
    except Exception:
        median_absolute_errors.append('n/a')
    try:
        r2_scores.append(metrics.r2_score(y_test, predictions))
    except Exception:
        r2_scores.append('n/a')
    return explained_variances, mean_absolute_errors, mean_squared_errors, median_absolute_errors, r2_scores
def train_sr(X_train,X_test,y_train,y_test,common_name_model,problemtype,classes,default_features,transform_model,modeldir,settings):
    """Train a suite of scikit-learn regressors, score each with 6-fold
    cross-validated predictions, pick the model with the lowest mean
    absolute error, and save it (plus an .xlsx summary of every model)
    into `modeldir`.

    Each candidate model is fit/scored/pickled best-effort: a failure in
    any one model is printed and skipped, never fatal.

    Fixes vs. the previous revision:
      * the elastic-net branch scored against an undefined name `ytest`
        (NameError silently swallowed), so elastic net always failed;
      * the polynomial branch referenced bare `LinearRegression`, which is
        not imported at module level (only `linear_model` is), so it also
        always failed;
      * the winning pickle was looked up in a static 15-entry filename list
        indexed by position in the *successful-models* list, so whenever any
        model errored the wrong pickle could be renamed and shipped.  Pickle
        names are now recorded per successful model.

    Returns:
        (model_name, model_dir, files) for the winning model.
    """
    # per-model metrics; these lists stay parallel to `modeltypes`/`picklenames`
    modeltypes = list()
    explained_variances = list()
    mean_absolute_errors = list()
    mean_squared_errors = list()
    median_absolute_errors = list()
    r2_scores = list()
    picklenames = list()

    print(modeldir)
    os.chdir(modeldir)

    # make a temp folder to dump model pickles into (recreate if it exists)
    foldername = common_name_model + '_temp'
    tempdir = os.getcwd() + '/' + foldername
    try:
        os.mkdir(foldername)
        os.chdir(foldername)
    except Exception:
        shutil.rmtree(foldername)
        os.mkdir(foldername)
        os.chdir(foldername)

    def _evaluate(model, pickle_name, label, error_label, X_tr=X_train, X_te=X_test):
        # Fit `model`, score it with 6-fold cross-validated predictions,
        # pickle it, and record its metrics.  Any failure is printed and
        # the model is skipped (mirrors the original best-effort flow).
        try:
            model.fit(X_tr, y_train)
            predictions = cross_val_predict(model, X_te, y_test, cv=6)
            with open(pickle_name, 'wb') as f:
                pickle.dump(model, f)
            # update_list appends in place; 'n/a' is used for any metric it
            # cannot compute, so the lists stay aligned
            update_list(y_test, predictions, explained_variances, mean_absolute_errors,
                        mean_squared_errors, median_absolute_errors, r2_scores)
            modeltypes.append(label)
            picklenames.append(pickle_name)
        except Exception:
            print('error - %s' % (error_label))

    # ordinary least squares — http://scikit-learn.org/stable/modules/linear_model.html
    _evaluate(linear_model.LinearRegression(),
              'ols.pickle', 'linear regression', 'ORDINARY LEAST SQUARES')

    # ridge regression — penalized residual sum of squares
    _evaluate(linear_model.Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True),
              'ridge.pickle', 'ridge regression', 'RIDGE REGRESSION')

    # LASSO — linear model with sparse coefficients
    _evaluate(linear_model.Lasso(alpha=0.1),
              'lasso.pickle', 'LASSO', 'LASSO')

    # elastic net — L1 + L2 regularization (previously always failed on `ytest`)
    _evaluate(linear_model.ElasticNet(),
              'enet.pickle', 'elastic net', 'ELASTIC NET')

    # least angle regression (LARS)
    _evaluate(linear_model.Lars(n_nonzero_coefs=1),
              'lars.pickle', 'Least angle regression (LARS)', 'LARS')

    # LARS lasso — lasso solved with the LARS algorithm
    _evaluate(linear_model.LassoLars(),
              'lars_lasso.pickle', 'LARS lasso', 'LARS LASSO')

    # orthogonal matching pursuit (OMP)
    _evaluate(linear_model.OrthogonalMatchingPursuit(),
              'omp.pickle', 'orthogonal matching pursuit (OMP)', 'ORTHOGONAL MATCHING PURSUIT (OMP)')

    # logistic regression (kept for parity with the original model suite,
    # although it is a classifier rather than a regressor)
    _evaluate(linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6),
              'lr.pickle', 'logistic regression', 'LOGISTIC REGRESSION')

    # stochastic gradient descent — SGD is sensitive to feature scaling,
    # so standardize both splits with a scaler fit on the training data
    try:
        scaler = StandardScaler()
        scaler.fit(X_train)
        _evaluate(linear_model.SGDRegressor(),
                  'sgd.pickle', 'stochastic gradient descent (SGD)', 'STOCHASTIC GRADIENT DESCENT',
                  X_tr=scaler.transform(X_train), X_te=scaler.transform(X_test))
    except Exception:
        print('error - STOCHASTIC GRADIENT DESCENT')

    # multi-layer perceptron; 'lbfgs' suits small/smooth datasets better than 'adam'
    _evaluate(MLPRegressor(solver='lbfgs'),
              'nn.pickle', 'perceptron', 'MLP REGRESSOR')

    # passive-aggressive regression — large-scale learning without a learning rate
    _evaluate(linear_model.PassiveAggressiveRegressor(random_state=0),
              'pa_regr.pickle', 'passive-agressive algorithm', 'PASSIVE-AGGRESSIVE')

    # RANSAC — robust fitting on random inlier subsets
    _evaluate(linear_model.RANSACRegressor(),
              'ransac.pickle', 'RANSAC', 'RANSAC')

    # Theil-Sen — median-based estimator, robust to multivariate outliers
    _evaluate(linear_model.TheilSenRegressor(random_state=42),
              'theilsen.pickle', 'Theil-Sen', 'THEILSEN')

    # Huber — linear loss on outliers instead of ignoring them
    _evaluate(linear_model.HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100),
              'huber.pickle', 'huber regression', 'HUBER')

    # polynomial features + linear regression (previously referenced the
    # unimported bare name `LinearRegression` and always failed)
    _evaluate(Pipeline([
                  ('poly', PolynomialFeatures(degree=5, include_bias=False)),
                  ('linreg', linear_model.LinearRegression(normalize=True)),
              ]),
              'poly_lr.pickle', 'polynomial (linear regression)', 'POLYNOMIAL')

    ##################################################
    ##              Report results                  ##
    ##################################################
    os.chdir(modeldir)
    print('\n\n')
    print('RESULTS: \n')

    # print a summary table in the terminal
    table = BeautifulTable()
    table.column_headers = ["model type", "R^2 score", "Mean Absolute Errors"]
    print(len(modeltypes))
    print(len(r2_scores))
    print(len(mean_absolute_errors))
    for i in range(len(modeltypes)):
        table.append_row([modeltypes[i], str(r2_scores[i]), str(mean_absolute_errors[i])])
    print(table)

    # spreadsheet of per-model metrics
    filename = common_name_model + '.xlsx'
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet()
    worksheet.write('A1', 'Model type')
    worksheet.write('B1', 'R^2 score')
    worksheet.write('C1', 'Explained Variances')
    worksheet.write('D1', 'Mean Absolute Errors')
    # NOTE(review): header label kept from the original; column E is never
    # populated below — confirm whether squared-log errors were intended here
    worksheet.write('E1', 'Mean Squared Log Errors')
    worksheet.write('F1', 'Median Absolute Errors')

    # pick the best model by mean absolute error; replace 'n/a' with a large
    # sentinel so unmeasured models can never win
    # NOTE(review): the sentinel 10 is inherited from the original code and is
    # only "large" for targets on a small scale — confirm for your label range
    mae = mean_absolute_errors
    for i in range(len(mae)):
        if mae[i] == 'n/a':
            mae[i] = 10
        else:
            mae[i] = float(mae[i])

    minval = np.amin(mae)
    ind = mae.index(minval)
    print('%s has the lowest mean absolute error (%s)' % (modeltypes[ind], str(minval)))

    # rename the winning pickle and copy it into the models directory
    os.chdir(tempdir)
    newname = common_name_model + '.pickle'
    print('saving file to disk (%s)...' % (newname))
    os.rename(picklenames[ind], newname)
    shutil.copy(os.getcwd() + '/' + newname, modeldir + '/' + newname)

    # delete the temp folder now that the winner has been copied out
    os.chdir(modeldir)
    shutil.rmtree(foldername)

    # write the per-model rows into the spreadsheet and open it for analysis
    for i in range(len(modeltypes)):
        try:
            worksheet.write('A' + str(i + 2), str(modeltypes[i]))
            worksheet.write('B' + str(i + 2), str(r2_scores[i]))
            worksheet.write('C' + str(i + 2), str(explained_variances[i]))
            worksheet.write('D' + str(i + 2), str(mean_absolute_errors[i]))
            worksheet.write('F' + str(i + 2), str(median_absolute_errors[i]))
        except Exception:
            pass
    workbook.close()

    files = list()
    files.append(common_name_model + '.xlsx')
    files.append(common_name_model + '.pickle')
    model_name = common_name_model + '.pickle'
    model_dir = os.getcwd()
    return model_name, model_dir, files
def train_sc(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_features,transform_model,settings,min_num):
# create common_name
selectedfeature=str(default_features) + ' (%s)'%(problemtype)
modelname=common_name_model
training_data='train labels'+'\n\n'+str(y_train)+'\n\n'+'test labels'+'\n\n'+str(y_test)+'\n\n'
filename=modelname
start=time.time()
c1=0
c5=0
try:
#decision tree
classifier2 = DecisionTreeClassifier(random_state=0)
classifier2.fit(X_train,y_train)
# cross val score taken from documentation (95% interval) - https://scikit-learn.org/stable/modules/cross_validation.html
scores = cross_val_score(classifier2, X_test, y_test,cv=5)
print('Decision tree accuracy (+/-) %s'%(str(scores.std()*2)))
c2=scores.mean()
c2s=scores.std()*2
print(c2)
except:
c2=0
c2s=0
try:
classifier3 = GaussianNB()
classifier3.fit(X_train,y_train)
scores = cross_val_score(classifier3, X_test, y_test,cv=5)
print('Gaussian NB accuracy (+/-) %s'%(str(scores.std()*2)))
c3=scores.mean()
c3s=scores.std()*2
print(c3)
except:
c3=0
c3s=0
try:
#svc
classifier4 = SVC()
classifier4.fit(X_train,y_train)
scores=cross_val_score(classifier4, X_test, y_test,cv=5)
print('SKlearn classifier accuracy (+/-) %s'%(str(scores.std()*2)))
c4=scores.mean()
c4s=scores.std()*2
print(c4)
except:
c4=0
c4s=0
try:
#adaboost
classifier6 = AdaBoostClassifier(n_estimators=100)
classifier6.fit(X_train,y_train)
scores = cross_val_score(classifier6, X_test, y_test,cv=5)
print('Adaboost classifier accuracy (+/-) %s'%(str(scores.std()*2)))
c6=scores.mean()
c6s=scores.std()*2
print(c6)
except:
c6=0
c6s=0
try:
#gradient boosting
classifier7=GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
classifier7.fit(X_train,y_train)
scores = cross_val_score(classifier7, X_test, y_test,cv=5)
print('Gradient boosting accuracy (+/-) %s'%(str(scores.std()*2)))
c7=scores.mean()
c7s=scores.std()*2
print(c7)
except:
c7=0
c7s=0
try:
#logistic regression
classifier8=LogisticRegression(random_state=1)
classifier8.fit(X_train,y_train)
scores = cross_val_score(classifier8, X_test, y_test,cv=5)
print('Logistic regression accuracy (+/-) %s'%(str(scores.std()*2)))
c8=scores.mean()
c8s=scores.std()*2
print(c8)
except:
c8=0
c8s=0
try:
#voting
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_atm.py | training/train_atm.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using ATM: https://github.com/HDI-Project/ATM
This is enabled if the default_training_script = ['atm']
'''
import pandas as pd
import os, sys, pickle, json, random, shutil, time
os.system('pip3 install atm==0.2.2')
os.system('pip3 install pandas==0.24.2')
import numpy as np
from atm import ATM
def convert_(X_train, y_train):
    '''
    Convert a feature matrix and label list into the pandas DataFrame
    layout ATM expects: columns feature_0..feature_{d-1} followed by a
    'class' column.

    Args:
        X_train: 2-D sequence of feature rows (all rows the same length).
        y_train: sequence of class labels, one per row of X_train.

    Returns:
        pandas.DataFrame with one row per training sample.
    '''
    # name the columns positionally; ATM only needs consistent headers
    feature_list = ['feature_%s' % str(i) for i in range(len(X_train[0]))]
    # build the frame in one shot instead of appending cell-by-cell;
    # the previous version swallowed indexing errors with a bare
    # `except: pass`, which could silently drop feature values
    data = pd.DataFrame(list(X_train), columns=feature_list)
    data['class'] = list(y_train)
    return data
def train_atm(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
	'''
	Train classifiers with the ATM AutoML library and export the best one.

	The pre-featurized train/test CSVs from the session's data folder are
	copied into a scratch ./atm_temp directory, ATM searches over models
	using the 'class_' column as the target, and the winning classifier is
	pickled back into the original working directory.

	Note: most parameters (X_train, X_test, y_train, y_test, mtype,
	problemtype, classes, default_featurenames, transform_model, settings)
	are accepted for signature consistency with the other train_*()
	backends but are not used here — ATM reads the data from the CSVs.

	Returns:
		(model_name, model_dir, files): the pickle filename, the directory
		it was written to, and a list of artifacts created during training.
	'''
	# create file names
	model_name=common_name_model+'.pickle'
	# session CSVs are prefixed with the first '_'-separated token of the model name
	csvname=common_name_model.split('_')[0]
	files=list()
	# initialize and train classifier
	atm = ATM()
	# create a temporary directory for all models
	curdir=os.getcwd()
	try:
		os.mkdir('atm_temp')
		os.chdir('atm_temp')
	except:
		# scratch directory left over from a previous run: wipe and recreate it
		shutil.rmtree('atm_temp')
		os.mkdir('atm_temp')
		os.chdir('atm_temp')
	try:
		# prefer the transformed feature CSVs when a feature transformer was used
		shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
		shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
	except:
		# fall back to the untransformed CSVs
		shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
		shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
	# train models
	results = atm.run(train_path='train.csv', class_column='class_')
	# stringified summaries; computed for inspection/debugging, currently unused
	data_results_=str(results.describe())
	bestclassifier=str(results.get_best_classifier())
	scores=str(results.get_scores())
	# export classifier / transfer to model directory
	results.export_best_classifier(model_name, force=True)
	shutil.move(os.getcwd()+'/'+model_name, curdir+'/'+model_name)
	files.append('atm_temp')
	files.append(model_name)
	files.append('atm.db')
	os.chdir(curdir)
	model_dir=os.getcwd()
	return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/model.py | training/model.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
This is Allie's modeling API to help build classification or regression models.
All you need to do is run the model.py script and you will be guided through the
modeling process.
Usage: python3 model.py
Alternative CLI Usage: python3 model.py audio 2 c gender males females
- audio = audio file type
- 2 = 2 classes
- c = classification (r for regression)
- gender = common name of model
- male = first class
- female = second class [via N number of classes]
For addditional documentation, check out
https://github.com/jim-schwoebel/allie/tree/master/training
'''
###############################################################
## IMPORT STATEMENTS ##
###############################################################
import os, sys, pickle, json, random, shutil, time, itertools, uuid, datetime, uuid, psutil, json, platform
from pyfiglet import Figlet
f=Figlet(font='doh')
print(f.renderText('Allie'))
f=Figlet(font='doom')
import pandas as pd
import matplotlib.pyplot as plt
###############################################################
## CREATE HELPER FUNCTIONS ##
###############################################################
def most_common(lst):
	'''
	Return the item that occurs most often in ``lst``.
	Ties are broken by whichever candidate is seen first while
	scanning the deduplicated items.
	'''
	occurrence_counts = {item: lst.count(item) for item in set(lst)}
	return max(occurrence_counts, key=occurrence_counts.get)
def prev_dir(directory):
	'''
	Return the parent of ``directory`` by dropping its final
	'/'-separated component (empty string when there is no slash).
	'''
	components = directory.split('/')
	return '/'.join(components[:-1])
def get_folders(listdir):
	'''
	Filter a directory listing down to folder-like names: entries
	that contain no '.' anywhere in the name.
	'''
	return [entry for entry in listdir if entry.find('.') < 0]
def classifyfolder(listdir):
	'''
	Guess the dominant sample type of a directory from its file
	extensions. Returns one of 'audio', 'image', 'text', 'video', or
	'csv'; ties (including an empty listing) resolve in that order.
	'''
	tallies = {'audio': 0, 'image': 0, 'text': 0, 'video': 0, 'csv': 0}
	extension_map = ((('.mp3', '.wav'), 'audio'),
	                 (('.png', '.jpg'), 'image'),
	                 (('.txt',), 'text'),
	                 (('.mp4', '.avi'), 'video'),
	                 (('.csv',), 'csv'))
	for filename in listdir:
		for suffixes, category in extension_map:
			if filename.endswith(suffixes):
				tallies[category] += 1
				break
	# max() keeps the first key on ties, matching dict insertion order
	return max(tallies, key=tallies.get)
def pull_element(mylist, element):
	'''
	Extract the value at index/key ``element`` from every item in
	``mylist`` and return them as a list, preserving order.
	'''
	return [item[element] for item in mylist]
def convert_csv(X_train, y_train, labels, mtype, classes):
	'''
	Convert a feature matrix plus labels into a single pandas DataFrame
	so every training backend consumes exactly the same .CSV dataset
	(making benchmark results comparable).

	Args:
		X_train: 2-D array-like of feature rows.
		y_train: labels; for multi-target regression, a sequence of
			per-sample sequences (one value per target in `classes`).
		labels: column names for the feature values.
		mtype: 'c' for classification (adds a 'class_' column) or 'r'
			for regression (adds one column per entry in `classes`).
		classes: class/target names; only used to name regression columns.

	Returns:
		pandas.DataFrame with feature columns followed by target column(s).
	'''
	# from pandas merging guide https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html
	feature_list=labels
	data=list()
	# build one single-row frame per sample, then concatenate them all
	for i in tqdm(range(len(X_train)), desc='converting csv...'):
		newlist=list()
		for j in range(len(X_train[i])):
			newlist.append([X_train[i][j]])
		temp=pd.DataFrame(dict(zip(feature_list,newlist)), index=[i])
		# print(temp)
		data.append(temp)
	data = pd.concat(data)
	if mtype == 'c':
		data['class_']=y_train
	elif mtype == 'r':
		if len(classes) == 1:
			# single regression target
			data[classes[0]]=y_train
		else:
			# one target column per class name, pulled element-wise from y_train
			for j in range(len(classes)):
				newy=pull_element(y_train, j)
				data[classes[j]]=newy
	data=pd.DataFrame(data, columns = list(data))
	# print this because in pretty much every case you will write the .CSV file afterwards
	print('writing csv file...')
	return data
def device_info():
	'''
	Gather a snapshot of the current machine — CPU/memory/disk counters
	from psutil plus OS name, release, version and the local timestamp —
	for stamping training-session metadata into the output .JSON.
	'''
	hardware = dict()
	hardware['memory'] = psutil.virtual_memory()
	hardware['cpu percent'] = psutil.cpu_percent()
	hardware['cpu times'] = psutil.cpu_times()
	hardware['cpu count'] = psutil.cpu_count()
	hardware['cpu stats'] = psutil.cpu_stats()
	hardware['cpu swap'] = psutil.swap_memory()
	hardware['partitions'] = psutil.disk_partitions()
	hardware['disk usage'] = psutil.disk_usage('/')
	hardware['disk io counters'] = psutil.disk_io_counters()
	hardware['battery'] = psutil.sensors_battery()
	hardware['boot time'] = psutil.boot_time()
	snapshot = dict()
	snapshot['time'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
	snapshot['timezone'] = time.tzname
	snapshot['operating system'] = platform.system()
	snapshot['os release'] = platform.release()
	snapshot['os version'] = platform.version()
	snapshot['cpu data'] = hardware
	# free bytes (index 2 of the disk_usage tuple) converted to gigabytes
	snapshot['space left'] = list(psutil.disk_usage('/'))[2]/1000000000
	return snapshot
def get_metrics(clf, problemtype, mtype, default_training_script, common_name, X_test, y_test, classes, modelname, settings, model_session, transformer_name, created_csv_files, test_data, model_start_time):
	'''
	Score a trained model on the held-out test set and write a .JSON
	summary of the training session alongside the model file.

	Each AutoML backend exposes a different prediction API, so the first
	section dispatches on default_training_script to obtain y_pred. The
	second section computes sklearn classification or regression metrics
	(each wrapped in try/except because not every metric applies to every
	problem, e.g. binary-only scores on multiclass labels), renders the
	relevant plots, and dumps the session metadata. Finally a
	requirements.txt snapshot is exported for reproducibility.

	Side effects: changes and restores the working directory, may shell
	out (alphapy, pip3 freeze), and writes .json/.png/requirements.txt.
	'''
	metrics_=dict()
	y_true=y_test
	# ---- obtain predictions (API differs per training backend) ----
	if default_training_script not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
		# standard scikit-learn-style estimator
		y_pred=clf.predict(X_test)
	elif default_training_script=='alphapy':
		# alphapy predicts through its CLI on files inside the session folder
		curdir=os.getcwd()
		print(os.listdir())
		os.chdir(common_name+'_alphapy_session')
		alphapy_dir=os.getcwd()
		os.chdir('input')
		os.rename('test.csv', 'predict.csv')
		os.chdir(alphapy_dir)
		os.system('alphapy --predict')
		os.chdir('output')
		listdir=os.listdir()
		for k in range(len(listdir)):
			if listdir[k].startswith('predictions'):
				csvfile=listdir[k]
		y_pred=pd.read_csv(csvfile)['prediction']
		os.chdir(curdir)
	elif default_training_script == 'autogluon':
		from autogluon import TabularPrediction as task
		test_data=test_data.drop(labels=['class'],axis=1)
		y_pred=clf.predict(test_data)
	elif default_training_script == 'autokeras':
		# autokeras returns per-class scores; take the argmax as the label
		y_pred=clf.predict(X_test)
		new_y_pred=[]
		for i in range(len(y_pred)):
			maxval=max(y_pred[i])
			index_=list(y_pred[i]).index(maxval)
			new_y_pred.append(index_)
		y_pred=new_y_pred
	elif default_training_script == 'autopytorch':
		y_pred=clf.predict(X_test).flatten()
	elif default_training_script == 'atm':
		curdir=os.getcwd()
		os.chdir('atm_temp')
		data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
		y_pred = clf.predict(data)
		os.chdir(curdir)
	elif default_training_script == 'ludwig':
		data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
		pred=clf.predict(data)['class__predictions']
		y_pred=np.array(list(pred), dtype=np.int64)
	elif default_training_script == 'devol':
		# devol expects a 4-D tensor; append two singleton dimensions
		X_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
		y_pred=clf.predict_classes(X_test).flatten()
	elif default_training_script=='keras':
		if mtype == 'c':
			y_pred=clf.predict_classes(X_test).flatten()
		elif mtype == 'r':
			y_pred=clf.predict(X_test).flatten()
	elif default_training_script=='neuraxle':
		y_pred=clf.transform(X_test)
	elif default_training_script=='safe':
		# SAFE requires a pandas DataFrame input
		test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
		y_pred=clf.predict(test_data)
	# ---- compute classification or regression metrics ----
	if mtype in ['c', 'classification']:
		mtype='classification'
		try:
			metrics_['accuracy']=metrics.accuracy_score(y_true, y_pred)
		except:
			metrics_['accuracy']='n/a'
		try:
			metrics_['balanced_accuracy']=metrics.balanced_accuracy_score(y_true, y_pred)
		except:
			metrics_['balanced_accuracy']='n/a'
		try:
			metrics_['precision']=metrics.precision_score(y_true, y_pred)
		except:
			metrics_['precision']='n/a'
		try:
			metrics_['recall']=metrics.recall_score(y_true, y_pred)
		except:
			metrics_['recall']='n/a'
		try:
			metrics_['f1_score']=metrics.f1_score(y_true, y_pred, pos_label=1)
		except:
			metrics_['f1_score']='n/a'
		try:
			metrics_['f1_micro']=metrics.f1_score(y_true, y_pred, average='micro')
		except:
			metrics_['f1_micro']='n/a'
		try:
			metrics_['f1_macro']=metrics.f1_score(y_true, y_pred, average='macro')
		except:
			metrics_['f1_macro']='n/a'
		try:
			metrics_['roc_auc']=metrics.roc_auc_score(y_true, y_pred)
		except:
			metrics_['roc_auc']='n/a'
		try:
			metrics_['roc_auc_micro']=metrics.roc_auc_score(y_true, y_pred, average='micro')
		except:
			metrics_['roc_auc_micro']='n/a'
		try:
			metrics_['roc_auc_macro']=metrics.roc_auc_score(y_true, y_pred, average='macro')
		except:
			# bug fix: this previously wrote to 'roc_auc_micro', leaving
			# 'roc_auc_macro' absent whenever the computation failed
			metrics_['roc_auc_macro']='n/a'
		try:
			metrics_['confusion_matrix']=metrics.confusion_matrix(y_true, y_pred).tolist()
		except:
			metrics_['confusion_matrix']='n/a'
		try:
			metrics_['classification_report']=metrics.classification_report(y_true, y_pred, target_names=classes)
		except:
			metrics_['classification_report']='n/a'
		try:
			plot_confusion_matrix(np.array(metrics_['confusion_matrix']), classes)
		except:
			print('error plotting confusion matrix')
		try:
			# predict_proba only works for log loss and modified Huber loss:
			# https://stackoverflow.com/questions/47788981/sgdclassifier-with-predict-proba
			try:
				y_probas = clf.predict_proba(X_test)[:, 1]
			except:
				try:
					y_probas = clf.decision_function(X_test)[:, 1]
				except:
					print('error making y_probas')
			plot_roc_curve(y_test, [y_probas], [default_training_script])
		except:
			print('error plotting ROC curve')
			print('predict_proba only works for log loss and modified Huber loss.')
	elif mtype in ['r', 'regression']:
		mtype='regression'
		metrics_['mean_absolute_error'] = metrics.mean_absolute_error(y_true, y_pred)
		metrics_['mean_squared_error'] = metrics.mean_squared_error(y_true, y_pred)
		metrics_['median_absolute_error'] = metrics.median_absolute_error(y_true, y_pred)
		metrics_['r2_score'] = metrics.r2_score(y_true, y_pred)
		plot_regressor(clf, classes, X_test, y_test)
	# ---- assemble and write the training-session summary ----
	data={'sample type': problemtype,
		  'training time': time.time()-model_start_time,
		  'created date': str(datetime.datetime.now()),
		  'device info': device_info(),
		  'session id': model_session,
		  'classes': classes,
		  'problem type': mtype,
		  'model name': modelname,
		  'model type': default_training_script,
		  'metrics': metrics_,
		  'settings': settings,
		  'transformer name': transformer_name,
		  'training data': created_csv_files,
		  'sample X_test': X_test[0].tolist(),
		  'sample y_test': y_test[0].tolist()}
	# name the .JSON after the model file, swapping the extension
	if modelname.endswith('.pickle'):
		jsonfilename=modelname[0:-7]+'.json'
	elif modelname.endswith('.h5'):
		jsonfilename=modelname[0:-3]+'.json'
	else:
		jsonfilename=modelname+'.json'
	jsonfile=open(jsonfilename,'w')
	json.dump(data,jsonfile)
	jsonfile.close()
	# also output requirements.txt for reproducibility purposes
	curdir=os.getcwd()
	basedir=prev_dir(curdir)
	os.chdir(basedir)
	os.system('pip3 freeze -> requirements.txt')
	# FUTURE - optionally copy cleaning/augmentation/feature libraries contextually
	os.chdir(curdir)
def plot_roc_curve(y_test, probs, clf_names):
	'''
	Overlay one ROC curve per classifier on a single figure and save it
	as roc_curve.png in the current directory. Classifiers whose scores
	cannot produce a valid curve are skipped with a console message.
	'''
	color_cycle = itertools.cycle('bgrcmyk')
	for index in range(len(probs)):
		print(y_test)
		print(probs[index])
		try:
			fper, tper, thresholds = roc_curve(y_test, probs[index])
			curve_label = clf_names[index]+' = %s'%(str(round(metrics.auc(fper, tper), 3)))
			plt.plot(fper, tper, color=next(color_cycle), label=curve_label)
			# dashed diagonal = chance-level reference
			plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
		except:
			print('passing %s'%(clf_names[index]))
	plt.xlabel('False Positive Rate')
	plt.ylabel('True Positive Rate')
	plt.title('Receiver Operating Characteristic (ROC) Curve')
	plt.legend()
	plt.tight_layout()
	plt.savefig('roc_curve.png')
	plt.close()
def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues, figsize=(10, 8), threshold=0.5):
	"""
	Render a confusion matrix as a heatmap and save it to
	confusion_matrix.png. When `normalize` is True each row is scaled
	to sum to 1 before plotting.
	"""
	if normalize:
		cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
		print("\nNormalized confusion matrix")
	else:
		print('\nConfusion matrix, without normalization')
	plt.figure(figsize=figsize)
	plt.imshow(cm, interpolation='nearest', cmap=cmap)
	plt.title(title)
	plt.colorbar()
	ticks = np.arange(len(classes))
	plt.xticks(ticks, classes, rotation=45, ha='right')
	plt.yticks(ticks, classes)
	if normalize:
		cell_format = '.2f'
	else:
		cell_format = 'd'
	color_cutoff = cm.max() * threshold
	for row in range(cm.shape[0]):
		for col in range(cm.shape[1]):
			value = cm[row, col]
			# skip near-zero cells to keep the plot readable
			if value > 0.001:
				plt.text(col, row, format(value, cell_format),
						 horizontalalignment="center",
						 color="white" if value > color_cutoff else "black")
	plt.ylabel('True label')
	plt.xlabel('Predicted label')
	plt.tight_layout()
	plt.savefig('confusion_matrix.png')
	plt.close()
def plot_regressor(regressor, classes, X_test, y_test):
	'''
	Visualize regression predictions against ground truth and save the
	charts as .png files in the current directory.

	When `classes` has exactly two entries, a bar chart of the first 25
	actual-vs-predicted pairs plus a scatter/line plot are produced;
	otherwise only the bar chart. Any plotting failure is reported to
	stdout and never raised, so training can continue regardless.

	NOTE(review): the two-entry branch calls plt.scatter(X_test, y_test),
	which presumably assumes 1-D inputs — confirm upstream shapes.
	'''
	try:
		y_pred = regressor.predict(X_test)
		# plot the first 25 records
		if len(classes) == 2:
			df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
			df1 = df.head(25)
			df1.plot(kind='bar',figsize=(16,10))
			plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
			plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
			plt.tight_layout()
			plt.savefig('bar_graph_predictions.png')
			plt.close()
			# plot a straight line on the data
			plt.scatter(X_test, y_test, color='gray')
			plt.plot(X_test, y_pred, color='red', linewidth=2)
			plt.tight_layout()
			plt.savefig('straight_line_predictions.png')
			plt.close()
		else:
			# multi-dimensional generalization
			df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
			df1 = df.head(25)
			df1.plot(kind='bar',figsize=(10,8))
			plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
			plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
			plt.tight_layout()
			plt.savefig('bar_graph_predictions.png')
			plt.close()
	except:
		# best-effort: charts are auxiliary output, never fail the session
		print('error plotting regressor')
def pursue_modeling(mtype, model_dir, problemtype, default_training_script,common_name_model):
	'''
	Check whether a model with this common name already exists in the
	target model directory, so an identical training session can be
	skipped. Naming conflicts are only detected for the TPOT backend
	(other backends always report no conflict for now).

	Returns:
		(model_exists, model_listdir): a boolean flag and the listing of
		the <problemtype>_models directory ([] when it does not exist).
	'''
	try:
		model_listdir = os.listdir(model_dir+'/'+problemtype+'_models')
	except:
		model_listdir = list()
	model_exists = False
	if default_training_script == 'tpot':
		# tpot writes <name>_classifier / <name>_regression artifacts
		if mtype == 'c' and common_name_model + '_classifier' in model_listdir:
			model_exists = True
		elif mtype == 'r' and common_name_model + '_regression' in model_listdir:
			model_exists = True
	return model_exists, model_listdir
def get_csvfiles(listdir):
	'''
	Return only the .csv entries from a directory listing,
	preserving their original order.
	'''
	return [entry for entry in listdir if entry.endswith('.csv')]
###############################################################
## LOADING SETTINGS ##
###############################################################
# load the default feature set
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
sys.path.append(prevdir+'/train_dir')
settings=json.load(open(prevdir+'/settings.json'))
# get all the default feature arrays
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
create_csv=settings['create_csv']
# prepare training and testing data (should have been already featurized) - # of classes/folders
os.chdir(prevdir+'/train_dir')
data_dir=os.getcwd()
listdir=os.listdir()
folders=get_folders(listdir)
csvfiles=get_csvfiles(listdir)
# now assess folders by content type
data=dict()
for i in range(len(folders)):
os.chdir(folders[i])
listdir=os.listdir()
filetype=classifyfolder(listdir)
data[folders[i]]=filetype
os.chdir(data_dir)
###############################################################
## INITIALIZE CLASSES ##
###############################################################
# get all information from sys.argv, and if not,
# go through asking user for the proper parameters
try:
problemtype=sys.argv[1]
mtype=sys.argv[3]
if mtype == 'c':
classnum=sys.argv[2]
common_name=sys.argv[4]
classes=list()
for i in range(int(classnum)):
classes.append(sys.argv[i+5])
else:
classnum=1
problemtype='csv'
mtype=sys.argv[1]
csvfile=sys.argv[2]
classes=[sys.argv[3]]
common_name=csvfile[0:-4]
except:
# now ask user what type of problem they are trying to solve
mtype=input('is this a classification (c) or regression (r) problem? \n')
while mtype not in ['c','r']:
print('input not recognized...')
mtype=input('is this a classification (c) or regression (r) problem? \n')
if mtype == 'c':
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype='csv'
if problemtype != 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
count=0
availableclasses=list()
for i in range(len(folders)):
if data[folders[i]]==problemtype:
availableclasses.append(folders[i])
count=count+1
classnum=input('how many classes would you like to model? (%s available) \n'%(str(count)))
print('these are the available classes: ')
print(availableclasses)
# get all if all (good for many classes)
classes=list()
if classnum=='all':
for i in range(len(availableclasses)):
classes.append(availableclasses[i])
else:
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
elif problemtype == 'csv':
print('\n OK cool, we got you modeling %s files \n'%(problemtype))
print('csv file options are: %s \n'%(csvfiles))
csvfile=input('which csvfile would you like to use for classification? \n')
g=pd.read_csv(csvfile)
columns=list(g)
print('potential targets include: %s'%(columns))
target=input('what target would you like to use? \n')
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
elif mtype =='r':
# for regression problems we need a target column to predict / classes from a .CSV
problemtype='csv'
# assumes the .CSV file is in the train dir
os.chdir(prevdir+'/train_dir')
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
while csvfile not in csvfiles:
print('answer not recognized...')
csvfile=input('what is the name of the spreadsheet (in ./train_dir) used for prediction? \n\n available: %s\n\n'%(str(csvfiles)))
# the available classes are only the numeric columns from the spreadsheet
data = pd.read_csv(csvfile)
columns = list(data)
availableclasses=list()
for i in range(len(columns)):
# look at filetype extension in each column
coldata=data[columns[i]]
sampletypes=list()
for j in range(len(coldata)):
try:
values=float(coldata[j])
sampletypes.append('numerical')
except:
if coldata[j].endswith('.wav'):
sampletypes.append('audio')
elif coldata[j].endswith('.txt'):
sampletypes.append('text')
elif coldata[j].endswith('.png'):
sampletypes.append('image')
elif coldata[j].endswith('.mp4'):
sampletypes.append('video')
else:
sampletypes.append('other')
coltype=most_common(sampletypes)
# correct the other category if needed
if coltype == 'other':
# if coltype.endswith('.csv'):
# coltype='csv'
if len(set(list(coldata))) < 10:
coltype='categorical'
else:
# if less than 5 unique answers then we can interpret this as text input
coltype='typedtext'
if coltype == 'numerical':
availableclasses.append(columns[i])
if len(availableclasses) > 0:
classnum=input('how many classes would you like to model? (%s available) \n'%(str(len(availableclasses))))
print('these are the available classes: %s'%(str(availableclasses)))
classes=list()
stillavailable=list()
for i in range(int(classnum)):
class_=input('what is class #%s \n'%(str(i+1)))
while class_ not in availableclasses and class_ not in '' or class_ in classes:
print('\n')
print('------------------ERROR------------------')
print('the input class does not exist (for %s files).'%(problemtype))
print('these are the available classes: ')
if len(stillavailable)==0:
print(availableclasses)
else:
print(stillavailable)
print('------------------------------------')
class_=input('what is class #%s \n'%(str(i+1)))
for j in range(len(availableclasses)):
stillavailable=list()
if availableclasses[j] not in classes:
stillavailable.append(availableclasses[j])
if class_ == '':
class_=stillavailable[0]
classes.append(class_)
else:
print('no classes available... ending session')
sys.exit()
common_name=input('what is the 1-word common name for the problem you are working on? (e.g. gender for male/female classification) \n')
###############################################################
## UPGRADE MODULES / LOAD MODULES ##
###############################################################
print('-----------------------------------')
print(' LOADING MODULES ')
print('-----------------------------------')
# upgrade to have the proper scikit-learn version later
os.chdir(cur_dir)
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
from sklearn import metrics
from sklearn.metrics import roc_curve
###############################################################
## CLEAN THE DATA ##
###############################################################
clean_data=settings['clean_data']
clean_dir=prevdir+'/cleaning'
if clean_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('CLEANING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# clean audio via default_audio_cleaners
os.chdir(clean_dir+'/audio_cleaning')
elif problemtype == 'text':
# clean text via default_text_cleaners
os.chdir(clean_dir+'/text_cleaning')
elif problemtype == 'image':
# clean images via default_image_cleaners
os.chdir(clean_dir+'/image_cleaning')
elif problemtype == 'video':
# clean video via default_video_cleaners
os.chdir(clean_dir+'/video_cleaning')
elif problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
elif clean_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# clean .CSV via default_csv_cleaners
os.chdir(clean_dir+'/csv_cleaning')
os.system('python3 clean.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## AUGMENT THE DATA ##
###############################################################
augment_data=settings['augment_data']
augment_dir=prevdir+'/augmentation'
if augment_data == True and mtype == 'c':
# only pursue augmentation strategies on directories of files and classification problems
print('-----------------------------------')
print(f.renderText('AUGMENTING DATA'))
print('-----------------------------------')
for i in range(len(classes)):
if problemtype == 'audio':
# augment audio via default_audio_augmenters
os.chdir(augment_dir+'/audio_augmentation')
elif problemtype == 'text':
# augment text via default_text_augmenters
os.chdir(augment_dir+'/text_augmentation')
elif problemtype == 'image':
# augment images via default_image_augmenters
os.chdir(augment_dir+'/image_augmentation')
elif problemtype == 'video':
# augment video via default_video_augmenters
os.chdir(augment_dir+'/video_augmentation')
elif problemtype == 'csv':
# augment .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
elif augment_data == True and mtype == 'r':
for i in range(len(classes)):
if problemtype == 'csv':
# featurize .CSV via default_csv_augmenters
os.chdir(augment_dir+'/csv_augmentation')
os.system('python3 augment.py "%s"'%(data_dir+'/'+classes[i]))
###############################################################
## FEATURIZE FILES ##
###############################################################
# now featurize each class (in proper folder)
if mtype == 'c':
data={}
print('-----------------------------------')
print(f.renderText('FEATURIZING DATA'))
print('-----------------------------------')
if problemtype == 'csv':
# csv features should have already been defined
# need to separate into number of unique classes
csv_labels=g[target]
csv_features=g.drop([target], axis=1)
csv_feature_labels=list(csv_features)
classes=list(set(list(csv_labels)))
for i in range(len(classes)):
class_type = classes[i]
feature_list=list()
label_list=list()
for i in range(len(csv_features)):
if csv_labels[i] == class_type:
feature_list.append(list(csv_features.iloc[i,:]))
label_list.append(csv_feature_labels)
data[class_type]=feature_list
else:
#
for i in range(len(classes)):
class_type=classes[i]
if problemtype == 'audio':
# featurize audio
os.chdir(prevdir+'/features/audio_features')
default_features=default_audio_features
elif problemtype == 'text':
# featurize text
os.chdir(prevdir+'/features/text_features')
default_features=default_text_features
elif problemtype == 'image':
# featurize images
os.chdir(prevdir+'/features/image_features')
default_features=default_image_features
elif problemtype == 'video':
# featurize video
os.chdir(prevdir+'/features/video_features')
default_features=default_video_features
print('-----------------------------------')
print(' FEATURIZING %s'%(classes[i].upper()))
print('-----------------------------------')
os.system('python3 featurize.py "%s"'%(data_dir+'/'+classes[i]))
os.chdir(data_dir+'/'+classes[i])
# load audio features
listdir=os.listdir()
feature_list=list()
label_list=list()
for j in range(len(listdir)):
if listdir[j][-5:]=='.json':
try:
g=json.load(open(listdir[j]))
# consolidate all features into one array (if featurizing with multiple featurizers)
default_feature=list()
default_label=list()
for k in range(len(default_features)):
default_feature=default_feature+g['features'][problemtype][default_features[k]]['features']
default_label=default_label+g['features'][problemtype][default_features[k]]['labels']
feature_list.append(default_feature)
label_list.append(default_label)
except:
print('ERROR - skipping ' + listdir[j])
data[class_type]=feature_list
elif mtype == 'r':
# featurize .CSV
os.chdir(prevdir+'/features/csv_features')
output_file=str(uuid.uuid1())+'.csv'
os.system('python3 featurize_csv_regression.py -i "%s" -o "%s" -t "%s"'%(prevdir+'/train_dir/'+csvfile, prevdir+'/train_dir/'+output_file, classes[0]))
csvfile=output_file
default_features=['csv_regression']
###############################################################
## GENERATE TRAINING DATA ##
###############################################################
print('-----------------------------------')
print(f.renderText('CREATING TRAINING DATA'))
print('-----------------------------------')
# perform class balance such that both classes have the same number
# of members (true by default, but can also be false)
os.chdir(prevdir+'/training/')
model_dir=prevdir+'/models'
balance=settings['balance_data']
remove_outliers=settings['remove_outliers']
outlier_types=settings['default_outlier_detector']
if mtype == 'c':
if problemtype != 'csv':
jsonfile=''
for i in range(len(classes)):
if i==0:
jsonfile=classes[i]
else:
jsonfile=jsonfile+'_'+classes[i]
jsonfile=jsonfile+'.json'
#try:
g=data
alldata=list()
labels=list()
lengths=list()
# check to see all classes are same length and reshape if necessary
for i in range(len(classes)):
class_=g[classes[i]]
lengths.append(len(class_))
lengths=np.array(lengths)
minlength=np.amin(lengths)
# now load all the classes
for i in range(len(classes)):
class_=g[classes[i]]
random.shuffle(class_)
# only balance if specified in settings
if balance==True:
if len(class_) > minlength:
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_safe.py | training/train_safe.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using SAFE: https://github.com/ModelOriented/SAFE
This is enabled if the default_training_script = ['safe']
'''
import os, sys, shutil, pickle, json
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
print('installing library')
os.system('pip3 install safe-transformer==0.0.5')
from SafeTransformer import SafeTransformer
def train_safe(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# only store transform and surrogate model
model_name=common_name_model+'.pickle'
files=list()
curdir=os.getcwd()
csvname=common_name_model.split('_')[0]
# get training and testing data
try:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
except:
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
# now load the training data as pandas dataframe
data=pd.read_csv('train.csv')
X_train=data.drop(columns=['class_'], axis=1)
y_train=data['class_']
print('Starting FIT')
if mtype in ['classification', 'c']:
print('CLASSIFICATION')
print('training surrogate model...')
surrogate_model = XGBClassifier().fit(X_train, y_train)
print('training base model...')
base_model = LogisticRegression().fit(X_train, y_train)
safe_transformer = SafeTransformer(model=surrogate_model, penalty=1)
pipe = Pipeline(steps=[('safe', safe_transformer), ('linear', base_model)])
print('training pipeline...')
pipe = pipe.fit(X_train, y_train)
elif mtype in ['regression', 'r']:
print('REGRESSION')
surrogate_model = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1,loss='huber')
print('training surrogate model...')
surrogate_model = surrogate_model.fit(X_train, y_train)
print('loading base model')
linear_model = LinearRegression()
safe_transformer = SafeTransformer(surrogate_model, penalty = 0.84)
print('training pipeline...')
pipe = Pipeline(steps=[('safe', safe_transformer), ('linear', linear_model)])
pipe = pipe.fit(X_train, y_train)
# SAVE SURROGATE ML MODEL
modelfile=open(model_name,'wb')
pickle.dump(pipe, modelfile)
modelfile.close()
files.append(model_name)
files.append('train.csv')
files.append('test.csv')
model_dir=os.getcwd()
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_autokaggle.py | training/train_autokaggle.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autokaggle: https://github.com/datamllab/autokaggle
This is enabled if the default_training_script = ['autokaggle']
'''
import os, pickle
curdir=os.getcwd()
print(os.getcwd())
print('initializing installation')
os.system('pip3 install autokaggle==0.1.0')
os.system('pip3 install scikit-learn==0.22')
from autokaggle.tabular_supervised import TabularClassifier
from autokaggle.tabular_supervised import TabularRegressor
os.chdir(curdir)
def train_autokaggle(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
model_name=common_name_model+'.pickle'
files=list()
if mtype in ['classification', 'c']:
# fit classifier
clf = TabularClassifier()
clf.fit(X_train, y_train, time_limit=12 * 60 * 60)
# SAVE ML MODEL
modelfile=open(model_name,'wb')
pickle.dump(clf, modelfile)
modelfile.close()
elif mtype in ['regression', 'r']:
print("Starting AutoKaggle")
clf = TabularRegressor()
clf.fit(X_train, y_train, time_limit=12 * 60 * 60)
# saving model
print('saving model')
modelfile=open(model_name,'wb')
pickle.dump(clf, modelfile)
modelfile.close()
model_dir=os.getcwd()
files.append(model_name)
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/regression_all.py | training/regression_all.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Builds many regression models based on .CSV files located in the train_dir.
Relies on this script: https://github.com/jim-schwoebel/allie/blob/master/train_dir/make_csv_regression.py
Note this is for single target regression problems only.
'''
import os, shutil, time
import pandas as pd
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def pursue_modeling(mtype, model_dir, problemtype, default_training_script, common_name_model):
'''
simple script to decide whether or not to continue modeling the data.
'''
try:
model_listdir=os.listdir(model_dir+'/'+problemtype+'_models')
except:
model_listdir=list()
# note that these are tpot definitions
model_exists=False
if default_training_script == 'tpot':
if common_name_model + 'tpot_classifier' in model_listdir and mtype == 'c':
model_exists=True
elif common_name_model +'tpot_regression' in model_listdir and mtype == 'r':
model_exists=True
else:
# only look for naming conflicts with TPOT for now, can expand into the future.
model_exists=False
return model_exists, model_listdir
def replace_nonstrings(string_):
# alphabet to keep characters
alphabet=['a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z',' ','_',
'1','2','3','4','5','6','7','8','9','0']
string_=string_.lower().replace(' ','_')
newstring=''
for j in range(len(string_)):
if string_[j] not in alphabet:
pass
else:
newstring=newstring+string_[j]
if len(newstring) > 50:
newstring=newstring[0:50]
return newstring
curdir=os.getcwd()
prevdir=prev_dir(curdir)
train_dir=prevdir+'/train_dir'
model_dir=prevdir+'/models'
problemtype='audio'
default_training_script='tpot'
os.chdir(train_dir)
listdir=os.listdir()
csvfiles=list()
for i in range(len(listdir)):
if listdir[i].endswith('.csv'):
csvfiles.append(listdir[i])
# now train models
os.chdir(curdir)
for i in range(len(csvfiles)):
os.chdir(train_dir)
data=pd.read_csv(csvfiles[i])
class_=list(data)[1]
os.chdir(curdir)
uniquevals=list(set(list(data[class_])))
common_name=replace_nonstrings(class_)
# make regression model (if doesn't already exist)
# model_exists, model_listdir = pursue_modeling('r', model_dir, problemtype, default_training_script, common_name)
# if model_exists == False:
# os.system('python3 model.py r "%s" "%s" "%s"'%(csvfiles[i], class_, common_name))
# else:
# print('skipping - %s'%(common_name+'_tpot_regression'))
os.chdir(prevdir+'/train_dir/')
# make classification model (allows for visualizations and averages around mean)
model_exists, model_listdir = pursue_modeling('c', model_dir, problemtype, default_training_script, common_name)
if model_exists == False:
os.system('python3 create_dataset.py "%s" "%s"'%(csvfiles[i], class_))
os.chdir(curdir)
if len(uniquevals) > 10:
os.system('python3 model.py audio 2 c "%s" "%s" "%s"'%(common_name, replace_nonstrings(class_)+'_above', replace_nonstrings(class_)+'_below'))
# remove temporary directories for classification model training
try:
shutil.rmtree(prevdir+'/train_dir/'+class_+'_above')
except:
pass
try:
shutil.rmtree(prevdir+'/train_dir/'+class_+'_below')
except:
pass
else:
command='python3 model.py audio %s c "%s"'%(str(len(uniquevals)), common_name)
for j in range(len(uniquevals)):
newstring=replace_nonstrings(str(uniquevals[j]))
command=command+' '+'"%s"'%(common_name+'_'+newstring)
os.system(command)
for j in range(len(uniquevals)):
try:
newstring=replace_nonstrings(str(uniquevals[j]))
shutil.rmtree(prevdir+'/train_dir/'+common_name+'_'+newstring)
except:
pass
else:
print('skipping - %s'%(common_name+'_tpot_classifier')) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_alphapy.py | training/train_alphapy.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using alphapy.
This is enabled if the default_training_script = ['alphapy']
'''
import os
# install required dependencies (important to have this config for everything to work)
print('installing dependencies')
# 2.4.0 before
os.system('pip3 install alphapy==2.4.2')
os.system('pip3 install imbalanced-learn==0.5.0')
os.system('pip3 install pandas==1.0')
os.system('pip3 install pandas-datareader==0.8.1')
os.system('pip3 install xgboost==0.80')
import sys, pickle, json, random, shutil, time, yaml
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
def convert_(X_train, y_train, labels):
# create proper training data
feature_list=labels
data=dict()
for i in range(len(X_train)):
for j in range(len(feature_list)-1):
if i > 0:
try:
data[feature_list[j]]=data[feature_list[j]]+[X_train[i][j]]
except:
pass
else:
data[feature_list[j]]=[X_train[i][j]]
print(data)
data['class_']=y_train
data=pd.DataFrame(data, columns = list(data))
print(data)
print(list(data))
return data
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def edit_modelfile(data_,mtype,csvfilename):
# open the yml file
list_doc=yaml.load(open("model.yml"), Loader=yaml.Loader)
os.remove('model.yml')
# load sections / format and modify appropriately
# ----> just change file name
project=list_doc['project']
# project['submission_file']=csvfilename[0:-4]
# ----> set right target value here
# { 'features': '*', 'sampling': {'option': False, 'method': 'under_random', 'ratio': 0.0}, 'sentinel': -1, 'separator': ',', 'shuffle': False, 'split': 0.4, 'target': 'won_on_spread', 'target_value': True}
data=list_doc['data']
print(data)
data['drop']=['Unnamed: 0']
data['shuffle']=True
data['split']=0.4
data['target']='class_'
# ----> now set right model parameters here
model=list_doc['model']
# {'algorithms': ['RF', 'XGB'], 'balance_classes': False, 'calibration': {'option': False, 'type': 'isotonic'}, 'cv_folds': 3, 'estimators': 201, 'feature_selection': {'option': False, 'percentage': 50, 'uni_grid': [5, 10, 15, 20, 25], 'score_func': 'f_classif'}, 'grid_search': {'option': True, 'iterations': 50, 'random': True, 'subsample': False, 'sampling_pct': 0.25}, 'pvalue_level': 0.01, 'rfe': {'option': True, 'step': 5}, 'scoring_function': 'roc_auc', 'type': 'classification'}
if mtype in ['classification', 'c']:
model['algorithms']=['AB','GB','KNN','LOGR','RF','XGB','XT'] # removed 'KERASC', 'LSVC', 'LSVM', 'NB', 'RBF', 'SVM', 'XGBM'
model['scoring_function']='roc_auc'
model['type']='classification'
elif mtype in ['regression','r']:
model['algorithms']=['GBR','KNR','LR','RFR','XGBR','XTR'] # remove 'KERASR'
model['scoring_function']='mse'
model['type']='regression'
# just remove the target class
features=list_doc['features']
# features['factors']=list(data_).remove('class_')
# everything else remains the same
pipeline=list_doc['pipeline']
plots=list_doc['plots']
xgboost_=list_doc['xgboost']
# now reconfigure the doc
list_doc['project']=project
list_doc['data']=data
list_doc['model']=model
list_doc['features']=features
list_doc['pipeline']=pipeline
list_doc['plots']=plots
# list_doc['xgboost']=xgboost
print(list_doc)
# now re-write the file
print('re-writing YAML config file...')
file=open("model.yml", 'w')
yaml.dump(list_doc, file)
file.close()
print(list_doc)
file.close()
def train_alphapy(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# create file names
csvfilename=common_name_model+'.csv'
picklefilename=common_name_model+'.pickle'
folder=common_name_model+'_session'
csvname=common_name_model.split('_')[0]
# files
files=list()
# this should be the model directory
hostdir=os.getcwd()
try:
os.mkdir(folder)
except:
shutil.rmtree(folder)
os.mkdir(folder)
basedir=os.getcwd()
# make required directories
os.chdir(folder)
os.mkdir('data')
os.mkdir('input')
# now make a .CSV of all the data
os.chdir('data')
try:
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_all_transformed.csv',os.getcwd()+'/'+csvfilename)
except:
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_all.csv',os.getcwd()+'/'+csvfilename)
data=pd.read_csv(csvfilename)
os.remove(csvfilename)
os.chdir(basedir)
os.chdir(folder)
os.chdir('input')
try:
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
except:
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
shutil.copy(hostdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
os.chdir(basedir)
shutil.copytree(prev_dir(hostdir)+'/training/helpers/alphapy/config/', basedir+'/'+folder+'/config')
os.chdir(folder)
os.chdir('config')
edit_modelfile(data, mtype, csvfilename)
os.chdir(basedir)
os.chdir(folder)
os.system('alphapy')
os.chdir(hostdir)
# get variables
model_name=folder
model_dir=os.getcwd()
files.append(folder)
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_autopytorch.py | training/train_autopytorch.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autopytorch: https://github.com/automl/Auto-PyTorch
This is enabled if the default_training_script = ['autopytorch']
'''
import os, json, shutil, pickle, sys
os.system('pip3 install torch==1.5.0')
import torch
import pandas as pd
print('installing library')
os.system('pip3 install autopytorch==0.0.2')
'''
From the documentation:
--> https://github.com/automl/Auto-PyTorch
# saving/loading torch models:
--> https://pytorch.org/tutorials/beginner/saving_loading_models.html
'''
def train_autopytorch(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# name model
model_name=common_name_model+'.pickle'
files=list()
if mtype=='c':
from autoPyTorch import AutoNetClassification
autonet = AutoNetClassification(log_level='debug', max_runtime=900, min_budget=50, max_budget=150)
autonet.fit(X_train, y_train, validation_split=0.30)
print(autonet.predict(X_test).flatten())
if mtype=='r':
from autoPyTorch import AutoNetRegression
autonet = AutoNetRegression(log_level='debug', max_runtime=900, min_budget=50, max_budget=150)
autonet.fit(X_train, y_train)
print(autonet.predict(X_test).flatten())
print('saving model -->')
torch.save(autonet, model_name)
# get model directory
files.append(model_name)
files.append('configs.json')
files.append('results.json')
model_dir=os.getcwd()
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_TPOT.py | training/train_TPOT.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using TPOT: https://epistasislab.github.io/tpot/
This is enabled if the default_training_script = ['tpot']
'''
import os, sys, pickle, json, random, shutil, time
import numpy as np
os.system('pip3 install tpot==0.11.3')
from tpot import TPOTClassifier
from tpot import TPOTRegressor
def train_TPOT(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_features,transform_model,settings, model_session):
# get modelname
modelname=common_name_model
files=list()
if mtype in ['classification', 'c']:
tpot=TPOTClassifier(generations=10, population_size=50, verbosity=2, n_jobs=-1, scoring='accuracy')
tpotname='%s_classifier.py'%(modelname)
elif mtype in ['regression','r']:
tpot = TPOTRegressor(generations=10, population_size=20, verbosity=2)
tpotname='%s_regression.py'%(modelname)
# fit classifier
tpot.fit(X_train, y_train)
tpot.export(tpotname)
# export data to .json format (use all data to improve model accuracy, as it's already tested)
data={
'data': X_train.tolist(),
'labels': y_train.tolist(),
}
jsonfilename='%s.json'%(tpotname[0:-3])
jsonfile=open(jsonfilename,'w')
json.dump(data,jsonfile)
jsonfile.close()
# now edit the file and run it
g=open(tpotname).read()
g=g.replace("import numpy as np", "import numpy as np \nimport json, pickle")
g=g.replace("tpot_data = pd.read_csv(\'PATH/TO/DATA/FILE\', sep=\'COLUMN_SEPARATOR\', dtype=np.float64)","g=json.load(open('%s'))\ntpot_data=np.array(g['labels'])"%(jsonfilename))
g=g.replace("features = tpot_data.drop('target', axis=1)","features=np.array(g['data'])\n")
g=g.replace("tpot_data['target'].values", "tpot_data")
g=g.replace("results = exported_pipeline.predict(testing_features)", "print('saving classifier to disk')\nf=open('%s','wb')\npickle.dump(exported_pipeline,f)\nf.close()"%(jsonfilename[0:-5]+'.pickle'))
g1=g.find('exported_pipeline = ')
g2=g.find('exported_pipeline.fit(training_features, training_target)')
g=g.replace('.values','')
g=g.replace("tpot_data['target']",'tpot_data')
modeltype=g[g1:g2]
os.remove(tpotname)
t=open(tpotname,'w')
t.write(g)
t.close()
print('')
os.system('python3 %s'%(tpotname))
# now write an accuracy label
os.remove(jsonfilename)
# get model_name
model_dir=os.getcwd()
model_name=tpotname[0:-3]+'.pickle'
# tpot file will be here now
files.append(tpotname)
files.append(model_name)
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_hypsklearn.py | training/train_hypsklearn.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using hypsklearn: https://github.com/hyperopt/hyperopt-sklearn
This is enabled if the default_training_script = ['hypsklearn']
'''
import os, pickle
os.system('export OMP_NUM_THREADS=1')
from hpsklearn import HyperoptEstimator, any_preprocessing, any_classifier, any_regressor
from hyperopt import tpe
def train_hypsklearn(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
modelname=common_name_model+'.pickle'
files=list()
if mtype in [' classification', 'c']:
estim = HyperoptEstimator(classifier=any_classifier('my_clf'),
preprocessing=any_preprocessing('my_pre'),
algo=tpe.suggest,
max_evals=100,
trial_timeout=120)
# Search the hyperparameter space based on the data
estim.fit(X_train, y_train)
elif mtype in ['regression','r']:
estim = HyperoptEstimator(classifier=any_regressor('my_clf'),
preprocessing=any_preprocessing('my_pre'),
algo=tpe.suggest,
max_evals=100,
trial_timeout=120)
# Search the hyperparameter space based on the data
estim.fit(X_train, y_train)
# Show the results
print(estim.score(X_test, y_test))
print(estim.best_model())
scores=estim.score(X_test, y_test)
bestmodel=str(estim.best_model())
print('saving classifier to disk')
f=open(modelname,'wb')
pickle.dump(estim,f)
f.close()
files.append(modelname)
modeldir=os.getcwd()
return modelname, modeldir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_mlblocks.py | training/train_mlblocks.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using MLBlocks: https://github.com/HDI-Project/MLBlocks
This is enabled if the default_training_script = ['mlblocks']
'''
import os, json, shutil, pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_log_error
print('installing library')
os.system('pip3 install mlprimitives')
os.system('pip3 install mlblocks==0.3.4')
from mlblocks import MLPipeline
# os.system('pip3 install xgboost==0.80')
'''
From the documentation: https://hdi-project.github.io/MLBlocks/pipeline_examples/single_table.html
'''
def train_mlblocks(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# name model
model_name=common_name_model+'.pickle'
files=list()
if mtype=='c':
# aet up primitives and train model
primitives = ['sklearn.impute.SimpleImputer',
'xgboost.XGBClassifier']
init_params = {'sklearn.impute.SimpleImputer': {'strategy': 'median'},
'xgboost.XGBClassifier': {'learning_rate': 0.1}}
pipeline = MLPipeline(primitives, init_params=init_params)
pipeline.fit(X_train, y_train)
if mtype=='r':
# aet up primitives and train model
primitives = ['sklearn.impute.SimpleImputer',
'xgboost.XGBRegressor']
pipeline = MLPipeline(primitives)
pipeline.fit(X_train, y_train)
# saving model
print('saving model')
modelfile=open(model_name,'wb')
pickle.dump(pipeline, modelfile)
modelfile.close()
# get model directory
files.append(model_name)
model_dir=os.getcwd()
return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_hungabunga.py | training/train_hungabunga.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using hungabunga: https://github.com/ypeleg/HungaBunga
This is enabled if the default_training_script = ['hungabunga']
'''
import os, sys, pickle, json, random, shutil, time
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier, LogisticRegression, Perceptron, PassiveAggressiveClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from sklearn.cluster import KMeans
from sklearn.neighbors import KNeighborsClassifier, NearestCentroid, RadiusNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, DotProduct, Matern, StationaryKernelMixin, WhiteKernel
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostRegressor, ExtraTreesRegressor, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import is_classifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
############################
### CORE
###########################
import warnings
warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore', 'Solver terminated early.*')
import sklearn.model_selection
import numpy as np
nan = float('nan')
import traceback
from pprint import pprint
from collections import Counter
from multiprocessing import cpu_count
from time import time
from tabulate import tabulate
try: from tqdm import tqdm
except: tqdm = lambda x: x
from sklearn.cluster import KMeans
from sklearn.model_selection import StratifiedShuffleSplit as sss, ShuffleSplit as ss, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier, ExtraTreesRegressor, AdaBoostClassifier, AdaBoostRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn import model_selection
# Estimator classes whose 'max_features' grid must be clipped to the actual
# number of features in the data before searching (see main_loop below).
TREE_N_ENSEMBLE_MODELS = [RandomForestClassifier, GradientBoostingClassifier, DecisionTreeClassifier, DecisionTreeRegressor,ExtraTreesClassifier, ExtraTreesRegressor, AdaBoostClassifier, AdaBoostRegressor]
class GridSearchCVProgressBar(sklearn.model_selection.GridSearchCV):
    """GridSearchCV that shows a tqdm progress bar while fitting candidates."""

    def _get_param_iterator(self):
        # Materialize the candidate grid so its total size is known up front.
        iterator = super(GridSearchCVProgressBar, self)._get_param_iterator()
        iterator = list(iterator)
        n_candidates = len(iterator)
        # Total number of fits = candidates * CV folds; used as the bar's upper bound.
        cv = sklearn.model_selection._split.check_cv(self.cv, None)
        n_splits = getattr(cv, 'n_splits', 3)
        max_value = n_candidates * n_splits
        class ParallelProgressBar(sklearn.model_selection._search.Parallel):
            def __call__(self, iterable):
                # NOTE(review): if the tqdm import fell back to the identity
                # lambda at the top of this file, set_description will fail.
                iterable = tqdm(iterable, total=max_value)
                iterable.set_description("GridSearchCV")
                return super(ParallelProgressBar, self).__call__(iterable)
        # NOTE(review): globally monkey-patches a private sklearn attribute;
        # fragile across sklearn versions and affects all searches in-process.
        sklearn.model_selection._search.Parallel = ParallelProgressBar
        return iterator
class RandomizedSearchCVProgressBar(sklearn.model_selection.RandomizedSearchCV):
    """RandomizedSearchCV that shows a tqdm progress bar while fitting candidates."""

    def _get_param_iterator(self):
        # Materialize the sampled candidates so their count is known up front.
        iterator = super(RandomizedSearchCVProgressBar, self)._get_param_iterator()
        iterator = list(iterator)
        n_candidates = len(iterator)
        # Total number of fits = candidates * CV folds; used as the bar's upper bound.
        cv = sklearn.model_selection._split.check_cv(self.cv, None)
        n_splits = getattr(cv, 'n_splits', 3)
        max_value = n_candidates * n_splits
        class ParallelProgressBar(sklearn.model_selection._search.Parallel):
            def __call__(self, iterable):
                iterable = tqdm(iterable, total=max_value)
                iterable.set_description("RandomizedSearchCV")
                return super(ParallelProgressBar, self).__call__(iterable)
        # NOTE(review): same private-API monkey-patch as GridSearchCVProgressBar.
        sklearn.model_selection._search.Parallel = ParallelProgressBar
        return iterator
def upsample_indices_clf(inds, y):
    """Rebalance a classification fold by oversampling minority classes.

    Returns ``inds`` followed by extra copies of each minority class's
    indices so that every class reaches the majority-class count. Whole
    extra copies are appended deterministically via np.repeat; any
    remainder is drawn at random without replacement.
    """
    assert len(inds) == len(y)
    class_counts = dict(Counter(y))
    target = max(class_counts.values())
    padding = []
    for label, n_samples in class_counts.items():
        if n_samples == target:
            continue
        whole_copies = int(target / n_samples)
        label_inds = inds[y == label]
        full_part = np.repeat(label_inds, whole_copies - 1)
        leftover = np.random.choice(label_inds, target - whole_copies * n_samples, replace=False)
        padding.append(np.concatenate((full_part, leftover)))
    return np.concatenate([inds] + padding)
def cv_clf(x, y, test_size = 0.2, n_splits = 5, random_state=None, doesUpsample = True):
    """Yield stratified (train, validation) index splits for classification.

    When ``doesUpsample`` is true (the default), each training fold is
    rebalanced with ``upsample_indices_clf`` so every class reaches the
    majority-class count; validation indices are left untouched.

    Args:
        x, y: feature matrix and label vector (numpy arrays).
        test_size: fraction (or absolute count) of samples per validation fold.
        n_splits: number of shuffle splits to yield.
        random_state: seed forwarded to StratifiedShuffleSplit.
        doesUpsample: if False, yield the raw stratified splits unchanged.

    Yields:
        (train_indices, validation_indices) tuples.
    """
    # Keyword arguments: test_size is keyword-only in modern scikit-learn.
    sss_obj = sss(n_splits=n_splits, test_size=test_size, random_state=random_state).split(x, y)
    if not doesUpsample:
        # BUGFIX: the original `yield sss_obj` emitted the generator object
        # itself and then fell through to the upsampling loop anyway.
        yield from sss_obj
        return
    for train_inds, valid_inds in sss_obj:
        yield (upsample_indices_clf(train_inds, y[train_inds]), valid_inds)
def cv_reg(x, test_size = 0.2, n_splits = 5, random_state=None):
    """Return plain (train, validation) shuffle splits for regression problems."""
    splitter = ss(n_splits, test_size, random_state=random_state)
    return splitter.split(x)
def timeit(klass, params, x, y):
    """Return the wall-clock seconds needed to build and fit one ``klass(**params)`` on (x, y)."""
    started = time()
    estimator = klass(**params)
    estimator.fit(x, y)
    return time() - started
def main_loop(models_n_params, x, y, isClassification, test_size = 0.2, n_splits = 5, random_state=None, upsample=True, scoring=None, verbose=True, n_jobs =cpu_count() - 1, brain=False, grid_search=True):
    """Grid/random-search every (estimator class, param grid) pair and return the best.

    Args:
        models_n_params: iterable of (estimator_class, param_grid) tuples.
        x, y: feature matrix (2-D, needs .shape[1]) and target vector.
        isClassification: stratified+upsampled CV if True, plain shuffle CV if False.
        scoring: sklearn scorer name; defaults to 'accuracy' / 'neg_mean_squared_error'.
        brain / verbose: progress banners and per-model result printing.
        grid_search: exhaustive GridSearchCV if True, else RandomizedSearchCV.

    Returns:
        (winner_estimator, results) where results is a list of
        (best_estimator, best_score, fit_seconds) per model class; model
        classes that raised during search are recorded with -inf score.
    """
    # A fresh CV generator is needed per model: a generator can only be consumed once.
    def cv_(): return cv_clf(x, y, test_size, n_splits, random_state, upsample) if isClassification else cv_reg(x, test_size, n_splits, random_state)
    res = []
    num_features = x.shape[1]
    scoring = scoring or ('accuracy' if isClassification else 'neg_mean_squared_error')
    if brain: print('Scoring criteria:', scoring)
    for i, (clf_Klass, parameters) in enumerate(tqdm(models_n_params)):
        try:
            if brain: print('-'*15, 'model %d/%d' % (i+1, len(models_n_params)), '-'*15)
            if brain: print(clf_Klass.__name__)
            # KMeans needs the true cluster count; tree/ensemble models cannot
            # use more features than the data has, so clip the 'max_features' grid.
            if clf_Klass == KMeans: parameters['n_clusters'] = [len(np.unique(y))]
            elif clf_Klass in TREE_N_ENSEMBLE_MODELS: parameters['max_features'] = [v for v in parameters['max_features'] if v is None or type(v)==str or v<=num_features]
            if grid_search: clf_search = GridSearchCVProgressBar(clf_Klass(), parameters, scoring, cv=cv_(), n_jobs=n_jobs)
            else: clf_search = RandomizedSearchCVProgressBar(clf_Klass(), parameters, scoring, cv=cv_(), n_jobs=n_jobs)
            clf_search.fit(x, y)
            # Refit once with the winning params purely to measure wall-clock fit time.
            timespent = timeit(clf_Klass, clf_search.best_params_, x, y)
            if brain: print('best score:', clf_search.best_score_, 'time/clf: %0.3f seconds' % timespent)
            if brain: print('best params:')
            if brain: pprint(clf_search.best_params_)
            if verbose:
                print('validation scores:', clf_search.cv_results_['mean_test_score'])
                print('training scores:', clf_search.cv_results_['mean_train_score'])
            res.append((clf_search.best_estimator_, clf_search.best_score_, timespent))
        except Exception as e:
            # A failing model must not abort the sweep; park it with -inf score.
            if verbose: traceback.print_exc()
            res.append((clf_Klass(), -np.inf, np.inf))
    if brain: print('='*60)
    if brain: print(tabulate([[m.__class__.__name__, '%.3f'%s, '%.3f'%t] for m, s, t in res], headers=['Model', scoring, 'Time/clf (s)']))
    winner_ind = np.argmax([v[1] for v in res])
    winner = res[winner_ind][0]
    if brain: print('='*60)
    if brain: print('The winner is: %s with score %0.3f.' % (winner.__class__.__name__, res[winner_ind][1]))
    return winner, res
if __name__ == '__main__':
    # Quick smoke test of the CV split generators on a tiny toy label vector.
    y = np.array([0,1,0,0,0,3,1,1,3])
    x = np.zeros(len(y))
    for t, v in cv_reg(x): print(v,t)
    # NOTE(review): test_size=5 here is an absolute validation-set size (int), not a fraction.
    for t, v in cv_clf(x, y, test_size=5): print(v,t)
###############
### PARAMS
#################
import numpy as np
# Hyperparameter candidate grids shared by the classifier and regressor
# searches in train_hungabunga. Each name maps to the list of values tried
# for the correspondingly named estimator parameter.

# Regularization penalty options (different subsets per estimator).
penalty_12 = ['l1', 'l2']
penalty_12none = ['l1', 'l2', None]
penalty_12e = ['l1', 'l2', 'elasticnet']
penalty_all = ['l1', 'l2', None, 'elasticnet']
# Iteration budgets (np.inf / -1 mean "until convergence" where supported).
max_iter = [100, 300, 1000]
max_iter_2 = [25]
max_iter_inf = [100, 300, 500, 1000, np.inf]
max_iter_inf2 = [100, 300, 500, 1000, -1]
tol = [1e-4, 1e-3, 1e-2]
warm_start = [True, False]
# Regularization strengths and SGD learning-rate seeds.
alpha = [1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 3, 10]
alpha_small = [1e-5, 1e-3, 0.1, 1]
n_iter = [5, 10, 20]
eta0 = [1e-4, 1e-3, 1e-2, 0.1]
C = [1e-2, 0.1, 1, 5, 10]
C_small = [ 0.1, 1, 5]
epsilon = [1e-3, 1e-2, 0.1, 0]
normalize = [True, False]
# SVM kernel settings.
kernel = ['linear', 'poly', 'rbf', 'sigmoid']
degree = [1, 2, 3, 4, 5]
gamma = list(np.logspace(-9, 3, 6)) + ['auto']
gamma_small = list(np.logspace(-6, 3, 3)) + ['auto']
coef0 = [0, 0.1, 0.3, 0.5, 0.7, 1]
coef0_small = [0, 0.4, 0.7, 1]
shrinking = [True, False]
nu = [1e-4, 1e-2, 0.1, 0.3, 0.5, 0.75, 0.9]
nu_small = [1e-2, 0.1, 0.5, 0.9]
# Nearest-neighbor model settings.
n_neighbors = [5, 7, 10, 15, 20]
neighbor_algo = ['ball_tree', 'kd_tree', 'brute']
neighbor_leaf_size = [1, 2, 5, 10, 20, 30, 50, 100]
neighbor_metric = ['cityblock', 'euclidean', 'l1', 'l2', 'manhattan']
neighbor_radius = [1e-2, 0.1, 1, 5, 10]
# Neural-network learning-rate schedules.
learning_rate = ['constant', 'invscaling', 'adaptive']
learning_rate_small = ['invscaling', 'adaptive']
learning_rate2=[0.05, 0.10, 0.15]
# Tree / ensemble settings.
n_estimators = [2, 3, 5, 10, 25, 50, 100]
n_estimators_small = [2, 10, 25, 100]
max_features = [3, 5, 10, 25, 50, 'auto', 'log2', None]
max_features_small = [3, 5, 10, 'auto', 'log2', None]
max_depth = [None, 3, 5, 7, 10]
max_depth_small = [None, 5, 10]
min_samples_split = [2, 5, 10, 0.1]
min_impurity_split = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3]
tree_learning_rate = [0.8, 1]
min_samples_leaf = [2]
# for regression
import warnings
warnings.filterwarnings('ignore')
from multiprocessing import cpu_count
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, Lars, LassoLars, OrthogonalMatchingPursuit, BayesianRidge, ARDRegression, SGDRegressor, PassiveAggressiveRegressor, RANSACRegressor, HuberRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, NuSVR, LinearSVR
from sklearn.neighbors import RadiusNeighborsRegressor, KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, DotProduct, WhiteKernel
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import AdaBoostRegressor, ExtraTreesRegressor, RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.base import is_classifier
def train_hungabunga(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Brute-force model search (HungaBunga style) over many sklearn estimators.

    Builds a battery of (estimator class, hyperparameter grid) pairs for
    either classification (mtype 'c') or regression ('r'), runs main_loop()
    over all of them via a thin HungaBunga wrapper class, pickles the winning
    fitted estimator to <common_name_model>.pickle in the current working
    directory, and returns (model_name, model_dir, files).
    """
    model_name=common_name_model+'.pickle'
    files=list()
    # BUG(review): ' classification' has a leading space, so only mtype=='c'
    # ever reaches this branch; the string 'classification' falls through.
    if mtype in [' classification', 'c']:
        linear_models_n_params = [
            (SGDClassifier,
                {'loss': ['hinge', 'log', 'modified_huber', 'squared_hinge'],
                'alpha': [0.0001, 0.001, 0.1],
                'penalty': penalty_12none,
                'max_iter':max_iter}),
            (LogisticRegression,
                {'penalty': penalty_12, 'max_iter': max_iter, 'tol': tol, 'warm_start': warm_start, 'C':C, 'solver': ['liblinear']}),
            # NOTE(review): 'n_iter' was removed from Perceptron and
            # PassiveAggressiveClassifier in newer scikit-learn ('max_iter');
            # these grids raise there and the models get scored as failures.
            (Perceptron,
                {'penalty': penalty_all,
                'alpha': alpha,
                'n_iter': n_iter,
                'eta0': eta0,
                'warm_start': warm_start}),
            (PassiveAggressiveClassifier,
                {'C': C, 'n_iter': n_iter,
                'warm_start': warm_start,
                'loss': ['hinge', 'squared_hinge']})]
        linear_models_n_params_small = linear_models_n_params
        svm_models_n_params = [
            (SVC,
                {'C':C}),
            (NuSVC,
                {'nu': nu}),
            # BUG(review): LinearSVC has no 'penalty_12' parameter; the key
            # should be 'penalty' (as in the *_small grid below).
            (LinearSVC,
                {'penalty_12': penalty_12, 'loss': ['hinge', 'squared_hinge']})
            ]
        svm_models_n_params_small = [
            (SVC,
                {'C':C}),
            (NuSVC,
                {'nu': nu}),
            (LinearSVC,
                {'penalty': penalty_12, 'tol': tol})
            ]
        neighbor_models_n_params = [
            (KMeans,
                {'algorithm': ['auto', 'full', 'elkan'],
                'init': ['k-means++', 'random']}),
            # BUG(review): KNeighborsClassifier's parameter is 'algorithm',
            # not 'algo'.
            (KNeighborsClassifier,
                {'n_neighbors': n_neighbors, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,
                'weights': ['uniform', 'distance'],
                'p': [1, 2]
                }),
            (NearestCentroid,
                {'metric': neighbor_metric,
                'shrink_threshold': [1e-3, 1e-2, 0.1, 0.5, 0.9, 2]
                })]
        # not using radius neighbors classifier because it doesn't seem to converge on some of these datasets
        # (RadiusNeighborsClassifier,
        #   {'radius': neighbor_radius, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,
        #    'weights': ['uniform', 'distance'],
        #    'p': [1, 2],
        #    'outlier_label': [-1]
        #   })
        gaussianprocess_models_n_params = [
            (GaussianProcessClassifier,
                {'warm_start': warm_start,
                'kernel': [RBF(), ConstantKernel(), DotProduct(), WhiteKernel()],
                'max_iter_predict': [500],
                'n_restarts_optimizer': [3],
                })
            ]
        # NOTE(review): defined but never included in all_params inside
        # run_all_classifiers, so GaussianNB is never actually tried.
        bayes_models_n_params = [
            (GaussianNB, {})
            ]
        nn_models_n_params = [
            (MLPClassifier,
                { 'hidden_layer_sizes': [(16,), (64,), (100,), (32, 32)],
                'activation': ['identity', 'logistic', 'tanh', 'relu'],
                'alpha': alpha, 'learning_rate': learning_rate, 'tol': tol, 'warm_start': warm_start,
                'batch_size': ['auto', 50],
                'max_iter': [1000],
                'early_stopping': [True, False],
                'epsilon': [1e-8, 1e-5]
                })
            ]
        nn_models_n_params_small = [
            (MLPClassifier,
                { 'hidden_layer_sizes': [(64,), (32, 64)],
                'batch_size': ['auto', 50],
                'activation': ['identity', 'tanh', 'relu'],
                'max_iter': [500],
                'early_stopping': [True],
                'learning_rate': learning_rate_small
                })
            ]
        # NOTE(review): 'min_impurity_split' is deprecated/removed in newer
        # scikit-learn releases.
        tree_models_n_params = [
            (RandomForestClassifier,
                {'criterion': ['gini', 'entropy'],
                'max_features': max_features, 'n_estimators': n_estimators, 'max_depth': max_depth,
                'min_samples_split': min_samples_split, 'min_impurity_split': min_impurity_split, 'warm_start': warm_start, 'min_samples_leaf': min_samples_leaf,
                }),
            (DecisionTreeClassifier,
                {'criterion': ['gini', 'entropy'],
                'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_impurity_split':min_impurity_split, 'min_samples_leaf': min_samples_leaf
                }),
            (ExtraTreesClassifier,
                {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth,
                'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'min_impurity_split': min_impurity_split, 'warm_start': warm_start,
                'criterion': ['gini', 'entropy']})
            ]
        # BUG(review): the '*_small' dict keys below are not real estimator
        # parameters (e.g. RandomForestClassifier takes 'max_features', not
        # 'max_features_small'), so these grids raise in GridSearchCV and the
        # tree models are recorded as failures when small=True.
        tree_models_n_params_small = [
            (RandomForestClassifier,
                {'max_features_small': max_features_small, 'n_estimators_small': n_estimators_small, 'min_samples_split': min_samples_split, 'max_depth_small': max_depth_small, 'min_samples_leaf': min_samples_leaf
                }),
            (DecisionTreeClassifier,
                {'max_features_small': max_features_small, 'max_depth_small': max_depth_small, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf
                }),
            (ExtraTreesClassifier,
                {'n_estimators_small': n_estimators_small, 'max_features_small': max_features_small, 'max_depth_small': max_depth_small,
                'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf})
            ]
        def run_all_classifiers(x, y, small = True, normalize_x = True, n_jobs=cpu_count()-1, brain=False, test_size=0.2, n_splits=5, upsample=True, scoring=None, verbose=False, grid_search=True):
            # Concatenate every candidate (estimator, grid) pair and hand them to main_loop.
            all_params = (linear_models_n_params_small if small else linear_models_n_params) + (nn_models_n_params_small if small else nn_models_n_params) + (gaussianprocess_models_n_params) + (svm_models_n_params_small if small else svm_models_n_params) + (neighbor_models_n_params) + (tree_models_n_params_small if small else tree_models_n_params)
            # TEST
            # all_params = (linear_models_n_params_small if small else linear_models_n_params)
            return main_loop(all_params, StandardScaler().fit_transform(x) if normalize_x else x, y, isClassification=True, n_jobs=n_jobs, verbose=verbose, brain=brain, test_size=test_size, n_splits=n_splits, upsample=upsample, scoring=scoring, grid_search=grid_search)
        class HungaBungaClassifier(ClassifierMixin):
            # Thin sklearn-style wrapper: fit() runs the whole model search
            # and keeps only the winning fitted estimator in self.model.
            def __init__(self, brain=False, test_size = 0.2, n_splits = 5, random_state=None, upsample=True, scoring=None, verbose=False, normalize_x = True, n_jobs =cpu_count() - 1, grid_search=True):
                self.model = None
                self.brain = brain
                self.test_size = test_size
                self.n_splits = n_splits
                self.random_state = random_state
                self.upsample = upsample
                # NOTE(review): the 'scoring' argument is silently ignored —
                # the attribute is always reset to None.
                self.scoring = None
                self.verbose = verbose
                self.n_jobs = n_jobs
                self.normalize_x = normalize_x
                self.grid_search = grid_search
                super(HungaBungaClassifier, self).__init__()
            def fit(self, x, y):
                self.model = run_all_classifiers(x, y, normalize_x=self.normalize_x, test_size=self.test_size, n_splits=self.n_splits, upsample=self.upsample, scoring=self.scoring, verbose=self.verbose, brain=self.brain, n_jobs=self.n_jobs, grid_search=self.grid_search)[0]
                return self
            def predict(self, x):
                return self.model.predict(x)
        clf = HungaBungaClassifier(brain=False)
    elif mtype in ['regression','r']:
        # regression path
        linear_models_n_params = [
            (LinearRegression, {'normalize': normalize}),
            (Ridge,
                {'alpha': alpha, 'normalize': normalize, 'tol': tol,
                'solver': ['svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag']
                }),
            (Lasso,
                {'alpha': alpha, 'normalize': normalize, 'tol': tol, 'warm_start': warm_start
                }),
            (ElasticNet,
                {'alpha': alpha, 'normalize': normalize, 'tol': tol,
                'l1_ratio': [0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9],
                }),
            (Lars,
                {'normalize': normalize,
                'n_nonzero_coefs': [100, 300, 500, np.inf],
                }),
            # BUG(review): LassoLars takes 'max_iter', not 'max_iter_inf'
            # (compare the *_small grid below, which uses the right key).
            (LassoLars,
                { 'max_iter_inf': max_iter_inf, 'normalize': normalize, 'alpha': alpha
                }),
            (OrthogonalMatchingPursuit,
                {'n_nonzero_coefs': [100, 300, 500, np.inf, None],
                'tol': tol, 'normalize': normalize
                }),
            (BayesianRidge,
                {
                'n_iter': [100, 300, 1000],
                'tol': tol, 'normalize': normalize,
                'alpha_1': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'alpha_2': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'lambda_1': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'lambda_2': [1e-6, 1e-4, 1e-2, 0.1, 0],
                }),
            # WARNING: ARDRegression takes a long time to run
            (ARDRegression,
                {'n_iter': [100, 300, 1000],
                'tol': tol, 'normalize': normalize,
                'alpha_1': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'alpha_2': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'lambda_1': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'lambda_2': [1e-6, 1e-4, 1e-2, 0.1, 0],
                'threshold_lambda': [1e2, 1e3, 1e4, 1e6]}),
            (SGDRegressor,
                {'loss': ['squared_loss', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'],
                'penalty': penalty_12e, 'n_iter': n_iter, 'epsilon': epsilon, 'eta0': eta0,
                'alpha': [1e-6, 1e-5, 1e-2, 'optimal'],
                'l1_ratio': [0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9],
                'learning_rate': ['constant', 'optimal', 'invscaling'],
                'power_t': [0.1, 0.25, 0.5]
                }),
            (PassiveAggressiveRegressor,
                {'C': C, 'epsilon': epsilon, 'n_iter': n_iter, 'warm_start': warm_start,
                'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive']
                }),
            (RANSACRegressor,
                {'min_samples': [0.1, 0.5, 0.9, None],
                'max_trials': n_iter,
                'stop_score': [0.8, 0.9, 1],
                'stop_probability': [0.9, 0.95, 0.99, 1],
                'loss': ['absolute_loss', 'squared_loss']
                }),
            (HuberRegressor,
                { 'epsilon': [1.1, 1.35, 1.5, 2],
                'max_iter': max_iter, 'alpha': alpha, 'warm_start': warm_start, 'tol': tol
                }),
            (KernelRidge,
                {'alpha': alpha, 'degree': degree, 'gamma': gamma, 'coef0': coef0
                })
            ]
        linear_models_n_params_small = [
            (LinearRegression, {'normalize': normalize}),
            (Ridge,
                {'alpha': alpha_small, 'normalize': normalize
                }),
            (Lasso,
                {'alpha': alpha_small, 'normalize': normalize
                }),
            (ElasticNet,
                {'alpha': alpha, 'normalize': normalize,
                'l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9],
                }),
            (Lars,
                {'normalize': normalize,
                'n_nonzero_coefs': [100, 300, 500, np.inf],
                }),
            (LassoLars,
                {'normalize': normalize, 'max_iter': max_iter_inf, 'alpha': alpha_small
                }),
            (OrthogonalMatchingPursuit,
                {'n_nonzero_coefs': [100, 300, 500, np.inf, None],
                'normalize': normalize
                }),
            (BayesianRidge,
                { 'n_iter': [100, 300, 1000],
                'alpha_1': [1e-6, 1e-3],
                'alpha_2': [1e-6, 1e-3],
                'lambda_1': [1e-6, 1e-3],
                'lambda_2': [1e-6, 1e-3],
                'normalize': normalize,
                }),
            # WARNING: ARDRegression takes a long time to run
            (ARDRegression,
                {'n_iter': [100, 300],
                'normalize': normalize,
                'alpha_1': [1e-6, 1e-3],
                'alpha_2': [1e-6, 1e-3],
                'lambda_1': [1e-6, 1e-3],
                'lambda_2': [1e-6, 1e-3],
                }),
            (SGDRegressor,
                {'loss': ['squared_loss', 'huber'],
                'penalty': penalty_12e, 'n_iter': n_iter,
                'alpha': [1e-6, 1e-5, 1e-2, 'optimal'],
                'l1_ratio': [0.1, 0.3, 0.5, 0.7, 0.9],
                }),
            (PassiveAggressiveRegressor,
                {'C': C, 'n_iter': n_iter,
                }),
            (RANSACRegressor,
                {'min_samples': [0.1, 0.5, 0.9, None],
                'max_trials': n_iter,
                'stop_score': [0.8, 1],
                'loss': ['absolute_loss', 'squared_loss']
                }),
            # BUG(review): HuberRegressor's parameter is 'alpha', not 'alpha_small'.
            (HuberRegressor,
                { 'max_iter': max_iter, 'alpha_small': alpha_small,
                }),
            (KernelRidge,
                {'alpha': alpha_small, 'degree': degree,
                })
            ]
        svm_models_n_params_small = [
            (SVR,
                {'kernel': kernel, 'degree': degree, 'shrinking': shrinking
                }),
            (NuSVR,
                {'nu': nu_small, 'kernel': kernel, 'degree': degree, 'shrinking': shrinking,
                }),
            (LinearSVR,
                {'C': C_small, 'epsilon': epsilon,
                'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive'],
                'intercept_scaling': [0.1, 1, 10]
                })
            ]
        svm_models_n_params = [
            (SVR,
                {'C': C, 'epsilon': epsilon, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol, 'max_iter': max_iter_inf2
                }),
            (NuSVR,
                {'C': C, 'epsilon': epsilon, 'kernel': kernel, 'degree': degree, 'gamma': gamma, 'coef0': coef0, 'shrinking': shrinking, 'tol': tol, 'max_iter': max_iter_inf2
                }),
            (LinearSVR,
                {'C': C, 'epsilon': epsilon, 'tol': tol, 'max_iter': max_iter,
                'loss': ['epsilon_insensitive', 'squared_epsilon_insensitive'],
                'intercept_scaling': [0.1, 0.5, 1, 5, 10]
                })
            ]
        neighbor_models_n_params = [
            # BUG(review): the neighbors regressors take 'algorithm', not 'algo'.
            (RadiusNeighborsRegressor,
                {'radius': neighbor_radius, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,
                'weights': ['uniform', 'distance'],
                'p': [1, 2],
                }),
            (KNeighborsRegressor,
                {'n_neighbors': n_neighbors, 'algo': neighbor_algo, 'leaf_size': neighbor_leaf_size, 'metric': neighbor_metric,
                'p': [1, 2],
                'weights': ['uniform', 'distance'],
                })
            ]
        gaussianprocess_models_n_params = [
            (GaussianProcessRegressor,
                {'kernel': [RBF(), ConstantKernel(), DotProduct(), WhiteKernel()],
                'n_restarts_optimizer': [3],
                'alpha': [1e-10, 1e-5],
                'normalize_y': [True, False]
                })
            ]
        nn_models_n_params = [
            (MLPRegressor,
                { 'hidden_layer_sizes': [(16,), (64,), (100,), (32, 64)],
                'activation': ['identity', 'logistic', 'tanh', 'relu'],
                'alpha': alpha, 'learning_rate': learning_rate, 'tol': tol, 'warm_start': warm_start,
                'batch_size': ['auto', 50],
                'max_iter': [1000],
                'early_stopping': [True, False],
                'epsilon': [1e-8, 1e-5]
                })
            ]
        nn_models_n_params_small = [
            (MLPRegressor,
                { 'hidden_layer_sizes': [(64,), (32, 64)],
                'activation': ['identity', 'tanh', 'relu'],
                'max_iter': [500],
                'early_stopping': [True],
                'learning_rate': learning_rate_small
                })
            ]
        tree_models_n_params = [
            (DecisionTreeRegressor,
                {'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'min_impurity_split': min_impurity_split,
                'criterion': ['mse', 'mae']}),
            (ExtraTreesRegressor,
                {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split,
                'min_samples_leaf': min_samples_leaf, 'min_impurity_split': min_impurity_split, 'warm_start': warm_start,
                'criterion': ['mse', 'mae']}),
            ]
        tree_models_n_params_small = [
            (DecisionTreeRegressor,
                {'max_features': max_features_small, 'max_depth': max_depth_small, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf,
                'criterion': ['mse', 'mae']}),
            (ExtraTreesRegressor,
                {'n_estimators': n_estimators_small, 'max_features': max_features_small, 'max_depth': max_depth_small, 'min_samples_split': min_samples_split,
                'min_samples_leaf': min_samples_leaf,
                'criterion': ['mse', 'mae']})
            ]
        def run_all_regressors(x, y, small = True, normalize_x = True, n_jobs=cpu_count()-1, brain=False, test_size=0.2, n_splits=5, upsample=True, scoring=None, verbose=False, grid_search=True):
            # Same aggregation as run_all_classifiers, but the expensive
            # Gaussian-process grid is skipped when small=True.
            all_params = (linear_models_n_params_small if small else linear_models_n_params) + (nn_models_n_params_small if small else nn_models_n_params) + ([] if small else gaussianprocess_models_n_params) + neighbor_models_n_params + (svm_models_n_params_small if small else svm_models_n_params) + (tree_models_n_params_small if small else tree_models_n_params)
            return main_loop(all_params, StandardScaler().fit_transform(x) if normalize_x else x, y, isClassification=False, n_jobs=n_jobs, brain=brain, test_size=test_size, n_splits=n_splits, upsample=upsample, scoring=scoring, verbose=verbose, grid_search=grid_search)
        class HungaBungaRegressor(RegressorMixin):
            # Thin sklearn-style wrapper: fit() runs the whole model search
            # and keeps only the winning fitted estimator in self.model.
            def __init__(self, brain=False, test_size = 0.2, n_splits = 5, random_state=None, upsample=True, scoring=None, verbose=False, normalize_x = True, n_jobs =cpu_count() - 1, grid_search=True):
                self.model = None
                self.brain = brain
                self.test_size = test_size
                self.n_splits = n_splits
                self.random_state = random_state
                self.upsample = upsample
                # NOTE(review): the 'scoring' argument is silently ignored —
                # the attribute is always reset to None.
                self.scoring = None
                self.verbose = verbose
                self.n_jobs = n_jobs
                self.normalize_x = normalize_x
                self.grid_search=grid_search
                super(HungaBungaRegressor, self).__init__()
            def fit(self, x, y):
                self.model = run_all_regressors(x, y, normalize_x=self.normalize_x, test_size=self.test_size, n_splits=self.n_splits, upsample=self.upsample, scoring=self.scoring, verbose=self.verbose, brain=self.brain, n_jobs=self.n_jobs, grid_search=self.grid_search)[0]
                return self
            def predict(self, x):
                return self.model.predict(x)
        # NOTE(review): brain=True here but brain=False on the classifier
        # branch — inconsistent verbosity between the two paths.
        clf = HungaBungaRegressor(brain=True)
    # NOTE(review): if mtype matches neither branch, 'clf' is unbound and the
    # next line raises NameError.
    # write model to .pickle file
    clf.fit(X_train, y_train)
    # now save the model
    f=open(model_name,'wb')
    pickle.dump(clf.model,f)
    f.close()
    # files to put into model folder
    files.append(model_name)
    model_dir=os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_imbalance.py | training/train_imbalance.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using SMOTE/imbalance-learn: https://pypi.org/project/imbalanced-learn/
This is enabled if the default_training_script = ['imbalance']
'''
import warnings, datetime, uuid, os, json, shutil, pickle
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error, accuracy_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
import pandas as pd
print('installing package configuration')
os.system('pip3 install imbalanced-learn==0.5.0')
os.system('pip3 install scikit-learn==0.22.2.post1')
# os.system('pip3 install scipy==1.4.1')
# os.system('pip3 install scikit-learn==0.20.1')
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
from collections import Counter
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split
# from imblearn.datasets import make_imbalance
from imblearn.under_sampling import NearMiss
from imblearn.pipeline import make_pipeline
from imblearn.metrics import classification_report_imbalanced
from sklearn.model_selection import StratifiedKFold
from imblearn.over_sampling import ADASYN, SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
'''
Taken from the example here:
https://imbalanced-learn.readthedocs.io/en/stable/
Plotting taken from:
https://imbalanced-learn.readthedocs.io/en/stable/auto_examples/over-sampling/plot_comparison_over_sampling.html
'''
def train_imbalance(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Fit an imbalanced-learn pipeline and pickle it to disk.

    For mtype 'c', trains NearMiss undersampling + LinearSVC and prints an
    imbalanced classification report plus accuracy; for 'r', trains
    SMOTE oversampling + LogisticRegression. The fitted pipeline is pickled
    to <common_name_model>.pickle in the current working directory.

    Returns:
        (model_name, model_dir, files)
    """
    model_name = common_name_model + '.pickle'
    files = list()
    RANDOM_STATE = 42
    if mtype == 'c':
        # Undersample the majority class, then fit a linear SVM.
        balanced_clf = make_pipeline(NearMiss(version=2),
                                     LinearSVC(random_state=RANDOM_STATE))
        balanced_clf.fit(X_train, y_train)
        # Evaluate on the held-out split.
        predicted = balanced_clf.predict(X_test)
        report = classification_report_imbalanced(y_test, predicted)
        print(report)
        accuracy = accuracy_score(y_test, predicted)
        print(accuracy)
        # Persist the fitted pipeline.
        with open(model_name, 'wb') as handle:
            pickle.dump(balanced_clf, handle)
    elif mtype == 'r':
        # Oversample with SMOTE, then fit a logistic regression.
        balanced_clf = make_pipeline(SMOTE(random_state=RANDOM_STATE),
                                     LogisticRegression(random_state=0))
        balanced_clf.fit(X_train, y_train)
        with open(model_name, 'wb') as handle:
            pickle.dump(balanced_clf, handle)
    files.append(model_name)
    model_dir = os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_hyperband.py | training/train_hyperband.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using hyperband: https://github.com/thuijskens/scikit-hyperband
This is enabled if the default_training_script = ['hyperband']
'''
import os, pickle
from scipy.stats import randint as sp_randint
from sklearn.ensemble import RandomForestClassifier
def prev_dir(directory):
    """Return the parent of *directory*, using '/' as the separator.

    Pure string manipulation (no filesystem access): everything up to the
    last '/' is kept, e.g. '/a/b/c' -> '/a/b' and 'a/b' -> 'a'. A string
    with no '/' yields ''.

    Args:
        directory: a '/'-separated path string.

    Returns:
        The path with its last component removed.
    """
    # Idiomatic replacement for the original manual accumulation loop;
    # behavior is identical for every input.
    return '/'.join(directory.split('/')[:-1])
def train_hyperband(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Tune a RandomForestClassifier with Hyperband and pickle the fitted search.

    Installs the vendored scikit-hyperband package on first use (side
    effect: runs setup.py via os.system). Regression is not supported and
    returns an empty model name.

    Returns:
        (model_name, model_dir, files)
    """
    # Install the bundled hyperband helper, then restore the working directory.
    original_dir = os.getcwd()
    os.chdir(prev_dir(os.getcwd()) + '/training/helpers/hyperband')
    os.system('python3 setup.py install')
    from hyperband import HyperbandSearchCV
    os.chdir(original_dir)
    files = list()
    model_name = common_name_model + '.pickle'
    if mtype in ['classification', 'c']:
        base_estimator = RandomForestClassifier()
        # Randomized distributions searched by Hyperband; n_estimators is
        # the budget ("resource") parameter.
        search_space = {
            'max_depth': [3, None],
            'max_features': sp_randint(1, 11),
            'min_samples_split': sp_randint(2, 11),
            'min_samples_leaf': sp_randint(1, 11),
            'bootstrap': [True, False],
            'criterion': ['gini', 'entropy']
        }
        search = HyperbandSearchCV(base_estimator, search_space,
                                   resource_param='n_estimators',
                                   scoring='roc_auc')
        search.fit(X_train, y_train)
        params = search.best_params_
        print('-----')
        print('best params: ')
        print(params)
        print('------')
        accuracy = search.score(X_test, y_test)
        # SAVE ML MODEL
        with open(model_name, 'wb') as handle:
            pickle.dump(search, handle)
    elif mtype in ['regression', 'r']:
        print('hyperband currently does not support regression modeling.')
        model_name = ''
    model_dir = os.getcwd()
    files.append(model_name)
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_btb.py | training/train_btb.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using BTB: https://github.com/HDI-Project/BTB
This is enabled if the default_training_script = ['btb']
'''
import warnings, datetime, uuid, os, json, shutil, pickle
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import make_scorer
import pandas as pd
os.system('pip3 install baytune==0.3.7')
# os.system('pip3 install autobazaar==0.2.0')
# os.system('pip3 install gitpython==3.0.2')
# os.system('pip3 install --upgrade GitPython==2.1.15')
# os.system('pip3 install --upgrade gitdb2==2.0.6 gitdb==0.6.4 ')
# make imports
print('installing package configuration')
from btb.session import BTBSession
from btb.tuning import Tunable
from btb.tuning.tuners import GPTuner
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import make_scorer, r2_score
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from btb.selection import UCB1
from btb.tuning.hyperparams import FloatHyperParam, IntHyperParam
'''
Taken from the example here:
https://github.com/HDI-Project/BTB/blob/master/notebooks/BTBSession%20-%20Example.ipynb
Note that autobazaar is used as the primary model trainer for BTB sessions.
https://github.com/HDI-Project/AutoBazaar
Tutorial:
https://hdi-project.github.io/AutoBazaar/readme.html#install
Data: Must be formatted (https://github.com/mitll/d3m-schema/blob/master/documentation/datasetSchema.md)
Case 1: Single table
In many openml and other tabular cases, all the learning data is contained in a single tabular file. In this case, an example dataset will look like the following.
<dataset_id>/
|-- tables/
|-- learningData.csv
d3mIndex,sepalLength,sepalWidth,petalLength,petalWidth,species
0,5.2,3.5,1.4,0.2,I.setosa
1,4.9,3.0,1.4,0.2,I.setosa
2,4.7,3.2,1.3,0.2,I.setosa
3,4.6,3.1,1.5,0.2,I.setosa
4,5.0,3.6,1.4,0.3,I.setosa
5,5.4,3.5,1.7,0.4,I.setosa
...
'''
def create_json(foldername, trainingcsv):
    """Create the d3m-style ``datasetDoc.json`` required by AutoBazaar/BTB.

    Reads only the header of ``trainingcsv`` to enumerate columns: every
    column except ``class_`` is declared as a numeric attribute, while
    ``class_`` is declared as the suggested prediction target.

    Args:
        foldername: human-readable dataset name recorded in the JSON.
        trainingcsv: path to the training CSV whose header is inspected.

    Returns:
        (dataset_id, filename): a freshly generated UUID4 string for the
        dataset, and the name of the JSON file written to the current
        directory ('datasetDoc.json').
    """
    dataset_name = foldername
    dataset_id = str(uuid.uuid4())
    colnames = list(pd.read_csv(trainingcsv))
    columns = []
    for i, colname in enumerate(colnames):
        if colname != 'class_':
            columns.append({"colIndex": i,
                            "colName": colname,
                            "colType": "real",
                            "role": ["attribute"]})
        else:
            columns.append({"colIndex": i,
                            "colName": 'class_',
                            "colType": "real",
                            "role": ["suggestedTarget"]})
    data = {"about":
                {
                    "datasetID": dataset_id,
                    "datasetName": dataset_name,
                    "humanSubjectsResearch": False,
                    "license": "CC",
                    "datasetSchemaVersion": "3.0",
                    "redacted": False
                },
            "dataResources":
                [
                    {
                        "resID": "0",
                        "resPath": os.getcwd() + '/' + trainingcsv,
                        "resType": "table",
                        "resFormat": ["text/csv"],
                        "isCollection": False,
                        "columns": columns,
                    }
                ]
            }
    filename = 'datasetDoc.json'
    # context manager guarantees the handle is closed even if json.dump raises
    with open(filename, 'w') as jsonfile:
        json.dump(data, jsonfile)
    return dataset_id, filename
def train_btb(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Train a model with a BTB (Bayesian Tuning and Bandits) session.

    For classification (mtype == 'c') it tunes a RandomForestClassifier and
    an SVC with a BTBSession; for regression (mtype == 'r') it tunes a
    RandomForestRegressor and an ExtraTreesRegressor.  The best proposal is
    refit on the full training set, scored on the test set, and pickled.

    Returns:
        (model_name, model_dir, files): pickle filename, the directory it
        was written to, and the files/folders produced during training.
    """
    # create file names
    model_name = common_name_model + '.pickle'
    folder = 'btb_session'
    csvname = common_name_model.split('_')[0]
    curdir = os.getcwd()
    files = list()

    # make a temporary folder for the training session (recreate if stale)
    try:
        os.mkdir(folder)
        os.chdir(folder)
    except:
        shutil.rmtree(folder)
        os.mkdir(folder)
        os.chdir(folder)

    # get training and testing data (prefer transformed CSVs when present)
    try:
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv', os.getcwd()+'/train.csv')
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv', os.getcwd()+'/test.csv')
    except:
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv', os.getcwd()+'/train.csv')
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv', os.getcwd()+'/test.csv')

    # create required .JSON (d3m dataset schema) and stage the training table
    dataset_id, filename = create_json(folder, 'train.csv')
    os.mkdir(dataset_id)
    os.chdir(dataset_id)
    os.mkdir('tables')
    shutil.copy(curdir+'/'+folder+'/train.csv', os.getcwd()+'/tables/train.csv')

    if mtype == 'c':
        def build_model(name, hyperparameters):
            # instantiate the selected estimator with proposed hyperparameters
            model_class = models[name]
            return model_class(random_state=0, **hyperparameters)

        def score_model(name, hyperparameters):
            # mean cross-validated score is the tuning objective
            model = build_model(name, hyperparameters)
            scores = cross_val_score(model, X_train, y_train)
            return scores.mean()

        rf_hyperparams = {'n_estimators': IntHyperParam(min=10, max=500),
                          'max_depth': IntHyperParam(min=10, max=500)}
        rf_tunable = Tunable(rf_hyperparams)
        print(rf_tunable)

        svc_hyperparams = {'C': FloatHyperParam(min=0.01, max=10.0),
                           'gamma': FloatHyperParam(0.000000001, 0.0000001)}
        svc_tunable = Tunable(svc_hyperparams)
        print(svc_tunable)

        tuners = {'RF': rf_tunable,
                  'SVC': svc_tunable}
        print(tuners)

        models = {'RF': RandomForestClassifier,
                  'SVC': SVC}
        # NOTE(review): selector is created but never passed to BTBSession —
        # confirm whether the UCB1 bandit was meant to be wired in.
        selector = UCB1(['RF', 'SVC'])

        session = BTBSession(tuners, score_model, verbose=True)
        best_proposal = session.run(iterations=100)
        best_model = build_model(best_proposal['name'], best_proposal['config'])
        best_model.fit(X_train, y_train)
        accuracy = best_model.score(X_test, y_test)
        # tuner.record(parameters, score)
        print('ACCURACY:')
        print(accuracy)

        # now save the model in .pickle
        os.chdir(curdir)
        with open(model_name, 'wb') as f:
            pickle.dump(best_model, f)

    elif mtype == 'r':
        tunables = {
            'random_forest': {
                'n_estimators': {'type': 'int', 'default': 2, 'range': [1, 1000]},
                'max_features': {'type': 'str', 'default': 'log2', 'range': [None, 'auto', 'log2', 'sqrt']},
                'min_samples_split': {'type': 'int', 'default': 2, 'range': [2, 20]},
                'min_samples_leaf': {'type': 'int', 'default': 2, 'range': [1, 20]},
            },
            'extra_trees': {
                'n_estimators': {'type': 'int', 'default': 2, 'range': [1, 1000]},
                'max_features': {'type': 'str', 'default': 'log2', 'range': [None, 'auto', 'log2', 'sqrt']},
                'min_samples_split': {'type': 'int', 'default': 2, 'range': [2, 20]},
                'min_samples_leaf': {'type': 'int', 'default': 2, 'range': [1, 20]},
            }
        }
        models = {
            'random_forest': RandomForestRegressor,
            'extra_trees': ExtraTreesRegressor,
        }

        def build_model(name, hyperparameters):
            model_class = models[name]
            return model_class(random_state=0, **hyperparameters)

        def score_model(name, hyperparameters):
            model = build_model(name, hyperparameters)
            r2_scorer = make_scorer(r2_score)
            scores = cross_val_score(model, X_train, y_train, scoring=r2_scorer)
            return scores.mean()

        session = BTBSession(tunables, score_model, verbose=True)
        best_proposal = session.run(iterations=100)
        best_model = build_model(best_proposal['name'], best_proposal['config'])
        best_model.fit(X_train, y_train)
        pred = best_model.predict(X_test)
        # BUGFIX: the original assigned this result to a local named
        # `r2_score`, shadowing the imported function; since score_model
        # closes over train_btb's locals, tuning crashed with a
        # free-variable NameError before the assignment ever executed.
        r2_value = r2_score(y_test, pred)
        print('R2 score!!')
        print(r2_value)

        # now save the model in .pickle
        os.chdir(curdir)
        with open(model_name, 'wb') as f:
            pickle.dump(best_model, f)

    files.append(model_name)
    files.append(folder)
    model_dir = os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_ludwig.py | training/train_ludwig.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using Ludwig: https://github.com/uber/ludwig
This is enabled if the default_training_script = ['ludwig']
'''
import os, csv, json, random, sys, yaml, time, shutil
os.system('pip3 install tensorflow==1.15.2')
os.system('pip3 install ludwig==0.2.2.6')
from ludwig.api import LudwigModel
import pandas as pd
import numpy as np
def make_yaml(feature_labels, epochs):
    """Write model_definition.yaml describing Ludwig input/output features.

    Every label except 'class_' becomes a numerical input feature, and
    'class_' becomes the single categorical output feature.

    Returns:
        The name of the YAML file written ('model_definition.yaml').
    """
    print('making yaml file --> model_definition.yaml')
    # every non-target column is treated as a numerical input feature
    input_section = 'input_features:\n'
    input_section += ''.join(
        ' -\n name: %s\n type: %s\n' % (label, 'numerical')
        for label in feature_labels
        if label != 'class_'
    )
    # the 'class_' column is the lone categorical output feature
    output_section = 'output_features:\n -\n name: %s\n type: %s\n' % ('class_', 'category')
    yaml_text = input_section + '\n' + output_section
    with open('model_definition.yaml', 'w') as handle:
        handle.write(yaml_text)
    return 'model_definition.yaml'
def train_ludwig(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Train a model with Uber's Ludwig via its command-line interface.

    Copies the session's train/test CSVs into the current directory, builds
    a model_definition.yaml from the training CSV's header, then shells out
    to `ludwig experiment`.  The in-memory X/y splits are not used here;
    Ludwig reads the staged CSVs directly.

    Returns:
        (model_name, model_dir, files): the common model name, the current
        working directory, and the artifacts produced by the run.
    """
    # now make a model_definition.yaml
    model_name=common_name_model
    files=list()
    epochs=10  # NOTE(review): not forwarded to the ludwig CLI — confirm whether intended
    feature_inputs=list()
    # get some random naming data
    curdir=os.getcwd()
    csvname=common_name_model.split('_')[0]
    # get training and testing data
    # (prefer the transformed CSVs; fall back to the raw ones if absent)
    try:
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train_transformed.csv',os.getcwd()+'/train.csv')
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test_transformed.csv',os.getcwd()+'/test.csv')
    except:
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_train.csv',os.getcwd()+'/train.csv')
        shutil.copy(curdir+'/'+model_session+'/data/'+csvname+'_test.csv',os.getcwd()+'/test.csv')
    # now read file to get features
    data=pd.read_csv('train.csv')
    feature_labels=list(data)
    model_definition = make_yaml(feature_labels, epochs)
    print(os.getcwd())
    # run the full ludwig experiment (train + evaluate) through the CLI
    os.system('ludwig experiment --data_csv %s --model_definition_file model_definition.yaml --output_directory %s'%('train.csv', 'ludwig_files'))
    # keep the generated definition under the model's own name
    os.rename('model_definition.yaml', common_name_model+'.yaml')
    # add a bunch of files
    files.append('train.csv')
    files.append('test.csv')
    files.append('train.json')
    files.append('train.hdf5')
    files.append(common_name_model+'.yaml')
    files.append('ludwig_files')
    model_dir=os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/upgrade.py | training/upgrade.py | import os
# make sure the right version of scikit-learn is used here
# NOTE(review): version is pinned — presumably to match the version used
# when models were serialized; confirm before bumping.
os.system('pip3 install scikit-learn==0.22.2.post1')
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_cvopt.py | training/train_cvopt.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using CVOpt: https://github.com/genfifth/cvopt
This is enabled if the default_training_script = ['cvopt']
'''
import os, sys, shutil, pickle
# downloading library
os.system('pip3 install cvopt==0.4.3')
# os.system('pip3 install scikit-learn==0.22')
os.system('pip3 install bokeh==1.4.0')
os.system('pip3 install pandas==1.0.3')
os.system('pip3 install numpy==1.17')
import numpy as np
import pandas as pd
import scipy as sp
from sklearn import datasets
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.linear_model import LogisticRegression
from cvopt.model_selection import SimpleoptCV
from cvopt.search_setting import search_category, search_numeric
from sklearn.externals import joblib
def train_cvopt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Tune a LogisticRegression with cvopt across four search backends.

    Runs hyperopt, gaopt, bayesopt, and randomopt searches over the same
    parameter space, pickling each backend's refit best estimator, then
    keeps only the backend with the highest mean_test_score (renamed to
    the common model name) and deletes the rest.

    Returns:
        (model_name, model_dir, files): the winning pickle's filename, the
        current working directory, and the produced files/folders.
    """
    # create model name
    model_name=common_name_model+'.pickle'
    files=list()
    # search space shared by every backend
    param_distributions = {
        "penalty": search_category(['none', 'l2']),
        "C": search_numeric(0.01, 3.0, "float"),
        "tol" : search_numeric(0.0001, 0.001, "float"),
        "class_weight" : search_category([None, "balanced", {0:0.5, 1:0.1}]),
    }
    # delete search_usage directory (stale logs would corrupt the comparison)
    if 'search_usage' in os.listdir():
        shutil.rmtree('search_usage')
    for bk in ["hyperopt", "gaopt", "bayesopt", "randomopt"]:
        estimator = LogisticRegression()
        cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=0)
        opt = SimpleoptCV(estimator, param_distributions,
                          scoring="roc_auc",              # Objective of search
                          cv=cv,                          # Cross validation setting
                          max_iter=32,                    # Number of search
                          n_jobs=3,                       # Number of jobs to run in parallel.
                          verbose=2,                      # 0: don't display status, 1:display status by stdout, 2:display status by graph
                          logdir="./search_usage",        # If this path is specified, save the log.
                          model_id=bk,                    # used estimator's dir and file name in save.
                          save_estimator=2,               # estimator save setting.
                          backend=bk)                     # hyperopt,bayesopt, gaopt or randomopt.
        opt.fit(X_train, y_train, validation_data=(X_test, y_test))
        from cvopt.utils import extract_params
        # locate the log row belonging to the best score of this backend
        target_index = pd.DataFrame(opt.cv_results_)[pd.DataFrame(opt.cv_results_)["mean_test_score"] == opt.best_score_]["index"].values[0]
        estimator_params, feature_params, feature_select_flag = extract_params(logdir="./search_usage",
                                                                              model_id=bk,
                                                                              target_index=target_index)
        estimator.set_params(**estimator_params)            # Set estimator parameters
        print(estimator)
        # refit the best configuration on the full training set
        estimator.fit(X_train, y_train)
        X_train_selected = X_train[:, feature_select_flag]  # Extract selected feature columns
        print("Train features shape:", X_train.shape)
        print("Train selected features shape:",X_train_selected.shape)
        # pickle each backend's refit estimator under its backend name
        picklefile=open(bk+'.pickle','wb')
        pickle.dump(estimator,picklefile)
        picklefile.close()
    # from cvopt.utils import mk_metafeature
    # X_train_meta, X_test_meta = mk_metafeature(X_train, y_train,
    #                                            logdir="./search_usage",
    #                                            model_id=bk,
    #                                            target_index=target_index,
    #                                            cv=cv,
    #                                            validation_data=(X_test, y_test),
    #                                            estimator_method="predict_proba")
    # print("Train features shape:", X_train.shape)
    # print("Train meta features shape:", X_train_meta.shape)
    # print("Test features shape:", X_test.shape)
    # print("Test meta features shape:", X_test_meta.shape)
    # now export the best model in terms of accuracy
    curdir=os.getcwd()
    os.chdir('search_usage')
    os.chdir('cv_results')
    # load csv docs (per-backend cross-validation logs)
    bayesopt=pd.read_csv('bayesopt.csv')
    gaopt=pd.read_csv('gaopt.csv')
    hyperopt=pd.read_csv('hyperopt.csv')
    randomopt=pd.read_csv('randomopt.csv')
    # get max values per types of array
    bayesopt_=np.amax(np.array(bayesopt['mean_test_score']))
    gaopt_=np.amax(np.array(gaopt['mean_test_score']))
    hyperopt_=np.amax(np.array(hyperopt['mean_test_score']))
    randomopt_=np.amax(np.array(randomopt['mean_test_score']))
    # get total groups
    total=np.array([bayesopt_, gaopt_, hyperopt_, randomopt_])
    totalmax=np.amax(total)
    os.chdir(curdir)
    # promote the winner's pickle to the common model name; delete the rest
    if bayesopt_ == totalmax:
        os.rename('bayesopt.pickle',model_name)
        os.remove('randomopt.pickle')
        os.remove('hyperopt.pickle')
        os.remove('gaopt.pickle')
    elif gaopt_ == totalmax:
        os.rename('gaopt.pickle',model_name)
        os.remove('randomopt.pickle')
        os.remove('hyperopt.pickle')
        os.remove('bayesopt.pickle')
    elif hyperopt_ == totalmax:
        os.rename('hyperopt.pickle',model_name)
        os.remove('randomopt.pickle')
        os.remove('gaopt.pickle')
        os.remove('bayesopt.pickle')
    elif randomopt_ == totalmax:
        os.rename('randomopt.pickle',model_name)
        os.remove('hyperopt.pickle')
        os.remove('gaopt.pickle')
        os.remove('bayesopt.pickle')
    # now add all the relevant files to copy over
    files.append(model_name)
    files.append('search_usage')
    files.append('model.html')  # NOTE(review): model.html is not created in this block — presumably emitted by cvopt's graph verbosity; confirm
    model_dir=os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_devol.py | training/train_devol.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using devol: https://github.com/joeddav/devol
This is enabled if the default_training_script = ['devol']
'''
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import backend as K
from helpers.devol.devol import DEvol, GenomeHandler
from sklearn.model_selection import train_test_split
import time, os, shutil, json
def train_devol(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Evolve a CNN with DEvol (genetic architecture search over Keras models).

    Reshapes the tabular features into 4D tensors as the DEvol API expects,
    one-hot encodes labels, runs the genetic search, and saves the winning
    Keras model as '<common_name_model>.h5'.

    Returns:
        (model_name, model_dir, files): saved .h5 filename, the current
        working directory, and the list of produced files.
    """
    print('training DEVOL CNN network (may take up to 1 day)')
    # reshape the data (to accomodate library needs)
    # two trailing singleton axes turn each feature vector into an
    # image-like 4D tensor for the convolutional genome search
    x_train=X_train.reshape(X_train.shape+ (1,)+ (1,))
    x_test=X_test.reshape(X_test.shape+ (1,)+ (1,))
    # one-hot encode labels
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)
    dataset = ((x_train, y_train), (x_test, y_test))
    print(x_train.shape)
    print(x_train[0].shape)
    print(x_test.shape)
    print(x_test[0])
    '''
    The GenomeHandler class handles the constraints that are imposed upon models in a particular genetic program.
    In this example, a genome is allowed up to 6 convolutional layeres, 3 dense layers, 256 feature maps in each convolution, and
    1024 nodes in each dense layer. It also specifies three possible activation functions. See genome-handler.py for more information.
    '''
    # prepare genome configuration
    genome_handler = GenomeHandler(max_conv_layers=6,
                                   max_dense_layers=2, # includes final dense layer
                                   max_filters=256,
                                   max_dense_nodes=1024,
                                   input_shape=x_train[0].shape,
                                   n_classes=len(classes))
    '''
    The next, and final, step is create a DEvol and run it. Here we specify a few settings pertaining to the genetic program.
    In this example, we have 10 generations of evolution, 20 members in each population, and 3 epochs of training used to evaluate
    each model's fitness. The program will save each genome's encoding, as well as the model's loss and accuracy, in a .csv file printed at the beginning of program.
    '''
    devol = DEvol(genome_handler)
    model = devol.run(dataset=dataset,
                      num_generations=1,
                      pop_size=10,
                      epochs=10)
    model.summary()
    summary = str(model.to_json())
    # get model name
    files=list()
    model_name=common_name_model+".h5"
    model.save(model_name)
    print("\n Saved %s.json model to disk"%(model_name))
    files.append(model_name)
    model_dir=os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_autokeras.py | training/train_autokeras.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autokeras: https://github.com/keras-team/autokeras
This is enabled if the default_training_script = ['autokeras']
'''
import os, pickle, json, shutil
import autokeras as ak
import tensorflow as tf
import numpy as np
'''
# plot model
https://autokeras.com/tutorial/structured_data_regression/
'''
def train_autokeras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Fit an AutoKeras structured-data model and export it as a .h5 file.

    For classification (mtype == 'c') a StructuredDataClassifier is tuned,
    for regression (mtype == 'r') a StructuredDataRegressor; in either case
    a stale AutoKeras trial directory from a previous run is removed first.

    Returns:
        (model_name, model_dir, files): the saved .h5 filename, the current
        working directory, and the artifacts produced.
    """
    files = []
    model_name = common_name_model

    if mtype == 'c':
        trial_dir = 'structured_data_classifier'
        # clear stale trials so the new search starts from scratch
        if trial_dir in os.listdir():
            shutil.rmtree(trial_dir)
        search = ak.StructuredDataClassifier(max_trials=100)
        search.fit(X_train, y_train)
        files.append(trial_dir)
    elif mtype == 'r':
        trial_dir = 'structured_data_regressor'
        if trial_dir in os.listdir():
            shutil.rmtree(trial_dir)
        search = ak.StructuredDataRegressor(max_trials=100)
        search.fit(X_train, y_train)
        files.append(trial_dir)

    # sanity-check the fitted searcher on the held-out split
    predictions = search.predict(X_test).flatten()
    print(predictions)

    # export the underlying keras model and persist it
    exported = search.export_model()
    print(type(exported))  # <class 'tensorflow.python.keras.engine.training.Model'>
    exported.save(model_name + ".h5")
    model_name = model_name + ".h5"

    files.append(model_name)
    model_dir = os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_autogbt.py | training/train_autogbt.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autogbt: https://github.com/pfnet-research/autogbt-alt
This is enabled if the default_training_script = ['autogbt']
'''
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import random, pickle, time, json, os, shutil
import numpy as np
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
try:
from autogbt import AutoGBTClassifier
except:
print('initializing installation...')
os.system('pip3 install git+https://github.com/pfnet-research/autogbt-alt.git')
from autogbt import AutoGBTClassifier
def train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
    """Fit an AutoGBTClassifier and pickle it to disk.

    Note: only classification is supported here — ``mtype`` and the test
    split are not consulted; a classifier is always trained on
    ``X_train``/``y_train``.

    Returns:
        (model_name, model_dir, files): pickle filename, the directory it
        was written to, and the list of files produced.
    """
    # make initial names and lists
    files = list()
    model_name = common_name_model + '.pickle'
    # train classifier
    model = AutoGBTClassifier()
    model.fit(X_train, y_train)
    print('saving model...')
    # context manager closes the handle even if pickling fails
    # (the original also bound pickle.dump's None return to an unused var)
    with open(model_name, 'wb') as pmodel:
        pickle.dump(model, pmodel)
    files.append(model_name)
    model_dir = os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/train_keras.py | training/train_keras.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using keras simple MLP deep learning model: https://keras.io/getting-started/faq/
This is enabled if the default_training_script = ['keras']
'''
import os, sys, pickle, json, random, shutil, time, datetime, math
import numpy as np
import matplotlib.pyplot as plt
import keras.models
from keras import layers
from keras.models import Sequential,model_from_json
from keras.layers import Dense, Dropout
from sklearn.model_selection import train_test_split
def train_keras(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_features,transform_model,settings,model_session):
    """Train a simple Keras MLP (binary classifier) and save it as .h5.

    Architecture: two 64-unit ReLU layers with 0.5 dropout after each,
    then a single sigmoid output unit; optimized with rmsprop on binary
    cross-entropy for 100 epochs (batch size 128).

    Returns:
        (model_name, model_dir, files): saved .h5 filename, the current
        working directory, and the list of produced files.
    """
    files = []
    model_name = common_name_model + ".h5"

    # MAKE MODEL (assume classification problem)
    ############################################################################
    network = Sequential()
    network.add(Dense(64, input_dim=len(X_train[0]), activation='relu'))
    network.add(Dropout(0.5))
    network.add(Dense(64, activation='relu'))
    network.add(Dropout(0.5))
    network.add(Dense(1, activation='sigmoid'))

    network.compile(loss='binary_crossentropy',
                    optimizer='rmsprop',
                    metrics=['accuracy'])
    network.fit(X_train, y_train, epochs=100, batch_size=128)
    ############################################################################

    # re-compile before saving so optimizer state is not serialized
    network.compile(
        loss='binary_crossentropy',
        optimizer='rmsprop',
        metrics=['accuracy'],
    )
    network.save(model_name)

    files.append(model_name)
    model_dir = os.getcwd()
    return model_name, model_dir, files
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/setup.py | training/helpers/hyperopt-sklearn/setup.py | import logging
import os
try:
import setuptools
from setuptools import find_packages, setup
except ImportError:
raise ImportError(
"'setuptools' is required but not installed. To install it, "
"follow the instructions at "
"https://pip.pypa.io/en/stable/installing/#installing-with-get-pip-py")
# Distribution metadata for the hpsklearn package
# (hyperparameter optimization for scikit-learn estimators via hyperopt).
setup(
    name = "hpsklearn",
    version = '0.0.3',
    packages = find_packages(),
    scripts = [],
    url = 'http://hyperopt.github.com/hyperopt-sklearn/',
    download_url = 'https://github.com/hyperopt/hyperopt-sklearn/archive/0.0.3.tar.gz',
    author = 'James Bergstra',
    author_email = 'anon@anon.com',
    description = 'Hyperparameter Optimization for sklearn',
    # README doubles as the PyPI long description
    long_description = open('README.md').read(),
    keywords = ['hyperopt', 'hyperparameter', 'sklearn'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering',
        'Topic :: Software Development',
    ],
    platforms = ['Linux', 'OS-X', 'Windows'],
    license = 'BSD',
    install_requires = [
        'hyperopt',
        'nose',
        'numpy',
        'scikit-learn',
        'scipy',
    ],
    # optional extra: `pip install hpsklearn[xgboost]`
    extras_require = {
        'xgboost': ['xgboost==0.6a2']
    }
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/estimator.py | training/helpers/hyperopt-sklearn/hpsklearn/estimator.py | """
"""
import pickle
import copy
from functools import partial
from multiprocessing import Process, Pipe
import time
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score, r2_score
from sklearn.decomposition import PCA
try:
from sklearn.model_selection import KFold, StratifiedKFold, LeaveOneOut, \
ShuffleSplit, StratifiedShuffleSplit, \
PredefinedSplit
except ImportError:
# sklearn.cross_validation is deprecated in version 0.18 of sklearn
from sklearn.cross_validation import KFold, StratifiedKFold, LeaveOneOut, \
ShuffleSplit, StratifiedShuffleSplit, \
PredefinedSplit
# For backwards compatibility with older versions of hyperopt.fmin
import inspect
import numpy as np
import warnings
import hyperopt
import scipy.sparse
from . import components
# --- Tunable constants governing partial_fit early stopping / retraining ---
# Constants for partial_fit
# The partial_fit method will not be run if there is less than
# timeout * timeout_buffer number of seconds left before timeout
timeout_buffer = 0.05
# The minimum number of iterations of the partial_fit method that must be run
# before early stopping can kick in is min_n_iters
min_n_iters = 7
# After best_loss_cutoff_n_iters iterations have occured, the training can be
# stopped early if the validation scores are far from the best scores
best_loss_cutoff_n_iters = 35
# Early stopping can occur when the best validation score of the earlier runs is
# greater than that of the later runs, tipping_pt_ratio determines the split
tipping_pt_ratio = 0.6
# Retraining will be done with all training data for retrain_fraction
# multiplied by the number of iterations used to train the original learner
retrain_fraction = 1.2
class NonFiniteFeature(Exception):
    """Raised when a preprocessing step produces NaN/Inf feature values.

    The offending preprocessing algorithm is passed as the exception
    argument (see ``transform_combine_XEX``) so the failing trial can
    report which step generated the non-finite values.
    """
def transform_combine_XEX(Xfit, info, en_pps=[], Xval=None,
                          EXfit_list=None, ex_pps_list=[], EXval_list=None):
    '''Transform endogenous and exogenous datasets and combine them into a
    single dataset for training and testing.

    Args:
        Xfit: endogenous training data (2-D array or sparse matrix).
        info: logging callable taking print-style positional arguments.
        en_pps: preprocessing steps for the endogenous data.
            NOTE(review): the mutable default is never mutated -- it is
            copied via ``list(en_pps)`` below before use.
        Xval: optional endogenous validation data.
        EXfit_list: optional list of exogenous training datasets.
        ex_pps_list: preprocessing pipelines, one list per exogenous
            dataset; defaults to no preprocessing for each.
        EXval_list: optional exogenous validation datasets; only allowed
            when ``EXfit_list`` is also given.

    Returns:
        ``XEXfit`` (one column-wise combined matrix) when ``Xval`` is None,
        otherwise the pair ``(XEXfit, XEXval)``.

    Raises:
        NonFiniteFeature: if any preprocessing step yields NaN/Inf values.
    '''
    def run_preprocs(preprocessings, Xfit, Xval=None):
        '''Run all preprocessing steps in a pipeline
        '''
        for pp_algo in preprocessings:
            info('Fitting', pp_algo, 'to X of shape', Xfit.shape)
            if isinstance(pp_algo, PCA):
                # Clamp the sampled n_components to the data dimensions,
                # since PCA cannot exceed min(n_samples, n_features).
                n_components = pp_algo.get_params()['n_components']
                n_components = min([n_components] + list(Xfit.shape))
                pp_algo.set_params(n_components=n_components)
                info('Limited PCA n_components at', n_components)
            pp_algo.fit(Xfit)
            info('Transforming Xfit', Xfit.shape)
            Xfit = pp_algo.transform(Xfit)
            # np.isfinite() does not work on sparse matrices
            if not (scipy.sparse.issparse(Xfit) or \
                    np.all(np.isfinite(Xfit))):
                # -- jump to NonFiniteFeature handler below
                raise NonFiniteFeature(pp_algo)
            if Xval is not None:
                info('Transforming Xval', Xval.shape)
                Xval = pp_algo.transform(Xval)
                if not (scipy.sparse.issparse(Xval) or \
                        np.all(np.isfinite(Xval))):
                    # -- jump to NonFiniteFeature handler below
                    raise NonFiniteFeature(pp_algo)
        return (Xfit, Xval)
    # import ipdb; ipdb.set_trace()
    transformed_XEX_list = []
    # Copy caller-supplied lists so the mutable defaults stay untouched.
    en_pps = list(en_pps)
    ex_pps_list = list(ex_pps_list)
    if ex_pps_list == [] and EXfit_list is not None:
        # Default: no preprocessing for each exogenous dataset.
        ex_pps_list = [[]] * len(EXfit_list)
    xex_pps_list = [en_pps] + ex_pps_list
    if EXfit_list is None:
        EXfit_list = []
        assert EXval_list is None
        EXval_list = []
    elif EXval_list is None:
        EXval_list = [None] * len(EXfit_list)
    EXfit_list = list(EXfit_list)
    EXval_list = list(EXval_list)
    # Run each dataset (endogenous first) through its own pipeline,
    # collecting (fit, val) pairs or fit-only matrices.
    XEXfit_list = [Xfit] + EXfit_list
    XEXval_list = [Xval] + EXval_list
    for pps, dfit, dval in zip(xex_pps_list, XEXfit_list, XEXval_list):
        if pps != []:
            dfit, dval = run_preprocs(pps, dfit, dval)
        if dval is not None:
            transformed_XEX_list.append( (dfit, dval) )
        else:
            transformed_XEX_list.append(dfit)
    def safe_concatenate(XS):
        # Column-wise concatenation; upgrades every matrix to sparse CSR
        # when at least one participant is sparse.
        if not any(scipy.sparse.issparse(x) for x in XS):
            return np.concatenate(XS, axis=1)
        XS = [ x if scipy.sparse.issparse(x) else scipy.sparse.csr_matrix(x)
               for x in XS ]
        return scipy.sparse.hstack(XS)
    if Xval is None:
        XEXfit = safe_concatenate(transformed_XEX_list)
        return XEXfit
    else:
        XEXfit_list, XEXval_list = zip(*transformed_XEX_list)
        XEXfit = safe_concatenate(XEXfit_list)
        XEXval = safe_concatenate(XEXval_list)
        return (XEXfit, XEXval)
def pfit_until_convergence(learner, is_classif, XEXfit, yfit, info,
                           max_iters=None, best_loss=None,
                           XEXval=None, yval=None,
                           timeout=None, t_start=None):
    '''Do partial fitting until the convergence criterion is met

    Repeatedly calls ``learner.partial_fit`` on a shuffled permutation of
    the training data, optionally scoring on a validation set after each
    pass.  Stops on ``max_iters``, on wall-clock timeout, or via the
    ``should_stop`` early-stopping heuristics.

    Args:
        learner: estimator supporting ``partial_fit`` (and ``score`` when a
            validation set is given).
        is_classif: True for classifiers; passes ``classes=np.unique(yfit)``
            to ``partial_fit``.
        XEXfit, yfit: training data and targets.
        info: logging callable.
        max_iters: maximum number of passes; when None, ``XEXval``/``yval``
            and ``best_loss`` must be supplied so early stopping can apply.
        best_loss: best loss across all trials so far, used by the cutoff
            heuristic.
        XEXval, yval: optional validation data and targets.
        timeout, t_start: wall-clock budget; stops when fewer than
            ``timeout * timeout_buffer`` seconds remain.

    Returns:
        ``(learner, n_iters)`` when no validation set was given, else
        ``(best_learner, n_iters)`` where ``best_learner`` is a deep copy
        of the learner at its best validation score (None when the time
        budget expired before any scoring pass completed).
    '''
    if max_iters is None:
        assert XEXval is not None and yval is not None and\
               best_loss is not None
    if timeout is not None:
        assert t_start is not None
    def should_stop(scores):
        # TODO: possibly extend min_n_iters based on how close the current
        # score is to the best score, up to some larger threshold
        if len(scores) < min_n_iters:
            return False
        # Split the score history at tipping_pt_ratio; if the early part
        # already contains the maximum, the run has stopped improving.
        tipping_pt = int(tipping_pt_ratio * len(scores))
        early_scores = scores[:tipping_pt]
        late_scores = scores[tipping_pt:]
        if max(early_scores) >= max(late_scores):
            info("stopping early due to no improvement in late scores")
            return True
        # TODO: make this less confusing and possibly more accurate
        if len(scores) > best_loss_cutoff_n_iters and \
           max(scores) < 1 - best_loss and \
           3 * ( max(late_scores) - max(early_scores) ) < \
           1 - best_loss - max(late_scores):
            info("stopping early due to best_loss cutoff criterion")
            return True
        return False
    n_iters = 0  # Keep track of the number of training iterations
    best_learner = None
    if timeout is not None:
        timeout_tolerance = timeout * timeout_buffer
    else:
        # No timeout given: make the time-based test below always False.
        timeout = float('Inf')
        timeout_tolerance = 0.
        t_start = float('Inf')
    rng = np.random.RandomState(6665)
    train_idxs = rng.permutation(XEXfit.shape[0])
    validation_scores = []
    def convergence_met():
        if max_iters is not None and n_iters >= max_iters:
            return True
        if time.time() - t_start >= timeout - timeout_tolerance:
            return True
        if yval is not None:
            return should_stop(validation_scores)
        else:
            return False
    while not convergence_met():
        n_iters += 1
        rng.shuffle(train_idxs)
        if is_classif:
            learner.partial_fit(XEXfit[train_idxs], yfit[train_idxs],
                                classes=np.unique(yfit))
        else:
            learner.partial_fit(XEXfit[train_idxs], yfit[train_idxs])
        if XEXval is not None:
            validation_scores.append(learner.score(XEXval, yval))
            # Snapshot the learner whenever it reaches a new best score.
            if max(validation_scores) == validation_scores[-1]:
                best_learner = copy.deepcopy(learner)
            info('VSCORE', validation_scores[-1])
    if XEXval is None:
        return (learner, n_iters)
    else:
        return (best_learner, n_iters)
def _cost_fn(argd, X, y, EX_list, valid_size, n_folds, shuffle, random_state,
             use_partial_fit, info, timeout, _conn, loss_fn=None,
             continuous_loss_fn=False, best_loss=None):
    '''Calculate the loss function

    Evaluates one hyperparameter configuration ``argd`` by cross-validated
    training and prediction, then sends the result back over ``_conn``
    rather than returning it (this function runs in a child process; see
    ``fit_iter``).  The message is a ``(rtype, rval)`` pair: ``rtype`` is
    ``'return'`` with a hyperopt result dict, or ``'raise'`` with an
    exception to be re-raised in the parent.
    '''
    try:
        t_start = time.time()
        # Extract info from calling function.
        if 'classifier' in argd:
            classifier = argd['classifier']
            regressor = argd['regressor']
            preprocessings = argd['preprocessing']
            ex_pps_list = argd['ex_preprocs']
        else:
            classifier = argd['model']['classifier']
            regressor = argd['model']['regressor']
            preprocessings = argd['model']['preprocessing']
            ex_pps_list = argd['model']['ex_preprocs']
        learner = classifier if classifier is not None else regressor
        is_classif = classifier is not None
        untrained_learner = copy.deepcopy(learner)
        # -- N.B. modify argd['preprocessing'] in-place
        # Determine cross-validation iterator.
        if n_folds is not None:
            if n_folds == -1:
                info('Will use leave-one-out CV')
                try:
                    cv_iter = LeaveOneOut().split(X)
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = LeaveOneOut(len(y))
            elif is_classif:
                info('Will use stratified K-fold CV with K:', n_folds,
                     'and Shuffle:', shuffle)
                try:
                    cv_iter = StratifiedKFold(n_splits=n_folds,
                                              shuffle=shuffle,
                                              random_state=random_state
                                              ).split(X, y)
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = StratifiedKFold(y, n_folds=n_folds,
                                              shuffle=shuffle,
                                              random_state=random_state)
            else:
                info('Will use K-fold CV with K:', n_folds,
                     'and Shuffle:', shuffle)
                try:
                    cv_iter = KFold(n_splits=n_folds,
                                    shuffle=shuffle,
                                    random_state=random_state).split(X)
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = KFold(len(y), n_folds=n_folds,
                                    shuffle=shuffle,
                                    random_state=random_state)
        else:
            if not shuffle:  # always choose the last samples.
                info('Will use the last', valid_size,
                     'portion of samples for validation')
                n_train = int(len(y) * (1 - valid_size))
                # np.int was removed in NumPy 1.24; the builtin int is the
                # equivalent (platform default integer) dtype.
                valid_fold = np.ones(len(y), dtype=int)
                valid_fold[:n_train] = -1  # "-1" indicates train fold.
                try:
                    cv_iter = PredefinedSplit(valid_fold).split()
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = PredefinedSplit(valid_fold)
            elif is_classif:
                info('Will use stratified shuffle-and-split with validation \
                    portion:', valid_size)
                try:
                    cv_iter = StratifiedShuffleSplit(1, test_size=valid_size,
                                                     random_state=random_state
                                                     ).split(X, y)
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = StratifiedShuffleSplit(y, 1, test_size=valid_size,
                                                     random_state=random_state)
            else:
                info('Will use shuffle-and-split with validation portion:',
                     valid_size)
                try:
                    cv_iter = ShuffleSplit(n_splits=1, test_size=valid_size,
                                           random_state=random_state).split(X)
                except TypeError:
                    # Older syntax before sklearn version 0.18
                    cv_iter = ShuffleSplit(len(y), 1, test_size=valid_size,
                                           random_state=random_state)
        # Use the above iterator for cross-validation prediction.
        cv_y_pool = np.array([])
        cv_pred_pool = np.array([])
        cv_n_iters = np.array([])
        for train_index, valid_index in cv_iter:
            Xfit, Xval = X[train_index], X[valid_index]
            yfit, yval = y[train_index], y[valid_index]
            if EX_list is not None:
                _EX_list = [ (EX[train_index], EX[valid_index])
                             for EX in EX_list ]
                EXfit_list, EXval_list = zip(*_EX_list)
            else:
                EXfit_list = None
                EXval_list = None
            XEXfit, XEXval = transform_combine_XEX(
                Xfit, info, preprocessings, Xval,
                EXfit_list, ex_pps_list, EXval_list
            )
            learner = copy.deepcopy(untrained_learner)
            info('Training learner', learner, 'on X/EX of dimension',
                 XEXfit.shape)
            if hasattr(learner, "partial_fit") and use_partial_fit:
                learner, n_iters = pfit_until_convergence(
                    learner, is_classif, XEXfit, yfit, info,
                    best_loss=best_loss, XEXval=XEXval, yval=yval,
                    timeout=timeout, t_start=t_start
                )
            else:
                learner.fit(XEXfit, yfit)
                n_iters = None
            # pfit_until_convergence returns learner=None when the time
            # budget ran out; bail out and report failure below.
            if learner is None:
                break
            cv_y_pool = np.append(cv_y_pool, yval)
            info('Scoring on X/EX validation of shape', XEXval.shape)
            if continuous_loss_fn:
                cv_pred_pool = np.append(cv_pred_pool, learner.predict_proba(XEXval))
            else:
                cv_pred_pool = np.append(cv_pred_pool, learner.predict(XEXval))
            cv_n_iters = np.append(cv_n_iters, n_iters)
        else:  # all CV folds are exhausted.
            if loss_fn is None:
                if is_classif:
                    loss = 1 - accuracy_score(cv_y_pool, cv_pred_pool)
                    # -- squared standard error of mean
                    lossvar = (loss * (1 - loss)) / max(1, len(cv_y_pool) - 1)
                    info('OK trial with accuracy %.1f +- %.1f' % (
                         100 * (1 - loss),
                         100 * np.sqrt(lossvar))
                    )
                else:
                    loss = 1 - r2_score(cv_y_pool, cv_pred_pool)
                    lossvar = None  # variance of R2 is undefined.
                    info('OK trial with R2 score %.2e' % (1 - loss))
            else:
                # Use a user specified loss function
                loss = loss_fn(cv_y_pool, cv_pred_pool)
                lossvar = None
                info('OK trial with loss %.1f' % loss)
            t_done = time.time()
            rval = {
                'loss': loss,
                'loss_variance': lossvar,
                'learner': untrained_learner,
                'preprocs': preprocessings,
                'ex_preprocs': ex_pps_list,
                'status': hyperopt.STATUS_OK,
                'duration': t_done - t_start,
                'iterations': (cv_n_iters.max()
                               if (hasattr(learner, "partial_fit") and use_partial_fit)
                               else None),
            }
            rtype = 'return'
        # The for loop exit with break, one fold did not finish running.
        if learner is None:
            t_done = time.time()
            rval = {
                'status': hyperopt.STATUS_FAIL,
                'failure': 'Not enough time to finish training on \
                        all CV folds',
                'duration': t_done - t_start,
            }
            rtype = 'return'
    ##==== Cost function exception handling ====##
    except (NonFiniteFeature,) as exc:
        print('Failing trial due to NaN in', str(exc))
        t_done = time.time()
        rval = {
            'status': hyperopt.STATUS_FAIL,
            'failure': str(exc),
            'duration': t_done - t_start,
        }
        rtype = 'return'
    except (ValueError,) as exc:
        if ('k must be less than or equal'
                ' to the number of training points') in str(exc):
            t_done = time.time()
            rval = {
                'status': hyperopt.STATUS_FAIL,
                'failure': str(exc),
                'duration': t_done - t_start,
            }
            rtype = 'return'
        else:
            rval = exc
            rtype = 'raise'
    except (AttributeError,) as exc:
        print('Failing due to k_means_ weirdness')
        if "'NoneType' object has no attribute 'copy'" in str(exc):
            # -- sklearn/cluster/k_means_.py line 270 raises this sometimes
            t_done = time.time()
            rval = {
                'status': hyperopt.STATUS_FAIL,
                'failure': str(exc),
                'duration': t_done - t_start,
            }
            rtype = 'return'
        else:
            rval = exc
            rtype = 'raise'
    except Exception as exc:
        rval = exc
        rtype = 'raise'
    # -- return the result to calling process
    _conn.send((rtype, rval))
class hyperopt_estimator(BaseEstimator):
def __init__(self,
preprocessing=None,
ex_preprocs=None,
classifier=None,
regressor=None,
space=None,
algo=None,
max_evals=10,
loss_fn=None,
continuous_loss_fn=False,
verbose=False,
trial_timeout=None,
fit_increment=1,
fit_increment_dump_filename=None,
seed=None,
use_partial_fit=False,
refit=True,
):
"""
Parameters
----------
preprocessing: pyll.Apply node, default is None
This should evaluate to a list of sklearn-style preprocessing
modules (may include hyperparameters). When None, a random
preprocessing module will be used.
ex_preprocs: pyll.Apply node, default is None
This should evaluate to a list of lists of sklearn-style
preprocessing modules for each exogenous dataset. When None, no
preprocessing will be applied to exogenous data.
classifier: pyll.Apply node
This should evaluates to sklearn-style classifier (may include
hyperparameters).
regressor: pyll.Apply node
This should evaluates to sklearn-style regressor (may include
hyperparameters).
algo: hyperopt suggest algo (e.g. rand.suggest)
max_evals: int
Fit() will evaluate up to this-many configurations. Does not apply
to fit_iter, which continues to search indefinitely.
loss_fn: callable
A function that takes the arguments (y_target, y_prediction)
and computes a loss value to be minimized. If no function is
specified, '1.0 - accuracy_score(y_target, y_prediction)' is used
for classification and '1.0 - r2_score(y_target, y_prediction)'
is used for regression
continuous_loss_fn: boolean, default is False
When true, the loss function is passed the output of
predict_proba() as the second argument. This is to facilitate the
use of continuous loss functions like cross entropy or AUC. When
false, the loss function is given the output of predict(). If
true, `classifier` and `loss_fn` must also be specified.
trial_timeout: float (seconds), or None for no timeout
Kill trial evaluations after this many seconds.
fit_increment: int
Every this-many trials will be a synchronization barrier for
ongoing trials, and the hyperopt Trials object may be
check-pointed. (Currently evaluations are done serially, but
that might easily change in future to allow e.g. MongoTrials)
fit_increment_dump_filename : str or None
Periodically dump self.trials to this file (via cPickle) during
fit() Saves after every `fit_increment` trial evaluations.
seed: numpy.random.RandomState or int or None
If int, the integer will be used to seed a RandomState instance
for use in hyperopt.fmin. Use None to make sure each run is
independent. Default is None.
use_partial_fit : boolean
If the learner support partial fit, it can be used for online
learning. However, the whole train set is not split into mini
batches here. The partial fit is used to iteratively update
parameters on the whole train set. Early stopping is used to kill
the training when the validation score stops improving.
refit: boolean, default True
Refit the best model on the whole data set.
"""
self.max_evals = max_evals
self.loss_fn = loss_fn
self.continuous_loss_fn = continuous_loss_fn
self.verbose = verbose
self.trial_timeout = trial_timeout
self.fit_increment = fit_increment
self.fit_increment_dump_filename = fit_increment_dump_filename
self.use_partial_fit = use_partial_fit
self.refit = refit
if space is None:
if classifier is None and regressor is None:
self.classification = True
classifier = components.any_classifier('classifier')
elif classifier is not None:
assert regressor is None
self.classification = True
else:
assert regressor is not None
self.classification = False
# classifier = components.any_classifier('classifier')
if preprocessing is None:
preprocessing = components.any_preprocessing('preprocessing')
else:
# assert isinstance(preprocessing, (list, tuple))
pass
if ex_preprocs is None:
ex_preprocs = []
else:
assert isinstance(ex_preprocs, (list, tuple))
# assert all(
# isinstance(pps, (list, tuple)) for pps in ex_preprocs
# )
self.n_ex_pps = len(ex_preprocs)
self.space = hyperopt.pyll.as_apply({
'classifier': classifier,
'regressor': regressor,
'preprocessing': preprocessing,
'ex_preprocs': ex_preprocs
})
else:
assert classifier is None
assert regressor is None
assert preprocessing is None
assert ex_preprocs is None
# self.space = hyperopt.pyll.as_apply(space)
self.space = space
evaled_space = space.eval()
if 'ex_preprocs' in evaled_space:
self.n_ex_pps = len(evaled_space['ex_preprocs'])
else:
self.n_ex_pps = 0
self.ex_preprocs = []
if algo is None:
self.algo = hyperopt.rand.suggest
else:
self.algo = algo
if seed is not None:
self.rstate = (np.random.RandomState(seed)
if isinstance(seed, int) else seed)
else:
self.rstate = np.random.RandomState()
# Backwards compatibility with older version of hyperopt
self.seed = seed
if 'rstate' not in inspect.getargspec(hyperopt.fmin).args:
print("Warning: Using older version of hyperopt.fmin")
if self.continuous_loss_fn:
assert self.space['classifier'] is not None, \
"Can only use continuous_loss_fn with classifiers."
assert self.loss_fn is not None, \
"Must specify loss_fn if continuous_loss_fn is true."
def info(self, *args):
if self.verbose:
print(' '.join(map(str, args)))
def fit_iter(self, X, y, EX_list=None, valid_size=.2, n_folds=None,
cv_shuffle=False, warm_start=False,
random_state=np.random.RandomState(),
weights=None, increment=None):
"""Generator of Trials after ever-increasing numbers of evaluations
"""
assert weights is None
increment = self.fit_increment if increment is None else increment
# len does not work on sparse matrices, so using shape[0] instead
# shape[0] does not work on lists, so using len() for those
if scipy.sparse.issparse(X):
data_length = X.shape[0]
else:
data_length = len(X)
if type(X) is list:
X = np.array(X)
if type(y) is list:
y = np.array(y)
if not warm_start:
self.trials = hyperopt.Trials()
self._best_loss = float('inf')
else:
assert hasattr(self, 'trials')
# self._best_loss = float('inf')
# This is where the cost function is used.
fn = partial(_cost_fn,
X=X, y=y, EX_list=EX_list,
valid_size=valid_size, n_folds=n_folds,
shuffle=cv_shuffle, random_state=random_state,
use_partial_fit=self.use_partial_fit,
info=self.info,
timeout=self.trial_timeout,
loss_fn=self.loss_fn,
continuous_loss_fn=self.continuous_loss_fn)
# Wrap up the cost function as a process with timeout control.
def fn_with_timeout(*args, **kwargs):
conn1, conn2 = Pipe()
kwargs['_conn'] = conn2
th = Process(target=partial(fn, best_loss=self._best_loss),
args=args, kwargs=kwargs)
th.start()
if conn1.poll(self.trial_timeout):
fn_rval = conn1.recv()
th.join()
else:
self.info('TERMINATING DUE TO TIMEOUT')
th.terminate()
th.join()
fn_rval = 'return', {
'status': hyperopt.STATUS_FAIL,
'failure': 'TimeOut'
}
assert fn_rval[0] in ('raise', 'return')
if fn_rval[0] == 'raise':
raise fn_rval[1]
# -- remove potentially large objects from the rval
# so that the Trials() object below stays small
# We can recompute them if necessary, and it's usually
# not necessary at all.
if fn_rval[1]['status'] == hyperopt.STATUS_OK:
fn_loss = float(fn_rval[1].get('loss'))
fn_preprocs = fn_rval[1].pop('preprocs')
fn_ex_preprocs = fn_rval[1].pop('ex_preprocs')
fn_learner = fn_rval[1].pop('learner')
fn_iters = fn_rval[1].pop('iterations')
if fn_loss < self._best_loss:
self._best_preprocs = fn_preprocs
self._best_ex_preprocs = fn_ex_preprocs
self._best_learner = fn_learner
self._best_loss = fn_loss
self._best_iters = fn_iters
return fn_rval[1]
while True:
new_increment = yield self.trials
if new_increment is not None:
increment = new_increment
#FIXME: temporary workaround for rstate issue #35
# latest hyperopt.fmin() on master does not match PyPI
if 'rstate' in inspect.getargspec(hyperopt.fmin).args:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
rstate=self.rstate,
# -- let exceptions crash the program,
# so we notice them.
catch_eval_exceptions=False,
return_argmin=False, # -- in case no success so far
)
else:
if self.seed is None:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
)
else:
hyperopt.fmin(fn_with_timeout,
space=self.space,
algo=self.algo,
trials=self.trials,
max_evals=len(self.trials.trials) + increment,
rseed=self.seed,
)
    def retrain_best_model_on_full_data(self, X, y, EX_list=None,
                                        weights=None):
        """Refit the best model found by the search on the full dataset.

        Runs the best preprocessing pipelines over X (and the exogenous
        datasets), then retrains ``self._best_learner``.  When partial_fit
        is in use, trains for ``retrain_fraction`` times the number of
        iterations that produced the best trial.
        """
        if EX_list is not None:
            assert isinstance(EX_list, (list, tuple))
            assert len(EX_list) == self.n_ex_pps
        XEX = transform_combine_XEX(
            X, self.info, en_pps=self._best_preprocs,
            EXfit_list=EX_list, ex_pps_list=self._best_ex_preprocs
        )
        self.info('Training learner', self._best_learner,
                  'on X/EX of dimension', XEX.shape)
        if hasattr(self._best_learner, 'partial_fit') and \
           self.use_partial_fit:
            self._best_learner, _ = pfit_until_convergence(
                self._best_learner, self.classification, XEX, y, self.info,
                max_iters=int(self._best_iters * retrain_fraction)
            )
        else:
            self._best_learner.fit(XEX, y)
def fit(self, X, y, EX_list=None,
valid_size=.2, n_folds=None,
cv_shuffle=False, warm_start=False,
random_state=np.random.RandomState(),
weights=None):
"""
Search the space of learners and preprocessing steps for a good
predictive model of y <- X. Store the best model for predictions.
Args:
EX_list ([list]): List of exogenous datasets. Each must has the
same number of samples as X.
valid_size ([float]): The portion of the dataset used as the
validation set. If cv_shuffle is False,
always use the last samples as validation.
n_folds ([int]): When n_folds is not None, use K-fold cross-
validation when n_folds > 2. Or, use leave-one-out
cross-validation when n_folds = -1.
cv_shuffle ([boolean]): Whether do sample shuffling before
splitting the data into train and valid
sets or not.
warm_start ([boolean]): If warm_start, the estimator will start
from an existing sequence of trials.
random_state: The random state used to seed the cross-validation
shuffling.
Notes:
For classification problems, will always use the stratified version
of the K-fold cross-validation or shuffle-and-split.
"""
if EX_list is not None:
assert isinstance(EX_list, (list, tuple))
assert len(EX_list) == self.n_ex_pps
filename = self.fit_increment_dump_filename
fit_iter = self.fit_iter(X, y, EX_list=EX_list,
valid_size=valid_size,
n_folds=n_folds,
cv_shuffle=cv_shuffle,
warm_start=warm_start,
random_state=random_state,
weights=weights,
increment=self.fit_increment)
next(fit_iter)
adjusted_max_evals = (self.max_evals if not warm_start else
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/vkmeans.py | training/helpers/hyperopt-sklearn/hpsklearn/vkmeans.py | import numpy as np
from sklearn.cluster import KMeans
class ColumnKMeans(object):
    """Per-column K-means feature expansion.

    Fits an independent ``sklearn.cluster.KMeans`` model to each column of
    the input; ``transform`` replaces every column with ``n_clusters``
    soft-membership features (row-normalized ``exp(-distance**2)`` to each
    centroid), yielding an output of shape
    ``(n_samples, n_features * n_clusters)``.

    NOTE(review): ``precompute_distances`` and ``n_jobs`` were removed from
    sklearn's KMeans in 1.0+ -- confirm the targeted sklearn version before
    upgrading.
    """

    def __init__(self,
                 n_clusters,
                 init='k-means++',
                 n_init=10,
                 max_iter=300,
                 tol=1e-4,
                 precompute_distances=True,
                 verbose=0,
                 random_state=None,
                 copy_x=True,
                 n_jobs=1,
                 ):
        self.n_clusters = n_clusters
        self.init = init
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
        # Output dtype override; when None, transform() uses X.dtype.
        self.output_dtype = None

    def fit(self, X):
        """Fit one KMeans model per column of ``X``; returns ``self``."""
        self.col_models = []
        for jj in range(X.shape[1]):
            col_model = KMeans(
                n_clusters=self.n_clusters,
                init=self.init,
                n_init=self.n_init,
                max_iter=self.max_iter,
                tol=self.tol,
                precompute_distances=self.precompute_distances,
                verbose=self.verbose,
                random_state=self.random_state,
                copy_x=self.copy_x,
                n_jobs=self.n_jobs,
            )
            col_model.fit(X[:, jj:jj + 1])
            self.col_models.append(col_model)
        # sklearn convention: fit returns the estimator for chaining.
        return self

    def transform(self, X):
        """Map each column to normalized cluster-membership features."""
        rows, cols = X.shape
        if self.output_dtype is None:
            output_dtype = X.dtype  # XXX
        else:
            output_dtype = self.output_dtype
        rval = np.empty(
            (rows, cols, self.n_clusters),
            dtype=output_dtype)
        for jj in range(cols):
            Xj = X[:, jj:jj + 1]
            dists = self.col_models[jj].transform(Xj)
            feats = np.exp(-(dists ** 2))
            # -- normalize features by row
            rval[:, jj, :] = feats / (feats.sum(axis=1)[:, None])
        assert np.all(np.isfinite(rval))
        return rval.reshape((rows, cols * self.n_clusters))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/lagselectors.py | training/helpers/hyperopt-sklearn/hpsklearn/lagselectors.py | """Lag selectors that subset time series predictors
This module defines lag selectors with specified lag sizes for endogenous and
exogenous predictors, using the same style as the sklearn transformers. They
can be used in hpsklearn as preprocessors. The module is well suited for time
series data.
When use a lag size of a positive integer, it is assumed that lag=1, 2, ...
predictors are located at the 1st, 2nd, ... columns. When use a negative
integer, the predictors are located at the N-th, (N - 1)th, ... columns.
"""
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class LagSelector(BaseEstimator, TransformerMixin):
    """Subset time series features by choosing the most recent lags

    Parameters
    ----------
    lag_size : int, None by default
        If None, use all features. If positive integer, use features by
        subsetting the X as [:, :lag_size]. If negative integer, use features
        by subsetting the X as [:, lag_size:]. If 0, discard the features
        from this dataset.

    Attributes
    ----------
    max_lag_size_ : int
        The largest allowed lag size inferred from input.
    """

    def __init__(self, lag_size=None):
        self.lag_size = lag_size

    def _reset(self):
        """Reset internal data-dependent state of the selector, if necessary.

        __init__ parameters are not touched.
        """
        if hasattr(self, 'max_lag_size_'):
            del self.max_lag_size_

    def fit(self, X, y=None):
        """Infer the maximum lag size.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            The input time series data with lagged predictors as features.
        y: Passthrough for ``Pipeline`` compatibility.

        Returns
        -------
        self
        """
        # Reset internal state before fitting
        self._reset()
        self.max_lag_size_ = X.shape[1]
        # sklearn convention: fit returns the estimator for chaining.
        return self

    def transform(self, X, y=None):
        """Select the ``lag_size`` most recent lag columns of ``X``.

        A ``lag_size`` of None returns ``X`` unchanged (all features), as
        documented in the class docstring; previously this case crashed
        with ``TypeError`` on ``abs(None)``.  The requested size is clamped
        to the number of features seen during ``fit``.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The input time series data with lagged predictors as features.
        """
        if self.lag_size is None:
            return X
        proofed_lag_size = min(self.max_lag_size_, abs(self.lag_size))
        if self.lag_size >= 0:
            return X[:, :proofed_lag_size]
        else:
            return X[:, -proofed_lag_size:]
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/components.py | training/helpers/hyperopt-sklearn/hpsklearn/components.py | import numpy as np
import sklearn.svm
import sklearn.ensemble
import sklearn.tree
import sklearn.neighbors
import sklearn.decomposition
import sklearn.preprocessing
import sklearn.neural_network
import sklearn.linear_model
import sklearn.discriminant_analysis
import sklearn.feature_extraction.text
import sklearn.naive_bayes
import sklearn.multiclass
from functools import partial
from hyperopt.pyll import scope, as_apply
from hyperopt import hp
from .vkmeans import ColumnKMeans
from . import lagselectors
# Optional dependencies
try:
import xgboost
except ImportError:
xgboost = None
##########################################
##==== Wrappers for sklearn modules ====##
##########################################
# ---------------------------------------------------------------------------
# Thin hyperopt-pyll wrappers.  Each @scope.define function simply constructs
# the corresponding sklearn / xgboost / local estimator, registering the
# constructor in hyperopt's pyll scope so that estimators can be built lazily
# inside a search-space expression.
# ---------------------------------------------------------------------------
@scope.define
def sklearn_SVC(*args, **kwargs):
    return sklearn.svm.SVC(*args, **kwargs)
@scope.define
def sklearn_SVR(*args, **kwargs):
    return sklearn.svm.SVR(*args, **kwargs)
@scope.define
def ts_LagSelector(*args, **kwargs):
    return lagselectors.LagSelector(*args, **kwargs)
@scope.define
def sklearn_LinearSVC(*args, **kwargs):
    return sklearn.svm.LinearSVC(*args, **kwargs)
@scope.define
def sklearn_KNeighborsClassifier(*args, **kwargs):
    return sklearn.neighbors.KNeighborsClassifier(*args, **kwargs)
@scope.define
def sklearn_KNeighborsRegressor(*args, **kwargs):
    return sklearn.neighbors.KNeighborsRegressor(*args, **kwargs)
@scope.define
def sklearn_AdaBoostClassifier(*args, **kwargs):
    return sklearn.ensemble.AdaBoostClassifier(*args, **kwargs)
@scope.define
def sklearn_AdaBoostRegressor(*args, **kwargs):
    return sklearn.ensemble.AdaBoostRegressor(*args, **kwargs)
@scope.define
def sklearn_GradientBoostingClassifier(*args, **kwargs):
    return sklearn.ensemble.GradientBoostingClassifier(*args, **kwargs)
@scope.define
def sklearn_GradientBoostingRegressor(*args, **kwargs):
    return sklearn.ensemble.GradientBoostingRegressor(*args, **kwargs)
@scope.define
def sklearn_RandomForestClassifier(*args, **kwargs):
    return sklearn.ensemble.RandomForestClassifier(*args, **kwargs)
@scope.define
def sklearn_RandomForestRegressor(*args, **kwargs):
    return sklearn.ensemble.RandomForestRegressor(*args, **kwargs)
@scope.define
def sklearn_ExtraTreesClassifier(*args, **kwargs):
    return sklearn.ensemble.ExtraTreesClassifier(*args, **kwargs)
@scope.define
def sklearn_ExtraTreesRegressor(*args, **kwargs):
    return sklearn.ensemble.ExtraTreesRegressor(*args, **kwargs)
@scope.define
def sklearn_DecisionTreeClassifier(*args, **kwargs):
    return sklearn.tree.DecisionTreeClassifier(*args, **kwargs)
@scope.define
def sklearn_Lasso(*args, **kwargs):
    return sklearn.linear_model.Lasso(*args, **kwargs)
@scope.define
def sklearn_ElasticNet(*args, **kwargs):
    return sklearn.linear_model.ElasticNet(*args, **kwargs)
@scope.define
def sklearn_SGDClassifier(*args, **kwargs):
    return sklearn.linear_model.SGDClassifier(*args, **kwargs)
@scope.define
def sklearn_SGDRegressor(*args, **kwargs):
    return sklearn.linear_model.SGDRegressor(*args, **kwargs)
# xgboost is an optional dependency (see the guarded import at the top of
# this module); these wrappers fail lazily at construction time.
@scope.define
def sklearn_XGBClassifier(*args, **kwargs):
    if xgboost is None:
        raise ImportError('No module named xgboost')
    return xgboost.XGBClassifier(*args, **kwargs)
@scope.define
def sklearn_XGBRegressor(*args, **kwargs):
    if xgboost is None:
        raise ImportError('No module named xgboost')
    return xgboost.XGBRegressor(*args, **kwargs)
# @scope.define
# def sklearn_Ridge(*args, **kwargs):
#     return sklearn.linear_model.Ridge(*args, **kwargs)
@scope.define
def sklearn_PassiveAggressiveClassifier(*args, **kwargs):
    return sklearn.linear_model.PassiveAggressiveClassifier(*args, **kwargs)
@scope.define
def sklearn_LinearDiscriminantAnalysis(*args, **kwargs):
    return sklearn.discriminant_analysis.LinearDiscriminantAnalysis(*args, **kwargs)
@scope.define
def sklearn_QuadraticDiscriminantAnalysis(*args, **kwargs):
    return sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis(*args, **kwargs)
@scope.define
def sklearn_MultinomialNB(*args, **kwargs):
    return sklearn.naive_bayes.MultinomialNB(*args, **kwargs)
@scope.define
def sklearn_GaussianNB(*args, **kwargs):
    return sklearn.naive_bayes.GaussianNB(*args, **kwargs)
@scope.define
def sklearn_OneVsRestClassifier(*args, **kwargs):
    return sklearn.multiclass.OneVsRestClassifier(*args, **kwargs)
@scope.define
def sklearn_OneVsOneClassifier(*args, **kwargs):
    return sklearn.multiclass.OneVsOneClassifier(*args, **kwargs)
@scope.define
def sklearn_OutputCodeClassifier(*args, **kwargs):
    return sklearn.multiclass.OutputCodeClassifier(*args, **kwargs)
@scope.define
def sklearn_PCA(*args, **kwargs):
    return sklearn.decomposition.PCA(*args, **kwargs)
@scope.define
def sklearn_Tfidf(*args, **kwargs):
    return sklearn.feature_extraction.text.TfidfVectorizer(*args, **kwargs)
@scope.define
def sklearn_StandardScaler(*args, **kwargs):
    return sklearn.preprocessing.StandardScaler(*args, **kwargs)
@scope.define
def sklearn_MinMaxScaler(*args, **kwargs):
    return sklearn.preprocessing.MinMaxScaler(*args, **kwargs)
@scope.define
def sklearn_Normalizer(*args, **kwargs):
    return sklearn.preprocessing.Normalizer(*args, **kwargs)
@scope.define
def sklearn_OneHotEncoder(*args, **kwargs):
    return sklearn.preprocessing.OneHotEncoder(*args, **kwargs)
@scope.define
def sklearn_BernoulliRBM(*args, **kwargs):
    return sklearn.neural_network.BernoulliRBM(*args, **kwargs)
@scope.define
def sklearn_ColumnKMeans(*args, **kwargs):
    return ColumnKMeans(*args, **kwargs)
@scope.define
def sklearn_GaussianRandomProjection(*args, **kwargs):
    return sklearn.random_projection.GaussianRandomProjection(*args, **kwargs)
@scope.define
def sklearn_SparseRandomProjection(*args, **kwargs):
    return sklearn.random_projection.SparseRandomProjection(*args, **kwargs)
@scope.define
def patience_param(x):
    """
    Mark a hyperparameter as having a simple monotonic increasing
    relationship with both CPU time and the goodness of the model.
    """
    # -- TODO: make this do something!
    # Currently an identity pass-through; the annotation is not yet used.
    return x
@scope.define
def inv_patience_param(x):
    """
    Mark a hyperparameter as having a simple monotonic decreasing
    relationship with both CPU time and the goodness of the model.
    """
    # -- TODO: make this do something!
    # Currently an identity pass-through; the annotation is not yet used.
    return x
##############################
##==== Global variables ====##
##############################
_svm_default_cache_size = 512
###############################################
##==== Various hyperparameter generators ====##
###############################################
def hp_bool(name):
    """Search-space stub for a boolean hyperparameter (False or True)."""
    options = [False, True]
    return hp.choice(name, options)
def _svm_gamma(name, n_features=1):
    '''Generator of default gamma values for SVMs.
    This setting is based on the following rationales:
    1. The gamma hyperparameter is basically an amplifier for the
    original dot product or l2 norm.
    2. The original dot product or l2 norm shall be normalized by
    the number of features first.
    '''
    # -- making these non-conditional variables
    # probably helps the GP algorithm generalize
    # assert n_features >= 1
    # Log-uniform over [1e-3/n_features, 1e3/n_features].
    return hp.loguniform(name,
                         np.log(1. / n_features * 1e-3),
                         np.log(1. / n_features * 1e3))
# Polynomial kernel degree: integer values in roughly 2..6.
def _svm_degree(name):
    return hp.quniform(name, 1.5, 6.5, 1)
# Iteration cap: log-uniform integer over roughly 1e7..1e9.
def _svm_max_iter(name):
    return hp.qloguniform(name, np.log(1e7), np.log(1e9), 1)
# Regularization strength C: log-uniform over 1e-5..1e5.
def _svm_C(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e5))
# Stopping tolerance: log-uniform over 1e-5..1e-2.
def _svm_tol(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e-2))
# Intercept scaling (liblinear): log-uniform over 0.1..10.
def _svm_int_scaling(name):
    return hp.loguniform(name, np.log(1e-1), np.log(1e1))
# SVR epsilon-tube width: log-uniform over 1e-3..1e3.
def _svm_epsilon(name):
    return hp.loguniform(name, np.log(1e-3), np.log(1e3))
def _svm_loss_penalty_dual(name):
    """
    The combination of penalty='l1' and loss='hinge' is not supported
    penalty='l2' and loss='hinge' is only supported when dual='true'
    penalty='l1' is only supported when dual='false'.
    """
    # Each option is a valid (loss, penalty, dual) triple for LinearSVC;
    # sampling them jointly avoids illegal combinations.
    return hp.choice(name, [
        ('hinge', 'l2', True),
        ('squared_hinge', 'l2', True),
        ('squared_hinge', 'l1', False),
        ('squared_hinge', 'l2', False)
    ])
def _knn_metric_p(name, sparse_data=False, metric=None, p=None):
if sparse_data:
return ('euclidean', 2)
elif metric == 'euclidean':
return (metric, 2)
elif metric == 'manhattan':
return (metric, 1)
elif metric == 'chebyshev':
return (metric, 0)
elif metric == 'minkowski':
assert p is not None
return (metric, p)
elif metric is None:
return hp.pchoice(name, [
(0.55, ('euclidean', 2)),
(0.15, ('manhattan', 1)),
(0.15, ('chebyshev', 0)),
(0.15, ('minkowski', _knn_p(name + '.p'))),
])
else:
return (metric, p) # undefined, simply return user input.
# Minkowski exponent p: integer values in roughly 3..5.
def _knn_p(name):
    return hp.quniform(name, 2.5, 5.5, 1)
# Number of neighbors: log-uniform integer in roughly 1..50.
def _knn_neighbors(name):
    return scope.int(hp.qloguniform(name, np.log(0.5), np.log(50.5), 1))
# Neighbor vote weighting scheme.
def _knn_weights(name):
    return hp.choice(name, ['uniform', 'distance'])
# Forest size: log-uniform integer in roughly 10..3000.
def _trees_n_estimators(name):
    return scope.int(hp.qloguniform(name, np.log(9.5), np.log(3000.5), 1))
# Split quality criterion for tree classifiers.
def _trees_criterion(name):
    return hp.choice(name, ['gini', 'entropy'])
def _trees_max_features(name):
    # Weighted choice: mostly 'sqrt', sometimes 'log2', all features, or a
    # uniformly random fraction of features.
    return hp.pchoice(name, [
        (0.2, 'sqrt'),  # most common choice.
        (0.1, 'log2'),  # less common choice.
        (0.1, None),  # all features, less common choice.
        (0.6, hp.uniform(name + '.frac', 0., 1.))
    ])
def _trees_max_depth(name):
    return hp.pchoice(name, [
        (0.7, None),  # most common choice.
        # Try some shallow trees.
        (0.1, 2),
        (0.1, 3),
        (0.1, 4),
    ])
def _trees_min_samples_split(name):
    # Fixed at sklearn's default; `name` kept for interface symmetry with
    # the other generators.
    return 2
def _trees_min_samples_leaf(name):
    return hp.choice(name, [
        1,  # most common choice.
        scope.int(hp.qloguniform(name + '.gt1', np.log(1.5), np.log(50.5), 1))
    ])
# Whether to bootstrap-sample rows when building each tree.
def _trees_bootstrap(name):
    return hp.choice(name, [True, False])
# Boosting ensemble size: log-uniform integer in roughly 10..1000.
def _boosting_n_estimators(name):
    return scope.int(hp.qloguniform(name, np.log(10.5), np.log(1000.5), 1))
# AdaBoost learning rate: log-normal centered near 0.01, wide spread.
def _ada_boost_learning_rate(name):
    return hp.lognormal(name, np.log(0.01), np.log(10.0))
# Loss options for AdaBoostRegressor.
def _ada_boost_loss(name):
    return hp.choice(name, ['linear', 'square', 'exponential'])
# Boosting algorithm variants for AdaBoostClassifier.
def _ada_boost_algo(name):
    return hp.choice(name, ['SAMME', 'SAMME.R'])
def _grad_boosting_reg_loss_alpha(name):
    # Joint (loss, alpha) choice: alpha only matters for huber/quantile,
    # so it is tied to the loss rather than sampled independently.
    return hp.choice(name, [
        ('ls', 0.9),
        ('lad', 0.9),
        ('huber', hp.uniform(name + '.alpha', 0.85, 0.95)),
        ('quantile', 0.5)
    ])
# Loss options for GradientBoostingClassifier.
def _grad_boosting_clf_loss(name):
    return hp.choice(name, ['deviance', 'exponential'])
# Learning rate: log-normal centered near 0.01.
def _grad_boosting_learning_rate(name):
    return hp.lognormal(name, np.log(0.01), np.log(10.0))
def _grad_boosting_subsample(name):
    return hp.pchoice(name, [
        (0.2, 1.0),  # default choice.
        (0.8, hp.uniform(name + '.sgb', 0.5, 1.0))  # stochastic grad boosting.
    ])
# Regularization penalty for SGD models, biased towards l2.
def _sgd_penalty(name):
    return hp.pchoice(name, [
        (0.40, 'l2'),
        (0.35, 'l1'),
        (0.25, 'elasticnet')
    ])
# Regularization strength: log-uniform over 1e-6..1e-1.
def _sgd_alpha(name):
    return hp.loguniform(name, np.log(1e-6), np.log(1e-1))
# Elastic-net mixing parameter in [0, 1].
def _sgd_l1_ratio(name):
    return hp.uniform(name, 0, 1)
# Epsilon for epsilon-insensitive / huber losses.
def _sgd_epsilon(name):
    return hp.loguniform(name, np.log(1e-7), np.log(1))
# Learning-rate schedule for the SGD classifier (favors 'optimal').
def _sgdc_learning_rate(name):
    return hp.pchoice(name, [
        (0.50, 'optimal'),
        (0.25, 'invscaling'),
        (0.25, 'constant')
    ])
# Learning-rate schedule for the SGD regressor (favors 'invscaling').
def _sgdr_learning_rate(name):
    return hp.pchoice(name, [
        (0.50, 'invscaling'),
        (0.25, 'optimal'),
        (0.25, 'constant')
    ])
# Initial learning rate: log-uniform over 1e-5..1e-1.
def _sgd_eta0(name):
    return hp.loguniform(name, np.log(1e-5), np.log(1e-1))
# Inverse-scaling exponent in [0, 1].
def _sgd_power_t(name):
    return hp.uniform(name, 0, 1)
def _random_state(name, random_state):
    # When no seed is supplied, search over a few integer seeds (0..4) so
    # the seed itself becomes a tunable hyperparameter; otherwise pass the
    # caller's value through unchanged.
    if random_state is None:
        return hp.randint(name, 5)
    else:
        return random_state
# Class-weighting scheme: unweighted or inverse-frequency 'balanced'.
def _class_weight(name):
    return hp.choice(name, [None, 'balanced'])
##############################################
##==== SVM hyperparameters search space ====##
##############################################
def _svm_hp_space(
        name_func,
        kernel,
        n_features=1,
        C=None,
        gamma=None,
        coef0=None,
        degree=None,
        shrinking=None,
        tol=None,
        max_iter=None,
        verbose=False,
        cache_size=_svm_default_cache_size):
    '''Generate the common SVM hyperparameter search space.

    Args:
        name_func: callable mapping a short label to a unique hp name.
        kernel: one of 'linear', 'rbf', 'poly', 'sigmoid'.
        n_features: divisor applied to gamma so that its scale is
            independent of the input width.
        C, gamma, coef0, degree, shrinking, tol, max_iter: fixed values,
            or None to search their default distributions.

    Returns:
        dict of keyword arguments for an sklearn SVC/SVR constructor.

    Raises:
        ValueError: if coef0 must be inferred for an unrecognized kernel
            (previously this branch silently ``pass``ed, leaving ``coef0_``
            unbound and raising a confusing NameError below).
    '''
    if kernel in ['linear', 'rbf', 'sigmoid']:
        degree_ = 1  # degree only matters for the poly kernel
    else:
        degree_ = (_svm_degree(name_func('degree'))
                   if degree is None else degree)
    if kernel in ['linear']:
        gamma_ = 'auto'  # linear kernel ignores gamma
    else:
        gamma_ = (_svm_gamma(name_func('gamma'), n_features=1)
                  if gamma is None else gamma)
        gamma_ /= n_features  # make gamma independent of n_features.
    if kernel in ['linear', 'rbf']:
        coef0_ = 0.0  # coef0 is unused by linear/rbf kernels
    elif coef0 is None:
        if kernel == 'poly':
            coef0_ = hp.pchoice(name_func('coef0'), [
                (0.3, 0),
                (0.7, gamma_ * hp.uniform(name_func('coef0val'), 0., 10.))
            ])
        elif kernel == 'sigmoid':
            coef0_ = hp.pchoice(name_func('coef0'), [
                (0.3, 0),
                (0.7, gamma_ * hp.uniform(name_func('coef0val'), -10., 10.))
            ])
        else:
            # Bug fix: was `pass`, which left coef0_ unbound and produced
            # a NameError when building hp_space for unknown kernels.
            raise ValueError('Unknown SVM kernel: %r' % (kernel,))
    else:
        coef0_ = coef0
    hp_space = dict(
        kernel=kernel,
        C=_svm_C(name_func('C')) if C is None else C,
        gamma=gamma_,
        coef0=coef0_,
        degree=degree_,
        shrinking=(hp_bool(name_func('shrinking'))
                   if shrinking is None else shrinking),
        tol=_svm_tol(name_func('tol')) if tol is None else tol,
        max_iter=(_svm_max_iter(name_func('maxiter'))
                  if max_iter is None else max_iter),
        verbose=verbose,
        cache_size=cache_size)
    return hp_space
def _svc_hp_space(name_func, random_state=None, probability=False):
    '''Generate the SVC-specific hyperparameters (seed and probability).'''
    return dict(
        random_state=_random_state(name_func('rstate'), random_state),
        probability=probability,
    )
def _svr_hp_space(name_func, epsilon=None):
'''Generate SVR specific hyperparamters
'''
hp_space = {}
hp_space['epsilon'] = (_svm_epsilon(name_func('epsilon'))
if epsilon is None else epsilon)
return hp_space
#########################################
##==== SVM classifier constructors ====##
#########################################
def svc_kernel(name, kernel, random_state=None, probability=False, **kwargs):
    """
    Return a pyll graph with hyperparamters that will construct
    a sklearn.svm.SVC model with a user specified kernel.
    See help(hpsklearn.components._svm_hp_space) for info on additional SVM
    arguments.
    """
    def _name(msg):
        # Namespace hp labels as "<name>.<kernel>_<msg>" so different
        # kernels get distinct hyperparameters.
        return '%s.%s_%s' % (name, kernel, msg)
    # Common SVM space plus SVC-only parameters.
    hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)
    hp_space.update(_svc_hp_space(_name, random_state, probability))
    return scope.sklearn_SVC(**hp_space)
# -- Fixed-kernel conveniences: thin wrappers over svc_kernel.
def svc_linear(name, **kwargs):
    '''Simply use the svc_kernel function with kernel fixed as linear to
    return an SVC object.
    '''
    return svc_kernel(name, kernel='linear', **kwargs)
def svc_rbf(name, **kwargs):
    '''Simply use the svc_kernel function with kernel fixed as rbf to
    return an SVC object.
    '''
    return svc_kernel(name, kernel='rbf', **kwargs)
def svc_poly(name, **kwargs):
    '''Simply use the svc_kernel function with kernel fixed as poly to
    return an SVC object.
    '''
    return svc_kernel(name, kernel='poly', **kwargs)
def svc_sigmoid(name, **kwargs):
    '''Simply use the svc_kernel function with kernel fixed as sigmoid to
    return an SVC object.
    '''
    return svc_kernel(name, kernel='sigmoid', **kwargs)
def svc(name, kernels=('linear', 'rbf', 'poly', 'sigmoid'), **kwargs):
    """Search space that chooses among SVC kernels.

    Args:
        name: prefix used for all hyperparameter labels.
        kernels: iterable of kernel names to include in the search.
            Changed from a mutable list default to a tuple to avoid the
            shared-mutable-default-argument pitfall; accepted values are
            unchanged.
        **kwargs: forwarded to every kernel-specific constructor.

    Returns:
        A single kernel's search space when one kernel is requested,
        otherwise an hp.choice over the requested kernels.
    """
    svms = {
        'linear': partial(svc_linear, name=name),
        'rbf': partial(svc_rbf, name=name),
        'poly': partial(svc_poly, name=name),
        'sigmoid': partial(svc_sigmoid, name=name),
    }
    choices = [svms[kern](**kwargs) for kern in kernels]
    if len(choices) == 1:
        return choices[0]
    # Let hyperopt pick which kernel to use.
    return hp.choice('%s.kernel' % name, choices)
########################################
##==== SVM regressor constructors ====##
########################################
def svr_kernel(name, kernel, epsilon=None, **kwargs):
    """
    Return a pyll graph with hyperparamters that will construct
    a sklearn.svm.SVR model with a user specified kernel.
    Args:
        epsilon([float]): tolerance on regression errors.
    See help(hpsklearn.components._svm_hp_space) for info on additional SVM
    arguments.
    """
    def _name(msg):
        # Namespace hp labels as "<name>.<kernel>_<msg>".
        return '%s.%s_%s' % (name, kernel, msg)
    # Common SVM space plus the SVR-only epsilon parameter.
    hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)
    hp_space.update(_svr_hp_space(_name, epsilon))
    return scope.sklearn_SVR(**hp_space)
# -- Fixed-kernel conveniences: thin wrappers over svr_kernel.
def svr_linear(name, **kwargs):
    '''Simply use the svr_kernel function with kernel fixed as linear to
    return an SVR object.
    '''
    return svr_kernel(name, kernel='linear', **kwargs)
def svr_rbf(name, **kwargs):
    '''Simply use the svr_kernel function with kernel fixed as rbf to
    return an SVR object.
    '''
    return svr_kernel(name, kernel='rbf', **kwargs)
def svr_poly(name, **kwargs):
    '''Simply use the svr_kernel function with kernel fixed as poly to
    return an SVR object.
    '''
    return svr_kernel(name, kernel='poly', **kwargs)
def svr_sigmoid(name, **kwargs):
    '''Simply use the svr_kernel function with kernel fixed as sigmoid to
    return an SVR object.
    '''
    return svr_kernel(name, kernel='sigmoid', **kwargs)
def svr(name, kernels=('linear', 'rbf', 'poly', 'sigmoid'), **kwargs):
    """Search space that chooses among SVR kernels.

    Args:
        name: prefix used for all hyperparameter labels.
        kernels: iterable of kernel names to include in the search.
            Changed from a mutable list default to a tuple to avoid the
            shared-mutable-default-argument pitfall; accepted values are
            unchanged.
        **kwargs: forwarded to every kernel-specific constructor.

    Returns:
        A single kernel's search space when one kernel is requested,
        otherwise an hp.choice over the requested kernels.
    """
    svms = {
        'linear': partial(svr_linear, name=name),
        'rbf': partial(svr_rbf, name=name),
        'poly': partial(svr_poly, name=name),
        'sigmoid': partial(svr_sigmoid, name=name),
    }
    choices = [svms[kern](**kwargs) for kern in kernels]
    if len(choices) == 1:
        return choices[0]
    # Let hyperopt pick which kernel to use.
    return hp.choice('%s.kernel' % name, choices)
##################################################
##==== Liblinear SVM classifier constructor ====##
##################################################
def liblinear_svc(name,
                  C=None,
                  loss=None,
                  penalty=None,
                  dual=None,
                  tol=None,
                  multi_class=None,
                  fit_intercept=True,
                  intercept_scaling=None,
                  class_weight='choose',
                  random_state=None,
                  verbose=False,
                  max_iter=1000):
    """Search space for sklearn.svm.LinearSVC.

    Any of C/loss/penalty/dual/tol/multi_class/intercept_scaling left as
    None is searched over; fixed values are passed through unchanged.
    class_weight='choose' (the default) searches over {None, 'balanced'}.
    """
    def _name(msg):
        return '%s.%s_%s' % (name, 'linear_svc', msg)
    # loss/penalty/dual are sampled jointly because only some
    # combinations are valid for LinearSVC (see _svm_loss_penalty_dual).
    loss_penalty_dual = _svm_loss_penalty_dual(_name('loss_penalty_dual'))
    rval = scope.sklearn_LinearSVC(
        C=_svm_C(_name('C')) if C is None else C,
        loss=loss_penalty_dual[0] if loss is None else loss,
        penalty=loss_penalty_dual[1] if penalty is None else penalty,
        dual=loss_penalty_dual[2] if dual is None else dual,
        tol=_svm_tol(_name('tol')) if tol is None else tol,
        multi_class=(hp.choice(_name('multiclass'), ['ovr', 'crammer_singer'])
                     if multi_class is None else multi_class),
        fit_intercept=fit_intercept,
        intercept_scaling=(_svm_int_scaling(_name('intscaling'))
                           if intercept_scaling is None else intercept_scaling),
        class_weight=(_class_weight(_name('clsweight'))
                      if class_weight == 'choose' else class_weight),
        random_state=_random_state(_name('rstate'), random_state),
        verbose=verbose,
        max_iter=max_iter,
    )
    return rval
##############################################
##==== KNN hyperparameters search space ====##
##############################################
def _knn_hp_space(
        name_func,
        sparse_data=False,
        n_neighbors=None,
        weights=None,
        algorithm='auto',
        leaf_size=30,
        metric=None,
        p=None,
        metric_params=None,
        n_jobs=1):
    '''Generate KNN hyperparameters search space

    Parameters left as None (n_neighbors, weights, metric, p) are searched
    over; fixed values are passed through unchanged.
    '''
    # metric and p are resolved together since they are coupled
    # (see _knn_metric_p for the valid combinations).
    metric_p = _knn_metric_p(name_func('metric_p'), sparse_data, metric, p)
    hp_space = dict(
        n_neighbors=(_knn_neighbors(name_func('neighbors'))
                     if n_neighbors is None else n_neighbors),
        weights=(_knn_weights(name_func('weights'))
                 if weights is None else weights),
        algorithm=algorithm,
        leaf_size=leaf_size,
        metric=metric_p[0] if metric is None else metric,
        p=metric_p[1] if p is None else p,
        metric_params=metric_params,
        n_jobs=n_jobs)
    return hp_space
###################################################
##==== KNN classifier/regressor constructors ====##
###################################################
def knn(name, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.neighbors.KNeighborsClassifier model.
    See help(hpsklearn.components._knn_hp_space) for info on available KNN
    arguments.
    '''
    def _name(msg):
        # 'knc' namespaces this classifier's hyperparameter labels.
        return '%s.%s_%s' % (name, 'knc', msg)
    hp_space = _knn_hp_space(_name, **kwargs)
    return scope.sklearn_KNeighborsClassifier(**hp_space)
def knn_regression(name, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.neighbors.KNeighborsRegressor model.
    See help(hpsklearn.components._knn_hp_space) for info on available KNN
    arguments.
    '''
    def _name(msg):
        # 'knr' namespaces this regressor's hyperparameter labels.
        return '%s.%s_%s' % (name, 'knr', msg)
    hp_space = _knn_hp_space(_name, **kwargs)
    return scope.sklearn_KNeighborsRegressor(**hp_space)
####################################################################
##==== Random forest/extra trees hyperparameters search space ====##
####################################################################
def _trees_hp_space(
        name_func,
        n_estimators=None,
        max_features=None,
        max_depth=None,
        min_samples_split=None,
        min_samples_leaf=None,
        bootstrap=None,
        oob_score=False,
        n_jobs=1,
        random_state=None,
        verbose=False):
    '''Generate trees ensemble hyperparameters search space

    Shared by the random forest and extra-trees constructors. Parameters
    left as None are searched over; fixed values pass through unchanged.
    '''
    hp_space = dict(
        n_estimators=(_trees_n_estimators(name_func('n_estimators'))
                      if n_estimators is None else n_estimators),
        max_features=(_trees_max_features(name_func('max_features'))
                      if max_features is None else max_features),
        max_depth=(_trees_max_depth(name_func('max_depth'))
                   if max_depth is None else max_depth),
        min_samples_split=(_trees_min_samples_split(name_func('min_samples_split'))
                           if min_samples_split is None else min_samples_split),
        min_samples_leaf=(_trees_min_samples_leaf(name_func('min_samples_leaf'))
                          if min_samples_leaf is None else min_samples_leaf),
        bootstrap=(_trees_bootstrap(name_func('bootstrap'))
                   if bootstrap is None else bootstrap),
        oob_score=oob_score,
        n_jobs=n_jobs,
        random_state=_random_state(name_func('rstate'), random_state),
        verbose=verbose,
    )
    return hp_space
#############################################################
##==== Random forest classifier/regressor constructors ====##
#############################################################
def random_forest(name, criterion=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.RandomForestClassifier model.
    Args:
        criterion([str]): choose 'gini' or 'entropy'.
    See help(hpsklearn.components._trees_hp_space) for info on additional
    available random forest/extra trees arguments.
    '''
    def _name(msg):
        # 'rfc' namespaces this classifier's hyperparameter labels.
        return '%s.%s_%s' % (name, 'rfc', msg)
    hp_space = _trees_hp_space(_name, **kwargs)
    # criterion is classifier-specific, so it is added on top of the
    # shared trees space.
    hp_space['criterion'] = (_trees_criterion(_name('criterion'))
                             if criterion is None else criterion)
    return scope.sklearn_RandomForestClassifier(**hp_space)
def random_forest_regression(name, criterion='mse', **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.RandomForestRegressor model.
    Args:
        criterion([str]): 'mse' is the only choice.
    See help(hpsklearn.components._trees_hp_space) for info on additional
    available random forest/extra trees arguments.
    '''
    # NOTE(review): 'mse' was renamed 'squared_error' in newer sklearn
    # releases -- confirm the pinned sklearn version accepts 'mse'.
    def _name(msg):
        return '%s.%s_%s' % (name, 'rfr', msg)
    hp_space = _trees_hp_space(_name, **kwargs)
    hp_space['criterion'] = criterion
    return scope.sklearn_RandomForestRegressor(**hp_space)
###################################################
##==== AdaBoost hyperparameters search space ====##
###################################################
def _ada_boost_hp_space(
        name_func,
        base_estimator=None,
        n_estimators=None,
        learning_rate=None,
        random_state=None):
    '''Generate AdaBoost hyperparameters search space

    n_estimators and learning_rate are searched when None; base_estimator
    passes through unchanged (None means sklearn's default stump).
    '''
    hp_space = dict(
        base_estimator=base_estimator,
        n_estimators=(_boosting_n_estimators(name_func('n_estimators'))
                      if n_estimators is None else n_estimators),
        learning_rate=(_ada_boost_learning_rate(name_func('learning_rate'))
                       if learning_rate is None else learning_rate),
        random_state=_random_state(name_func('rstate'), random_state)
    )
    return hp_space
########################################################
##==== AdaBoost classifier/regressor constructors ====##
########################################################
def ada_boost(name, algorithm=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.AdaBoostClassifier model.
    Args:
        algorithm([str]): choose from ['SAMME', 'SAMME.R']
    See help(hpsklearn.components._ada_boost_hp_space) for info on
    additional available AdaBoost arguments.
    '''
    def _name(msg):
        return '%s.%s_%s' % (name, 'ada_boost', msg)
    hp_space = _ada_boost_hp_space(_name, **kwargs)
    # algorithm is classifier-specific; searched when not fixed.
    hp_space['algorithm'] = (_ada_boost_algo(_name('algo')) if
                             algorithm is None else algorithm)
    return scope.sklearn_AdaBoostClassifier(**hp_space)
def ada_boost_regression(name, loss=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.AdaBoostRegressor model.
    Args:
        loss([str]): choose from ['linear', 'square', 'exponential']
    See help(hpsklearn.components._ada_boost_hp_space) for info on
    additional available AdaBoost arguments.
    '''
    def _name(msg):
        return '%s.%s_%s' % (name, 'ada_boost_reg', msg)
    hp_space = _ada_boost_hp_space(_name, **kwargs)
    # loss is regressor-specific; searched when not fixed.
    hp_space['loss'] = (_ada_boost_loss(_name('loss')) if
                        loss is None else loss)
    return scope.sklearn_AdaBoostRegressor(**hp_space)
###########################################################
##==== GradientBoosting hyperparameters search space ====##
###########################################################
def _grad_boosting_hp_space(
        name_func,
        learning_rate=None,
        n_estimators=None,
        subsample=None,
        min_samples_split=None,
        min_samples_leaf=None,
        max_depth=None,
        init=None,
        random_state=None,
        max_features=None,
        verbose=0,
        max_leaf_nodes=None,
        warm_start=False,
        presort='auto'):
    '''Generate GradientBoosting hyperparameters search space.

    Parameters left as None are searched over; fixed values pass through.

    Bug fix: ``verbose`` and ``max_leaf_nodes`` were accepted but silently
    dropped from the returned space; they are now forwarded. Their default
    values match sklearn's own defaults, so existing callers see no
    behavioral change.
    '''
    hp_space = dict(
        learning_rate=(_grad_boosting_learning_rate(name_func('learning_rate'))
                       if learning_rate is None else learning_rate),
        n_estimators=(_boosting_n_estimators(name_func('n_estimators'))
                      if n_estimators is None else n_estimators),
        subsample=(_grad_boosting_subsample(name_func('subsample'))
                   if subsample is None else subsample),
        min_samples_split=(_trees_min_samples_split(name_func('min_samples_split'))
                           if min_samples_split is None else min_samples_split),
        min_samples_leaf=(_trees_min_samples_leaf(name_func('min_samples_leaf'))
                          if min_samples_leaf is None else min_samples_leaf),
        max_depth=(_trees_max_depth(name_func('max_depth'))
                   if max_depth is None else max_depth),
        init=init,
        random_state=_random_state(name_func('rstate'), random_state),
        max_features=(_trees_max_features(name_func('max_features'))
                      if max_features is None else max_features),
        verbose=verbose,
        max_leaf_nodes=max_leaf_nodes,
        warm_start=warm_start,
        presort=presort
    )
    return hp_space
################################################################
##==== GradientBoosting classifier/regressor constructors ====##
################################################################
def gradient_boosting(name, loss=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.GradientBoostingClassifier model.
    Args:
        loss([str]): choose from ['deviance', 'exponential']
    See help(hpsklearn.components._grad_boosting_hp_space) for info on
    additional available GradientBoosting arguments.
    '''
    def _name(msg):
        return '%s.%s_%s' % (name, 'gradient_boosting', msg)
    hp_space = _grad_boosting_hp_space(_name, **kwargs)
    # loss is classifier-specific; searched when not fixed.
    hp_space['loss'] = (_grad_boosting_clf_loss(_name('loss'))
                        if loss is None else loss)
    return scope.sklearn_GradientBoostingClassifier(**hp_space)
def gradient_boosting_regression(name, loss=None, alpha=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.GradientBoostingRegressor model.
    Args:
        loss([str]): choose from ['ls', 'lad', 'huber', 'quantile']
        alpha([float]): alpha parameter for huber and quantile losses.
                        Must be within [0.0, 1.0].
    See help(hpsklearn.components._grad_boosting_hp_space) for info on
    additional available GradientBoosting arguments.
    '''
    def _name(msg):
        return '%s.%s_%s' % (name, 'gradient_boosting_reg', msg)
    # loss and alpha are sampled jointly (alpha only matters for
    # huber/quantile); fixed values override the sampled pair.
    loss_alpha = _grad_boosting_reg_loss_alpha(_name('loss_alpha'))
    hp_space = _grad_boosting_hp_space(_name, **kwargs)
    hp_space['loss'] = loss_alpha[0] if loss is None else loss
    hp_space['alpha'] = loss_alpha[1] if alpha is None else alpha
    return scope.sklearn_GradientBoostingRegressor(**hp_space)
###########################################################
##==== Extra trees classifier/regressor constructors ====##
###########################################################
def extra_trees(name, criterion=None, **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.ExtraTreesClassifier model.
    Args:
        criterion([str]): choose 'gini' or 'entropy'.
    See help(hpsklearn.components._trees_hp_space) for info on additional
    available random forest/extra trees arguments.
    '''
    def _name(msg):
        # 'etc' namespaces this classifier's hyperparameter labels.
        return '%s.%s_%s' % (name, 'etc', msg)
    hp_space = _trees_hp_space(_name, **kwargs)
    hp_space['criterion'] = (_trees_criterion(_name('criterion'))
                             if criterion is None else criterion)
    return scope.sklearn_ExtraTreesClassifier(**hp_space)
def extra_trees_regression(name, criterion='mse', **kwargs):
    '''
    Return a pyll graph with hyperparamters that will construct
    a sklearn.ensemble.ExtraTreesRegressor model.
    Args:
        criterion([str]): 'mse' is the only choice.
    See help(hpsklearn.components._trees_hp_space) for info on additional
    available random forest/extra trees arguments.
    '''
    # NOTE(review): 'mse' was renamed 'squared_error' in newer sklearn
    # releases -- confirm the pinned sklearn version accepts 'mse'.
    def _name(msg):
        return '%s.%s_%s' % (name, 'etr', msg)
    hp_space = _trees_hp_space(_name, **kwargs)
    hp_space['criterion'] = criterion
    return scope.sklearn_ExtraTreesRegressor(**hp_space)
##################################################
##==== Decision tree classifier constructor ====##
##################################################
def decision_tree(name,
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | true |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/demo_support.py | training/helpers/hyperopt-sklearn/hpsklearn/demo_support.py | import time
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
def scatter_error_vs_time(estimator, ax):
    """Scatter each trial's validation loss against its iteration index."""
    losses = estimator.trials.losses()
    ax.set_ylabel('Validation error rate')
    ax.set_xlabel('Iteration')
    ax.scatter(list(range(len(losses))), losses)
def plot_minvalid_vs_time(estimator, ax, ylim=None):
    """Plot the best-so-far validation loss as a function of iteration."""
    losses = estimator.trials.losses()
    # NOTE(review): range stops at len(losses) - 1, so the final trial's
    # loss is excluded from the running minimum -- possible off-by-one;
    # confirm intended behavior.
    ts = list(range(1, len(losses)))
    # mins[k] = best loss among the first ts[k] trials.
    mins = [np.min(losses[:ii]) for ii in ts]
    ax.set_ylabel('min(Validation error rate to-date)')
    ax.set_xlabel('Iteration')
    if ylim:
        ax.set_ylim(*ylim)
    ax.plot(ts, mins)
class PlotHelper(object):
    """Live two-panel progress plot for use inside an IPython notebook."""
    def __init__(self, estimator, mintodate_ylim):
        self.estimator = estimator
        # Left panel: per-trial errors; right panel: running minimum.
        self.fig, self.axs = plt.subplots(1, 2)
        # Pause after each redraw so the notebook has time to render.
        self.post_iter_wait = .5
        self.mintodate_ylim = mintodate_ylim
    def post_iter(self):
        # Redraw both panels from scratch after each optimization step.
        self.axs[0].clear()
        self.axs[1].clear()
        scatter_error_vs_time(self.estimator, self.axs[0])
        plot_minvalid_vs_time(self.estimator, self.axs[1],
                              ylim=self.mintodate_ylim)
        display.clear_output()
        display.display(self.fig)
        time.sleep(self.post_iter_wait)
    def post_loop(self):
        # Remove the transient figure once optimization finishes.
        display.clear_output()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/__init__.py | training/helpers/hyperopt-sklearn/hpsklearn/__init__.py | import os
# Multi-threaded OpenBLAS is known to hang in forked subprocesses, so warn
# the user at import time if OMP_NUM_THREADS is not pinned to 1.
OMP_NUM_THREADS = os.environ.get('OMP_NUM_THREADS', None)
if OMP_NUM_THREADS != '1':
    print('WARN: OMP_NUM_THREADS=%s =>' % OMP_NUM_THREADS)
    # Bug fix: the warning text repeated "if you are using openblas" twice.
    print('... if you are using openblas'
          ' set OMP_NUM_THREADS=1'
          ' or risk subprocess calls hanging indefinitely')
from .estimator import hyperopt_estimator as HyperoptEstimator
from .components import *
# -- flake8
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_demo.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_demo.py | from __future__ import print_function
# import numpy as np
from sklearn import datasets
# Bug fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from hyperopt import tpe
import hpsklearn
import sys
def test_demo_iris():
    """End-to-end classification demo: search 10 models on the Iris set."""
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=.25, random_state=1)
    estimator = hpsklearn.HyperoptEstimator(
        preprocessing=hpsklearn.components.any_preprocessing('pp'),
        classifier=hpsklearn.components.any_classifier('clf'),
        algo=tpe.suggest,
        trial_timeout=15.0,  # seconds
        max_evals=10,
        seed=1
    )
    # /BEGIN `Demo version of estimator.fit()`
    print('', file=sys.stderr)
    print('====Demo classification on Iris dataset====', file=sys.stderr)
    # Drive the fit loop manually (one trial per .send) so progress can be
    # reported after every trial.
    iterator = estimator.fit_iter(X_train, y_train)
    next(iterator)
    n_trial = 0
    while len(estimator.trials.trials) < estimator.max_evals:
        iterator.send(1)  # -- try one more model
        n_trial += 1
        print('Trial', n_trial, 'loss:', estimator.trials.losses()[-1],
              file=sys.stderr)
    # hpsklearn.demo_support.scatter_error_vs_time(estimator)
    # hpsklearn.demo_support.bar_classifier_choice(estimator)
    estimator.retrain_best_model_on_full_data(X_train, y_train)
    # /END Demo version of `estimator.fit()`
    print('Test accuracy:', estimator.score(X_test, y_test), file=sys.stderr)
    print('====End of demo====', file=sys.stderr)
def test_demo_boston():
    """End-to-end regression demo: search 10 models on the Boston set."""
    # NOTE(review): datasets.load_boston was removed in scikit-learn 1.2;
    # this demo only runs on older sklearn versions -- confirm the pin.
    boston = datasets.load_boston()
    X_train, X_test, y_train, y_test = train_test_split(
        boston.data, boston.target, test_size=.25, random_state=1)
    estimator = hpsklearn.HyperoptEstimator(
        preprocessing=hpsklearn.components.any_preprocessing('pp'),
        regressor=hpsklearn.components.any_regressor('reg'),
        algo=tpe.suggest,
        trial_timeout=15.0,  # seconds
        max_evals=10,
        seed=1
    )
    # /BEGIN `Demo version of estimator.fit()`
    print('', file=sys.stderr)
    print('====Demo regression on Boston dataset====', file=sys.stderr)
    # Drive the fit loop manually (one trial per .send) so progress can be
    # reported after every trial.
    iterator = estimator.fit_iter(X_train, y_train)
    next(iterator)
    n_trial = 0
    while len(estimator.trials.trials) < estimator.max_evals:
        iterator.send(1)  # -- try one more model
        n_trial += 1
        print('Trial', n_trial, 'loss:', estimator.trials.losses()[-1],
              file=sys.stderr)
    # hpsklearn.demo_support.scatter_error_vs_time(estimator)
    # hpsklearn.demo_support.bar_classifier_choice(estimator)
    estimator.retrain_best_model_on_full_data(X_train, y_train)
    # /END Demo version of `estimator.fit()`
    print('Test R2:', estimator.score(X_test, y_test), file=sys.stderr)
    print('====End of demo====', file=sys.stderr)
# -- flake8 eof
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_classification.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_classification.py | try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from hyperopt import rand, tpe
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
class TestClassification(unittest.TestCase):
    """Smoke tests: each classifier search space must fit and score on a
    simple synthetic problem (label = sign of feature 0).

    Most test methods are generated and attached at module import time
    (see the setattr loops below this class).
    """
    def setUp(self):
        # Deterministic synthetic data; the multilabel targets use the
        # per-column signs instead of just column 0.
        np.random.seed(123)
        self.X_train = np.random.randn(1000, 2)
        self.Y_train = (self.X_train[:, 0] > 0).astype('int')
        self.Y_train_multilabel = (self.X_train[:, :] > 0).astype('int')
        self.X_test = np.random.randn(1000, 2)
        self.Y_test = (self.X_test[:, 0] > 0).astype('int')
        self.Y_test_multilabel = (self.X_test[:, :] > 0).astype('int')
    def test_multinomial_nb(self):
        # Written by hand (not generated) because MultinomialNB requires
        # non-negative inputs.
        model = hyperopt_estimator(
            classifier=components.multinomial_nb('classifier'),
            preprocessing=[],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        # Inputs for MultinomialNB must be non-negative
        model.fit(np.abs(self.X_train), self.Y_train)
        model.score(np.abs(self.X_test), self.Y_test)
def create_function(clf_fn):
    """Build a TestClassification method that smoke-tests `clf_fn`."""
    def test_classifier(self):
        model = hyperopt_estimator(
            classifier=clf_fn('classifier'),
            preprocessing=[],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        model.fit(self.X_train, self.Y_train)
        model.score(self.X_test, self.Y_test)
    # Name the generated method after the component so failures are
    # attributable to a specific classifier.
    test_classifier.__name__ = 'test_{0}'.format(clf_fn.__name__)
    return test_classifier
def create_multilabel_function(clf_fn):
    """Like create_function, but fits/scores against multilabel targets."""
    def test_classifier(self):
        model = hyperopt_estimator(
            classifier=clf_fn('classifier'),
            preprocessing=[],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        model.fit(self.X_train, self.Y_train_multilabel)
        model.score(self.X_test, self.Y_test_multilabel)
    test_classifier.__name__ = 'test_{0}'.format(clf_fn.__name__)
    return test_classifier
# List of classifiers to test
classifiers = [
    components.svc,
    components.svc_linear,
    components.svc_rbf,
    components.svc_poly,
    components.svc_sigmoid,
    components.liblinear_svc,
    components.knn,
    components.ada_boost,
    components.gradient_boosting,
    components.random_forest,
    components.extra_trees,
    components.decision_tree,
    components.sgd,
    #components.multinomial_nb, # special case to ensure non-negative inputs
    components.gaussian_nb,
    components.passive_aggressive,
    components.linear_discriminant_analysis,
    components.one_vs_one,
    components.output_code,
]
# Special case for classifiers supporting multiple labels
multilabel_classifiers = [
    components.one_vs_rest,
]
# Create unique methods with test_ prefix so that nose can see them
# (attached to TestClassification at import time).
for clf in classifiers:
    setattr(
        TestClassification,
        'test_{0}'.format(clf.__name__),
        create_function(clf)
    )
for clf in multilabel_classifiers:
    setattr(
        TestClassification,
        'test_{0}'.format(clf.__name__),
        create_multilabel_function(clf)
    )
# Only test the xgboost classifier if the optional dependency is installed
try:
    import xgboost
except ImportError:
    xgboost = None
if xgboost is not None:
    setattr(
        TestClassification,
        # Bug fix: previously formatted with `clf.__name__`, the leftover
        # loop variable from the multilabel loop above, so the xgboost test
        # silently overwrote another classifier's generated test method.
        'test_{0}'.format(components.xgboost_classification.__name__),
        create_function(components.xgboost_classification)
    )
if __name__ == '__main__':
    unittest.main()
# -- flake8 eof
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_stories.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_stories.py | """
Unit tests in the form of user stories.
These unit tests implicitly assert that their syntax is valid,
and that programs of the given form can run. They do not
test the correctness of the result.
"""
from __future__ import print_function
import sys
from functools import partial
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
import warnings
warnings.filterwarnings('ignore')
from hyperopt import hp
from hyperopt import tpe
from hyperopt.pyll import scope
from hpsklearn import components as hpc
try:
import skdata.iris.view as iris_view
except ImportError:
import skdata.iris.views as iris_view
try:
from skdata.base import SklearnClassifier as LearningAlgo
except ImportError:
from skdata.base import LearningAlgo as LearningAlgo
from hpsklearn.estimator import hyperopt_estimator
class SkdataInterface(unittest.TestCase):
    """User-story tests driven through skdata's iris K-fold protocol.

    Each test builds a ``LearningAlgo`` around a partially-applied
    ``hyperopt_estimator`` and reports the protocol's mean test error to
    stderr.  These are smoke tests: they check that the search runs, not
    that the error is good.
    """

    def setUp(self):
        # 4-fold cross-validation view of the iris data set.
        self.view = iris_view.KfoldClassification(4)

    def test_search_all(self):
        """
        As a ML researcher, I want a quick way to do model selection
        implicitly, in order to get a baseline accuracy score for a new data
        set.
        """
        algo = LearningAlgo(
            partial(hyperopt_estimator,
                    classifier=hpc.any_classifier('classifier'),
                    # trial_timeout=15.0,  # seconds
                    verbose=1,
                    max_evals=10,
                    ))
        mean_test_error = self.view.protocol(algo)
        print('\n====Iris: any preprocessing + any classifier====',
              file=sys.stderr)
        print('mean test error:', mean_test_error, file=sys.stderr)
        print('====End optimization====', file=sys.stderr)

    def test_pca_svm(self):
        """
        As a ML researcher, I want to evaluate a certain partly-defined model
        class, in order to do model-family comparisons.

        For example, PCA followed by linear SVM.
        """
        algo = LearningAlgo(
            partial(
                hyperopt_estimator,
                preprocessing=[hpc.pca('pca')],
                classifier=hpc.svc_linear('classif'),
                # trial_timeout=30.0,  # seconds
                verbose=1,
                max_evals=10))
        mean_test_error = self.view.protocol(algo)
        print('\n====Iris: PCA + SVM====', file=sys.stderr)
        print('mean test error:', mean_test_error, file=sys.stderr)
        print('====End optimization====', file=sys.stderr)

    def test_preproc(self):
        """
        As a domain expert, I have a particular pre-processing that I believe
        reveals important patterns in my data.  I would like to know how good
        a classifier can be built on top of my preprocessing algorithm.
        """
        # -- for testing purpose, suppose that the RBM is our "domain-specific
        #    pre-processing".  The search space chooses among three pipelines:
        #    VQ alone, VQ -> RBM, and VQ -> RBM -> PCA.
        algo = LearningAlgo(
            partial(
                hyperopt_estimator,
                preprocessing=hp.choice(
                    'pp',
                    [
                        # -- VQ (alone)
                        [
                            hpc.colkmeans(
                                'vq0',
                                n_clusters=scope.int(
                                    hp.quniform(
                                        'vq0.n_clusters', 1.5, 5.5, q=1)),
                                n_init=1,
                                max_iter=100),
                        ],
                        # -- VQ -> RBM
                        [
                            hpc.colkmeans(
                                'vq1',
                                n_clusters=scope.int(
                                    hp.quniform(
                                        'vq1.n_clusters', 1.5, 5.5, q=1)),
                                n_init=1,
                                max_iter=100),
                            hpc.rbm(name='rbm:alone',
                                    n_components=scope.int(
                                        hp.qloguniform(
                                            'rbm1.n_components',
                                            np.log(4.5), np.log(20.5), 1)),
                                    n_iter=100,
                                    verbose=0)
                        ],
                        # -- VQ -> RBM -> PCA
                        [
                            hpc.colkmeans(
                                'vq2',
                                n_clusters=scope.int(
                                    hp.quniform(
                                        'vq2.n_clusters', 1.5, 5.5, q=1)),
                                n_init=1,
                                max_iter=100),
                            hpc.rbm(name='rbm:pre-pca',
                                    n_components=scope.int(
                                        hp.qloguniform(
                                            'rbm2.n_components',
                                            np.log(4.5), np.log(20.5), 1)),
                                    n_iter=100,
                                    verbose=0),
                            hpc.pca('pca')
                        ],
                    ]),
                classifier=hpc.any_classifier('classif'),
                algo=tpe.suggest,
                # trial_timeout=5.0,  # seconds
                verbose=1,
                max_evals=10,
            ))
        mean_test_error = self.view.protocol(algo)
        print('\n====Iris: VQ + RBM + PCA + any classifier====',
              file=sys.stderr)
        print('mean test error:', mean_test_error, file=sys.stderr)
        # Route to stderr like every other message in this class so the
        # report is not split across output streams.
        print('====End optimization====', file=sys.stderr)
# -- TODO: develop tests with pure sklearn stories
# Allow invoking the user-story tests as a stand-alone script.
if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_regression.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_regression.py | try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from hyperopt import rand, tpe
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
class TestRegression(unittest.TestCase):
    """Fixture providing a noiseless linear target: y = 2 * x[:, 0]."""

    def setUp(self):
        # Fixed seed so every generated test method sees identical draws.
        np.random.seed(123)
        self.X_train = np.random.randn(1000, 2)
        self.Y_train = 2 * self.X_train[:, 0]
        self.X_test = np.random.randn(1000, 2)
        self.Y_test = 2 * self.X_test[:, 0]
def create_function(reg_fn):
    """Build a ``test_<regressor>`` method that fits and scores *reg_fn*."""

    def test_regressor(self):
        # A tiny random search keeps each generated smoke test fast.
        estimator = hyperopt_estimator(
            regressor=reg_fn('regressor'),
            preprocessing=[],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        estimator.fit(self.X_train, self.Y_train)
        estimator.score(self.X_test, self.Y_test)

    test_regressor.__name__ = 'test_{0}'.format(reg_fn.__name__)
    return test_regressor
# Regressors exercised by the generated smoke tests; each entry becomes
# its own ``test_*`` method so runners report them individually.
regressors = [
    components.svr,
    components.svr_linear,
    components.svr_rbf,
    components.svr_poly,
    components.svr_sigmoid,
    components.knn_regression,
    components.ada_boost_regression,
    components.gradient_boosting_regression,
    components.random_forest_regression,
    components.extra_trees_regression,
    components.sgd_regression,
    components.lasso,
    components.elasticnet,
]

# Create unique methods with test_ prefix so that nose can see them.
for reg in regressors:
    setattr(
        TestRegression,
        'test_{0}'.format(reg.__name__),
        create_function(reg)
    )

# Only test the xgboost regressor if the optional dependency is installed.
try:
    import xgboost
except ImportError:
    xgboost = None

if xgboost is not None:
    # The original referenced the undefined name ``clf`` here, which raised
    # NameError at import time whenever xgboost was installed; name the test
    # after the xgboost component itself.
    setattr(
        TestRegression,
        'test_{0}'.format(components.xgboost_regression.__name__),
        create_function(components.xgboost_regression)
    )
# Allow running the regression smoke tests as a stand-alone script.
if __name__ == '__main__':
    unittest.main()
# -- flake8 eof
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_estimator.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_estimator.py |
# Prefer the unittest2 backport when available (old Pythons); catch only
# ImportError so genuine failures inside unittest2 are not silently masked
# by the previous bare ``except:``.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
import numpy as np
from hyperopt import rand, tpe
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
class TestIter(unittest.TestCase):
    """Checks hyperopt_estimator's trial bookkeeping across fit variants."""

    def setUp(self):
        # Deterministic, linearly separable toy problem: the label is the
        # sign of the first feature.
        np.random.seed(123)
        self.X = np.random.randn(1000, 2)
        self.Y = (self.X[:, 0] > 0).astype('int')

    def test_fit_iter_basic(self):
        """fit_iter yields the shared trials object, one new trial per step."""
        model = hyperopt_estimator(
            classifier=components.any_classifier('classifier'),
            verbose=1, trial_timeout=5.0)
        for ii, trials in enumerate(model.fit_iter(self.X, self.Y)):
            assert trials is model.trials
            assert len(trials.trials) == ii
            if ii == 10:
                break

    def test_fit(self):
        """fit() runs exactly max_evals trials."""
        model = hyperopt_estimator(
            classifier=components.any_classifier('classifier'),
            verbose=1, max_evals=5, trial_timeout=5.0)
        model.fit(self.X, self.Y)
        assert len(model.trials.trials) == 5

    def test_fit_biginc(self):
        """max_evals caps the trial count even when fit_increment is larger."""
        model = hyperopt_estimator(
            classifier=components.any_classifier('classifier'),
            verbose=1, max_evals=5, trial_timeout=5.0, fit_increment=20)
        model.fit(self.X, self.Y)
        # -- make sure we only get 5 even with big fit_increment
        assert len(model.trials.trials) == 5

    def test_warm_start(self):
        """warm_start=True accumulates trials across successive fit() calls."""
        model = hyperopt_estimator(
            classifier=components.any_classifier('classifier'),
            verbose=1, max_evals=5, trial_timeout=5.0)
        params = model.get_params()
        # Asserts the estimator's default search algo is rand.suggest.
        assert params['algo'] == rand.suggest
        assert params['max_evals'] == 5
        model.fit(self.X, self.Y, warm_start=False)
        assert len(model.trials.trials) == 5
        model.set_params(algo=tpe.suggest, max_evals=10)
        params = model.get_params()
        assert params['algo'] == tpe.suggest
        assert params['max_evals'] == 10
        model.fit(self.X, self.Y, warm_start=True)
        assert len(model.trials.trials) == 15  # 5 + 10 = 15.
def test_sparse_input():
    """
    Ensure the estimator can handle sparse X matrices.
    """
    import scipy.sparse as ss

    # Build a random binary CSR matrix with exactly ``nnz`` ones per row.
    nrows, ncols, nnz = 100, 50, 10
    ntrue = nrows // 2
    data, cols, rows = [], [], []
    for r in range(nrows):
        feats = np.random.choice(range(ncols), size=nnz, replace=False)
        data.extend([1] * nnz)
        cols.extend(feats)
        rows.extend([r] * nnz)
    X = ss.csr_matrix((data, (rows, cols)), shape=(nrows, ncols))

    # First half of the rows form the positive class.
    y = np.zeros(nrows)
    y[:ntrue] = 1

    # Fit an SGD model on the sparse matrix.
    estimator = hyperopt_estimator(
        classifier=components.sgd('sgd', loss='log'),
        preprocessing=[],
    )
    estimator.fit(X, y)
def test_continuous_loss_fn():
    """
    Demonstrate using a custom loss function with the continuous_loss_fn
    option.
    """
    from sklearn.metrics import log_loss

    # A mean-shifted block of 10 features stacked beside a 10-feature
    # noise block (2000 samples x 20 features in total).
    features = np.hstack([
        np.vstack([
            np.random.normal(0, 1, size=(1000, 10)),
            np.random.normal(1, 1, size=(1000, 10)),
        ]),
        np.random.normal(0, 1, size=(2000, 10)),
    ])
    labels = np.zeros(2000)
    labels[:1000] = 1

    def nll(targ, pred):
        # hyperopt_estimator flattens the prediction when saving it. This also
        # affects multilabel classification.
        pred = pred.reshape((-1, 2))
        return log_loss(targ, pred[:, 1])

    # Score candidate SGD models by log loss on predicted probabilities.
    estimator = hyperopt_estimator(
        classifier=components.sgd('sgd', loss='log'),
        preprocessing=[],
        loss_fn=nll,
        continuous_loss_fn=True,
    )
    estimator.fit(features, labels, cv_shuffle=True)
# class TestSpace(unittest.TestCase):
# def setUp(self):
# np.random.seed(123)
# self.X = np.random.randn(1000, 2)
# self.Y = (self.X[:, 0] > 0).astype('int')
# def test_smoke(self):
# # -- verify the space argument is accepted and runs
# space = components.generic_space()
# model = hyperopt_estimator(
# verbose=1, max_evals=10, trial_timeout=5, space=space)
# model.fit(self.X, self.Y)
# -- flake8 eof
def test_crossvalidation():
    """
    Demonstrate performing a k-fold CV using the fit() method.
    """
    # A mean-shifted block of 10 features stacked beside a 10-feature
    # noise block (2000 samples x 20 features in total).
    features = np.hstack([
        np.vstack([
            np.random.normal(0, 1, size=(1000, 10)),
            np.random.normal(1, 1, size=(1000, 10)),
        ]),
        np.random.normal(0, 1, size=(2000, 10)),
    ])
    labels = np.zeros(2000)
    labels[:1000] = 1

    # Fit with shuffled 5-fold cross-validation.
    estimator = hyperopt_estimator(
        classifier=components.sgd('sgd', loss='log'),
        preprocessing=[],
    )
    estimator.fit(features, labels, cv_shuffle=True, n_folds=5)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/training/helpers/hyperopt-sklearn/hpsklearn/tests/test_preprocessing.py | training/helpers/hyperopt-sklearn/hpsklearn/tests/test_preprocessing.py | try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from hyperopt import rand, tpe, hp
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
from hyperopt.pyll import scope
class TestPreprocessing(unittest.TestCase):
    """Smoke tests: each preprocessing component must fit and score inside
    a small hyperopt_estimator search without raising.
    """

    def setUp(self):
        # Fixed seed so every test method sees identical random data.
        np.random.seed(123)
        self.X_train = np.random.randn(1000, 2)
        self.Y_train = (self.X_train[:, 0] > 0).astype('int')
        self.X_test = np.random.randn(1000, 2)
        self.Y_test = (self.X_test[:, 0] > 0).astype('int')

    def test_one_hot_encoder(self):
        # requires a classifier that can handle sparse data
        model = hyperopt_estimator(
            classifier=components.multinomial_nb('classifier'),
            preprocessing=[components.one_hot_encoder('preprocessing')],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        # Inputs for one_hot_encoder must be non-negative integers.  Use the
        # builtin ``int``: the ``np.int`` alias was deprecated in NumPy 1.20
        # and removed in 1.24.
        X = np.abs(np.round(self.X_test).astype(int))
        model.fit(X, self.Y_test)
        model.score(X, self.Y_test)

    def test_tfidf(self):
        # requires a classifier that can handle sparse data
        model = hyperopt_estimator(
            classifier=components.multinomial_nb('classifier'),
            preprocessing=[components.tfidf('preprocessing')],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        X = np.array([
            'This is the first document.',
            'This document is the second document.',
            'And this is the third one.',
            'Is this the first document?',
        ])
        Y = np.array([0, 1, 2, 0])
        model.fit(X, Y)
        model.score(X, Y)

    def test_gaussian_random_projection(self):
        # restrict n_components to be less than or equal to data dimension
        # to prevent sklearn warnings from printing during tests
        n_components = scope.int(hp.quniform(
            'preprocessing.n_components', low=1, high=8, q=1
        ))
        model = hyperopt_estimator(
            classifier=components.gaussian_nb('classifier'),
            preprocessing=[
                components.gaussian_random_projection(
                    'preprocessing',
                    n_components=n_components,
                )
            ],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        # Labels are derived from the *local* 8-dim data; the original used
        # ``self.X_train``/``self.X_test`` (the 2-dim fixture), so the labels
        # bore no relation to the features being fitted.
        X_train = np.random.randn(1000, 8)
        Y_train = (X_train[:, 0] > 0).astype('int')
        X_test = np.random.randn(1000, 8)
        Y_test = (X_test[:, 0] > 0).astype('int')
        model.fit(X_train, Y_train)
        model.score(X_test, Y_test)

    def test_sparse_random_projection(self):
        # restrict n_components to be less than or equal to data dimension
        # to prevent sklearn warnings from printing during tests
        n_components = scope.int(hp.quniform(
            'preprocessing.n_components', low=1, high=8, q=1
        ))
        model = hyperopt_estimator(
            classifier=components.gaussian_nb('classifier'),
            preprocessing=[
                components.sparse_random_projection(
                    'preprocessing',
                    n_components=n_components,
                )
            ],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        # Same fix as test_gaussian_random_projection: labels come from the
        # local 8-dim data actually used for fitting.
        X_train = np.random.randn(1000, 8)
        Y_train = (X_train[:, 0] > 0).astype('int')
        X_test = np.random.randn(1000, 8)
        Y_test = (X_test[:, 0] > 0).astype('int')
        model.fit(X_train, Y_train)
        model.score(X_test, Y_test)
def create_function(pre_fn):
    """Build a ``test_<preprocessor>`` method exercising *pre_fn*."""

    def test_preprocessing(self):
        # A tiny random search is enough for a does-it-run check.
        estimator = hyperopt_estimator(
            classifier=components.gaussian_nb('classifier'),
            preprocessing=[pre_fn('preprocessing')],
            algo=rand.suggest,
            trial_timeout=5.0,
            max_evals=5,
        )
        estimator.fit(self.X_train, self.Y_train)
        estimator.score(self.X_test, self.Y_test)

    test_preprocessing.__name__ = 'test_{0}'.format(pre_fn.__name__)
    return test_preprocessing
# Preprocessing components covered by the generic smoke test; entries with
# special input requirements are tested by dedicated methods instead.
preprocessors = [
    components.pca,
    #components.one_hot_encoder, # handled separately
    components.standard_scaler,
    components.min_max_scaler,
    components.normalizer,
    #components.ts_lagselector, # handled in test_ts.py
    #components.tfidf, # handled separately
    #components.sparse_random_projection, # handled separately
    #components.gaussian_random_projection, # handled separately
]

# Attach one ``test_<component>`` method per entry so that nose (and other
# runners) discover each preprocessor as its own test.
for pre_fn in preprocessors:
    test_name = 'test_{0}'.format(pre_fn.__name__)
    setattr(TestPreprocessing, test_name, create_function(pre_fn))
# Allow running the preprocessing smoke tests as a stand-alone script.
if __name__ == '__main__':
    unittest.main()
# -- flake8 eof
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.