repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
robotools/fontParts | https://github.com/robotools/fontParts/blob/cb4b90defb59d967d209d254659d7265fc1abd86/Lib/fontParts/fontshell/test.py | Lib/fontParts/fontshell/test.py | from fontParts.test import testEnvironment
from fontParts.fontshell.font import RFont
from fontParts.fontshell.info import RInfo
from fontParts.fontshell.groups import RGroups
from fontParts.fontshell.kerning import RKerning
from fontParts.fontshell.features import RFeatures
from fontParts.fontshell.layer import RLayer
from fontParts.fontshell.glyph import RGlyph
from fontParts.fontshell.contour import RContour
from fontParts.fontshell.segment import RSegment
from fontParts.fontshell.bPoint import RBPoint
from fontParts.fontshell.point import RPoint
from fontParts.fontshell.anchor import RAnchor
from fontParts.fontshell.component import RComponent
from fontParts.fontshell.image import RImage
from fontParts.fontshell.lib import RLib
from fontParts.fontshell.guideline import RGuideline
# defcon does not have prebuilt support for
# selection states, so we simulate selection
# behavior with small subclasses for testing
# purposes only.
def _get_selected(self):
    """Report the simulated selection state for this test object."""
    if isinstance(self, FSTestSegment):
        # A segment counts as selected when any of its points is selected.
        return any(point.selected for point in self.points)
    if isinstance(self, FSTestBPoint):
        # bPoints piggyback on the underlying defcon point's name attribute.
        return self._point.naked().name == "selected"
    if isinstance(self, FSTestPoint):
        # Points flag selection by having their name set to "selected".
        return self.name == "selected"
    # Everything else stores a plain attribute on the naked defcon object;
    # absent attribute means "never selected".
    return getattr(self.naked(), "_testSelected", False)
def _set_selected(self, value):
    """Store the simulated selection state for this test object."""
    if isinstance(self, FSTestSegment):
        # (De)selecting a segment (de)selects every point it contains.
        for point in self.points:
            point.selected = value
    elif isinstance(self, FSTestBPoint):
        # bPoints piggyback on the underlying defcon point's name attribute.
        self._point.naked().name = "selected" if value else None
    elif isinstance(self, FSTestPoint):
        # Points flag selection through their name attribute.
        self.name = "selected" if value else None
    else:
        # Everything else stores a plain attribute on the naked defcon object.
        self.naked()._testSelected = value
# Subclasses of the fontshell wrapper classes that graft the simulated
# selection behavior (above) onto every object type the tests exercise.
class FSTestPoint(RPoint):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestBPoint(RBPoint):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestSegment(RSegment):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestGuideline(RGuideline):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestImage(RImage):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestAnchor(RAnchor):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestComponent(RComponent):
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestContour(RContour):
    # Point the container at the selection-aware member classes so that
    # child objects created through this contour support selection too.
    segmentClass = FSTestSegment
    bPointClass = FSTestBPoint
    pointClass = FSTestPoint
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestGlyph(RGlyph):
    contourClass = FSTestContour
    componentClass = FSTestComponent
    anchorClass = FSTestAnchor
    guidelineClass = FSTestGuideline
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestLayer(RLayer):
    glyphClass = FSTestGlyph
    _get_selected = _get_selected
    _set_selected = _set_selected

class FSTestFont(RFont):
    layerClass = FSTestLayer
    guidelineClass = FSTestGuideline
    _get_selected = _get_selected
    _set_selected = _set_selected
# Maps the generic object names used by the shared test environment to
# the selection-aware fontshell classes defined above (plain R* classes
# are used where selection simulation is not needed).
classMapping = {
    "font": FSTestFont,
    "info": RInfo,
    "groups": RGroups,
    "kerning": RKerning,
    "features": RFeatures,
    "layer": FSTestLayer,
    "glyph": FSTestGlyph,
    "contour": FSTestContour,
    "segment": FSTestSegment,
    "bPoint": FSTestBPoint,
    "point": FSTestPoint,
    "anchor": FSTestAnchor,
    "component": FSTestComponent,
    "image": FSTestImage,
    "lib": RLib,
    "guideline": FSTestGuideline,
}
def fontshellObjectGenerator(cls):
    """Instantiate the fontshell test class registered for *cls*.

    Returns an ``(object, unrequested)`` pair as the test environment
    expects; fontshell never creates extra helper objects, so the
    unrequested list is always empty.
    """
    return classMapping[cls](), []
if __name__ == "__main__":
    import sys

    # Run the shared fontParts test suite against the fontshell objects;
    # -v/--verbose on the command line bumps the unittest verbosity.
    verbose = {"-v", "--verbose"}.intersection(sys.argv)
    testEnvironment(fontshellObjectGenerator, verbosity=2 if verbose else 1)
| python | MIT | cb4b90defb59d967d209d254659d7265fc1abd86 | 2026-01-05T07:13:38.892524Z | false |
robotools/fontParts | https://github.com/robotools/fontParts/blob/cb4b90defb59d967d209d254659d7265fc1abd86/Lib/fontParts/fontshell/features.py | Lib/fontParts/fontshell/features.py | import defcon
from fontParts.base import BaseFeatures
from fontParts.fontshell.base import RBaseObject
class RFeatures(RBaseObject, BaseFeatures):
    """fontshell features object wrapping a defcon.Features instance."""

    # defcon class instantiated when this wrapper is created empty.
    wrapClass = defcon.Features

    def _get_text(self):
        # The feature text stored on the wrapped defcon object.
        return self.naked().text

    def _set_text(self, value):
        # Write the feature text straight through to the defcon object.
        self.naked().text = value
| python | MIT | cb4b90defb59d967d209d254659d7265fc1abd86 | 2026-01-05T07:13:38.892524Z | false |
robotools/fontParts | https://github.com/robotools/fontParts/blob/cb4b90defb59d967d209d254659d7265fc1abd86/documentation/source/conf.py | documentation/source/conf.py | # -*- coding: utf-8 -*-
#
# FontParts documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 24 13:04:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('../../Lib'))
# ------------
# Mock Imports
# This try/except is a hack around an issue in Mac OS 10.11.5.
# Specifically, mock requires a version of six that is later
# than the one that comes installed with the OS. This hack
# tries to import defcon and if it can't, it kicks to mock.
# This makes both local and readthedocs compilation work.
try:
import defcon
except ImportError:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = [
'fontTools',
'fontTools.misc',
'fontTools.misc.py23',
'fontTools.pens',
'fontTools.pens.basePen',
'fontMath',
'ufoLib',
'ufoLib.pointPen',
'defcon'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# / Mock Imports
# --------------
# ------------
# Monkey Patch
#
# https://github.com/sphinx-doc/sphinx/issues/1254
#
from fontParts.base.base import dynamicProperty
dynamicProperty.__get__ = lambda self, *args, **kwargs: self
#
# /MonkeyPatch
# ------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
]
autodoc_member_order = 'bysource'
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents' # was 'index'
# General information about the project.
project = u'FontParts'
copyright = u'2016, Dr. Rob O. Fab'
author = u'Dr. Rob O. Fab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'fontPartsTheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'FontPartsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'FontParts.tex', u'FontParts Documentation',
u'Dr. Rob O. Fab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fontparts', u'FontParts Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'FontParts', u'FontParts Documentation',
author, 'FontParts', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
####################
# custom directives
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx import addnodes
from sphinx.util import rst
from docutils.parsers.rst import directives
from sphinx.ext.autosummary import Autosummary, import_by_name, get_import_prefixes_from_env, autosummary_table
class AutosummaryMethodList(Autosummary):
    """Autosummary variant that also lists all public methods.

    Every name passed to the directive is expanded into the object
    itself plus one entry per public (non-underscore) method found on
    it.  The extra ``hidesummary`` flag suppresses the short-summary
    column of the generated table.
    """

    option_spec = dict(Autosummary.option_spec)
    option_spec["hidesummary"] = directives.flag

    def get_items(self, names):
        """
        Subclass get_items
        to get support for all methods in a given object
        """
        env = self.state.document.settings.env
        prefixes = get_import_prefixes_from_env(env)
        methodNames = []
        for name in names:
            methodNames.append(name)
            # Import the object itself, then append one "name.method"
            # entry per public attribute found on it.
            _, obj, _, _ = import_by_name(name, prefixes=prefixes)
            methodNames.extend(["%s.%s" % (name, method) for method in dir(obj) if not method.startswith("_")])
        return super(AutosummaryMethodList, self).get_items(methodNames)

    def get_table(self, items):
        """
        Subclass get_table to get support for `hidesummary` as an option
        to disable displaying the short summary column in the table
        """
        hidesummary = 'hidesummary' in self.options
        table_spec = addnodes.tabular_col_spec()
        table_spec['spec'] = 'p{0.5\linewidth}p{0.5\linewidth}'
        # autosummary_table wraps a regular docutils table node.
        table = autosummary_table('')
        real_table = nodes.table('', classes=['longtable'])
        table.append(real_table)
        group = nodes.tgroup('', cols=2)
        real_table.append(group)
        group.append(nodes.colspec('', colwidth=10))
        group.append(nodes.colspec('', colwidth=90))
        body = nodes.tbody('')
        group.append(body)

        def append_row(*column_texts):
            # Parse each cell's reST text and append the resulting row.
            row = nodes.row('')
            for text in column_texts:
                node = nodes.paragraph('')
                vl = ViewList()
                vl.append(text, '<autosummary>')
                self.state.nested_parse(vl, 0, node)
                try:
                    # Unwrap a single top-level paragraph, if present.
                    if isinstance(node[0], nodes.paragraph):
                        node = node[0]
                except IndexError:
                    pass
                row.append(nodes.entry('', node))
            body.append(row)

        for name, sig, summary, real_name in items:
            qualifier = 'obj'
            # First column: a cross-reference to the object, optionally
            # followed by its (escaped) signature.
            if 'nosignatures' not in self.options:
                col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig))
            else:
                col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
            col2 = summary
            if hidesummary:
                append_row(col1)
            else:
                append_row(col1, col2)
        return [table_spec, table]
def setup(app):
    # Sphinx extension entry point: register the custom directive so
    # ``.. autosummarymethodlist::`` is available in the documentation.
    app.add_directive('autosummarymethodlist', AutosummaryMethodList)
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
class YourStyle(Style):
default_style = ""
styles = {
Comment: 'italic #888',
Keyword: 'bold #005',
Name: '#f00',
Name.Function: '#0f0',
Name.Class: 'bold #0f0',
String: 'bg:#eee #111'
} | python | MIT | cb4b90defb59d967d209d254659d7265fc1abd86 | 2026-01-05T07:13:38.892524Z | false |
robotools/fontParts | https://github.com/robotools/fontParts/blob/cb4b90defb59d967d209d254659d7265fc1abd86/documentation/source/_themes/fontPartsTheme/static/sass2css.py | documentation/source/_themes/fontPartsTheme/static/sass2css.py | # compile sass into css
import os
import os.path
import subprocess
# sass (CSS extension language) compiler
# http://sass-lang.com/install
# install postCSS and autoprefixer with:
## npm install -g postcss-cli-simple
## npm install -g autoprefixer
def compileSass(sassPath):
    '''
    Compile a sass file (and its dependencies) into a single css file
    written next to the source (e.g. main.sass -> main.css).
    '''
    cssPath = os.path.splitext(sassPath)[0] + ".css"
    print("Compiling Sass")
    # subprocess.call runs the compiler and waits for it to finish,
    # replacing the manual Popen()/wait() pair (and matching the style
    # used elsewhere in this script).
    subprocess.call(["sass", sassPath, cssPath])
def autoprefixCSS(sassPath):
    '''
    Run the postCSS autoprefixer over the compiled CSS file, adding
    vendor prefixes for browsers with more than 5% market share.
    The file is rewritten in place.
    '''
    print("Autoprefixing CSS")
    cssPath = os.path.splitext(sassPath)[0] + ".css"
    # Pass the command as an argument list instead of a shell string:
    # the original concatenation produced "-o<path>" with no separating
    # space after -o, and shell=True required fragile quoting around
    # the "> 5%" browser query.
    command = [
        "postcss", "--use", "autoprefixer",
        "--autoprefixer.browsers", "> 5%",
        "-o", cssPath, cssPath,
    ]
    subprocess.call(command)
# Directory containing sass2css.py; the .sass sources live beside it.
baseFolder = os.path.dirname(os.path.abspath(__file__))
for fileName in os.listdir(baseFolder):
    extension = os.path.splitext(fileName)[1]
    # NOTE: partials must be imported into the main .sass file, or their
    # rules won't make it into the generated CSS.
    if extension == ".sass":
        sassPath = os.path.join(baseFolder, fileName)
        compileSass(sassPath)
        autoprefixCSS(sassPath)
| python | MIT | cb4b90defb59d967d209d254659d7265fc1abd86 | 2026-01-05T07:13:38.892524Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/setup.py | setup.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
Custom setup script for all operating systems.
'''
import os, json, sys
def brew_install(modules):
    '''
    Install each Homebrew formula named in *modules* via `brew install`.

    Formulae are installed one at a time so a single failure does not
    abort the rest of the list.
    '''
    # Iterate the names directly instead of indexing with range(len()).
    for module in modules:
        os.system('brew install %s' % (module))
# uninstall openCV to guarantee the right version
# this is the only step requiring manual intervention
# so it's best to do this first.
os.system('pip3 install --upgrade pip -y')
os.system('pip3 uninstall opencv-python -y')
os.system('pip3 uninstall opencv-contrib-python -y')
curdir=os.getcwd()
# possible operating systems
# | Linux (2.x and 3.x) | linux2 (*) |
# | Windows | win32 |
# | Windows/Cygwin | cygwin |
# | Windows/MSYS2 | msys |
# | Mac OS X | darwin |
# | OS/2 | os2 |
# | OS/2 EMX | os2emx |
# assumes Mac OSX for SoX and FFmpeg installations
if sys.platform.lower() in ['darwin', 'os2', 'os2emx']:
brew_modules=['sox', 'ffmpeg', 'opus-tools', 'opus', 'autoconf', 'automake', 'm4', 'libtool', 'gcc', 'portaudio', 'lasound']
brew_install(brew_modules)
os.system('pip3 install -r mac.txt')
# to install opensmile package
curdir=os.getcwd()
os.chdir('features/audio_features/helpers/opensmile/opensmile-2.3.0')
os.system('bash autogen.sh')
os.system('bash autogen.sh')
os.system('./configure')
os.system('make -j4 ; make')
os.system('make install')
os.chdir(curdir)
# install xcode if it is not already installed (on Mac) - important for OPENSMILE features
os.system('xcode-select --install')
elif sys.platform.lower() in ['linux', 'linux2']:
os.system('sudo apt-get install ffmpeg -y')
os.system('sudo apt-get install sox -y')
os.system('sudo apt-get install python-pyaudio -y')
os.system('sudo apt-get install portaudio19-dev -y')
os.system('sudo apt-get install libpq-dev python3.7-dev libxml2-dev libxslt1-dev libldap2-dev libsasl2-dev libffi-dev -y')
os.system('sudo apt upgrade gcc -y')
os.system('sudo apt-get install -y python python-dev python-pip build-essential swig git libpulse-dev')
os.system('sudo apt-get install -y tesseract-ocr')
os.system('sudo apt install -y opus-tools')
os.system('sudo apt install -y libav-tools')
os.system('sudo apt install -y libsm6')
# to install opensmile package / link
os.system('sudo apt-get install autoconf automake m4 libtool gcc -y')
curdir=os.getcwd()
os.chdir('features/audio_features/helpers/opensmile/opensmile-2.3.0')
os.system('bash autogen.sh')
os.system('bash autogen.sh')
os.system('./configure')
os.system('make -j4 ; make')
os.system('sudo make install')
os.system('sudo ldconfig')
os.chdir(curdir)
os.system('pip3 install -r requirements.txt')
elif sys.platform.lower() in ['win32', 'cygwin', 'msys']:
# https://www.thewindowsclub.com/how-to-install-ffmpeg-on-windows-10
print('you have to install FFmpeg from source')
# https://github.com/JoFrhwld/FAVE/wiki/Sox-on-Windows
print('you have to install SoX from source')
# now install all modules with pip3 - install individually to reduce errors
os.system('pip3 install -r requirements.txt')
# custom installations across all operating systems
os.system('pip3 install git+https://github.com/detly/gammatone.git')
os.system('pip3 install https://github.com/vBaiCai/python-pesq/archive/master.zip')
os.system('pip3 install git+https://github.com/aliutkus/speechmetrics#egg=speechmetrics[cpu]')
os.system('pip3 install markovify==0.8.3')
os.system('pip3 install tsaug==0.2.1')
os.system('pip3 install seaborn==0.10.1')
os.system('pip3 install psutil==5.7.2')
os.system('pip3 install pyfiglet==0.8.post1')
os.system('pip3 install gensim==3.8.3')
os.system('pip3 install wget==3.2')
os.system('pip3 install textblob==0.15.3')
os.system('pip3 install moviepy==1.0.3')
os.system('pip3 install textacy==0.10.0')
os.system('pip3 install SpeechRecognition==3.8.1')
os.system('pip3 install pytesseract==0.3.4')
os.system('pip3 install pydub==0.24.1')
os.system('pip3 install ctgan==0.2.1')
os.system('pip3 install librosa==0.6.2')
os.system('pip3 install numba==0.48')
os.system('pip3 install sk-video==1.1.10')
os.system('pip3 install opencv-python==3.4.2.17')
os.system('pip3 install opencv-contrib-python==3.4.2.17')
# install add-ons to NLTK
os.system('pip3 install nltk==3.4.5')
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
# install spacy add-ons
os.system('python3 -m spacy download en')
os.system("python3 -m spacy download 'en_core_web_sm'")
# install hyperopt-sklearn
curdir=os.getcwd()
os.chdir(curdir+'/training/helpers/hyperopt-sklearn')
os.system('pip3 install -e .')
# install keras-compressor
os.chdir(curdir)
os.chdir(curdir+'/training/helpers/keras_compressor')
os.system('pip3 install .')
# go back to host directory
os.chdir(curdir)
# now go setup tests
os.chdir('tests')
os.system('python3 test.py')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/allie.py | allie.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
_____ _ _ _
/ __ \ | | | | (_)
| / \/ ___ _ __ ___ _ __ ___ __ _ _ __ __| | | | _ _ __ ___
| | / _ \| '_ ` _ \| '_ ` _ \ / _` | '_ \ / _` | | | | | '_ \ / _ \
| \__/\ (_) | | | | | | | | | | | (_| | | | | (_| | | |___| | | | | __/
\____/\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|\__,_| \_____/_|_| |_|\___|
_____ _ __
|_ _| | | / _|
| | _ __ | |_ ___ _ __| |_ __ _ ___ ___
| || '_ \| __/ _ \ '__| _/ _` |/ __/ _ \
_| || | | | || __/ | | || (_| | (_| __/
\___/_| |_|\__\___|_| |_| \__,_|\___\___|
This is a command-line interface for all of Allie's APIs.
This makes it easy-to-call many common APIs within Allie,
which include:
- Annotate API - https://github.com/jim-schwoebel/allie/tree/master/annotation
- Augmentation API - https://github.com/jim-schwoebel/allie/tree/master/augmentation
- Cleaning API - https://github.com/jim-schwoebel/allie/tree/master/cleaning
- Datasets API - https://github.com/jim-schwoebel/allie/tree/master/datasets
- Features API - https://github.com/jim-schwoebel/allie/tree/master/features
- Model Prediction API - https://github.com/jim-schwoebel/allie/tree/master/models
- Preprocessing API - https://github.com/jim-schwoebel/allie/tree/master/preprocessing
- Model Training API - https://github.com/jim-schwoebel/allie/tree/master/training
- Test API - https://github.com/jim-schwoebel/allie/tree/master/tests
- Visualize API - https://github.com/jim-schwoebel/allie/tree/master/visualize
All of these commands will ingest the default settings via
the settings.json document, so be sure to set the right settings
when calling the API.
Usage: allie.py [options]
Options:
-h, --help show this help message and exit
--c=command, --command=command
the target command (annotate API = 'annotate',
augmentation API = 'augment', cleaning API = 'clean',
datasets API = 'data', features API = 'features',
model prediction API = 'predict', preprocessing API =
'transform', model training API = 'train', testing
API = 'test', visualize API = 'visualize',
list/change default settings = 'settings')
--p=problemtype, --problemtype=problemtype
specify the problem type ('c' = classification or 'r'
= regression)
--s=sampletype, --sampletype=sampletype
specify the type files that you'd like to operate on
(e.g. 'audio', 'text', 'image', 'video', 'csv')
--n=common_name, --name=common_name
specify the common name for the model (e.g. 'gender'
for a male/female problem)
--i=class_, --class=class_
specify the class that you wish to annotate (e.g.
'male')
--d=dir, --dir=dir an array of the target directory (or directories) that
contains sample files for the annotation API,
prediction API, features API, augmentation API,
cleaning API, and preprocessing API (e.g.
'/Users/jim/desktop/allie/train_dir/teens/')
If you have any questions or would like to contribute to our community,
please reach out to Jim Schwoebel @ js@neurolex.co
'''
import os, shutil, time, json
from optparse import OptionParser
from tqdm import tqdm
from pyfiglet import Figlet
# helper function to render modules and functions
def render(text, f):
print(f.renderText(text))
f=Figlet(font='doh')
render('Allie',f)
f=Figlet(font='doom')
render('Command Line Interface',f)
###############################################################
## INITIALIZATION ##
###############################################################
# initialize variables for the test
prevdir=os.getcwd()
load_dir = prevdir+'/load_dir'
train_dir = prevdir + '/train_dir'
model_dir = prevdir+ '/training'
features_dir=prevdir+'/features'
loadmodel_dir = prevdir+'/models'
clean_dir=prevdir+'/cleaning/'
data_dir=prevdir+'/datasets'
augment_dir=prevdir+'/augmentation'
test_dir=prevdir+'/tests'
visualization_dir=prevdir+'/visualize'
preprocessing_dir=prevdir+'/preprocessing'
annotation_dir=prevdir+'/annotation'
# settings
settings=json.load(open(prevdir+'/settings.json'))
clean_data=settings['clean_data']
augment_data=settings['augment_data']
# transcript settings
default_audio_transcript=settings['default_audio_transcriber']
default_image_transcript=settings['default_image_transcriber']
default_text_transcript=settings['default_text_transcriber']
default_video_transcript=settings['default_video_transcriber']
default_csv_transcript=settings['default_csv_transcriber']
transcribe_audio=settings['transcribe_audio']
transcribe_text=settings['transcribe_text']
transcribe_image=settings['transcribe_image']
transcribe_video=settings['transcribe_video']
transcribe_csv=settings['transcribe_csv']
# feature settings
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']
# cleaning settings
default_audio_cleaners=settings['default_audio_cleaners']
default_text_cleaners=settings['default_text_cleaners']
default_image_cleaners=settings['default_image_cleaners']
default_video_cleaners=settings['default_video_cleaners']
default_csv_cleaners=settings['default_csv_cleaners']
# augmentation settings
default_audio_augmenters=settings['default_audio_augmenters']
default_text_augmenters=settings['default_text_augmenters']
default_image_augmenters=settings['default_image_augmenters']
default_video_augmenters=settings['default_video_augmenters']
default_csv_augmenters=settings['default_csv_augmenters']
# preprocessing settings
select_features=settings['select_features']
reduce_dimensions=settings['reduce_dimensions']
scale_features=settings['scale_features']
default_scaler=settings['default_scaler']
default_feature_selector=settings['default_feature_selector']
default_dimensionality_reducer=settings['default_dimensionality_reducer']
dimension_number=settings['dimension_number']
feature_number=settings['feature_number']
# other settings for raining scripts
training_scripts=settings['default_training_script']
model_compress=settings['model_compress']
# directories
audiodir=loadmodel_dir+'/audio_models'
textdir=loadmodel_dir+'/text_models'
imagedir=loadmodel_dir+'/image_models'
videodir=loadmodel_dir+'/video_models'
csvdir=loadmodel_dir+'/csv_models'
# for if/then statements later
commands=['annotate', 'augment', 'clean', 'data',
'features', 'predict', 'transform', 'train',
'test', 'visualize', 'settings']
sampletypes=['audio', 'text', 'image', 'video', 'csv']
problemtypes = ['c','r']
# get all the options from the terminal
parser = OptionParser()
parser.add_option("--c", "--command", dest="command",
help="the target command (annotate API = 'annotate', \n"+
"augmentation API = 'augment', \n"+
"cleaning API = 'clean', \n"+
"datasets API = 'data', \n"+
"features API = 'features', \n"+
"model prediction API = 'predict', \n"+
"preprocessing API = 'transform', \n"+
"model training API = 'train', \n"+
"testing API = 'test', \n"+
"visualize API = 'visualize', \n" +
"list/change default settings = 'settings')", metavar="command")
parser.add_option("--p", "--problemtype", dest="problemtype",
help="specify the problem type ('c' = classification or 'r' = regression)", metavar="problemtype")
parser.add_option("--s", "--sampletype", dest="sampletype",
help="specify the type files that you'd like to operate on (e.g. 'audio', 'text', 'image', 'video', 'csv')", metavar="sampletype")
parser.add_option("--n", "--name", dest="common_name",
help="specify the common name for the model (e.g. 'gender' for a male/female problem)", metavar="common_name")
parser.add_option("--i", "--class", dest="class_",
help="specify the class that you wish to annotate (e.g. 'male')", metavar="class_")
# preprocessing, featurization, cleaning, and augmentation API directories (as an appended list)
parser.add_option("--d", "--dir", dest="dir",
help="an array of the target directory (or directories) that contains sample files for the annotation API, prediction API, features API, augmentation API, cleaning API, and preprocessing API (e.g. '/Users/jim/desktop/allie/train_dir/teens/')", metavar="dir",
action='append')
# parse arguments
(options, args) = parser.parse_args()
# pull arguments from CLI
try:
command= options.command.lower().replace(' ','')
except:
pass
try:
common_name = options.common_name.lower()
except:
pass
try:
sampletype = options.sampletype.lower()
except:
pass
try:
problemtype = options.problemtype.lower()
except:
pass
try:
class_ = options.class_.lower()
except:
pass
try:
directory=options.dir
except:
pass
# now pursue relevant command passed
try:
if str(command) != 'None' and command in commands:
if command == 'annotate':
# - Annotate API - https://github.com/jim-schwoebel/allie/tree/master/annotation
if str(directory) != 'None' and sampletype in sampletypes and str(class_) != 'None' and problemtype in problemtypes:
for i in range(len(directory)):
os.chdir(annotation_dir)
os.system('python3 annotate.py -d %s -s %s -c %s -p %s'%(directory[i], sampletype, class_, problemtype))
else:
if str(directory) == 'None':
print('ERROR - annotation directory (-dir) not specified in the CLI')
elif sampletype not in sampletypes:
print('ERROR - sample type (%s) not in possible sample types (%s)'%(str(sampletype), str(sampletypes)))
elif str(class_) == 'None':
print('ERROR - annotation class not specified (-class)')
elif problemtype not in problemtypes:
print('ERROR - probelm type (%s) not in possible problem types (%s)'%(str(problemtype),str(problemtypes)))
elif command == 'augment':
# - Augmentation API - https://github.com/jim-schwoebel/allie/tree/master/augmentation
if sampletype in sampletypes:
os.chdir(augment_dir+'/%s_augmentation'%(sampletype))
if str(directory) != 'None':
for i in range(len(directory)):
os.system('python3 augment.py %s'%(directory[i]))
else:
print('ERROR - '+sample +' - not in list of possible sample types: %s'%(str(sampletypes)))
elif command == 'clean':
# - Cleaning API - https://github.com/jim-schwoebel/allie/tree/master/cleaning
if sampletype in sampletypes:
os.chdir(clean_dir+'/%s_cleaning'%(sampletype))
if str(directory) != 'None':
for i in range(len(directory)):
os.system('python3 clean.py %s'%(directory[i]))
else:
print('ERROR - '+sample +' - not in list of possible sample types: %s'%(str(sampletypes)))
elif command == 'data':
# - Datasets API - https://github.com/jim-schwoebel/allie/tree/master/datasets
os.chdir(data_dir+'/downloads')
os.system('python3 download.py')
elif command == 'features':
# - Features API - https://github.com/jim-schwoebel/allie/tree/master/features
if sampletype in sampletypes:
os.chdir(features_dir+'/%s_features'%(sampletype))
if str(directory) != 'None':
for i in range(len(directory)):
os.system('python3 featurize.py %s'%(directory[i]))
else:
print('ERROR - '+sample +' - not in list of possible sample types: %s'%(str(sampletypes)))
elif command == 'predict':
# - Model Prediction API - https://github.com/jim-schwoebel/allie/tree/master/models
if str(directory) == 'None':
print('Making model predictions in ./load_dir because ldir was not specified...')
os.chdir(loadmodel_dir)
os.system('python3 load.py')
else:
print('Making model predictions in the directory specified: %s'%(str(ldir)))
if str(directory) == 'None' and len(directory) == 1:
os.chdir(loadmodel_dir)
os.system('python3 load.py %s'%(directory[0]))
else:
print('too many directories (%s) specified for model prediction. \n\nPlease only specify one directory.'%(str(len(directory))))
elif command == 'transform':
# - Preprocessing API - https://github.com/jim-schwoebel/allie/tree/master/preprocessing
os.chdir(preprocessing_dir)
# get first folder
if sampletype in sampletypes and problemtype in problemtypes and str(common_name) != 'None' and str(directory) != 'None' and len(directory) > 1:
# get to N number of folders
command='python3 transform.py %s %s %s'%(sampletype, problemtype, common_name)
for i in range(len(directory)):
command=command+' '+directory[i]
os.system(command)
print('your transform can now be found in the ./preprocessing/%s_transforms directory'%(sampletype))
else:
if str(tdir1) == 'None' or str(tdir2) == 'None':
print('ERROR - transform API cannot be called. Please be sure that you defined your sample, problem type, common_name, and 2 directoreis properly (-tdir1 and -tdir2).')
elif sampletype not in sampletypes:
print('ERROR - '+sampletype +' not in possible sample types (%s)'%(str(sampletypes)))
elif problem not in problemtypes:
print('ERROR - '+problemtype + ' not in possible problem types (%s)'%(str(problemtypes)))
elif str(common_name) == 'None':
print('ERROR - common name not specified during creation of the transform in the preprocessing API.')
elif command == 'train':
# - https://github.com/jim-schwoebel/allie/tree/master/training
os.chdir(model_dir)
os.system('python3 model.py')
elif command == 'test':
# - Test API - https://github.com/jim-schwoebel/allie/tree/master/tests
os.chdir(test_dir)
os.system('python3 test.py')
elif command == 'visualize':
# - Visualize API - https://github.com/jim-schwoebel/allie/tree/master/visualize
os.chdir(visualization_dir)
os.system('python3 visualize.py')
elif command == 'settings':
print(settings)
print('\n')
settingslist=list(settings)
textinput=input('Would you like to change any of these settings? Yes (-y) or No (-n)\n')
if textinput.lower().replace(' ','') in ['yes','y']:
textinput=input('What setting would you like to change?\n')
while textinput not in settingslist and textinput.lower() != 'version':
print('Setting not recognized, options are:')
time.sleep(0.5)
for i in range(len(settingslist)):
if settingslist[i] != 'version':
print('- '+settingslist[i])
time.sleep(0.05)
textinput=input('What setting would you like to change?\n')
newsetting=input('What setting would you like to set here?\n')
if str(newsetting).title() in ['True']:
newsetting=True
elif str(newsetting).title() in ['False']:
newsetting=False
elif textinput in ['dimension_number', 'feature_number']:
newsetting=int(newsetting)
elif textinput in ['test_size']:
newsetting=float(newsetting)
else:
settingnum=input('how many more settings would you like to set here?\n')
newsetting=[newsetting]
try:
for i in range(int(settingnum)):
newsetting2=input('What additional setting would you like to set here?\n')
newsetting.append(newsetting2)
except:
pass
print(type(newsetting))
jsonfile=open('settings.json','w')
settings[textinput]=newsetting
json.dump(settings,jsonfile)
jsonfile.close()
else:
print('ERROR - %s is not a valid command in the Allie CLI. Please use one of these commands'%(str(command)))
print('\n')
for i in range(len(commands)):
print(' - '+commands[i])
print('\n\n')
except:
print('ERROR - no command provided in the Allie CLI. \n\nPlease use one of these commands. \n')
for i in range(len(commands)):
print(' - '+commands[i])
print('\n')
print('Sample usage: \npython3 allie.py --command features --dir /Users/jimschwoebel/desktop/allie/train_dir/females --sampletype audio')
print('\nFor additional help, type in:')
print('python3 allie.py -h\n')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/docker.py | docker.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
Custom setup script for docker installations.
To start, you need to download docker for you computer:
https://www.docker.com/get-started
Now you go to the Allie repository and build the image:
-> git clone git@github.com:jim-schwoebel/allie.git
-> cd allie
-> docker build -t allie_image .
Then you can use the terminal to use the Docker container as if it were your own computer:
-> docker run -it --entrypoint=/bin/bash allie_image
To learn more about how to use Allie and Docker, visit
https://github.com/jim-schwoebel/allie/wiki/6.-Using-Allie-and-Docker
'''
import os, json, sys, nltk
# add-on script for docker
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
# install hyperopt-sklearn
curdir=os.getcwd()
os.chdir(curdir+'/training/helpers/hyperopt-sklearn')
os.system('pip3 install -e .')
# install keras-compressor
os.chdir(curdir)
os.chdir(curdir+'/training/helpers/keras_compressor')
os.system('pip3 install .')
# now go setup tests
os.chdir(curdir)
os.chdir('tests')
os.system('python3 test.py')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/preprocessing/feature_reduce.py | preprocessing/feature_reduce.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _
| ___ \ (_)
| |_/ / __ ___ _ __ _ __ ___ ___ ___ ___ ___ _ _ __ __ _
| __/ '__/ _ \ '_ \| '__/ _ \ / __/ _ \/ __/ __| | '_ \ / _` |
| | | | | __/ |_) | | | (_) | (_| __/\__ \__ \ | | | | (_| |
\_| |_| \___| .__/|_| \___/ \___\___||___/___/_|_| |_|\__, |
| | __/ |
|_| |___/
___ ______ _____
/ _ \ | ___ \_ _|
/ /_\ \| |_/ / | |
| _ || __/ | |
| | | || | _| |_
\_| |_/\_| \___/
Employ dimensionality reduction strategies as part of Allie's preprocessing API.
'''
import json, os, sys
import numpy as np
from sklearn.model_selection import train_test_split
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def feature_reduce(dimensionality_selector, X_train, y_train, component_num):
if dimensionality_selector == 'autoencoder':
from keras.layers import Input, Dense
from keras.models import Model
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# preprocess labels (make into integers)
label_encoder = LabelEncoder()
y_train=label_encoder.fit_transform(y_train)
y_test=label_encoder.fit_transform(y_test)
# this is the size of our encoded representations (208 features in X)
encoding_dim = 32
# add a few dimensions for encoder and decoder
input_dim = Input(shape=X_train[0].shape)
encoder=Dense(encoding_dim, activation='tanh')
autoencoder = Model(input_dim, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_dim, encoded)
# create a placeholder for an encoded (50-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# now train autoencoder
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(X_train, y_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(X_test, y_test))
# predict emebddings
encoded_audio = encoder.predict(X_test)
decoded_audio = decoder.predict(encoded_audio)
print('not saving model due to keras autoencoder')
elif dimensionality_selector == 'cca':
from sklearn.cross_decomposition import CCA
cca = CCA(n_components=component_num)
return cca
elif dimensionality_selector == 'dictionary':
from sklearn.decomposition import MiniBatchDictionaryLearning
dico_X = MiniBatchDictionaryLearning(n_components=component_num, alpha=1, n_iter=500)
model=dico_X
elif dimensionality_selector == 'ica':
from sklearn.decomposition import FastICA
ica = FastICA(n_components=component_num)
model=ica
elif dimensionality_selector == 'kmeans':
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=component_num, random_state=0)
model=kmeans
elif dimensionality_selector == 'lda':
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=component_num).fit(X_train, y_train).transform(X_train)
model=lda
elif dimensionality_selector == 'manifold':
from sklearn import manifold
manifold_X = manifold.Isomap(10, component_num)
model=manifold_X
elif dimensionality_selector == 'neighborhood':
from sklearn.neighbors import NeighborhoodComponentsAnalysis
nca = NeighborhoodComponentsAnalysis(random_state=42)
model=nca
# feature_engineering.gradient.selector
elif dimensionality_selector == 'pca':
from sklearn.decomposition import PCA
pca = PCA(n_components=component_num)
model = pca
elif dimensionality_selector == 'pls':
from sklearn.cross_decomposition import PLSRegression
pls = PLSRegression(n_components=component_num)
model=pls
return model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/preprocessing/feature_scale.py | preprocessing/feature_scale.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _
| ___ \ (_)
| |_/ / __ ___ _ __ _ __ ___ ___ ___ ___ ___ _ _ __ __ _
| __/ '__/ _ \ '_ \| '__/ _ \ / __/ _ \/ __/ __| | '_ \ / _` |
| | | | | __/ |_) | | | (_) | (_| __/\__ \__ \ | | | | (_| |
\_| |_| \___| .__/|_| \___/ \___\___||___/___/_|_| |_|\__, |
| | __/ |
|_| |___/
___ ______ _____
/ _ \ | ___ \_ _|
/ /_\ \| |_/ / | |
| _ || __/ | |
| | | || | _| |_
\_| |_/\_| \___/
Scale features according to Allie's preprocessing API.
'''
import json, os, sys
import numpy as np
from sklearn import preprocessing
import numpy as np
from sklearn.model_selection import train_test_split
from tqdm import tqdm
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_classes():
count=2
classes=list()
while True:
try:
class_=sys.argv[count]
classes.append(class_)
print(classes)
count=count+1
except:
break
return classes
def feature_scale(feature_scaler, X_train, y_train):
# more information about these scalers can be found @
# https://scikit-learn.org/stable/modules/preprocessing.html
if feature_scaler == 'binarizer':
# scale the X values in the set
model = preprocessing.Binarizer()
elif feature_scaler == 'one_hot_encoder':
'''
>>> enc.transform([['female', 'from US', 'uses Safari'],
['male', 'from Europe', 'uses Safari']]).toarray()
array([[1., 0., 0., 1., 0., 1.],
[0., 1., 1., 0., 0., 1.]])
'''
# This is on y values
model = preprocessing.OneHotEncoder(handle_unknown='ignore')
elif feature_scaler == 'maxabs':
model=preprocessing.MaxAbsScaler()
elif feature_scaler == 'minmax':
model=preprocessing.MinMaxScaler()
elif feature_scaler == 'normalize':
# L2 normalization
model = preprocessing.Normalizer()
elif feature_scaler == 'poly':
# scale the X values in the set
model = PolynomialFeatures(2)
elif feature_scaler == 'power_transformer':
# scale the X values in the set
model = preprocessing.PowerTransformer(method='yeo-johnson')
elif feature_scaler == 'quantile_transformer_normal':
# scale the X values in the set
model = preprocessing.QuantileTransformer(output_distribution='normal')
elif feature_scaler == 'robust':
model=preprocessing.RobustScaler(quantile_range=(25, 75))
elif feature_scaler == 'standard_scaler':
# scale the X values in the set
model=preprocessing.StandardScaler()
return model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/preprocessing/transform.py | preprocessing/transform.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _
| ___ \ (_)
| |_/ / __ ___ _ __ _ __ ___ ___ ___ ___ ___ _ _ __ __ _
| __/ '__/ _ \ '_ \| '__/ _ \ / __/ _ \/ __/ __| | '_ \ / _` |
| | | | | __/ |_) | | | (_) | (_| __/\__ \__ \ | | | | (_| |
\_| |_| \___| .__/|_| \___/ \___\___||___/___/_|_| |_|\__, |
| | __/ |
|_| |___/
___ ______ _____
/ _ \ | ___ \_ _|
/ /_\ \| |_/ / | |
| _ || __/ | |
| | | || | _| |_
\_| |_/\_| \___/
Make feature transformations as a part of Allie's preprocessing API.
Transformation are done in this order:
Feature scalers --> reduce dimensions --> select features.
A --> A`--> A`` --> A```
------
Usage: python3 transform.py audio c gender males females
------
sys.argv[1] - 'audio' - the sample type of file preprocessed by the transformer
['audio', 'text', 'image', 'video', 'csv']
sys.argv[2] - 'c' - classification or regression problems
['c', 'r']
sys.argv[3] - 'gender' - the common name for the transformer
can be any string (e.g. 'gender')
sys.argv[4], sys.argv[5], sys.argv[n]
'males' classes that you seek to model in the train_dir folder any string folder name
'''
import json, os, sys, time, pickle
os.system('pip3 install scikit-learn==0.22.2.post1')
import numpy as np
from sklearn import preprocessing
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from tqdm import tqdm
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NpEncoder, self).default(obj)
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
def get_class_dir():
count=4
classes=list()
while True:
try:
class_=sys.argv[count]
classes.append(class_)
print(classes)
count=count+1
except:
break
classdirs=list()
actualclasses=list()
for i in range(len(classes)):
if classes[i].count('/') > 0:
# a directory
classdirs.append(classes[i])
actualclasses.append(classes[i].split('/')[-1])
else:
classdirs.append(os.getcwd()+'/'+classes[i])
actualclasses.append(classes[i])
return classdirs, actualclasses
def get_features(classdirs, classes, problem_type, settings):
# initialize lists
features=list()
feature_labels=list()
class_labels=list()
curdir=os.getcwd()
# get defaults (if multiple feature arrays)
default_audio_features=settings["default_audio_features"]
default_text_features=settings["default_text_features"]
default_image_features=settings["default_image_features"]
default_video_features=settings["default_video_features"]
default_csv_features=settings["default_csv_features"]
defaults=default_audio_features+default_text_features+default_image_features+default_video_features+default_csv_features
for i in range(len(classes)):
classname=class_labels
print('----------LOADING %s----------'%(classes[i].upper()))
os.chdir(classdirs[i])
listdir=os.listdir()
jsonfiles=list()
for j in range(len(listdir)):
if listdir[j].endswith('.json'):
jsonfiles.append(listdir[j])
g=json.load(open(jsonfiles[0]))
feature_list=list(g['features'][problem_type])
for j in tqdm(range(len(jsonfiles))):
try:
g=json.load(open(jsonfiles[j]))
feature_=list()
label_=list()
try:
for k in range(len(feature_list)):
if feature_list[k] in defaults:
feature_=feature_+g['features'][problem_type][feature_list[k]]['features']
label_=label_+g['features'][problem_type][feature_list[k]]['labels']
# quick quality check to only add to list if the feature_labels match in length the features_
if len(feature_) == len(label_):
features.append(feature_)
feature_labels.append(label_)
class_labels.append(classes[i])
except:
print('error loading feature embedding: %s'%(feature_list[k].upper()))
except:
print('error loading %s -> %s'%(classes[i].upper(), jsonfiles[j]))
return features, feature_labels, class_labels
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
print(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
# get all the important settings for the transformations
scale_features=settings['scale_features']
reduce_dimensions=settings['reduce_dimensions']
select_features=settings['select_features']
default_scalers=settings['default_scaler']
default_reducers=settings['default_dimensionality_reducer']
default_selectors=settings['default_feature_selector']
print(scale_features)
print(reduce_dimensions)
print(select_features)
print(default_scalers)
print(default_reducers)
print(default_selectors)
os.chdir(basedir)
################################################
## Now go featurize! ##
################################################
# get current directory
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir+'/train_dir')
problem_type=sys.argv[1] #audio, text, image, video, csv
train_type=sys.argv[2] #c = classification, r=regression
if train_type == 'c':
common_name=sys.argv[3] #common_name = 'gender'
if problem_type != 'csv':
classdirs, classes=get_class_dir()
features, feature_labels, class_labels = get_features(classdirs, classes, problem_type, settings)
elif problem_type == 'csv':
class_='class_'
classes=['class_']
os.chdir(basedir+'/models/')
g=pd.read_csv(common_name+'_all.csv')
features=g.drop(['class_'], axis=1)
feature_labels=list(features)
features_=list()
for i in range(len(features)):
features_.append(list(features.iloc[i,:]))
features=features_
class_labels=list()
for i in range(len(features)):
class_labels.append(class_)
os.chdir(curdir)
X_train, X_test, y_train, y_test = train_test_split(features, class_labels, train_size=0.90, test_size=0.10)
print(features[0])
print(feature_labels[0])
elif train_type == 'r':
# only 1 class here
target=[sys.argv[3]]
spreadsheet=sys.argv[4]
spreadsheet_dir=sys.argv[5]
print(target)
print(spreadsheet)
print(spreadsheet_dir)
common_name=sys.argv[6] #common_name = 'gender'
os.chdir(spreadsheet_dir)
data=pd.read_csv(spreadsheet)
features=np.array(data.drop(columns=target, axis=1))
feature_labels=list(features)
class_labels=np.array(data.pop(target[0]))
print(features)
print(feature_labels)
print(class_labels)
X_train, X_test, y_train, y_test = train_test_split(features, class_labels, train_size=0.90, test_size=0.10)
# create a scikit-learn pipeline
feature_num=settings['feature_number']
component_num=settings['dimension_number']
estimators = []
os.chdir(basedir+'/train_dir')
# for i in range(len(X_train)):
# if len(X_train[i]) != len(X_train[0]):
# print(X_train[i])
################################################
## Scale features ##
################################################
if scale_features == True:
import feature_scale as fsc_
for i in range(len(default_scalers)):
feature_scaler=default_scalers[i]
print(feature_scaler.upper())
scaler_model=fsc_.feature_scale(feature_scaler, X_train, y_train)
# print(len(scaler_model))
estimators.append((feature_scaler, scaler_model))
################################################
## Reduce dimensions ##
################################################
if reduce_dimensions == True:
import feature_reduce as fre_
for i in range(len(default_reducers)):
feature_reducer=default_reducers[i]
print(feature_reducer.upper()+' - %s features'%(str(component_num)))
dimension_model=fre_.feature_reduce(feature_reducer, X_train, y_train, component_num)
# print(len(dimension_model))
estimators.append((feature_reducer, dimension_model))
################################################
## Feature selection ##
################################################
if select_features == True:
import feature_select as fse_
for i in range(len(default_selectors)):
feature_selector=default_selectors[i]
print(feature_selector.upper()+' - %s features'%(str(feature_num)))
selection_model=fse_.feature_select(feature_selector, X_train, y_train, feature_num)
estimators.append((feature_selector, selection_model))
print(estimators)
model=Pipeline(estimators)
# make all train and test data into binary labels
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html
if train_type == 'c':
le=preprocessing.LabelEncoder()
le.fit(y_train)
y_train=le.transform(y_train)
y_test=le.transform(y_test)
'''
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"])
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
'''
model=model.fit(X_train, y_train)
print(len(X_test))
X_test=model.transform(X_test)
print(len(X_test))
print('transformed training size')
print(X_test[0])
# pickle me timbers
os.chdir(curdir)
print(os.getcwd())
try:
os.chdir('%s_transformer'%(problem_type))
except:
os.mkdir('%s_transformer'%(problem_type))
os.chdir('%s_transformer'%(problem_type))
# get filename / create a unique file name
filename=train_type+'_'+common_name
# only add names in if True
if scale_features == True:
for i in range(len(default_scalers)):
filename=filename+'_'+default_scalers[i]
if reduce_dimensions == True:
for i in range(len(default_reducers)):
filename=filename+'_'+default_reducers[i]
if select_features == True:
for i in range(len(default_selectors)):
filename=filename+'_'+default_selectors[i]
model_file=filename+'.pickle'
le_file=filename+'_encoder.pickle'
json_file=filename+'.json'
# create model
modelfile=open(model_file,'wb')
pickle.dump(model, modelfile)
modelfile.close()
# save encoder (to decode classes into the future)
if train_type == 'c':
modelfile=open(le_file,'wb')
pickle.dump(le, modelfile)
modelfile.close()
# write json file
if train_type=='c':
data={'estimators': str(estimators),
'settings': settings,
'classes': np.array(list(set(y_test))).tolist(),
'sample input X': X_train[0],
'sample input Y': int(y_train[0]),
'sample transformed X': X_test[0].tolist(),
'sample transformed y': int(y_train[0]),
}
else:
data={'estimators': str(estimators),
'settings': settings,
'classes': np.array(list(set(y_test))).tolist(),
'sample input X': X_train[0].tolist(),
'sample input Y': float(y_train[0]),
'sample transformed X': X_test[0].tolist(),
'sample transformed y': float(y_train[0]),
}
# for testing purposes
# data_list=list(data)
# for i in range(len(data_list)):
# print(data_list[i])
# print(type(data[data_list[i]]))
# if str(type(data[data_list[i]])) == "<class 'list'>":
# for j in range(len(data[data_list[i]])):
# print(type(data[data_list[i]][j]))
jsonfile=open(json_file,'w')
json.dump(data,jsonfile)
jsonfile.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/preprocessing/feature_select.py | preprocessing/feature_select.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _
| ___ \ (_)
| |_/ / __ ___ _ __ _ __ ___ ___ ___ ___ ___ _ _ __ __ _
| __/ '__/ _ \ '_ \| '__/ _ \ / __/ _ \/ __/ __| | '_ \ / _` |
| | | | | __/ |_) | | | (_) | (_| __/\__ \__ \ | | | | (_| |
\_| |_| \___| .__/|_| \___/ \___\___||___/___/_|_| |_|\__, |
| | __/ |
|_| |___/
___ ______ _____
/ _ \ | ___ \_ _|
/ /_\ \| |_/ / | |
| _ || __/ | |
| | | || | _| |_
\_| |_/\_| \___/
Select features as a part of Allie's Preprocessing API.
'''
import json, os, sys
import numpy as np
# from nni.feature_engineering.gradient_selector import FeatureGradientSelector, GBDTSelector
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest, chi2, GenericUnivariateSelect, SelectFwe, SelectFpr, SelectFdr, SelectPercentile
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import VarianceThreshold
def prev_dir(directory):
    """Return the parent of *directory* by dropping its last '/'-separated
    component (e.g. '/Users/jim/desktop' -> '/Users/jim').

    Behaves like os.path.dirname() for plain '/'-separated paths; returns
    '' for a single-component input, matching the original loop.
    """
    # '/'.join over all-but-the-last component reproduces the original
    # accumulate-in-a-loop behaviour exactly, including '' for one part.
    return '/'.join(directory.split('/')[:-1])
def feature_select(feature_selector, X_train, y_train, feature_number):
    """Build (but do not fit) a scikit-learn feature-selection transformer.

    Parameters
    ----------
    feature_selector : str
        One of 'chi', 'fdr', 'fpr', 'fwe', 'lasso', 'percentile', 'rfe',
        'univariate', 'variance'.
    X_train, y_train :
        Training data; not used here (fitting happens later inside the
        Pipeline), kept for API symmetry with the other preprocessors.
    feature_number : int
        Number of features to keep (used by 'chi', 'rfe', 'univariate').

    Returns
    -------
    An unfitted scikit-learn selector/transformer.

    Raises
    ------
    ValueError
        For an unrecognized *feature_selector* (previously this fell
        through and raised UnboundLocalError at the return statement).
    """
    if feature_selector == 'chi':
        # chi-squared scores require non-negative features (booleans,
        # frequencies, term counts); keeps the feature_number features most
        # dependent on the class labels.
        # http://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.chi2.html
        # Bug fix: pass k as a keyword -- SelectKBest's signature is
        # (score_func, *, k=10) in current scikit-learn, so a positional
        # second argument is rejected.
        model = SelectKBest(chi2, k=feature_number)
    ####################################################################################
    # NNI-based feature selectors can be added here in the future:
    # elif feature_selector == 'gradient':
    #     model = FeatureGradientSelector(n_features=feature_number)
    # elif feature_selector == 'gbdt':
    #     model = GBDTSelector(n_features=feature_number)
    ####################################################################################
    elif feature_selector == 'fdr':
        # Benjamini-Hochberg false-discovery-rate control on chi2 p-values;
        # alpha bounds the expected FDR.
        model = SelectFdr(chi2, alpha=0.01)
    elif feature_selector == 'fpr':
        # Keep p-values below alpha under a false-positive-rate test,
        # controlling the total number of false detections.
        model = SelectFpr(chi2, alpha=0.01)
    elif feature_selector == 'fwe':
        # Family-wise error rate control.
        # https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SelectFwe.html
        model = SelectFwe(chi2, alpha=0.01)
    elif feature_selector == 'lasso':
        # L1-penalized linear SVC drives uninformative coefficients to
        # zero; SelectFromModel keeps the surviving features.
        # http://scikit-learn.org/stable/modules/feature_selection.html#l1-feature-selection
        lsvc = LinearSVC(C=0.01, penalty="l1", dual=False)
        model = SelectFromModel(lsvc)
    elif feature_selector == 'percentile':
        # Keep the top 10% of features by chi2 score.
        model = SelectPercentile(chi2, percentile=10)
    elif feature_selector == 'rfe':
        # Recursive feature elimination: repeatedly drop the weakest
        # attribute using a linear SVR as the scoring estimator.
        estimator = SVR(kernel="linear")
        model = RFE(estimator, n_features_to_select=feature_number, step=1)
    elif feature_selector == 'univariate':
        model = GenericUnivariateSelect(chi2, mode='k_best', param=feature_number)
    elif feature_selector == 'variance':
        # Drop near-constant (low-variance) features.
        model = VarianceThreshold(threshold=(.8 * (1 - .8)))
    else:
        raise ValueError('unknown feature_selector: %r' % (feature_selector,))
    return model
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
'''
Load a previously saved preprocessing transformer (a pickled scikit-learn
pipeline) and print how it transforms the sample input vector recorded in
the matching .json metadata file.

Usage: python3 load_transformer.py <problemtype> <picklefile>

Example transformation (raw feature vector --> transformed array):

[58.0, 13.0, 36.0, 41.0, 128.0, 15.0, 16.0, 40.0, 162.0, 1.0, 14.0,
30.0, 29.0, 69.0, 80.0, 14.0, 1.0, 58.0, 65.0, 80.0, 60.0, 7.0,
12.0, 5.0, 18.0, 4.0, 139.0, 29.0, 61.0, 25.0, 24.0, 59.0, 0.0,
7.0, 180.0, 0.0, 0.0, 0.0, 6.0, 619.0, 159.0, 1.0, 0.0, 1.0, 5.0,
0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 19.0, 12.0, 0.0, 0.0, 59.0, 36.0,
0.0, 0.0, 0.0, -0.05375744047619047, 0.4927827380952381, 32.0]
--->
array([[-1.4530485 , -0.03725366,  0.53727615,  0.51361116,  0.26511576,
0.79677552,  0.01716853,  0.77574926,  0.24912955, -0.64725461,
0.01852962, -0.02733052]])
'''
import sys, os, pickle, json, time
import numpy as np
# command-line arguments: problem type prefix and the pickled model's
# filename; the metadata .json shares the pickle's basename
# ('.pickle' is 7 characters, hence the [0:-7] slice)
problemtype=sys.argv[1]
picklefile=sys.argv[2]
jsonfile=picklefile[0:-7]+'.json'
# load the pickled transformer from the '<problemtype>_transformer' folder
os.chdir(problemtype+'_transformer')
g=pickle.load(open(picklefile,'rb'))
# load the corresponding JSON metadata written alongside the pickle
h=json.load(open(jsonfile))
# the sample input vector stored at training time
sample=h['sample input X']
# print the before/after of the transformation as a demo
print('----------------------------------')
print('-%-$-V-|-%-$-V-|-%-$-V-|-%-$-V-|-%-$-')
print(' TRANSFORMATION ')
print('-%-$-V-|-%-$-V-|-%-$-V-|-%-$-V-|-%-$-')
print('----------------------------------')
print(sample)
print('-->')
# reshape to a 2-D (1, n_features) array as sklearn transformers expect
print(g.transform(np.array(sample).reshape(1,-1)))
print('----------------------------------')
time.sleep(3)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/seed_files.py | tests/seed_files.py |
'''
seed_files.py audio
^^ seed files from command line
'''
import sys, uuid, os, shutil, time, random
import sounddevice as sd
import pandas as pd
import soundfile as sf
import pyautogui, markovify
def audio_record(filename, duration, fs, channels):
    # Record `duration` seconds of audio at sample rate `fs` with the
    # given channel count, then write the capture to `filename`.
    print('---------------')
    print('recording audio...')
    frame_count = int(duration * fs)
    capture = sd.rec(frame_count, samplerate=fs, channels=channels)
    # block until the recording buffer is fully filled
    sd.wait()
    sf.write(filename, capture, fs)
    print('done recording %s'%(filename))
    print('---------------')
def text_record(filename, text_model):
    """Write five sentences generated by *text_model* to *filename*.

    Parameters
    ----------
    filename : str
        Path of the text file to create (overwritten if it exists).
    text_model :
        Any object exposing make_sentence() (e.g. a markovify.Text
        model).  make_sentence() may return None when the chain cannot
        produce a sentence; such results are skipped.

    Bug fix: the original passed a possible None straight to write(),
    raising TypeError; it also never closed the file on error (no
    context manager).
    """
    with open(filename, 'w') as textfile:
        for _ in range(5):
            sentence = text_model.make_sentence()
            # markovify returns None on failure -- only write real strings
            if sentence is not None:
                textfile.write(sentence)
def image_record(filename):
    # Capture a full-screen screenshot and save it to `filename`
    # (pyautogui infers the image format from the file extension).
    pyautogui.screenshot(filename)
def video_record(filename, test_dir, train_dir):
    """Record a short (3 second) video via the helper script in
    <test_dir>/helpers/video_record, saving output into *train_dir*.

    Bug fix: the original saved the cwd into `cur_dur` but restored the
    module-global `cur_dir`, so the process ended up in the wrong
    directory whenever the caller's cwd differed from the module's
    startup directory.  The saved cwd is now restored unconditionally.
    """
    print('---------------')
    print('recording video...')
    saved_dir = os.getcwd()
    try:
        os.chdir(test_dir+'/helpers/video_record')
        # record.py arguments: output filename, duration (s), target dir
        os.system('python3 record.py %s 3 %s'%(filename, train_dir))
    finally:
        # always return to the directory we were called from
        os.chdir(saved_dir)
    print('---------------')
def csv_record(filename, newfilename):
    """Derive a new CSV from *filename* by truncating it to a random
    number of rows (and, half the time, element-wise doubling the cell
    values), writing the result to *newfilename*.

    Bug fix: the truncation length was drawn from len(filename) -- the
    length of the file NAME string -- instead of the number of rows in
    the loaded CSV.
    """
    csvfile = pd.read_csv(filename)
    # draw the new length from the actual row count, not the filename
    # length; max(..., 0) guards an empty input CSV
    filelength = len(csvfile)
    newlength = random.randint(0, max(filelength-1, 0))
    # keep only the first `newlength` rows
    g = csvfile.iloc[0:newlength]
    # NOTE(review): `g+g` doubles the cell values element-wise (it does
    # NOT append rows); kept as-is since the seeded data only needs to
    # vary between runs.
    if random.randint(0, 1) == 0:
        g = g + g
    g.to_csv(newfilename)
def prev_dir(directory):
    # Strip the final '/'-separated component, returning the parent path
    # (e.g. '/a/b/c' -> '/a/b'); a single-component input yields ''.
    parts = directory.split('/')
    parent = ''
    for index, part in enumerate(parts[:-1]):
        parent = part if index == 0 else parent + '/' + part
    return parent
# ----------------------------------------------------------------------
# command-line driver: seed the training directory with sample files of
# the requested type ('audio', 'text', 'image', 'video' or 'csv')
# ----------------------------------------------------------------------
filetype=sys.argv[1]
train_dir=sys.argv[2]
cur_dir=os.getcwd()
# ensure the training directory exists; either way we end up back in
# cur_dir afterwards
try:
    os.chdir(train_dir)
except:
    os.mkdir(train_dir)
os.chdir(cur_dir)
# prevdir=prev_dir(cur_dir)
# prevdir=os.getcwd()

if filetype == 'audio':
    '''
    sample command in terminal:
    python3 seed_files.py audio /Users/jimschwoebel/allie/train_dir/one
    '''
    # pick the canned audio dataset matching the target directory name
    # NOTE(review): data_dir is never assigned if train_dir ends with
    # neither 'one' nor 'two' -- os.listdir below would raise NameError.
    if train_dir.endswith('one'):
        data_dir=cur_dir+'/helpers/audio_data/one'
    elif train_dir.endswith('two'):
        data_dir=cur_dir+'/helpers/audio_data/two'
    listdir=os.listdir(data_dir)
    # print(data_dir)
    # option 1 - copy test files
    # --------------------------
    for i in range(len(listdir)):
        if listdir[i][-4:]=='.wav':
            print(listdir[i])
            shutil.copy(data_dir+'/'+listdir[i], train_dir+'/'+listdir[i])
    # option 2 - record data yourself (must be non-silent data)
    # --------------------------
    # for i in range(20):
    #     filename=str(uuid.uuid4())+'.wav'
    #     audio_record(filename, 1, 16000, 1)

elif filetype == 'text':
    '''
    python3 seed_files.py text /Users/jimschwoebel/allie/train_dir/one
    '''
    # Get raw text as string (the Brother's Karamazov)
    with open(cur_dir+'/helpers/text.txt') as f:
        text = f.read()
    # Build the markov model used to generate random sentences.
    text_model = markovify.Text(text)
    os.chdir(train_dir)
    # write 20 uniquely-named generated text files
    for i in range(20):
        filename=str(uuid.uuid4()).replace('-','_')+'.txt'
        text_record(filename, text_model)

elif filetype == 'image':
    '''
    python3 seed_files.py image /Users/jimschwoebel/allie/train_dir/one
    '''
    # take 20 random screenshots with pyscreenshot
    os.chdir(train_dir)
    for i in range(20):
        filename=str(uuid.uuid4()).replace('-','_')+'.png'
        image_record(filename)

elif filetype == 'video':
    '''
    python3 seed_files.py video /Users/jimschwoebel/allie/train_dir/one
    '''
    # make 20 random videos with screenshots
    os.chdir(train_dir)
    for i in range(20):
        filename=str(uuid.uuid4()).replace('-','_')+'.avi'
        video_record(filename, cur_dir, train_dir)

elif filetype == 'csv':
    '''
    python3 seed_files.py csv /Users/jimschwoebel/allie/train_dir/one
    '''
    # prepopulate 20 random csv files derived from the bundled test CSV
    filename='test_csv.csv'
    shutil.copy(cur_dir+'/'+filename, train_dir+'/'+filename)
    os.chdir(train_dir)
    for i in range(20):
        newfilename=str(uuid.uuid4())+'.csv'
        csv_record(filename, newfilename)
    # remove the seed copy, leaving only the generated files
    os.remove(filename)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/unit_test.py | tests/unit_test.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
|_ _| | | / _ \ | ___ \_ _|
| | ___ ___| |_ / /_\ \| |_/ / | |
| |/ _ \/ __| __| | _ || __/ | |
| | __/\__ \ |_ | | | || | _| |_
\_/\___||___/\__| \_| |_/\_| \___/
Use this script for unit testing of Allie's functionality.
In particular:
- class test_dependencies(unittest.TestCase)
tests for modules and brew installations (FFmpeg and SoX).
- class test_cleaning(unittest.TestCase)
tests ability to clean files via cleaning scripts (mostly de-duplication, will expand in future).
- class test_augmentation(unittest.TestCase)
tests ability to augment files via augmentation scripts (in ./datasets/) directory.
- class test_features(unittest.TestCase)
tests ability to featurize files via default_featurizers.
- class test_transcription(unittest.TestCase)
tests ability to transcribe files.
- class test_training(unittest.TestCase)
tests ability to train machine learning models (classification and regression) with all settings.
- class test_preprocessing(unittest.TestCase)
tests ability to create transformations with the transform.py script (for model training).
- class test_loading(unittest.TestCase)
tests ability to load model files and make predictions via model directory (via test files / load_dir / models trained).
- class test_visualization(unittest.TestCase)
tests ability to visualize classification problems through the visualize.py script.
Note that these unit tests are contextual, meaning that only the settings specified
in settings.json are tested. This makes it easy to quickly test whether or not your
current computer and operating system can handle new settings that you specify within
Allie's core API.
'''
import unittest, os, shutil, time, uuid, random, json, pickle
import numpy as np
import pandas as pd
###############################################################
## HELPER FUNCTIONS
## Below are some helper functions to reduce code redundancy
## During the unit testing process.
###############################################################
def prev_dir(directory):
    '''
    take in a directory and get the next innermost directory
    in the tree structure.

    For example,

    directory = /Users/jim/desktop
    prev_dir(directory) --> /Users/jim
    '''
    # dropping the last '/'-separated component gives the parent path;
    # a single-component input yields '' just like the original loop
    return '/'.join(directory.split('/')[:-1])
def clean_file(directory, clean_dir, cur_dir, train_dir, file):
    '''
    take in a few directories and output a clean file for audio,
    text, image, and video files.

    test_audio.wav --> test_audio.wav (clean)

    Returns (b, msg): b is True when, after running the cleaning script,
    the scratch folder holds exactly one file (two for .csv inputs).
    '''
    # stage a scratch folder <train_dir>/<directory> containing only `file`
    os.chdir(train_dir)
    try:
        os.mkdir(directory)
    except:
        # folder left over from a previous run: recreate it empty
        shutil.rmtree(directory)
        os.mkdir(directory)
    os.chdir(directory)
    shutil.copy(cur_dir+'/'+file, train_dir+'/'+directory+'/'+file)
    # run the type-specific cleaning script against the scratch folder
    os.chdir(clean_dir+'/'+directory)
    os.system('python3 clean.py %s'%(train_dir+'/'+directory))
    os.chdir(train_dir+'/'+directory)
    listdir=os.listdir()
    b=False
    # success criterion: exactly one file remains (non-CSV), or two files
    # for a .csv input
    if len(listdir) == 1 and file.endswith('.csv') == False:
        b=True
    elif len(listdir) == 2 and file.endswith('.csv') == True:
        b=True
    # remove temp directory
    os.chdir(train_dir)
    shutil.rmtree(train_dir+'/'+directory)
    msg='failed cleaning process, file does not exist in directory'
    return b, msg
def augment_file(directory, augment_dir, cur_dir, train_dir, file):
    '''
    take in a few directories and output augmented files for audio,
    text, image, and video files.

    test_audio.wav --> test_audio.wav + tsaug_test_audio.wav

    Typically augmentation strategies add 2x more data to the original
    dataset.

    Returns (b, msg): b is True when the augmenter produced at least one
    additional file in the scratch folder.
    '''
    # stage a scratch folder <train_dir>/<directory> containing only `file`
    os.chdir(train_dir)
    try:
        os.mkdir(directory)
    except:
        # folder left over from a previous run: recreate it empty
        shutil.rmtree(directory)
        os.mkdir(directory)
    shutil.copy(cur_dir+'/'+file, train_dir+'/'+directory+'/'+file)
    # run the type-specific augmentation script against the scratch folder
    os.chdir(augment_dir+'/'+directory)
    os.system('python3 augment.py %s'%(train_dir+'/'+directory))
    os.chdir(train_dir+'/'+directory)
    # remove temp directory
    listdir=os.listdir()
    b=False
    # more than one file means augmentation added something
    if len(listdir) > 1:
        b=True
    os.chdir(train_dir)
    shutil.rmtree(train_dir+'/'+directory)
    msg='failed augmentation, only one file exists in the directory'
    return b, msg
def featurize_file(features_dir, cur_dir, train_dir, file, sampletype, default_features):
    '''
    take in a file and output a featurized .JSON file using
    Allie internal Feature API.

    test.wav --> test.json, test.wav with features in test.json

    Returns (b, msg): b is True when every requested featurizer shows up
    in the output JSON; msg lists those that are missing.

    NOTE(review): unlike clean_file/augment_file, the scratch folder is
    left in place when this function returns.
    '''
    # stage a scratch folder <train_dir>/<sampletype>_features
    directory='%s_features'%(sampletype)
    folder=train_dir+'/'+directory
    os.chdir(train_dir)
    try:
        os.mkdir(directory)
    except:
        # folder left over from a previous run: recreate it empty
        shutil.rmtree(directory)
        os.mkdir(directory)
    # put test audio file in directory
    shutil.copy(cur_dir+'/'+file, folder+'/'+file)
    os.chdir(features_dir+'/%s_features/'%(sampletype))
    features_list=default_features
    # run each requested featurizer over the scratch folder
    for i in range(len(features_list)):
        print('------------------------------')
        print('FEATURIZING - %s'%(features_list[i].upper()))
        print('------------------------------')
        os.system('python3 featurize.py %s %s'%(folder, features_list[i]))
    # now that we have the folder let's check if the array has all the features
    os.chdir(folder)
    gopen=open('test_%s.json'%(sampletype),'r')
    g=json.load(gopen)
    features=g['features'][sampletype]
    gopen.close()
    test_features=list(features)
    # the output must carry exactly the requested feature sets
    if test_features == features_list:
        b=True
    else:
        b=False
    # report exactly which featurizers are missing from the output
    notcount=list()
    for i in range(len(features_list)):
        if features_list[i] not in test_features:
            notcount.append(features_list[i])
    msg=str(notcount) + ' failed during featurization'
    return b, msg
def transcribe_file(train_dir, file, sampletype, default_transcript):
    # Featurize `file` (which also triggers transcription) in a scratch
    # folder and check that the requested default transcriber produced a
    # transcript entry.  Returns (b, msg) with b True on success.
    # NOTE(review): relies on the module-level globals `cur_dir` and
    # `features_dir` rather than receiving them as parameters.
    os.chdir(train_dir)
    directory='%s_transcription'%(sampletype)
    folder=train_dir+'/'+directory
    os.chdir(train_dir)
    try:
        os.mkdir(directory)
    except:
        # folder left over from a previous run: recreate it empty
        shutil.rmtree(directory)
        os.mkdir(directory)
    # put test audio file in directory
    shutil.copy(cur_dir+'/'+file, folder+'/'+file)
    os.chdir(features_dir+'/%s_features/'%(sampletype))
    os.system('python3 featurize.py %s'%(folder))
    # now that we have the folder let's check if the array has all the features
    os.chdir(folder)
    g=json.load(open('test_%s.json'%(sampletype)))
    transcripts=list(g['transcripts'][sampletype])
    # success if the expected transcriber name appears in the output
    if default_transcript[0] in transcripts:
        msg='success'
        b=True
    else:
        msg='failure'
        b=False
    # clean up the scratch folder
    os.chdir(train_dir)
    shutil.rmtree(directory)
    return b, msg
def model_predict(filetype, testfile, loadmodel_dir, load_dir):
    # Stage the bundled sample models for `filetype` into the models
    # directory, copy `testfile` into load_dir, run load.py to produce
    # predictions, then verify a matching .json prediction file appeared.
    # Returns (b, msg); relies on the module-level global `cur_dir`.
    # copy machine learning model into image_model dir
    os.chdir(cur_dir+'/helpers/models/%s_models/'%(filetype))
    listdir=os.listdir()
    temp=os.getcwd()
    tempfiles=list()
    # ensure <loadmodel_dir>/<filetype>_models exists
    os.chdir(loadmodel_dir)
    if '%s_models'%(filetype) not in os.listdir():
        os.mkdir('%s_models'%(filetype))
    os.chdir(temp)
    # copy audio machine learning model into directory (one_two)
    tempfiles=list()
    for i in range(len(listdir)):
        try:
            shutil.copytree(temp+'/'+listdir[i], loadmodel_dir+'/%s_models/'%(filetype)+listdir[i])
        except:
            # model already present from a previous run
            pass
        tempfiles.append(listdir[i])
    # copy file in load_dir
    shutil.copy(cur_dir+'/'+testfile, load_dir+'/'+testfile)
    # copy machine learning models into proper models directory
    # NOTE(review): this second loop repeats the copy above and rebuilds
    # tempfiles with the same names -- it appears redundant.
    os.chdir(cur_dir+'/helpers/models/%s_models/'%(filetype))
    listdir=os.listdir()
    # copy audio machine learning model into directory (one_two)
    tempfiles=list()
    for i in range(len(listdir)):
        try:
            shutil.copytree(temp+'/'+listdir[i], loadmodel_dir+'/%s_models/'%(filetype)+listdir[i])
        except:
            pass
        tempfiles.append(listdir[i])
    # run the prediction driver
    os.chdir(loadmodel_dir)
    os.system('python3 load.py')
    os.chdir(load_dir)
    os.chdir(load_dir)
    listdir=os.listdir()
    b=False
    # success = a sample file of the right extension has a matching .json
    for i in range(len(listdir)):
        if filetype == 'audio':
            if listdir[i].endswith('.wav') and listdir[i][0:-4]+'.json' in listdir:
                b=True
                break
        elif filetype == 'text':
            if listdir[i].endswith('.txt') and listdir[i][0:-4]+'.json' in listdir:
                b=True
                break
        elif filetype == 'image':
            if listdir[i].endswith('.png') and listdir[i][0:-4]+'.json' in listdir:
                b=True
                break
        elif filetype == 'video':
            if listdir[i].endswith('.mp4') and listdir[i][0:-4]+'.json' in listdir:
                b=True
                break
    # now remove all the temp files
    os.chdir(loadmodel_dir+'/%s_models'%(filetype))
    for i in range(len(tempfiles)):
        shutil.rmtree(tempfiles[i])
    msg = filetype + ' model prediction failed.'
    return b, msg
###############################################################
##                    INITIALIZATION                         ##
###############################################################

# initialize variables for the test
# this file lives in <repo>/tests, so prevdir is the repository root and
# the sibling working directories all hang off it
cur_dir=os.getcwd()
prevdir= prev_dir(cur_dir)
load_dir = prevdir+'/load_dir'
train_dir = prevdir + '/train_dir'
model_dir = prevdir+ '/training'
features_dir=prevdir+'/features'
loadmodel_dir = prevdir+'/models'
clean_dir=prevdir+'/cleaning/'
augment_dir=prevdir+'/augmentation'
test_dir=prevdir+'/tests'
visualization_dir=prevdir+'/visualize'
preprocessing_dir=prevdir+'/preprocessing'

# settings loaded from the repo-level settings.json; the values below are
# captured at import time so tearDown methods can restore them
settings=json.load(open(prevdir+'/settings.json'))
clean_data=settings['clean_data']
augment_data=settings['augment_data']

# transcript settings
default_audio_transcript=settings['default_audio_transcriber']
default_image_transcript=settings['default_image_transcriber']
default_text_transcript=settings['default_text_transcriber']
default_video_transcript=settings['default_video_transcriber']
default_csv_transcript=settings['default_csv_transcriber']
transcribe_audio=settings['transcribe_audio']
transcribe_text=settings['transcribe_text']
transcribe_image=settings['transcribe_image']
transcribe_video=settings['transcribe_video']
transcribe_csv=settings['transcribe_csv']

# feature settings
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']

# cleaning settings
default_audio_cleaners=settings['default_audio_cleaners']
default_text_cleaners=settings['default_text_cleaners']
default_image_cleaners=settings['default_image_cleaners']
default_video_cleaners=settings['default_video_cleaners']
default_csv_cleaners=settings['default_csv_cleaners']

# augmentation settings
default_audio_augmenters=settings['default_audio_augmenters']
default_text_augmenters=settings['default_text_augmenters']
default_image_augmenters=settings['default_image_augmenters']
default_video_augmenters=settings['default_video_augmenters']
default_csv_augmenters=settings['default_csv_augmenters']

# preprocessing settings
select_features=settings['select_features']
reduce_dimensions=settings['reduce_dimensions']
scale_features=settings['scale_features']
default_scaler=settings['default_scaler']
default_feature_selector=settings['default_feature_selector']
default_dimensionality_reducer=settings['default_dimensionality_reducer']
dimension_number=settings['dimension_number']
feature_number=settings['feature_number']

# other settings for training scripts
training_scripts=settings['default_training_script']
model_compress=settings['model_compress']

# per-type model directories used by the load tests
audiodir=loadmodel_dir+'/audio_models'
textdir=loadmodel_dir+'/text_models'
imagedir=loadmodel_dir+'/image_models'
videodir=loadmodel_dir+'/video_models'
csvdir=loadmodel_dir+'/csv_models'
###############################################################
## UNIT TESTS ##
###############################################################
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_dependencies(unittest.TestCase):
    '''
    DEPENDENCY TESTS

    Verifies the brew-installed command-line tools (SoX and FFmpeg) work
    by exercising them on the bundled test audio file.
    '''
    def test_sox(self):
        # merging the test file with itself exercises the sox binary
        os.chdir(cur_dir)
        os.system('sox test_audio.wav test_audio.wav test2.wav')
        produced = 'test2.wav' in os.listdir()
        self.assertEqual(True, produced)

    def test_c_ffmpeg(self):
        # convert wav -> mp3, removing any stale output first
        os.chdir(cur_dir)
        if 'test_audio.mp3' in os.listdir():
            os.remove('test_audio.mp3')
        os.system('ffmpeg -i test_audio.wav test_audio.mp3 -y')
        converted = 'test_audio.mp3' in os.listdir()
        self.assertEqual(True, converted)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_cleaning(unittest.TestCase):
    '''
    CLEANING API TESTS

    Runs each type-specific cleaning script (de-duplication, etc.) on a
    single sample file and asserts it succeeds.
    '''
    def test_audio_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = clean_file('audio_cleaning', clean_dir, cur_dir, train_dir, 'test_audio.wav')
        self.assertEqual(True, ok)

    def test_text_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = clean_file('text_cleaning', clean_dir, cur_dir, train_dir, 'test_text.txt')
        self.assertEqual(True, ok)

    def test_image_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = clean_file('image_cleaning', clean_dir, cur_dir, train_dir, 'test_image.png')
        self.assertEqual(True, ok)

    def test_video_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = clean_file('video_cleaning', clean_dir, cur_dir, train_dir, 'test_video.mp4')
        self.assertEqual(True, ok)

    def test_csv_clean(self, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = clean_file('csv_cleaning', clean_dir, cur_dir, train_dir, 'test_csv.csv')
        self.assertEqual(True, ok)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_augmentation(unittest.TestCase):
    '''
    AUGMENTATION API TESTS

    Runs each type-specific augmentation script on a single sample file
    and asserts new files were generated.
    '''
    def test_audio_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = augment_file('audio_augmentation', augment_dir, cur_dir, train_dir, 'test_audio.wav')
        self.assertEqual(True, ok)

    def test_text_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = augment_file('text_augmentation', augment_dir, cur_dir, train_dir, 'test_text.txt')
        self.assertEqual(True, ok)

    def test_image_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = augment_file('image_augmentation', augment_dir, cur_dir, train_dir, 'test_image.png')
        self.assertEqual(True, ok)

    def test_video_augment(self, augment_dir=augment_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = augment_file('video_augmentation', augment_dir, cur_dir, train_dir, 'test_video.mp4')
        self.assertEqual(True, ok)

    def test_csv_augment(self, augment_dir=augment_dir, clean_dir=clean_dir, train_dir=train_dir, cur_dir=cur_dir):
        ok, msg = augment_file('csv_augmentation', augment_dir, cur_dir, train_dir, 'test_csv.csv')
        self.assertEqual(True, ok)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_features(unittest.TestCase):
    '''
    FEATURIZATION API TESTS

    Runs every default featurizer for each sample type and asserts all
    requested feature sets appear in the output JSON.
    '''
    def test_audio_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_audio_features=default_audio_features):
        ok, msg = featurize_file(features_dir, cur_dir, train_dir, 'test_audio.wav', 'audio', default_audio_features)
        self.assertEqual(True, ok, msg)

    def test_text_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_text_features=default_text_features):
        ok, msg = featurize_file(features_dir, cur_dir, train_dir, 'test_text.txt', 'text', default_text_features)
        self.assertEqual(True, ok, msg)

    def test_image_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_image_features=default_image_features):
        ok, msg = featurize_file(features_dir, cur_dir, train_dir, 'test_image.png', 'image', default_image_features)
        self.assertEqual(True, ok, msg)

    def test_video_features(self, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir, default_video_features=default_video_features):
        ok, msg = featurize_file(features_dir, cur_dir, train_dir, 'test_video.mp4', 'video', default_video_features)
        self.assertEqual(True, ok, msg)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_transcription(unittest.TestCase):
    '''
    TRANSCRIPTION API TESTS
    Tests the ability to transcribe audio, text, image, and video files.

    setUp() enables every transcription flag in settings.json; tearDown()
    restores the values captured at module load, so the suite leaves the
    configuration exactly as it found it.
    '''
    ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
    def setUp(self, prevdir=prevdir):
        # pin OpenCV to a version whose contrib modules support the
        # image/video transcription paths
        os.system('pip3 install opencv-python==3.4.2.16 opencv-contrib-python==3.4.2.16')
        os.chdir(prevdir)
        # context managers guarantee the settings file handle is closed
        # (the original used json.load(open(...)) and leaked the handle)
        with open('settings.json') as fp:
            settings = json.load(fp)
        settings['transcribe_audio'] = True
        settings['transcribe_text'] = True
        settings['transcribe_image'] = True
        settings['transcribe_videos'] = True
        settings['transcribe_csv'] = True
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def tearDown(self, prevdir=prevdir, transcribe_audio=transcribe_audio, transcribe_text=transcribe_text, transcribe_image=transcribe_image, transcribe_video=transcribe_video, transcribe_csv=transcribe_csv):
        # restore the transcription flags recorded before the test ran
        os.chdir(prevdir)
        with open('settings.json') as fp:
            settings = json.load(fp)
        settings['transcribe_audio'] = transcribe_audio
        settings['transcribe_text'] = transcribe_text
        settings['transcribe_image'] = transcribe_image
        settings['transcribe_videos'] = transcribe_video
        settings['transcribe_csv'] = transcribe_csv
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    # audio transcription
    def test_audio_transcription(self, default_audio_transcript=default_audio_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
        b, msg = transcribe_file(train_dir, 'test_audio.wav', 'audio', default_audio_transcript)
        self.assertEqual(True, b, msg)
    # text transcription
    def test_text_transcription(self, default_text_transcript=default_text_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
        b, msg = transcribe_file(train_dir, 'test_text.txt', 'text', default_text_transcript)
        self.assertEqual(True, b, msg)
    # image transcription
    def test_image_transcription(self, default_image_transcript=default_image_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
        b, msg = transcribe_file(train_dir, 'test_image.png', 'image', default_image_transcript)
        self.assertEqual(True, b, msg)
    # video transcription
    def test_video_transcription(self, default_video_transcript=default_video_transcript, features_dir=features_dir, train_dir=train_dir, cur_dir=cur_dir):
        b, msg = transcribe_file(train_dir, 'test_video.mp4', 'video', default_video_transcript)
        self.assertEqual(True, b, msg)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_training(unittest.TestCase):
    '''
    MODEL TRAINING API TESTS
    Tests all available training scripts and compression abilities.

    setUp() configures settings.json so model.py runs every training script
    with the optional preprocessing/augmentation stages disabled; tearDown()
    restores the values captured at module load.
    '''
    ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
    def setUp(self, prevdir=prevdir, training_scripts=training_scripts):
        # change settings.json to test all model scripts
        os.chdir(prevdir)
        # `with` guarantees the handles are closed (originals leaked on error)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['default_training_script'] = training_scripts
        settings["default_text_features"] = ["nltk_features"]
        settings['select_features'] = False
        settings['scale_features'] = False
        settings['reduce_dimensions'] = False
        settings['remove_outliers'] = True
        settings['visualize_data'] = False
        settings['clean_data'] = False
        settings['augment_data'] = False
        settings['model_compress'] = False
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def tearDown(self, textdir=textdir, prevdir=prevdir, training_scripts=training_scripts, clean_data=clean_data, augment_data=augment_data, model_compress=model_compress):
        # change settings.json back to the defaults captured at module load
        os.chdir(prevdir)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['default_training_script'] = training_scripts
        settings['clean_data'] = clean_data
        settings['augment_data'] = augment_data
        settings['model_compress'] = model_compress
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def test_training(self, cur_dir=cur_dir, train_dir=train_dir, model_dir=model_dir, clean_data=clean_data, augment_data=augment_data, test_dir=test_dir):
        # use text model for training arbitrarily because it's the fastest model training time.
        # note that the files here are already featurized to only test modeling
        # capability (and not featurization or other aspects of the Models API)
        os.chdir(train_dir)
        shutil.copytree(test_dir+'/helpers/model_test/one', os.getcwd()+'/one')
        shutil.copytree(test_dir+'/helpers/model_test/two', os.getcwd()+'/two')
        os.chdir(model_dir)
        # train a 2-class ('c' = classification) model named 'onetwo'
        os.system('python3 model.py text 2 c onetwo one two')
        os.chdir(train_dir)
        shutil.rmtree('one')
        shutil.rmtree('two')
        # now find the model; NOTE(review): `textdir` here is the module-level
        # global (in this class it is only bound as a default arg on tearDown)
        os.chdir(textdir)
        found = False
        # remove temporary files in the textdir
        for entry in os.listdir():
            if entry.find('onetwo') >= 0:
                found = True
                # the model artifact is a folder; remove it with shutil
                shutil.rmtree(entry)
                break
            else:
                os.remove(entry)
        self.assertEqual(True, found)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_loading(unittest.TestCase):
    '''
    LOADING API TESTS
    Note we have to do a loop here to end where the end is
    'audio.json' | 'text.json' | 'image.json' | 'video.json' | 'csv.json'
    this is because the files are renamed to not have conflicts.
    For example, if 'audio.wav' --> 'audio.json' and 'audio.mp4' --> 'audio.json',
    both would have a conflicting name and would overwrite each other.
    '''
    ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
    def setUp(self, prevdir=prevdir):
        # point each modality at the featurizer its canned test model expects
        os.chdir(prevdir)
        # `with` guarantees the handles are closed (originals leaked on error)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['default_audio_features'] = ['librosa_features']
        settings['default_text_features'] = ['nltk_features']
        settings['default_image_features'] = ['image_features']
        settings['default_video_features'] = ['video_features']
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def tearDown(self, default_audio_features=default_audio_features, default_text_features=default_text_features, default_image_features=default_image_features, default_video_features=default_video_features, default_csv_features=default_csv_features):
        # NOTE(review): `prevdir` is read from the module-level global here
        # (unlike setUp, it is not bound as a default argument)
        os.chdir(prevdir)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        # set features back to what they were before.
        settings['default_audio_features'] = default_audio_features
        settings['default_text_features'] = default_text_features
        settings['default_image_features'] = default_image_features
        settings['default_video_features'] = default_video_features
        settings['default_csv_features'] = default_csv_features
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def test_loadaudio(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
        b, msg = model_predict('audio', 'test_audio.wav', loadmodel_dir, load_dir)
        self.assertEqual(True, b)
    def test_loadtext(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
        b, msg = model_predict('text', 'test_text.txt', loadmodel_dir, load_dir)
        self.assertEqual(True, b)
    def test_loadimage(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
        b, msg = model_predict('image', 'test_image.png', loadmodel_dir, load_dir)
        self.assertEqual(True, b)
    def test_loadvideo(self, load_dir=load_dir, cur_dir=cur_dir, loadmodel_dir=loadmodel_dir):
        b, msg = model_predict('video', 'test_video.mp4', loadmodel_dir, load_dir)
        self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_preprocessing(unittest.TestCase):
    '''
    PREPROCESSING API TESTS
    Tests Allie's preprocessing functionality to reduce dimensionality,
    select features, and scale features.
    '''
    ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
    def setUp(self, prevdir=prevdir):
        # enable every preprocessing stage and pin the transformer choices
        # that test_createtransformer expects in the output filename
        os.chdir(prevdir)
        # `with` guarantees the handles are closed (originals leaked on error)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['select_features'] = True
        settings['reduce_dimensions'] = True
        settings['scale_features'] = True
        settings['default_scaler'] = ["standard_scaler"]
        settings['default_feature_selector'] = ["rfe"]
        # BUGFIX: the original wrote the misspelled key
        # 'default_dimensionionality_reducer'; tearDown restores (and the
        # expected artifact name implies) 'default_dimensionality_reducer'.
        settings['default_dimensionality_reducer'] = ["pca"]
        settings['dimension_number'] = 20
        settings['feature_number'] = 2
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def tearDown(self, prevdir=prevdir, select_features=select_features, reduce_dimensions=reduce_dimensions, scale_features=scale_features, default_scaler=default_scaler, default_feature_selector=default_feature_selector, default_dimensionality_reducer=default_dimensionality_reducer, dimension_number=dimension_number, feature_number=feature_number):
        # restore the preprocessing configuration captured at module load
        os.chdir(prevdir)
        with open('settings.json', 'r') as gopen:
            settings = json.load(gopen)
        settings['select_features'] = select_features
        settings['reduce_dimensions'] = reduce_dimensions
        settings['scale_features'] = scale_features
        settings['default_scaler'] = default_scaler
        settings['default_feature_selector'] = default_feature_selector
        settings['default_dimensionality_reducer'] = default_dimensionality_reducer
        settings['dimension_number'] = dimension_number
        settings['feature_number'] = feature_number
        with open('settings.json', 'w') as jsonfile:
            json.dump(settings, jsonfile)
    def test_createtransformer(self, preprocessing_dir=preprocessing_dir, test_dir=test_dir):
        # copy files into the train_dir; NOTE(review): `train_dir` here is the
        # module-level global, not a parameter of this method
        os.chdir(test_dir)
        try:
            shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
        except:
            shutil.rmtree(train_dir+'/one')
            shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
        try:
            shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
        except:
            shutil.rmtree(train_dir+'/two')
            shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
        os.chdir(preprocessing_dir)
        # call it using proper format
        os.system('python3 transform.py text c onetwo one two')
        # the run must have produced the expected transformer .json artifact
        b = False
        if 'text_transformer' in os.listdir():
            os.chdir('text_transformer')
            b = 'c_onetwo_standard_scaler_pca_rfe.json' in os.listdir()
        shutil.rmtree(train_dir+'/one')
        shutil.rmtree(train_dir+'/two')
        self.assertEqual(True, b)
    def test_loadtransformer(self, test_dir=test_dir, preprocessing_dir=preprocessing_dir):
        # stage a canned transformer into the preprocessing directory
        try:
            shutil.copytree(test_dir+'/helpers/text_transformer', preprocessing_dir+'/text_transformer/')
        except:
            shutil.rmtree(preprocessing_dir+'/text_transformer/')
            shutil.copytree(test_dir+'/helpers/text_transformer', preprocessing_dir+'/text_transformer/')
        # now actually convert and load data with this transformer
        os.chdir(preprocessing_dir+'/text_transformer/')
        with open('c_onetwo_standard_scaler_pca_rfe.pickle', 'rb') as fp:
            model = pickle.load(fp)
        with open('c_onetwo_standard_scaler_pca_rfe.json') as fp:
            jsonfile = json.load(fp)
        sample = jsonfile['sample input X']
        transformed_sample = jsonfile['sample transformed X']
        newsize = model.transform(np.array(sample).reshape(1, -1))
        # the freshly-transformed sample must have the same number of elements
        # as the reference transformation stored alongside the model
        b = np.size(newsize[0]) == np.size(np.array(transformed_sample))
        self.assertEqual(True, b)
##### ##### ##### ##### ##### ##### ##### ##### ##### #####
class test_visualization(unittest.TestCase):
    '''
    VISUALIZATION API TESTS
    Runs visualize.py on a small two-class text dataset and checks that the
    expected visualization artifacts are produced.
    '''
    ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
    def test_visualization(self, test_dir=test_dir, train_dir=train_dir, visualization_dir=visualization_dir):
        # stage the two featurized sample classes in the train directory
        os.chdir(test_dir)
        shutil.copytree(test_dir+'/helpers/model_test/one', train_dir+'/one')
        shutil.copytree(test_dir+'/helpers/model_test/two', train_dir+'/two')
        # run the visualizer from a clean slate
        os.chdir(visualization_dir)
        if 'visualization_session' in os.listdir():
            shutil.rmtree('visualization_session')
        os.system('python3 visualize.py text one two')
        passed = False
        if 'visualization_session' in os.listdir():
            os.chdir('visualization_session')
            produced = os.listdir()
            expected = ('clustering', 'feature_ranking', 'model_selection', 'classes.png')
            passed = all(artifact in produced for artifact in expected)
        # clean up the staged classes regardless of outcome
        os.chdir(train_dir)
        shutil.rmtree("one")
        shutil.rmtree("two")
        self.assertEqual(True, passed)
if __name__ == '__main__':
    # discover and run every test class defined above
    unittest.main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/test.py | tests/test.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
|_ _| | | / _ \ | ___ \_ _|
| | ___ ___| |_ / /_\ \| |_/ / | |
| |/ _ \/ __| __| | _ || __/ | |
| | __/\__ \ |_ | | | || | _| |_
\_/\___||___/\__| \_| |_/\_| \___/
Use this script for unit testing of Allie's functionality.
In particular:
- class test_dependencies(unittest.TestCase)
tests for modules and brew installations (FFmpeg and SoX).
- class test_cleaning(unittest.TestCase)
tests ability to clean files via cleaning scripts (mostly de-duplication, will expand in future).
- class test_augmentation(unittest.TestCase)
    tests ability to augment files via augmentation scripts (in the ./datasets/ directory).
- class test_features(unittest.TestCase)
tests ability to featurize files via default_featurizers.
- class test_transcription(unittest.TestCase)
tests ability to transcribe files.
- class test_training(unittest.TestCase)
tests ability to train machine learning models (classification and regression) with all settings.
- class test_preprocessing(unittest.TestCase)
tests ability to create transformations with the transform.py script (for model training).
- class test_loading(unittest.TestCase)
tests ability to load model files and make predictions via model directory (via test files / load_dir / models trained).
- class test_visualization(unittest.TestCase)
tests ability to visualize classification problems through the visualize.py script.
Note that these unit tests are contextual, meaning that only the settings specified
in settings.json are tested. This makes it easy to quickly test whether or not your
current computer and operating system can handle new settings that you specify within
Allie's core API.
'''
import os, time, shutil
def prev_dir(directory):
    '''Return `directory` with its final '/'-separated component removed
    (e.g. '/a/b/c' -> '/a/b'; a single component yields '').'''
    parts = directory.split('/')
    return '/'.join(parts[:-1])
def remove_temp_model():
    '''Delete every file in the current directory whose name starts with
    'one_two' (temporary model artifacts from the training tests).'''
    for entry in os.listdir():
        if entry.startswith('one_two'):
            os.remove(entry)
###############################################################
##                    GET FOLDER INFO.                       ##
###############################################################
# Resolve the suite's working directories relative to this script's
# location (tests/ lives one level below the repository root).
cur_dir = os.getcwd()
prevdir= prev_dir(cur_dir)
load_dir = prevdir+'/load_dir'
train_dir = prevdir + '/train_dir'
model_dir = prevdir+ '/training'
loadmodel_dir = prevdir+'/models'
# remove one and two directories if they exist in train_dir to allow for
# proper testing.
os.chdir(train_dir)
listdir=os.listdir()
if 'one' in listdir:
    shutil.rmtree('one')
if 'two' in listdir:
    shutil.rmtree('two')
os.chdir(cur_dir)
###############################################################
##                    RUN UNIT TESTS.                        ##
###############################################################
# run the suite in a subprocess so it executes with a fresh interpreter
os.system('python3 unit_test.py')
###############################################################
##                    REMOVE TEMP FILES                      ##
###############################################################
print('-----------------^^^-----------------------')
print('-------------^^^^---^^^^-------------------')
print('-----------CLEANUP TEMP FILES--------------')
print('---------^^^^^^^^^^^^^^^^^^^^^^------------')
os.chdir(cur_dir)
print('deleting temp files from FFmpeg and SoX tests')
print('-------------------------------------------')
try:
    os.remove('test2.wav')
except:
    print('test2.wav does not exist, cannot delete it.')
try:
    os.remove('test_audio.mp3')
except:
    print('test_audio.wav does not exist, cannot delete it.')
# now we can remove everything in load_dir
os.chdir(load_dir)
# remove generated artifacts so future featurizations / model predictions
# do not conflict with stale files
print('deleting temp files load_dir tests')
print('-------------------------------------------')
for name in os.listdir():
    if name.endswith(('.json', '.wav', '.png', '.txt', '.csv', '.mp4')):
        os.remove(name)
print('deleting temp model files (audio, text, image, and video)')
print('-------------------------------------------')
# drop temporarily-trained models from every per-modality model folder;
# a folder may be missing on a partial install, hence the try/except
for modality in ('audio', 'text', 'image', 'video', 'csv'):
    try:
        os.chdir(loadmodel_dir + '/' + modality + '_models')
        remove_temp_model()
    except:
        pass
os.chdir(train_dir)
# Remove every temporary featurization / transcription / augmentation folder
# produced during the run. Each removal is best-effort (the folder only
# exists if the corresponding test ran), so failures are ignored — this
# replaces fifteen copy-pasted try/except blocks in the original.
for folder in ('audio_features', 'text_features', 'image_features',
               'video_features', 'csv_features',
               'audio_transcription', 'text_transcription',
               'image_transcription', 'video_transcription',
               'csv_transcription',
               'audio_augmentation', 'image_augmentation',
               'text_augmentation', 'video_augmentation',
               'csv_augmentation'):
    try:
        shutil.rmtree(folder)
    except:
        pass
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/models/text_models/test_tpot_classifier/model/test_tpot_classifier.py | tests/helpers/models/text_models/test_tpot_classifier/model/test_tpot_classifier.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from sklearn.tree import DecisionTreeClassifier
from tpot.builtins import StackingEstimator
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
# Load the featurized samples/labels exported by the Models API; the context
# manager closes the file (the original json.load(open(...)) leaked it).
with open('test_tpot_classifier.json') as fp:
    g = json.load(fp)
tpot_data = np.array(g['labels'])
features = np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: 0.5428571428571429
exported_pipeline = make_pipeline(
    make_union(
        FunctionTransformer(copy),
        FunctionTransformer(copy)
    ),
    DecisionTreeClassifier(criterion="gini", max_depth=5, min_samples_leaf=5, min_samples_split=16)
)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
# context manager guarantees the pickle file is flushed and closed
with open('test_tpot_classifier.pickle', 'wb') as f:
    pickle.dump(exported_pipeline, f)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/models/video_models/test_tpot_classifier/model/test_tpot_classifier.py | tests/helpers/models/video_models/test_tpot_classifier/model/test_tpot_classifier.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
# Load the featurized samples/labels exported by the Models API; the context
# manager closes the file (the original json.load(open(...)) leaked it).
with open('test_tpot_classifier.json') as fp:
    g = json.load(fp)
tpot_data = np.array(g['labels'])
features = np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: 0.5285714285714286
exported_pipeline = DecisionTreeClassifier(criterion="entropy", max_depth=10, min_samples_leaf=9, min_samples_split=13)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
# context manager guarantees the pickle file is flushed and closed
with open('test_tpot_classifier.pickle', 'wb') as f:
    pickle.dump(exported_pipeline, f)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/models/image_models/test_tpot_classifier/model/test_tpot_classifier.py | tests/helpers/models/image_models/test_tpot_classifier/model/test_tpot_classifier.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import make_pipeline
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
# Load the featurized samples/labels exported by the Models API; the context
# manager closes the file (the original json.load(open(...)) leaked it).
with open('test_tpot_classifier.json') as fp:
    g = json.load(fp)
tpot_data = np.array(g['labels'])
features = np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: 0.5583333333333333
exported_pipeline = make_pipeline(
    PCA(iterated_power=5, svd_solver="randomized"),
    BernoulliNB(alpha=100.0, fit_prior=True)
)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
# context manager guarantees the pickle file is flushed and closed
with open('test_tpot_classifier.pickle', 'wb') as f:
    pickle.dump(exported_pipeline, f)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/models/audio_models/test_tpot_classifier/model/test_tpot_classifier.py | tests/helpers/models/audio_models/test_tpot_classifier/model/test_tpot_classifier.py | import numpy as np
import json, pickle
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from tpot.builtins import OneHotEncoder
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
# Load the featurized samples/labels exported by the Models API; the context
# manager closes the file (the original json.load(open(...)) leaked it).
with open('test_tpot_classifier.json') as fp:
    g = json.load(fp)
tpot_data = np.array(g['labels'])
features = np.array(g['data'])
training_features, testing_features, training_target, testing_target = \
            train_test_split(features, tpot_data, random_state=None)
# Average CV score on the training set was: 0.7757352941176471
exported_pipeline = make_pipeline(
    OneHotEncoder(minimum_fraction=0.2, sparse=False, threshold=10),
    ExtraTreesClassifier(bootstrap=False, criterion="entropy", max_features=0.25, min_samples_leaf=13, min_samples_split=9, n_estimators=100)
)
exported_pipeline.fit(training_features, training_target)
print('saving classifier to disk')
# context manager guarantees the pickle file is flushed and closed
with open('test_tpot_classifier.pickle', 'wb') as f:
    pickle.dump(exported_pipeline, f)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/video_record/setup.py | tests/helpers/video_record/setup.py | import os
os.system('pip3 install -r requirements.txt') | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/video_record/record.py | tests/helpers/video_record/record.py | import sys, os, cv2, time, readchar, datetime
import soundfile as sf
import sounddevice as sd
import numpy as np
from multiprocessing import Process
import ray, json
import subprocess32 as sp
import pyautogui, shutil, zipfile
from natsort import natsorted
# Remember the helper's starting directory so later steps can return to it.
curdir=os.getcwd()
# Work inside a clean ./temp directory; if one is left over from a previous
# run, wipe it and recreate it before chdir'ing in.
try:
    os.mkdir('temp')
    os.chdir('temp')
except:
    shutil.rmtree('temp')
    os.mkdir('temp')
    os.chdir('temp')
def zip(src, dst):
    '''Archive the contents of directory `src` into '<dst>.zip' (deflated),
    storing paths relative to `src`.

    NOTE: the name shadows the builtin zip(); kept for backward
    compatibility with existing callers.
    '''
    abs_src = os.path.abspath(src)
    # context manager guarantees the archive is finalized/closed even if
    # os.walk or zf.write raises (the original leaked the handle on error)
    with zipfile.ZipFile("%s.zip"%(dst), "w", zipfile.ZIP_DEFLATED) as zf:
        for dirname, subdirs, files in os.walk(src):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                # archive name is the path relative to src
                arcname = absname[len(abs_src) + 1:]
                print('zipping %s as %s'%(os.path.join(dirname, filename),arcname))
                zf.write(absname, arcname)
def calc_duration(vid_file_path):
    ''' Return the video's duration in seconds as a float.

    Asks ffprobe for JSON metadata, preferring the container-level
    'format' duration and falling back to the first stream that reports
    one. Raises Exception if ffprobe reports no duration at all.
    @vid_file_path : The absolute (full) path of the video file, string.
    '''
    def probe(vid_file_path):
        ''' Run ffprobe on the file and return its JSON output as a dict. '''
        if not isinstance(vid_file_path, str):
            # fixed typo in the original message ('Gvie'); the unreachable
            # `return` that followed the raise was removed
            raise Exception('Give ffprobe a full file path of the video')
        command = ["ffprobe",
                   "-loglevel", "quiet",
                   "-print_format", "json",
                   "-show_format",
                   "-show_streams",
                   vid_file_path
                   ]
        # sp is subprocess32, imported at module level
        pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)
        out, err = pipe.communicate()
        return json.loads(out)
    _json = probe(vid_file_path)
    if 'format' in _json:
        if 'duration' in _json['format']:
            return float(_json['format']['duration'])
    if 'streams' in _json:
        # commonly stream 0 is the video
        for s in _json['streams']:
            if 'duration' in s:
                return float(s['duration'])
    # nothing above returned: ffprobe gave us no usable duration
    raise Exception('I found no duration')
@ray.remote
def mouse(filename, duration):
    '''Sample the cursor position once per loop iteration and dump the list
    to mouse_<filename-stem>.json in the current directory; also returns it.

    NOTE(review): the loop body has no sleep, so this takes `duration`
    samples as fast as pyautogui allows rather than sampling over
    `duration` seconds; `deltat` is assigned but never used. Confirm the
    intended sampling rate before relying on this data.
    '''
    print('recording mouse movements')
    print('--> mouse_%s.json'%(filename[0:-4]))
    # get features from the mouse to detect activity
    deltat=.1
    pyautogui.PAUSE = 1
    pyautogui.FAILSAFE = True
    positions=list()
    #log 20 mouse movements
    for i in range(0, duration):
        curpos=pyautogui.position()
        positions.append(curpos)
    jsonfile=open('mouse_%s.json'%(filename[0:-4]),'w')
    data={'mouse_positions':positions}
    json.dump(data,jsonfile)
    jsonfile.close()
    return positions
@ray.remote
def keyboard(filename, duration):
    '''Capture keypresses for roughly `duration` seconds and write them to
    keyboard_<filename-stem>.json.

    readchar.readkey() blocks, so elapsed time is only re-checked after each
    keypress; a 15-second grace window below discards captures that ran far
    over the requested duration.
    '''
    print('recording keyboard')
    print('--> keyboard_%s'%(filename[0:-4]))
    # capture keyboard features
    def getch():
        ch=readchar.readkey()
        return ch
    charlist=list()
    start=time.time()
    end=time.time()
    while end-start<duration:
        end=time.time()
        charlist.append(getch())
    total_time=end-start
    if total_time > duration+15:
        # allow 15 second window after for typing activity
        pass
    else:
        # BUGFIX: the original dumped the undefined name `positions`
        # (a NameError at runtime); the captured keys live in `charlist`.
        with open('keyboard_%s.json'%(filename[0:-4]),'w') as jsonfile:
            json.dump({'keyboard': charlist}, jsonfile)
@ray.remote
def screen_record(filename, duration):
    '''Capture screenshots for `duration` seconds, stitch them into an .avi
    via ffmpeg concat, convert to .mp4, retime the result so it spans
    `duration` seconds, and move it into the caller's working directory.

    NOTE(review): the screenshot loop has no sleep, so the capture rate is
    however fast pyautogui.screenshot runs — confirm the "1 screen per
    second" comments below against actual behavior.
    '''
    curdir=os.getcwd()
    # function to write a video from an image for duration X
    def video_write(imagename, videoname, duration2):
        img=cv2.imread(imagename)
        height, width, layers = img.shape
        video=cv2.VideoWriter(videoname, -1, 1, (width, height))
        for i in range(duration2):
            video.write(cv2.imread(imagename))
        cv2.destroyAllWindows()
        video.release()
    # 1 screen per second
    newfilename=filename[0:-4]+'_screenshots.avi'
    print('making screenshots (.AVI)')
    print('--> screenshots.avi')
    foldername=filename[0:-4]+'_screenshots'
    # start from an empty screenshots folder (recreate if left over)
    try:
        os.mkdir(foldername)
        os.chdir(foldername)
    except:
        shutil.rmtree(foldername)
        os.mkdir(foldername)
        os.chdir(foldername)
    start_time=time.time()
    count=0
    while True:
        if time.time()-start_time < duration:
            pyautogui.screenshot(str(count)+".png")
            count=count+1
        else:
            break
    # this will record 1 screenshot per time
    files=natsorted(os.listdir())
    print(files)
    # now make a video from all the individual screenshots
    for i in range(len(files)):
        print('making '+files[i][0:-4]+'.avi')
        video_write(files[i], files[i][0:-4]+'.avi', 1)
        # os.remove(files[i])
    # now combine all videos
    files=natsorted(os.listdir())
    file=open('mylist.txt','w')
    for i in range(len(files)):
        if files[i][-4:]=='.avi':
            file.write("file '%s' \n"%(files[i]))
    file.close()
    command='ffmpeg -f concat -i mylist.txt -c copy %s'%(newfilename)
    os.system(command)
    # convert to .mp4 format
    os.system('ffmpeg -i %s %s'%(newfilename, newfilename[0:-4]+'.mp4'))
    os.remove(newfilename)
    vid_duration=calc_duration(newfilename[0:-4]+'.mp4')
    # retime so the stitched video spans exactly `duration` seconds
    speedfactor=duration/vid_duration
    print(speedfactor)
    os.system('ffmpeg -i %s -filter:v "setpts=%s*PTS" %s'%(newfilename[0:-4]+'.mp4', str(speedfactor), newfilename[0:-4]+'_2.mp4'))
    os.remove(newfilename[0:-4]+'.mp4')
    os.rename(newfilename[0:-4]+'_2.mp4',newfilename[0:-4]+'.mp4')
    # move the finished capture back to where the caller started
    shutil.move(os.getcwd()+'/'+newfilename[0:-4]+'.mp4', curdir+'/'+newfilename[0:-4]+'.mp4')
@ray.remote
def video_record(filename, duration):
    '''Record webcam frames for `duration` seconds and write them to
    `filename` using a motion-JPEG writer at a nominal 20 fps.

    NOTE(review): `fourcc` (XVID) is assigned but never used — the writer
    below hard-codes MJPG; `t0` and the grayscale conversion are also
    unused. Left in place to keep behavior byte-identical.
    '''
    print('recording video (.AVI)')
    print('--> '+filename)
    t0 = time.time() # start time in seconds
    video=cv2.VideoCapture(0)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    frame_width = int(video.get(3))
    frame_height = int(video.get(4))
    out = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width,frame_height))
    a=0
    start=time.time()
    while True:
        a=a+1
        check, frame=video.read()
        #print(check)
        #print(frame)
        gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        out.write(frame)
        #cv2.imshow("frame",gray)
        end=time.time()
        if end-start>duration:
            break
    print(a)
    # release capture/writer handles and close any OpenCV windows
    video.release()
    out.release()
    cv2.destroyAllWindows()
@ray.remote
def audio_record(filename, duration):
    '''Record `duration` seconds of 44.1 kHz stereo audio from the default
    input device and save it to `filename` as a .WAV file.'''
    print('recording audio (.WAV)')
    print('--> '+filename)
    # brief pause before opening the input stream
    time.sleep(0.50)
    sample_rate = 44100
    recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=2)
    # block until the capture buffer is full, then write it out
    sd.wait()
    sf.write(filename, recording, sample_rate)
def video_audio_record(videofile, duration):
    '''Kick off webcam video, microphone audio, screen capture, and mouse
    logging in parallel via ray, blocking until all four finish.

    BUGFIX: the original body referenced the module-level global `filename`
    instead of the `videofile` parameter; it only worked because the sole
    caller passes that same global. The parameter is now used throughout,
    which is behavior-identical for the existing caller.
    '''
    audiofile = videofile[0:-4]+'.wav'
    ray.get([video_record.remote(videofile, duration),
             audio_record.remote(audiofile, duration),
             screen_record.remote(videofile, duration),
             mouse.remote(videofile, duration)])
# CLI entry: record.py <filename.avi> <duration_seconds> <train_dir>
filename=sys.argv[1]
duration=int(sys.argv[2])
train_dir=sys.argv[3]
print(filename)
print(duration)
# only .avi targets trigger a recording session
if filename.find('.avi') > 0:
    ray.init()
    video_audio_record(filename, duration)
    # for testing !! (calculate the right framerate for duration)
    vid_duration=calc_duration(os.getcwd()+'/'+filename)
    print(vid_duration)
    # initialize names of stuff
    audiofile=filename[0:-4]+'.wav'
    newfilename=filename[0:-4]+'_new.mp4'
    newfilename2=filename[0:-4]+'_new2.mp4'
    if vid_duration > duration or vid_duration < duration:
        # convert to be proper length
        print('converting to 20 seconds of video...')
        # following ffmpeg documentation https://trac.ffmpeg.org/wiki/How%20to%20speed%20up%20/%20slow%20down%20a%20video
        speedfactor=duration/vid_duration
        print(speedfactor)
        os.system('ffmpeg -i %s -filter:v "setpts=%s*PTS" %s'%(filename, str(speedfactor), newfilename))
        os.system('ffmpeg -i %s -i %s -c:v copy -c:a aac -strict experimental %s'%(newfilename, audiofile, newfilename2))
        #os.remove(filename)
        #os.remove(newfilename)
        #os.rename(newfilename2, filename)
    else:
        os.system('ffmpeg -i %s -i %s -c:v copy -c:a aac -strict experimental %s'%(filename, audiofile, newfilename2))
        #os.remove(filename)
        #os.rename(newfilename2, filename)
    # make everything into one video
    one=newfilename2[0:-4]+'_.mp4'
    two=filename[0:-4]+'_screenshots_2.mp4'
    #resize video 1
    os.system('ffmpeg -i %s -vf scale=640:360 %s -hide_banner'%(newfilename2, one))
    # resize video 2
    os.system('ffmpeg -i %s -vf scale=640:360 %s -hide_banner'%(filename[0:-4]+'_screenshots.mp4', two))
    # combine
    os.system('ffmpeg -i %s -i %s -filter_complex hstack output.mp4'%(one, two))
    #os.system('open output.mp4')
    # remove temp files and rename
    # NOTE(review): the rename of `newfilename` below is immediately undone
    # by the following remove — effectively just deleting `newfilename`
    # before `newfilename2` takes the .mp4 name; confirm this is intended.
    os.remove(one)
    os.remove(two)
    os.remove(filename)
    os.rename(newfilename, filename[0:-4]+'.mp4')
    os.remove(filename[0:-4]+'.mp4')
    os.rename(newfilename2, filename[0:-4]+'.mp4')
    shutil.rmtree(filename[0:-4]+'_screenshots')
# move the combined output out of ./temp into train_dir, then clean up
os.chdir(curdir)
file_dir=os.getcwd()+'/temp'
os.chdir(file_dir)
os.remove(filename[0:-4]+'.mp4')
os.rename('output.mp4', filename[0:-4]+'.mp4')
shutil.move(file_dir+'/'+filename[0:-4]+'.mp4', train_dir+'/'+filename[0:-4]+'.mp4')
os.chdir(curdir)
shutil.rmtree('temp')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/tests/helpers/video_record/parallel.py | tests/helpers/video_record/parallel.py | import os
# if just want one:
# os.system('python3 record.py test.avi 30')
# record 30 - can also make this contextual based on date/time
for i in range(30):
os.system("python3 record.py %s 10"%(str(i)+'.avi'))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/seed_test.py | datasets/seed_test.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ _ ___ _ _
| _ \ | | | | _ / _ \ | (_)
| | | |__ _| |_ __ _ ___ ___| |_ ___ (_) / /_\ \_ _ __| |_ ___
| | | / _` | __/ _` / __|/ _ \ __/ __| | _ | | | |/ _` | |/ _ \
| |/ / (_| | || (_| \__ \ __/ |_\__ \ _ | | | | |_| | (_| | | (_) |
|___/ \__,_|\__\__,_|___/\___|\__|___/ (_) \_| |_/\__,_|\__,_|_|\___/
Quickly generate some sample audio data from a GitHub repository.
'''
import os, shutil
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
listdir=os.listdir()
if 'sample_voice_data' not in listdir:
os.system('git clone https://github.com/jim-schwoebel/sample_voice_data.git')
else:
pass
cur_dir=os.getcwd()
base_dir=prev_dir(cur_dir)
train_dir=base_dir+'/train_dir'
try:
shutil.copy(cur_dir+'/sample_voice_data/gender_all.csv',train_dir+'/gender_all.csv')
except:
os.remove(train_dir+'/gender_all.csv')
shutil.copy(cur_dir+'/sample_voice_data/gender_all.csv',train_dir+'/gender_all.csv')
try:
shutil.copytree(cur_dir+'/sample_voice_data/males',train_dir+'/males')
except:
shutil.rmtree(train_dir+'/males')
shutil.copytree(cur_dir+'/sample_voice_data/males',train_dir+'/males')
try:
shutil.copytree(cur_dir+'/sample_voice_data/females',train_dir+'/females')
except:
shutil.rmtree(train_dir+'/females')
shutil.copytree(cur_dir+'/sample_voice_data/females',train_dir+'/females')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/download.py | datasets/downloads/download.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ _
| _ \ | | | |
| | | |__ _| |_ __ _ ___ ___| |_ ___
| | | / _` | __/ _` / __|/ _ \ __/ __|
| |/ / (_| | || (_| \__ \ __/ |_\__ \
|___/ \__,_|\__\__,_|___/\___|\__|___/
A command line interface for downloading datasets through Allie.
Specify the dataset type and get links and download information.
Note this is a work-in-progress and will expand into the future.
'''
import os, json
try:
from fuzzywuzzy import fuzz
except:
os.system('pip3 install fuzzywuzzy==0.18.0')
from fuzzywuzzy import fuzz
current_dir=os.getcwd()
# now ask user what type of problem they are trying to solve
problemtype=input('what dataset would you like to download? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
while problemtype not in ['1','2','3','4','5']:
print('answer not recognized...')
problemtype=input('what problem are you solving? (1-audio, 2-text, 3-image, 4-video, 5-csv)\n')
if problemtype=='1':
problemtype='audio'
elif problemtype=='2':
problemtype='text'
elif problemtype=='3':
problemtype='image'
elif problemtype=='4':
problemtype='video'
elif problemtype=='5':
problemtype=='csv'
# go to helpers directory + get json
if problemtype=='audio':
os.chdir(current_dir+'/helpers/audio')
elif problemtype == 'text':
os.chdir(current_dir+'/helpers/text')
elif problemtype=='image':
os.chdir(current_dir+'/helpers/image')
elif problemtype=='video':
os.chdir(current_dir+'/helpers/video')
elif problemtype=='csv':
# csv is scarcest dataset
os.chdir(current_dir+'/helpers/csv')
# now get all the json files in the directory
listdir=os.listdir()
dataset_names=list()
for i in range(len(listdir)):
if listdir[i][-5:]=='.json':
dataset_names.append(listdir[i][0:-5].replace('_', ' '))
print('found %s datasets...'%(str(len(dataset_names))))
print('----------------------------')
print('here are the available %s datasets'%(problemtype.upper()))
print('----------------------------')
for i in range(len(dataset_names)):
print(dataset_names[i])
while True:
user_input=input('what %s dataset would you like to download?\n'%(problemtype))
fuzznums=list()
for i in range(len(dataset_names)):
# do fuzzy search to find best matched dataset to query (continue to download in ./data directory)
fuzznums.append(fuzz.ratio(dataset_names[i].lower(), user_input.lower()))
maxval=max(fuzznums)
maxind=fuzznums.index(maxval)
dataset=dataset_names[maxind]
print('found dataset: %s'%(dataset))
g=json.load(open(dataset.replace(' ','_')+'.json'))
print(g[dataset]['description'])
user_input2=input('just confirming, do you want to download the %s dataset? (Y - yes, N - no) \n'%(dataset))
if user_input2.lower() in ['y','yes']:
os.system('open %s'%(g[dataset]['link']))
break
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/helpers/make_file.py | datasets/downloads/helpers/make_file.py | import os, json
def make_jsonfile(textfile):
g=open(textfile).read()
h=g.split('*')
os.chdir(textfile[0:-3])
for i in range(len(h)):
i1=h[i].find('[')
i2=h[i].find(']')
i3=h[i].find('(')
i4=h[i].find(')')
i5=h[i].find('-')
name=h[i][i1+1:i2]
print(name)
description=h[i][i5:].replace('\n','')
link=h[i][i3+1:i4]
data={name: {'description': description,
'link': link}}
jsonfile=open(name.replace(' ','_')+'.json','w')
json.dump(data,jsonfile)
jsonfile.close()
text=input('what file would you like to featurize?')
make_jsonfile(text) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/setup.py | datasets/downloads/PyDataset/setup.py | #!/usr/bin/env python
# Author: Aziz Alto
# email: iamaziz.alto@gmail.com
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='pydataset',
description=("Provides instant access to many popular datasets right from "
"Python (in dataframe structure)."),
author='Aziz Alto',
url='https://github.com/iamaziz/PyDataset',
download_url='https://github.com/iamaziz/PyDataset/tarball/0.2.0',
license = 'MIT',
author_email='iamaziz.alto@gmail.com',
version='0.2.0',
install_requires=['pandas'],
packages=['pydataset', 'pydataset.utils'],
package_data={'pydataset': ['*.gz', 'resources.tar.gz']}
)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/datasets_handler.py | datasets/downloads/PyDataset/pydataset/datasets_handler.py | # datasets_handler.py
# dataset handling file
import pandas as pd
from .utils import html2text
from .locate_datasets import __items_dict, __docs_dict, __get_data_folder_path
items = __items_dict()
docs = __docs_dict()
# make dataframe layout (of __datasets_desc()) terminal-friendly
pd.set_option('display.max_rows', 170)
pd.set_option('display.max_colwidth', 90)
# for terminal, auto-detect
pd.set_option('display.width', None)
# HELPER
def __filter_doc(raw):
note = "PyDataset Documentation (adopted from R Documentation. " \
"The displayed examples are in R)"
txt = raw.replace('R Documentation', note)
return txt
def __read_docs(path):
# raw html
html = open(path, 'r').read()
# html handler
h = html2text.HTML2Text()
h.ignore_links = True
h.ignore_images = True
txt = h.handle(html)
return txt
# MAIN
def __get_csv_path(item):
"""return the full path of the item's csv file"""
return items[item]
def __read_csv(item):
path = __get_csv_path(item)
df = pd.read_csv(path, index_col=0)
# display 'optional' log msg "loaded: Titanic <class 'numpy.ndarray'>"
# print('loaded: {} {}'.format(item, type(df)))
return df
def __get_doc_path(item):
return docs[item]
def __print_item_docs(item):
path = __get_doc_path(item)
doc = __read_docs(path) # html format
txt = __filter_doc(doc) # edit R related txt
print(txt)
def __datasets_desc():
"""return a df of the available datasets with description"""
datasets = __get_data_folder_path() + 'datasets.csv'
df = pd.read_csv(datasets)
df = df[['Item', 'Title']]
df.columns = ['dataset_id', 'title']
# print('a list of the available datasets:')
return df
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/dump_data.py | datasets/downloads/PyDataset/pydataset/dump_data.py | # dump_data.py
# initialize PYDATASET_HOME, and
# dump pydataset/resources.tar.gz into $HOME/.pydataset/
import tarfile
from os import path as os_path
from os import mkdir as os_mkdir
from os.path import join as path_join
def __setup_db():
homedir = os_path.expanduser('~')
PYDATASET_HOME = path_join(homedir, '.pydataset/')
if not os_path.exists(PYDATASET_HOME):
# create $HOME/.pydataset/
os_mkdir(PYDATASET_HOME)
print('initiated datasets repo at: {}'.format(PYDATASET_HOME))
# copy the resources.tar.gz from the module files.
# # There should be a better way ? read from a URL ?
import pydataset
filename = path_join(pydataset.__path__[0], 'resources.tar.gz')
tar = tarfile.open(filename, mode='r|gz')
# # reading 'resources.tar.gz' from a URL
# try:
# from urllib.request import urlopen # py3
# except ImportError:
# from urllib import urlopen # py2
# import tarfile
#
# targz_url = 'https://example.com/resources.tar.gz'
# httpstrem = urlopen(targz_url)
# tar = tarfile.open(fileobj=httpstrem, mode="r|gz")
# extract 'resources.tar.gz' into PYDATASET_HOME
# print('extracting resources.tar.gz ... from {}'.format(targz_url))
tar.extractall(path=PYDATASET_HOME)
# print('done.')
tar.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/__init__.py | datasets/downloads/PyDataset/pydataset/__init__.py | # __init__.py
# main interface to pydataset module
from .datasets_handler import __print_item_docs, __read_csv, __datasets_desc
from .support import find_similar
def data(item=None, show_doc=False):
"""loads a datasaet (from in-modules datasets) in a dataframe data structure.
Args:
item (str) : name of the dataset to load.
show_doc (bool) : to show the dataset's documentation.
Examples:
>>> iris = data('iris')
>>> data('titanic', show_doc=True)
: returns the dataset's documentation.
>>> data()
: like help(), returns a dataframe [Item, Title]
for a list of the available datasets.
"""
if item:
try:
if show_doc:
__print_item_docs(item)
return
df = __read_csv(item)
return df
except KeyError:
find_similar(item)
else:
return __datasets_desc()
if __name__ == '__main__':
# Numerical data
rain = data('rain')
print(rain)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/locate_datasets.py | datasets/downloads/PyDataset/pydataset/locate_datasets.py |
# locate_datasets.py
# locate datasets file paths
from os import path as os_path
from os import walk as os_walk
from os.path import join as path_join
from .dump_data import __setup_db
def __get_data_folder_path():
# read rdata folder's path from $HOME
homedir = os_path.expanduser('~')
# initiate database datafile
dpath = path_join(homedir, '.pydataset/resources/rdata/')
if os_path.exists(dpath):
return dpath
else:
# create PYDATASET_HOME and folders
__setup_db()
return __get_data_folder_path()
data_path = __get_data_folder_path()
# scan data and documentation folders to build a dictionary (e.g.
# {item:path} ) for each
items = {}
docs = {}
for dirname, dirnames, filenames in os_walk(data_path):
# store item name and path to all csv files.
for fname in filenames:
if fname.endswith('.csv') and not fname.startswith('.'):
# e.g. pydataset-package/rdata/csv/boot/acme.csv
item_path = path_join(dirname, fname)
# e.g acme.csv
item_file = os_path.split(item_path)[1]
# e.g. acme
item = item_file.replace('.csv', '')
# store item and its path
items[item] = item_path
# store item name and path to all html files.
for fname in filenames:
if fname.endswith('.html') and not fname.startswith('.'):
item_path = path_join(dirname, fname)
item_file = os_path.split(item_path)[1]
item = item_file.replace('.html', '')
docs[item] = item_path
def __items_dict():
return items
def __docs_dict():
return docs
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/support.py | datasets/downloads/PyDataset/pydataset/support.py |
from difflib import SequenceMatcher as SM
from collections import Counter
from .locate_datasets import __items_dict
DATASET_IDS = list(__items_dict().keys())
ERROR = ('Not valid dataset name and no similar found! '
'Try: data() to see available.')
def similarity(w1, w2, threshold=0.5):
"""compare two strings 'words', and
return ratio of smiliarity, be it larger than the threshold,
or 0 otherwise.
NOTE: if the result more like junk, increase the threshold value.
"""
ratio = SM(None, str(w1).lower(), str(w2).lower()).ratio()
return ratio if ratio > threshold else 0
def search_similar(s1, dlist=DATASET_IDS, MAX_SIMILARS=10):
"""Returns the top MAX_SIMILARS [(dataset_id : smilarity_ratio)] to s1"""
similars = {s2: similarity(s1, s2)
for s2 in dlist
if similarity(s1, s2)}
# a list of tuples [(similar_word, ratio) .. ]
top_match = Counter(similars).most_common(MAX_SIMILARS+1)
return top_match
def find_similar(query):
result = search_similar(query)
if result:
top_words, ratios = zip(*result)
print('Did you mean:')
print(', '.join(t for t in top_words))
# print(', '.join('{:.1f}'.format(r*100) for r in ratios))
else:
raise Exception(ERROR)
if __name__ == '__main__':
s = 'ansc'
find_similar(s)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/utils/html2text.py | datasets/downloads/PyDataset/pydataset/utils/html2text.py | #!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "3.200.3"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes", "Kevin Jay North"]
# TODO:
# Support decoded entities with unifiable.
try:
True
except NameError:
setattr(__builtins__, 'True', 1)
setattr(__builtins__, 'False', 0)
def has_key(x, y):
if hasattr(x, 'has_key'): return x.has_key(y)
else: return y in x
try:
import htmlentitydefs
import urlparse
import HTMLParser
except ImportError: #Python3
import html.entities as htmlentitydefs
import urllib.parse as urlparse
import html.parser as HTMLParser
try: #Python3
import urllib.request as urllib
except:
import urllib
import optparse, re, sys, codecs, types
try: from textwrap import wrap
except: pass
# Use Unicode characters instead of their ascii psuedo-replacements
UNICODE_SNOB = 0
# Escape all special characters. Output is less readable, but avoids corner case formatting issues.
ESCAPE_SNOB = 0
# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0
# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78
# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True
# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True
# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36
IGNORE_ANCHORS = False
IGNORE_IMAGES = False
IGNORE_EMPHASIS = False
### Entity Nonsense ###
def name2cp(k):
if k == 'apos': return ord("'")
if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
return htmlentitydefs.name2codepoint[k]
else:
k = htmlentitydefs.entitydefs[k]
if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
return ord(codecs.latin_1_decode(k)[0])
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u',
'lrm':'', 'rlm':''}
unifiable_n = {}
for k in unifiable.keys():
unifiable_n[name2cp(k)] = unifiable[k]
### End Entity Nonsense ###
def onlywhite(line):
"""Return true if the line does only consist of whitespace characters."""
for c in line:
if c is not ' ' and c is not ' ':
return c is ' '
return line
def hn(tag):
if tag[0] == 'h' and len(tag) == 2:
try:
n = int(tag[1])
if n in range(1, 10): return n
except ValueError: return 0
def dumb_property_dict(style):
"""returns a hash of css attributes"""
return dict([(x.strip(), y.strip()) for x, y in [z.split(':', 1) for z in style.split(';') if ':' in z]]);
def dumb_css_parser(data):
"""returns a hash of css selectors, each of which contains a hash of css attributes"""
# remove @import sentences
data += ';'
importIndex = data.find('@import')
while importIndex != -1:
data = data[0:importIndex] + data[data.find(';', importIndex) + 1:]
importIndex = data.find('@import')
# parse the css. reverted from dictionary compehension in order to support older pythons
elements = [x.split('{') for x in data.split('}') if '{' in x.strip()]
try:
elements = dict([(a.strip(), dumb_property_dict(b)) for a, b in elements])
except ValueError:
elements = {} # not that important
return elements
def element_style(attrs, style_def, parent_style):
"""returns a hash of the 'final' style attributes of the element"""
style = parent_style.copy()
if 'class' in attrs:
for css_class in attrs['class'].split():
css_style = style_def['.' + css_class]
style.update(css_style)
if 'style' in attrs:
immediate_style = dumb_property_dict(attrs['style'])
style.update(immediate_style)
return style
def google_list_style(style):
"""finds out whether this is an ordered or unordered list"""
if 'list-style-type' in style:
list_style = style['list-style-type']
if list_style in ['disc', 'circle', 'square', 'none']:
return 'ul'
return 'ol'
def google_has_height(style):
"""check if the style of the element has the 'height' attribute explicitly defined"""
if 'height' in style:
return True
return False
def google_text_emphasis(style):
"""return a list of all emphasis modifiers of the element"""
emphasis = []
if 'text-decoration' in style:
emphasis.append(style['text-decoration'])
if 'font-style' in style:
emphasis.append(style['font-style'])
if 'font-weight' in style:
emphasis.append(style['font-weight'])
return emphasis
def google_fixed_width_font(style):
"""check if the css of the current element defines a fixed width font"""
font_family = ''
if 'font-family' in style:
font_family = style['font-family']
if 'Courier New' == font_family or 'Consolas' == font_family:
return True
return False
def list_numbering_start(attrs):
"""extract numbering from list element attributes"""
if 'start' in attrs:
return int(attrs['start']) - 1
else:
return 0
class HTML2Text(HTMLParser.HTMLParser):
def __init__(self, out=None, baseurl=''):
HTMLParser.HTMLParser.__init__(self)
# Config options
self.unicode_snob = UNICODE_SNOB
self.escape_snob = ESCAPE_SNOB
self.links_each_paragraph = LINKS_EACH_PARAGRAPH
self.body_width = BODY_WIDTH
self.skip_internal_links = SKIP_INTERNAL_LINKS
self.inline_links = INLINE_LINKS
self.google_list_indent = GOOGLE_LIST_INDENT
self.ignore_links = IGNORE_ANCHORS
self.ignore_images = IGNORE_IMAGES
self.ignore_emphasis = IGNORE_EMPHASIS
self.google_doc = False
self.ul_item_mark = '*'
self.emphasis_mark = '_'
self.strong_mark = '**'
if out is None:
self.out = self.outtextf
else:
self.out = out
self.outtextlist = [] # empty list to store output characters before they are "joined"
try:
self.outtext = unicode()
except NameError: # Python3
self.outtext = str()
self.quiet = 0
self.p_p = 0 # number of newline character to print before next output
self.outcount = 0
self.start = 1
self.space = 0
self.a = []
self.astack = []
self.maybe_automatic_link = None
self.absolute_url_matcher = re.compile(r'^[a-zA-Z+]+://')
self.acount = 0
self.list = []
self.blockquote = 0
self.pre = 0
self.startpre = 0
self.code = False
self.br_toggle = ''
self.lastWasNL = 0
self.lastWasList = False
self.style = 0
self.style_def = {}
self.tag_stack = []
self.emphasis = 0
self.drop_white_space = 0
self.inheader = False
self.abbr_title = None # current abbreviation definition
self.abbr_data = None # last inner HTML (for abbr being defined)
self.abbr_list = {} # stack of abbreviations to write later
self.baseurl = baseurl
try: del unifiable_n[name2cp('nbsp')]
except KeyError: pass
unifiable['nbsp'] = ' _place_holder;'
def feed(self, data):
data = data.replace("</' + 'script>", "</ignore>")
HTMLParser.HTMLParser.feed(self, data)
def handle(self, data):
self.feed(data)
self.feed("")
return self.optwrap(self.close())
def outtextf(self, s):
self.outtextlist.append(s)
if s: self.lastWasNL = s[-1] == '\n'
def close(self):
HTMLParser.HTMLParser.close(self)
self.pbr()
self.o('', 0, 'end')
self.outtext = self.outtext.join(self.outtextlist)
if self.unicode_snob:
nbsp = unichr(name2cp('nbsp'))
else:
nbsp = u' '
self.outtext = self.outtext.replace(u' _place_holder;', nbsp)
return self.outtext
def handle_charref(self, c):
self.o(self.charref(c), 1)
def handle_entityref(self, c):
self.o(self.entityref(c), 1)
def handle_starttag(self, tag, attrs):
self.handle_tag(tag, attrs, 1)
def handle_endtag(self, tag):
self.handle_tag(tag, None, 0)
def previousIndex(self, attrs):
""" returns the index of certain set of attributes (of a link) in the
self.a list
If the set of attributes is not found, returns None
"""
if not has_key(attrs, 'href'): return None
i = -1
for a in self.a:
i += 1
match = 0
if has_key(a, 'href') and a['href'] == attrs['href']:
if has_key(a, 'title') or has_key(attrs, 'title'):
if (has_key(a, 'title') and has_key(attrs, 'title') and
a['title'] == attrs['title']):
match = True
else:
match = True
if match: return i
def drop_last(self, nLetters):
if not self.quiet:
self.outtext = self.outtext[:-nLetters]
def handle_emphasis(self, start, tag_style, parent_style):
"""handles various text emphases"""
tag_emphasis = google_text_emphasis(tag_style)
parent_emphasis = google_text_emphasis(parent_style)
# handle Google's text emphasis
strikethrough = 'line-through' in tag_emphasis and self.hide_strikethrough
bold = 'bold' in tag_emphasis and not 'bold' in parent_emphasis
italic = 'italic' in tag_emphasis and not 'italic' in parent_emphasis
fixed = google_fixed_width_font(tag_style) and not \
google_fixed_width_font(parent_style) and not self.pre
if start:
# crossed-out text must be handled before other attributes
# in order not to output qualifiers unnecessarily
if bold or italic or fixed:
self.emphasis += 1
if strikethrough:
self.quiet += 1
if italic:
self.o(self.emphasis_mark)
self.drop_white_space += 1
if bold:
self.o(self.strong_mark)
self.drop_white_space += 1
if fixed:
self.o('`')
self.drop_white_space += 1
self.code = True
else:
if bold or italic or fixed:
# there must not be whitespace before closing emphasis mark
self.emphasis -= 1
self.space = 0
self.outtext = self.outtext.rstrip()
if fixed:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o('`')
self.code = False
if bold:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(2)
self.drop_white_space -= 1
else:
self.o(self.strong_mark)
if italic:
if self.drop_white_space:
# empty emphasis, drop it
self.drop_last(1)
self.drop_white_space -= 1
else:
self.o(self.emphasis_mark)
# space is only allowed after *all* emphasis marks
if (bold or italic) and not self.emphasis:
self.o(" ")
if strikethrough:
self.quiet -= 1
def handle_tag(self, tag, attrs, start):
#attrs = fixattrs(attrs)
if attrs is None:
attrs = {}
else:
attrs = dict(attrs)
if self.google_doc:
# the attrs parameter is empty for a closing tag. in addition, we
# need the attributes of the parent nodes in order to get a
# complete style description for the current element. we assume
# that google docs export well formed html.
parent_style = {}
if start:
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
tag_style = element_style(attrs, self.style_def, parent_style)
self.tag_stack.append((tag, attrs, tag_style))
else:
dummy, attrs, tag_style = self.tag_stack.pop()
if self.tag_stack:
parent_style = self.tag_stack[-1][2]
if hn(tag):
self.p()
if start:
self.inheader = True
self.o(hn(tag)*"#" + ' ')
else:
self.inheader = False
return # prevent redundant emphasis marks on headers
if tag in ['p', 'div']:
if self.google_doc:
if start and google_has_height(tag_style):
self.p()
else:
self.soft_br()
else:
self.p()
if tag == "br" and start: self.o(" \n")
if tag == "hr" and start:
self.p()
self.o("* * *")
self.p()
if tag in ["head", "style", 'script']:
if start: self.quiet += 1
else: self.quiet -= 1
if tag == "style":
if start: self.style += 1
else: self.style -= 1
if tag in ["body"]:
self.quiet = 0 # sites like 9rules.com never close <head>
if tag == "blockquote":
if start:
self.p(); self.o('> ', 0, 1); self.start = 1
self.blockquote += 1
else:
self.blockquote -= 1
self.p()
if tag in ['em', 'i', 'u'] and not self.ignore_emphasis: self.o(self.emphasis_mark)
if tag in ['strong', 'b'] and not self.ignore_emphasis: self.o(self.strong_mark)
if tag in ['del', 'strike', 's']:
if start:
self.o("<"+tag+">")
else:
self.o("</"+tag+">")
if self.google_doc:
if not self.inheader:
# handle some font attributes, but leave headers clean
self.handle_emphasis(start, tag_style, parent_style)
if tag in ["code", "tt"] and not self.pre: self.o('`') #TODO: `` `this` ``
if tag == "abbr":
if start:
self.abbr_title = None
self.abbr_data = ''
if has_key(attrs, 'title'):
self.abbr_title = attrs['title']
else:
if self.abbr_title != None:
self.abbr_list[self.abbr_data] = self.abbr_title
self.abbr_title = None
self.abbr_data = ''
if tag == "a" and not self.ignore_links:
if start:
if has_key(attrs, 'href') and not (self.skip_internal_links and attrs['href'].startswith('#')):
self.astack.append(attrs)
self.maybe_automatic_link = attrs['href']
else:
self.astack.append(None)
else:
if self.astack:
a = self.astack.pop()
if self.maybe_automatic_link:
self.maybe_automatic_link = None
elif a:
if self.inline_links:
self.o("](" + escape_md(a['href']) + ")")
else:
i = self.previousIndex(a)
if i is not None:
a = self.a[i]
else:
self.acount += 1
a['count'] = self.acount
a['outcount'] = self.outcount
self.a.append(a)
self.o("][" + str(a['count']) + "]")
if tag == "img" and start and not self.ignore_images:
if has_key(attrs, 'src'):
attrs['href'] = attrs['src']
alt = attrs.get('alt', '')
self.o("![" + escape_md(alt) + "]")
if self.inline_links:
self.o("(" + escape_md(attrs['href']) + ")")
else:
i = self.previousIndex(attrs)
if i is not None:
attrs = self.a[i]
else:
self.acount += 1
attrs['count'] = self.acount
attrs['outcount'] = self.outcount
self.a.append(attrs)
self.o("[" + str(attrs['count']) + "]")
if tag == 'dl' and start: self.p()
if tag == 'dt' and not start: self.pbr()
if tag == 'dd' and start: self.o(' ')
if tag == 'dd' and not start: self.pbr()
if tag in ["ol", "ul"]:
# Google Docs create sub lists as top level lists
if (not self.list) and (not self.lastWasList):
self.p()
if start:
if self.google_doc:
list_style = google_list_style(tag_style)
else:
list_style = tag
numbering_start = list_numbering_start(attrs)
self.list.append({'name':list_style, 'num':numbering_start})
else:
if self.list: self.list.pop()
self.lastWasList = True
else:
self.lastWasList = False
if tag == 'li':
self.pbr()
if start:
if self.list: li = self.list[-1]
else: li = {'name':'ul', 'num':0}
if self.google_doc:
nest_count = self.google_nest_count(tag_style)
else:
nest_count = len(self.list)
self.o(" " * nest_count) #TODO: line up <ol><li>s > 9 correctly.
if li['name'] == "ul": self.o(self.ul_item_mark + " ")
elif li['name'] == "ol":
li['num'] += 1
self.o(str(li['num'])+". ")
self.start = 1
if tag in ["table", "tr"] and start: self.p()
if tag == 'td': self.pbr()
if tag == "pre":
if start:
self.startpre = 1
self.pre = 1
else:
self.pre = 0
self.p()
def pbr(self):
if self.p_p == 0:
self.p_p = 1
def p(self):
self.p_p = 2
def soft_br(self):
self.pbr()
self.br_toggle = ' '
def o(self, data, puredata=0, force=0):
if self.abbr_data is not None:
self.abbr_data += data
if not self.quiet:
if self.google_doc:
# prevent white space immediately after 'begin emphasis' marks ('**' and '_')
lstripped_data = data.lstrip()
if self.drop_white_space and not (self.pre or self.code):
data = lstripped_data
if lstripped_data != '':
self.drop_white_space = 0
if puredata and not self.pre:
data = re.sub('\s+', ' ', data)
if data and data[0] == ' ':
self.space = 1
data = data[1:]
if not data and not force: return
if self.startpre:
#self.out(" :") #TODO: not output when already one there
if not data.startswith("\n"): # <pre>stuff...
data = "\n" + data
bq = (">" * self.blockquote)
if not (force and data and data[0] == ">") and self.blockquote: bq += " "
if self.pre:
if not self.list:
bq += " "
#else: list content is already partially indented
# for i in xrange(len(self.list)): # no python 3
for i in range(len(self.list)):
bq += " "
data = data.replace("\n", "\n"+bq)
if self.startpre:
self.startpre = 0
if self.list:
data = data.lstrip("\n") # use existing initial indentation
if self.start:
self.space = 0
self.p_p = 0
self.start = 0
if force == 'end':
# It's the end.
self.p_p = 0
self.out("\n")
self.space = 0
if self.p_p:
self.out((self.br_toggle+'\n'+bq)*self.p_p)
self.space = 0
self.br_toggle = ''
if self.space:
if not self.lastWasNL: self.out(' ')
self.space = 0
if self.a and ((self.p_p == 2 and self.links_each_paragraph) or force == "end"):
if force == "end": self.out("\n")
newa = []
for link in self.a:
if self.outcount > link['outcount']:
self.out(" ["+ str(link['count']) +"]: " + urlparse.urljoin(self.baseurl, link['href']))
if has_key(link, 'title'): self.out(" ("+link['title']+")")
self.out("\n")
else:
newa.append(link)
if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.
self.a = newa
if self.abbr_list and force == "end":
for abbr, definition in self.abbr_list.items():
self.out(" *[" + abbr + "]: " + definition + "\n")
self.p_p = 0
self.out(data)
self.outcount += 1
def handle_data(self, data):
if r'\/script>' in data: self.quiet -= 1
if self.style:
self.style_def.update(dumb_css_parser(data))
if not self.maybe_automatic_link is None:
href = self.maybe_automatic_link
if href == data and self.absolute_url_matcher.match(href):
self.o("<" + data + ">")
return
else:
self.o("[")
self.maybe_automatic_link = None
if not self.code and not self.pre:
data = escape_md_section(data, snob=self.escape_snob)
self.o(data, 1)
def unknown_decl(self, data): pass
def charref(self, name):
if name[0] in ['x','X']:
c = int(name[1:], 16)
else:
c = int(name)
if not self.unicode_snob and c in unifiable_n.keys():
return unifiable_n[c]
else:
try:
return unichr(c)
except NameError: #Python3
return chr(c)
def entityref(self, c):
if not self.unicode_snob and c in unifiable.keys():
return unifiable[c]
else:
try: name2cp(c)
except KeyError: return "&" + c + ';'
else:
try:
return unichr(name2cp(c))
except NameError: #Python3
return chr(name2cp(c))
def replaceEntities(self, s):
s = s.group(1)
if s[0] == "#":
return self.charref(s[1:])
else: return self.entityref(s)
    # Matches one full reference: '&' + optional '#' + optional hex marker +
    # digits/hex or a 1-8 word-char entity name + ';'. Group 1 is the body.
    r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
    def unescape(self, s):
        """Expand every HTML entity/character reference found in *s*."""
        return self.r_unescape.sub(self.replaceEntities, s)
def google_nest_count(self, style):
"""calculate the nesting count of google doc lists"""
nest_count = 0
if 'margin-left' in style:
nest_count = int(style['margin-left'][:-2]) / self.google_list_indent
return nest_count
def optwrap(self, text):
"""Wrap all paragraphs in the provided text."""
if not self.body_width:
return text
assert wrap, "Requires Python 2.3."
result = ''
newlines = 0
for para in text.split("\n"):
if len(para) > 0:
if not skipwrap(para):
result += "\n".join(wrap(para, self.body_width))
if para.endswith(' '):
result += " \n"
newlines = 1
else:
result += "\n\n"
newlines = 2
else:
if not onlywhite(para):
result += para + "\n"
newlines = 1
else:
if newlines < 2:
result += "\n"
newlines += 1
return result
# Patterns used to recognise markdown constructs that must survive wrapping
# and escaping.
ordered_list_matcher = re.compile(r'\d+\.\s')
unordered_list_matcher = re.compile(r'[-\*\+]\s')
md_chars_matcher = re.compile(r"([\\\[\]\(\)])")
md_chars_matcher_all = re.compile(r"([`\*_{}\[\]\(\)#!])")
md_dot_matcher = re.compile(r"""
    ^             # start of line
    (\s*\d+)      # optional whitespace and a number
    (\.)          # dot
    (?=\s)        # lookahead assert whitespace
    """, re.MULTILINE | re.VERBOSE)
md_plus_matcher = re.compile(r"""
    ^
    (\s*)
    (\+)
    (?=\s)
    """, flags=re.MULTILINE | re.VERBOSE)
md_dash_matcher = re.compile(r"""
    ^
    (\s*)
    (-)
    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
                  # or another dash (header or hr)
    """, flags=re.MULTILINE | re.VERBOSE)
slash_chars = r'\`*_{}[]()#+-.!'
md_backslash_matcher = re.compile(r'''
    (\\)          # match one slash
    (?=[%s])      # followed by a char that requires escaping
    ''' % re.escape(slash_chars),
    flags=re.VERBOSE)

def skipwrap(para):
    """Return True when *para* must not be re-wrapped by optwrap()."""
    # robustness: an empty paragraph has nothing to wrap either way
    # (the original raised IndexError on para[0] here)
    if not para:
        return False
    # If the text begins with four spaces or one tab, it's a code block; don't wrap
    if para.startswith('    ') or para.startswith('\t'):
        return True
    # If the text begins with only two "--", possibly preceded by whitespace, that's
    # an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False
    # I'm not sure what this is for; I thought it was to detect lists, but there's
    # a <br>-inside-<span> case in one of the tests that also depends upon it.
    if stripped[0:1] in ('-', '*'):
        return True
    # If the text begins with a single -, *, or +, followed by a space, or an integer,
    # followed by a ., followed by a space (in either case optionally preceded by
    # whitespace), it's a list; don't wrap.
    if ordered_list_matcher.match(stripped) or unordered_list_matcher.match(stripped):
        return True
    return False
def wrapwrite(text):
    """Write *text* to stdout encoded as UTF-8 bytes.

    Prefers the binary buffer (Python 3); falls back to the plain stream on
    interpreters whose sys.stdout has no .buffer attribute.
    """
    encoded = text.encode('utf-8')
    try: #Python3
        sys.stdout.buffer.write(encoded)
    except AttributeError:
        sys.stdout.write(encoded)
def html2text(html, baseurl=''):
    """Convert an HTML string to markdown text using default options."""
    converter = HTML2Text(baseurl=baseurl)
    return converter.handle(html)
def unescape(s, unicode_snob=False):
    """Expand HTML entity/character references in *s* (module-level helper)."""
    converter = HTML2Text()
    converter.unicode_snob = unicode_snob
    return converter.unescape(s)
def escape_md(text):
    """Backslash-escape markdown-sensitive characters that appear inside
    other markdown constructs (link text, URLs)."""
    escaped = md_chars_matcher.sub(r"\\\1", text)
    return escaped
def escape_md_section(text, snob=False):
    """Backslash-escape markdown-sensitive characters across a whole
    document section; snob=True escapes every special character."""
    result = md_backslash_matcher.sub(r"\\\1", text)
    if snob:
        result = md_chars_matcher_all.sub(r"\\\1", result)
    # neutralise accidental ordered lists, '+' bullets and '-' bullets/rules
    result = md_dot_matcher.sub(r"\1\\\2", result)
    result = md_plus_matcher.sub(r"\1\\\2", result)
    result = md_dash_matcher.sub(r"\1\\\2", result)
    return result
def main():
    """Command-line entry point: read HTML from a file, URL or stdin and
    write the markdown conversion to stdout."""
    baseurl = ''

    p = optparse.OptionParser('%prog [(filename|url) [encoding]]',
                              version='%prog ' + __version__)
    p.add_option("--ignore-emphasis", dest="ignore_emphasis", action="store_true",
        default=IGNORE_EMPHASIS, help="don't include any formatting for emphasis")
    p.add_option("--ignore-links", dest="ignore_links", action="store_true",
        default=IGNORE_ANCHORS, help="don't include any formatting for links")
    p.add_option("--ignore-images", dest="ignore_images", action="store_true",
        default=IGNORE_IMAGES, help="don't include any formatting for images")
    p.add_option("-g", "--google-doc", action="store_true", dest="google_doc",
        default=False, help="convert an html-exported Google Document")
    p.add_option("-d", "--dash-unordered-list", action="store_true", dest="ul_style_dash",
        default=False, help="use a dash rather than a star for unordered list items")
    p.add_option("-e", "--asterisk-emphasis", action="store_true", dest="em_style_asterisk",
        default=False, help="use an asterisk rather than an underscore for emphasized text")
    p.add_option("-b", "--body-width", dest="body_width", action="store", type="int",
        default=BODY_WIDTH, help="number of characters per output line, 0 for no wrap")
    p.add_option("-i", "--google-list-indent", dest="list_indent", action="store", type="int",
        default=GOOGLE_LIST_INDENT, help="number of pixels Google indents nested lists")
    p.add_option("-s", "--hide-strikethrough", action="store_true", dest="hide_strikethrough",
        default=False, help="hide strike-through text. only relevant when -g is specified as well")
    p.add_option("--escape-all", action="store_true", dest="escape_snob",
        default=False, help="Escape all special characters.  Output is less readable, but avoids corner case formatting issues.")
    (options, args) = p.parse_args()

    # process input
    encoding = "utf-8"
    # NOTE(review): encoding is pre-set to "utf-8" and argv values are always
    # strings, so the `encoding is None` auto-detection branches below
    # (feedparser / chardet) appear to be unreachable dead code — confirm
    # before relying on them.
    if len(args) > 0:
        file_ = args[0]
        if len(args) == 2:
            encoding = args[1]
        if len(args) > 2:
            p.error('Too many arguments')

        if file_.startswith('http://') or file_.startswith('https://'):
            baseurl = file_
            j = urllib.urlopen(baseurl)
            data = j.read()
            if encoding is None:
                try:
                    from feedparser import _getCharacterEncoding as enc
                except ImportError:
                    enc = lambda x, y: ('utf-8', 1)
                encoding = enc(j.headers, data)[0]
                if encoding == 'us-ascii':
                    encoding = 'utf-8'
        else:
            data = open(file_, 'rb').read()
            if encoding is None:
                try:
                    from chardet import detect
                except ImportError:
                    detect = lambda x: {'encoding': 'utf-8'}
                encoding = detect(data)['encoding']
    else:
        data = sys.stdin.read()

    data = data.decode(encoding)
    h = HTML2Text(baseurl=baseurl)
    # handle options
    if options.ul_style_dash: h.ul_item_mark = '-'
    if options.em_style_asterisk:
        h.emphasis_mark = '*'
        h.strong_mark = '__'

    h.body_width = options.body_width
    h.list_indent = options.list_indent
    h.ignore_emphasis = options.ignore_emphasis
    h.ignore_links = options.ignore_links
    h.ignore_images = options.ignore_images
    h.google_doc = options.google_doc
    h.hide_strikethrough = options.hide_strikethrough
    h.escape_snob = options.escape_snob

    wrapwrite(h.handle(data))

if __name__ == "__main__":
    main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/datasets/downloads/PyDataset/pydataset/utils/__init__.py | datasets/downloads/PyDataset/pydataset/utils/__init__.py | #!/usr/bin/env python
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/load.py | models/load.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Make model predictions using this load.py script. This loads in all models in this
directory and makes predictions on a target folder. Note that files in this target
directory will be featurized with the default features as specified by the settings.json.
Usage: python3 load.py [target directory] [sampletype] [target model directory]
Example: python3 load.py /Users/jim/desktop/allie/load_dir audio /Users/jim/desktop/gender_tpot_classifier
Alt Usage: python3 load.py
--> this just loads all the models and makes predictions in the ./load_dir
'''
import os, json, pickle, time, sys, shutil
import pandas as pd
import numpy as np
def prev_dir(directory):
    """Return the parent of a '/'-separated path ('/a/b/c' -> '/a/b').

    Equivalent to joining all but the last component; returns '' when the
    path has no separator. Replaces the original O(n^2) manual string
    concatenation loop with a single split/join.
    """
    return '/'.join(directory.split('/')[:-1])
def most_common(lst):
    '''
    get most common item in a list

    Counts every item in one O(n) pass with collections.Counter instead of
    calling lst.count() once per distinct item (O(n^2)). Ties resolve to an
    arbitrary maximal item, as before. Raises ValueError on an empty list,
    matching the original max()-over-empty-set behavior.
    '''
    from collections import Counter  # local import keeps module-level deps unchanged
    counts = Counter(lst)
    return max(counts, key=counts.get)
def model_schema():
    '''
    Build an empty prediction schema: one empty dict per sample type.
    '''
    sample_types = ('audio', 'text', 'image', 'video', 'csv')
    return {sample_type: dict() for sample_type in sample_types}
def classifyfolder(listdir):
    '''
    Return the unique sample types present in a directory listing, judged
    by file extension ('audio', 'image', 'text', 'video', 'csv').
    '''
    suffix_types = (('.mp3', 'audio'), ('.wav', 'audio'),
                    ('.png', 'image'), ('.jpg', 'image'),
                    ('.txt', 'text'),
                    ('.mp4', 'video'), ('.avi', 'video'),
                    ('.csv', 'csv'))
    found = set()
    for filename in listdir:
        for suffix, sample_type in suffix_types:
            if filename.endswith(suffix):
                found.add(sample_type)
                break
    return list(found)
def get_features(models, actual_model_dir, sampletype):
    """Union the default feature sets required by every model of *sampletype*.

    models: schema dict keyed '<sampletype>_models' -> list of session folder
    names under actual_model_dir. Reads each session's settings.json.
    Side effect: leaves the process chdir'd into the last session folder.
    Returns the de-duplicated feature-name list (order unspecified).
    """
    models=models['%s_models'%(sampletype)]
    features=list()
    for i in range(len(models)):
        os.chdir(actual_model_dir+'/'+models[i])
        temp_settings=json.load(open('settings.json'))
        features=features+temp_settings['default_%s_features'%(sampletype)]
    # get only the necessary features for all models
    default_features=list(set(features))
    return default_features
def featurize(features_dir, load_dir, model_dir, filetypes, models):
    """Run the featurization scripts required by the models of one sample type.

    Computes the union of default feature sets (get_features), chdirs into
    the matching features/<type>_features folder, then shells out to
    featurize.py once per feature set against load_dir.
    """
    # contextually load the proper features based on the model information
    actual_model_dir=prev_dir(features_dir)+'/models/'+model_dir
    # get default features
    sampletype=model_dir.split('_')[0]
    default_features=get_features(models, actual_model_dir, sampletype)
    # now change to proper directory for featurization
    if model_dir=='audio_models' and 'audio' in filetypes:
        os.chdir(features_dir+'/audio_features')
    elif model_dir=='text_models' and 'text' in filetypes:
        # NOTE(review): the 'models=...' reassignments in these branches are
        # never read afterwards — they look like leftovers.
        models=models['text_models']
        os.chdir(features_dir+'/text_features')
    elif model_dir=='image_models' and 'image' in filetypes:
        models=models['image_models']
        os.chdir(features_dir+'/image_features')
    elif model_dir=='video_models' and 'video' in filetypes:
        models=models['video_models']
        os.chdir(features_dir+'/video_features')
    elif model_dir=='csv_models' and 'csv' in filetypes:
        models=models['csv_models']
        os.chdir(features_dir+'/csv_features')
    # call featurization API via default features
    for i in range(len(default_features)):
        print(os.getcwd())
        os.system('python3 featurize.py %s %s'%(load_dir, default_features[i]))
def find_files(model_dir):
    """Collect already-featurized samples in the current working directory.

    For audio/text/image/video model folders, returns the companion
    '<name>.json' for every sample file whose featurization json exists.
    For csv model folders, returns the 'featurized_<name>.csv' companions.
    Returns (jsonfiles, csvfiles); unknown model_dir values yield ([], []).

    Refactor: the four near-identical per-type branches are collapsed into
    one extension map, and membership tests use a set instead of repeated
    O(n) list scans.
    """
    print(model_dir)
    jsonfiles = list()
    csvfiles = list()
    # each model folder type consumes exactly one sample extension; all are
    # 4 characters long, so name[0:-4] strips the extension
    sample_exts = {'audio_models': '.wav',
                   'text_models': '.txt',
                   'image_models': '.png',
                   'video_models': '.mp4'}
    if model_dir in sample_exts:
        extension = sample_exts[model_dir]
        listdir = os.listdir()
        if model_dir == 'audio_models':
            print(listdir)
        present = set(listdir)
        for filename in listdir:
            jsonfile = filename[0:-4] + '.json'
            if filename.endswith(extension) and jsonfile in present:
                jsonfiles.append(jsonfile)
    elif model_dir == 'csv_models':
        # csv files are a little different here: the featurized copy is
        # prefixed 'featurized_' rather than renamed to another extension
        listdir = os.listdir()
        present = set(listdir)
        for filename in listdir:
            csvfile = 'featurized_' + filename
            if filename.endswith('.csv') and csvfile in present:
                csvfiles.append(csvfile)
    print(jsonfiles)
    return jsonfiles, csvfiles
def make_predictions(sampletype, transformer, clf, modeltype, jsonfiles, csvfiles, default_features, classes, modeldata, model_dir):
    '''
    Make predictions with a loaded model on every featurized sample in the
    current directory and record the results.

    For non-csv sample types, the predicted class plus the session metadata
    (modeldata) is appended into each sample's .json under
    ['models'][sampletype][class]. For csv, every featurized_*.csv gets a
    predictions_*.csv copy with a 'class_' column added.

    transformer is '' or a fitted scaler; clf is the loaded model object;
    modeltype selects the framework-specific predict call; classes maps
    prediction indices back to label names.

    Fixes vs. the original: clf.predict(feaures) typo (NameError) in both
    keras regression branches; doubled assignment models=models=...; the
    alphapy branches reused the outer loop variable k; unused metrics_/names
    locals removed.
    '''
    sampletype=sampletype.split('_')[0]
    if sampletype != 'csv':
        for k in range(len(jsonfiles)):
            try:
                g=json.load(open(jsonfiles[k]))
                print(sampletype)
                print(g)
                features=list()
                print(default_features)
                for j in range(len(default_features)):
                    print(sampletype)
                    features=features+g['features'][sampletype][default_features[j]]['features']
                labels=g['features'][sampletype][default_features[0]]['labels']
                print(transformer)
                print(features)
                if transformer != '':
                    features=np.array(transformer.transform(np.array(features).reshape(1, -1))).reshape(1, -1)
                else:
                    features=np.array(features).reshape(1,-1)
                print(features)
                print(modeltype)
                if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
                    y_pred=clf.predict(features)
                elif modeltype=='alphapy':
                    # go to the right folder
                    curdir=os.getcwd()
                    print(os.listdir())
                    # NOTE(review): common_name is not defined anywhere in this
                    # module; this branch raises NameError, which the outer
                    # except swallows — confirm intended session naming.
                    os.chdir(common_name+'_alphapy_session')
                    alphapy_dir=os.getcwd()
                    os.chdir('input')
                    os.rename('test.csv', 'predict.csv')
                    os.chdir(alphapy_dir)
                    os.system('alphapy --predict')
                    os.chdir('output')
                    listdir=os.listdir()
                    # fix: use a fresh loop variable; 'k' is the sample index
                    for m in range(len(listdir)):
                        if listdir[m].startswith('predictions'):
                            csvfile=listdir[m]
                    y_pred=pd.read_csv(csvfile)['prediction']
                    os.chdir(curdir)
                elif modeltype == 'autogluon':
                    curdir=os.getcwd()
                    os.chdir(model_dir+'/model/')
                    from autogluon import TabularPrediction as task
                    print(os.getcwd())
                    if transformer != '':
                        new_features=dict()
                        for i in range(len(features[0])):
                            new_features['feature_%s'%(str(i))]=[features[0][i]]
                        print(new_features)
                        df=pd.DataFrame(new_features)
                    else:
                        df=pd.DataFrame(features, columns=labels)
                    y_pred=clf.predict(df)
                    os.chdir(curdir)
                elif modeltype == 'autokeras':
                    curdir=os.getcwd()
                    os.chdir(model_dir+'/model')
                    print(os.getcwd())
                    y_pred=clf.predict(features).flatten()
                    # y_pred=np.rint(y_pred)
                    os.chdir(curdir)
                elif modeltype == 'autopytorch':
                    y_pred=clf.predict(features).flatten()
                elif modeltype == 'atm':
                    curdir=os.getcwd()
                    os.chdir('atm_temp')
                    data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
                    y_pred = clf.predict(data)
                    os.chdir(curdir)
                elif modeltype == 'ludwig':
                    data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
                    pred=clf.predict(data)['class__predictions']
                    y_pred=np.array(list(pred), dtype=np.int64)
                elif modeltype== 'devol':
                    features=features.reshape(features.shape+ (1,)+ (1,))
                    y_pred=clf.predict_classes(features).flatten()
                elif modeltype=='keras':
                    # NOTE(review): mtype is not defined in this module either;
                    # both keras branches depend on it — confirm.
                    if mtype == 'c':
                        y_pred=clf.predict_classes(features).flatten()
                    elif mtype == 'r':
                        # fix: was clf.predict(feaures) — NameError typo
                        y_pred=clf.predict(features).flatten()
                elif modeltype =='neuraxle':
                    y_pred=clf.transform(features)
                elif modeltype=='safe':
                    # have to make into a pandas dataframe
                    test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
                    y_pred=clf.predict(test_data)
                # map the predicted index back to a class name
                '''
                X={'male': [1],
                   'female': [2],
                   'other': [3]}

                then do a search of the values
                names=list(X) --> ['male', 'female', 'other']
                i1=X.values().index([1]) --> 0
                names[i1] --> male
                '''
                outputs=dict()
                for i in range(len(classes)):
                    outputs[classes[i]]=[i]
                if modeltype == 'autokeras':
                    maxval=max(y_pred)
                    index_=list(y_pred).index(maxval)
                    y_pred=[index_]
                i1=list(outputs.values()).index(y_pred)
                class_=classes[i1]
                print(y_pred)
                print(outputs)
                print(class_)
                print(i1)
                try:
                    models=g['models']
                except:
                    # fix: was 'models=models=model_schema()'
                    models=model_schema()
                temp=models[sampletype]
                if class_ not in list(temp):
                    temp[class_]= [modeldata]
                else:
                    tclass=temp[class_]
                    try:
                        # make a list if it is not already to be compatible with deprecated versions
                        tclass.append(modeldata)
                    except:
                        tclass=[tclass]
                        tclass.append(modeldata)
                    temp[class_]=tclass
                models[sampletype]=temp
                g['models']=models
                print(class_)
                # update database
                jsonfilename=open(jsonfiles[k],'w')
                json.dump(g,jsonfilename)
                jsonfilename.close()
            except:
                print('error making jsonfile %s'%(jsonfiles[k].upper()))
    else:
        try:
            for k in range(len(csvfiles)):
                if len(csvfiles[k].split('featurized')) == 2:
                    features=pd.read_csv(csvfiles[k])
                    oldfeatures=features
                    print(features)
                    if transformer != '':
                        features=np.array(transformer.transform(np.array(features)))
                    else:
                        features=np.array(features)
                    print(features)
                    print(modeltype)
                    if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
                        y_pred=clf.predict(features)
                    elif modeltype=='alphapy':
                        # go to the right folder
                        curdir=os.getcwd()
                        print(os.listdir())
                        # NOTE(review): common_name undefined — see above.
                        os.chdir(common_name+'_alphapy_session')
                        alphapy_dir=os.getcwd()
                        os.chdir('input')
                        os.rename('test.csv', 'predict.csv')
                        os.chdir(alphapy_dir)
                        os.system('alphapy --predict')
                        os.chdir('output')
                        listdir=os.listdir()
                        # fix: fresh loop variable instead of shadowing 'k'
                        for m in range(len(listdir)):
                            if listdir[m].startswith('predictions'):
                                csvfile=listdir[m]
                        y_pred=pd.read_csv(csvfile)['prediction']
                        os.chdir(curdir)
                    elif modeltype == 'autogluon':
                        curdir=os.getcwd()
                        os.chdir(model_dir+'/model/')
                        from autogluon import TabularPrediction as task
                        print(os.getcwd())
                        if transformer != '':
                            new_features=dict()
                            for i in range(len(features[0])):
                                new_features['feature_%s'%(str(i))]=[features[0][i]]
                            print(new_features)
                            df=pd.DataFrame(new_features)
                        else:
                            # NOTE(review): 'labels' is never defined in this
                            # csv branch (NameError, swallowed by the except)
                            df=pd.DataFrame(features, columns=labels)
                        y_pred=clf.predict(df)
                        os.chdir(curdir)
                    elif modeltype == 'autokeras':
                        curdir=os.getcwd()
                        os.chdir(model_dir+'/model')
                        print(os.getcwd())
                        y_pred=clf.predict(features).flatten()
                        os.chdir(curdir)
                    elif modeltype == 'autopytorch':
                        y_pred=clf.predict(features).flatten()
                    elif modeltype == 'atm':
                        curdir=os.getcwd()
                        os.chdir('atm_temp')
                        data = pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
                        y_pred = clf.predict(data)
                        os.chdir(curdir)
                    elif modeltype == 'ludwig':
                        data=pd.read_csv('test.csv').drop(labels=['class_'], axis=1)
                        pred=clf.predict(data)['class__predictions']
                        y_pred=np.array(list(pred), dtype=np.int64)
                    elif modeltype== 'devol':
                        features=features.reshape(features.shape+ (1,)+ (1,))
                        y_pred=clf.predict_classes(features).flatten()
                    elif modeltype=='keras':
                        if mtype == 'c':
                            y_pred=clf.predict_classes(features).flatten()
                        elif mtype == 'r':
                            # fix: was clf.predict(feaures) — NameError typo
                            y_pred=clf.predict(features).flatten()
                    elif modeltype =='neuraxle':
                        y_pred=clf.transform(features)
                    elif modeltype=='safe':
                        # have to make into a pandas dataframe
                        test_data=pd.read_csv('test.csv').drop(columns=['class_'], axis=1)
                        y_pred=clf.predict(test_data)
                    # write the predictions alongside the original features
                    y_pred=pd.DataFrame(y_pred)
                    oldfeatures['class_']=y_pred
                    print(type(y_pred))
                    oldfeatures.to_csv(csvfiles[k].replace('featurized','predictions'), index=False)
        except:
            pass
def load_model(folder_name):
    """Load one trained model session from the current 'model' directory.

    Reads '<folder_name>.json' for the session metadata, loads any
    '*transform.pickle' preprocessing transformer found, and loads the model
    itself with the framework indicated by the metadata's 'model type'.

    Returns (transformer, clf, model_type, classes, metadata); transformer
    is '' when no transform pickle exists, clf is '' for unknown types.
    # NOTE(review): pickle.load on these files executes arbitrary code —
    # only load model folders from trusted sources.
    """
    listdir=os.listdir()
    # load in a transform if necessary
    for i in range(len(listdir)):
        if listdir[i].endswith('transform.pickle'):
            print(listdir[i])
            transform_=open(listdir[i],'rb')
            transformer=pickle.load(transform_)
            transform_.close()
            break
        else:
            # for/else-style fallback: keeps '' until a pickle is found
            transformer=''

    jsonfile=open(folder_name+'.json')
    g=json.load(jsonfile)
    jsonfile.close()

    # get model name
    modelname=g['model name']
    classes=g['classes']
    model_type=g['model type']
    print(model_type)
    # g['model type']

    # load model for getting metrics
    if model_type not in ['alphapy', 'atm', 'autopytorch', 'autokeras', 'ludwig', 'keras', 'devol']:
        loadmodel=open(modelname, 'rb')
        clf=pickle.load(loadmodel)
        loadmodel.close()
    elif model_type == 'atm':
        from atm import Model
        clf=Model.load(modelname)
    elif model_type == 'autokeras':
        import tensorflow as tf
        import autokeras as ak
        from tensorflow.keras.models import load_model
        print(modelname)
        clf = load_model(modelname, custom_objects=ak.CUSTOM_OBJECTS)
    elif model_type=='autopytorch':
        import torch
        clf=torch.load(modelname)
    elif model_type == 'ludwig':
        from ludwig.api import LudwigModel
        clf=LudwigModel.load('ludwig_files/experiment_run/model/')
    elif model_type in ['devol', 'keras']:
        from keras.models import load_model
        clf = load_model(modelname)
    else:
        clf=''

    return transformer, clf, model_type, classes, g
def find_models(target_model_dir, sampletype):
    """Discover trained model sessions for every sample type.

    When sampletype is False, scans each '<type>_models' folder under the
    cwd for session folders containing model/<session>.json and returns a
    dict mapping folder name -> list of session names.
    Otherwise copies target_model_dir into '<sampletype>_models/' (errors
    ignored if it already exists) and registers just that session.
    """
    curdir=os.getcwd()
    if sampletype == False:
        listdir=os.listdir()
        directories=['audio_models', 'text_models', 'image_models', 'video_models', 'csv_models']
        models_=dict()
        for i in range(len(directories)):
            model_names=list()
            try:
                os.chdir(curdir)
                os.chdir(directories[i])
                listdir=os.listdir()
                folders=list()
                for j in range(len(listdir)):
                    if listdir[j].find('.') < 0:
                        folders.append(listdir[j])
                print(folders)
                curdir2=os.getcwd()
                for j in range(len(folders)):
                    try:
                        os.chdir(curdir2)
                        os.chdir(folders[j])
                        os.chdir('model')
                        listdir2=os.listdir()
                        jsonfile=folders[j]+'.json'
                        for k in range(len(listdir2)):
                            if listdir2[k] == jsonfile:
                                g=json.load(open(jsonfile))
                                model_names.append(jsonfile[0:-5])
                    except:
                        pass
                print(model_names)
            except:
                print('error')
            models_[directories[i]]=model_names
    else:
        # copy the target_model_dir to the right folder
        # bug fix: 'text_models' was initialized twice and 'video_models'
        # never, so video model sessions were silently dropped downstream
        models_={'audio_models': list(),
                 'text_models': list(),
                 'image_models': list(),
                 'video_models': list(),
                 'csv_models': list()}
        try:
            shutil.copytree(target_model_dir, curdir+'/'+sampletype+'_models/'+target_model_dir.split('/')[-1])
        except:
            pass
        models_[sampletype+'_models']=[target_model_dir.split('/')[-1]]
    print('------------------------------')
    print(' IDENTIFIED MODELS ')
    print('------------------------------')
    print(models_)
    # time.sleep(50)
    return models_
# --- module entry: resolve folders, settings and CLI arguments, then
# featurize and predict over everything in the load directory ---
# get folders
curdir=os.getcwd()
basedir=prev_dir(curdir)
os.chdir(basedir)

# load settings
settings=json.load(open('settings.json'))

# get the base audio, text, image, and video features from required models
default_audio_features=settings['default_audio_features']
default_text_features=settings['default_text_features']
default_image_features=settings['default_image_features']
default_video_features=settings['default_video_features']
default_csv_features=settings['default_csv_features']

features_dir=basedir+'/features'
model_dir=basedir+'/models'

try:
    # specify a specific model to make a prediction around
    # e.g. /Users/jim/desktop/audiofiles
    load_dir=sys.argv[1]
except:
    # if not specified, defaults to the ./load_dir folder
    load_dir=basedir+'/load_dir'
try:
    # get the sampletype, for example 'audio'
    sampletype = sys.argv[2]
except:
    # if no sampletype specified, it will discover all file types
    sampletype=False
try:
    # optional: path to a specific trained model session to use
    target_model_dir=sys.argv[3]
except:
    target_model_dir=False

# now get all the filetypes if not specified
os.chdir(load_dir)
listdir=os.listdir()
# NOTE(review): 'image' appears twice in this membership list — harmless
# for the 'in' test, but looks like a typo.
if sampletype != False and sampletype in ['audio','image','text','image','video','csv']:
    # e.g. sampletype =='audio'
    filetypes=[sampletype]
else:
    # get file types ['audio','image','text','image','video','csv']
    filetypes=classifyfolder(listdir)

# find all machine learning models
os.chdir(model_dir)
models=find_models(target_model_dir, sampletype)
model_dirs=list(models)

# now that we have all the models we can begin to load all of them
for i in range(len(model_dirs)):
    if model_dirs[i].split('_')[0] in filetypes:
        print('-----------------------')
        print('FEATURIZING %s'%(model_dirs[i].upper()))
        print('-----------------------')
        # Note this contextually featurizes based on all the models and the
        # minimum number of featurizations necessary to accommodate all model predictions
        featurize(features_dir, load_dir, model_dirs[i], filetypes, models)

# now model everything
for i in range(len(model_dirs)):
    try:
        if model_dirs[i].split('_')[0] in filetypes:
            print('-----------------------')
            print('MODELING %s'%(model_dirs[i].upper()))
            print('-----------------------')
            os.chdir(model_dir)
            os.chdir(model_dirs[i])
            models_=models[model_dirs[i]]
            print(model_dirs[i])
            # loop through models
            for j in range(len(models_)):
                os.chdir(model_dir)
                os.chdir(model_dirs[i])
                print('--> predicting %s'%(models_[j]))
                os.chdir(models_[j])
                os.chdir('model')
                transformer, clf, modeltype, classes, modeldata = load_model(models_[j])
                # read the feature sets this particular session was trained on
                default_features_model=json.load(open(models_[j]+'.json'))['settings']["default_"+model_dirs[i].split('_')[0]+"_features"]
                os.chdir(load_dir)
                jsonfiles, csvfiles=find_files(model_dirs[i])
                make_predictions(model_dirs[i], transformer, clf, modeltype, jsonfiles, csvfiles, default_features_model, classes, modeldata, model_dir+'/'+model_dirs[i]+'/'+models_[j])
    except:
        print('error')
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/sort.py | models/sort.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Sort audio in load_dir based on model predictions.
Note you currently have to manually edit this file for it to be useful.
Usage: python3 sort.py
'''
import os,json, shutil
from tqdm import tqdm
import time
def prev_dir(directory):
    """Return the parent of a '/'-separated path ('/a/b/c' -> '/a/b').

    Equivalent to joining all but the last component; returns '' when the
    path has no separator. Replaces the original O(n^2) manual string
    concatenation loop with a single split/join.
    """
    return '/'.join(directory.split('/')[:-1])
# --- module entry: collect audio predictions from load_dir and copy the
# .wav files matching a user-chosen model label into a subfolder ---
os.chdir(prev_dir(os.getcwd())+'/load_dir/')
listdir=os.listdir()
jsonfiles=list()
# every featurized sample has a companion .json holding its predictions
for i in range(len(listdir)):
    if listdir[i].endswith('.json'):
        jsonfiles.append(listdir[i])

predictions=list()
for i in tqdm(range(len(jsonfiles))):
    try:
        g=json.load(open(jsonfiles[i]))
        # keys under ['models']['audio'] are the predicted class labels
        models=list(g['models']['audio'])
        predictions=predictions+models
    except:
        print('error')

unique_names=list(set(predictions))
input_=input('what would you like to sort by? \n%s'%(unique_names))
# NOTE(review): os.mkdir raises if the folder already exists (uncaught)
os.mkdir(input_)
curdir=os.getcwd()

for i in tqdm(range(len(jsonfiles))):
    try:
        g=json.load(open(jsonfiles[i]))
        models=list(g['models']['audio'])
        if input_ in models:
            # copy the matching .wav next to its json into the label folder
            shutil.copy(curdir+'/'+jsonfiles[i].replace('.json','.wav'), curdir+'/'+input_+'/'+jsonfiles[i].replace('.json','.wav'))
    except:
        print('error')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/model2csv.py | models/model2csv.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Creates an excel sheet of all currently trained models with their model performances;
useful to summarize all modeling sessions quickly; outputs to current directory.
Usage: python3 model2csv.py
'''
import os, json
import pandas as pd
def id_folder():
    """Scan the per-type model directories for trained models.

    Walks the audio/text/image/video/csv model folders under the current
    directory; for each model folder that contains model/<name>.json,
    collects the stored 'metrics' dict and the model name.

    Returns:
        (metrics_list, model_names): parallel lists of metrics dicts and
        model-name strings. Model-type directories that do not exist (or
        are malformed) are skipped.
    """
    curdir = os.getcwd()
    directories = ['audio_models', 'text_models', 'image_models', 'video_models', 'csv_models']
    metrics_list = list()
    model_names = list()
    for directory in directories:
        try:
            os.chdir(curdir)
            os.chdir(directory)
            # model folders are the directory entries without a file extension
            folders = [entry for entry in os.listdir() if entry.find('.') < 0]
            curdir2 = os.getcwd()
            for folder in folders:
                os.chdir(curdir2)
                os.chdir(folder)
                os.chdir('model')
                jsonfile = folder + '.json'
                if jsonfile in os.listdir():
                    # close the stats file promptly instead of leaking the handle
                    with open(jsonfile) as fh:
                        g = json.load(fh)
                    metrics_list.append(g['metrics'])
                    model_names.append(jsonfile[0:-5])
        except Exception:
            # directory type not present (or a folder lacks model/<name>.json) -- skip it
            pass
    return metrics_list, model_names
# Gather metrics from every trained model folder, then split them into
# regression and classification summaries written as CSV files.
curdir=os.getcwd()
metrics_list, model_names=id_folder()
# regression models: a model is treated as regression when its metrics dict
# has the regression keys (a KeyError routes it to the except/pass below).
# NOTE(review): if a dict has some but not all keys, the earlier appends
# still happen, so the lists can fall out of alignment -- confirm schema.
meanabsolute_errors=list()
meansquared_errors=list()
median_errors=list()
r2_scores=list()
regression_models=list()
for i in range(len(model_names)):
    try:
        meanabsolute_errors.append(metrics_list[i]['mean_absolute_error'])
        meansquared_errors.append(metrics_list[i]['mean_squared_error'])
        median_errors.append(metrics_list[i]['median_absolute_error'])
        r2_scores.append(metrics_list[i]['r2_score'])
        regression_models.append(model_names[i])
    except:
        pass
# classification models: same pattern keyed on accuracy/roc_auc
accuracies=list()
roc_curve=list()
classification_models=list()
for i in range(len(model_names)):
    try:
        accuracies.append(metrics_list[i]['accuracy'])
        roc_curve.append(metrics_list[i]['roc_auc'])
        classification_models.append(model_names[i])
    except:
        pass
classification_data={'model names': classification_models,
    'accuracies': accuracies,
    'roc_auc': roc_curve}
# NOTE: median_errors are collected above but not included in regression_data.
regression_data={'model_names': regression_models,
    'mean_absolute_errors': meanabsolute_errors,
    'mean_squared_errors': meansquared_errors,
    'r2_scores': r2_scores}
os.chdir(curdir)
df=pd.DataFrame.from_dict(classification_data)
df.to_csv('classification_models.csv', index=False)
df=pd.DataFrame.from_dict(regression_data)
df.to_csv('regression_models.csv', index=False)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/create_readme.py | models/create_readme.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Creates a readme for a machine learning repository; currently used by the modeling API.
Usage: python3 create_readme.py [modelpath]
'''
import os, json, sys
# Build a readme.md for the model directory given as the first CLI argument.
# go to the proper directory
directory=sys.argv[1]
os.chdir(directory)
# now get current directory
curdir=os.getcwd()
listdir=os.listdir()
if 'settings.json' in listdir:
    settings=json.load(open('settings.json'))
    settings_g='## Settings \n'
    settings_g=settings_g+'```\n'
    settings_g=settings_g+str(settings)+'\n'
    settings_g=settings_g+'```\n'
    settings_g=settings_g+'\n'
else:
    settings=''
    # bug fix: settings_g must exist even without a settings.json, because
    # it is unconditionally written at the end of the readme below
    settings_g=''
if 'visualizations' in listdir:
    vis='## Visualizations \n'
    vis=vis+'### Clustering \n'
    vis=vis+'\n'
    vis=vis+'\n'
    vis=vis+'### Feature ranking \n'
    vis=vis+'\n'
    vis=vis+'\n'
    vis=vis+''
else:
    vis=''
os.chdir('model')
listdir=os.listdir()
# flags for which training plots exist in the model folder
confusion_matrix = 'confusion_matrix.png' in listdir
roc_curve = 'roc_curve.png' in listdir
# the first .json found is the model's stats file
for i in range(len(listdir)):
    if listdir[i].endswith('.json'):
        statsfile=listdir[i]
        stats=json.load(open(listdir[i]))
        break
os.chdir(curdir)
common_name=statsfile.split('_')[0]
readme_file=open('readme.md','w')
readme_file.write('# %s model \n'%(common_name.upper()))
readme_file.write('This is a %s model created on %s classifying %s. It was trained using the %s script, and achieves the following accuracy scores: \n```\n%s\n```\n'%(common_name, stats['created date'], str(stats['classes']), stats['model type'],str(stats['metrics'])))
readme_file.write(vis)
readme_file.write('\n')
if roc_curve==True:
    readme_file.write('\n')
if confusion_matrix==True:
    readme_file.write('\n')
readme_file.write(settings_g)
# close the handle so the readme is flushed to disk
readme_file.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/clean.py | models/clean.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Cleans the current directory to have only the necessary .py files for core function;
useful because the folder can get messy during modeling sometimes.
Usage: python3 clean.py
'''
import os, shutil
# Remove everything in the current directory except the whitelisted entries
# and python scripts: extensionless entries are treated as directories and
# deleted recursively, other files are unlinked.
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i] not in ['audio_models', 'readme.md'] and listdir[i].endswith('.py') == False:
        if listdir[i].find('.') == -1:
            # no extension -> assume a directory
            shutil.rmtree(listdir[i])
        else:
            os.remove(listdir[i])
        print('removed %s'%(listdir[i]))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/models/validate.py | models/validate.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Can be useful to count the number of model predictions in a certain class after prediction
is complete to get an intuition of accuracy of model on new datasets.
Note you currently have to manually edit this file for it to be useful.
Usage: python3 validate.py
'''
import os,json
from tqdm import tqdm
import time
def prev_dir(directory):
    """Return *directory* with its final '/'-separated component removed,
    i.e. the parent directory as a plain string (no filesystem access)."""
    parts = directory.split('/')
    return '/'.join(parts[:-1])
# Move into ../load_dir (sibling of this script's folder) and gather the
# prediction .json files written there.
os.chdir(prev_dir(os.getcwd())+'/load_dir/')
listdir=os.listdir()
jsonfiles=list()
for i in range(len(listdir)):
    if listdir[i].endswith('.json'):
        jsonfiles.append(listdir[i])
# Tally every audio-model prediction label across the files; unreadable or
# schema-mismatched files are skipped with a printed 'error'.
predictions=list()
for i in tqdm(range(len(jsonfiles))):
    try:
        g=json.load(open(jsonfiles[i]))
        models=list(g['models']['audio'])
        predictions=predictions+models
    except:
        print('error')
# Print each unique label followed by its occurrence count.
unique_names=list(set(predictions))
for i in range(len(unique_names)):
    print(unique_names[i])
    print(predictions.count(unique_names[i]))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/rename.py | train_dir/rename.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Renames all the files in a particular directory (both audio files and .JSON files).
Note you can manually change this to other file types.
Usage: python3 rename_files.py [folder]
Example: python3 rename.py /Users/jim/desktop/allie/train_dir/males
'''
import os,uuid, sys
# Folder to process is the first CLI argument.
directory=sys.argv[1]
os.chdir(directory)
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i].endswith('.wav'):
        # give the audio file a fresh UUID-based name...
        newname=str(uuid.uuid4())
        os.rename(listdir[i],newname+'.wav')
        # ...and keep its featurization .json (same basename) in sync, if present
        if listdir[i][0:-4]+'.json' in listdir:
            os.rename(listdir[i][0:-4]+'.json', newname+'.json')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/create_csv.py | train_dir/create_csv.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Usage: python3 create_csv.py [folderpathA] [folderpathB] [folderpath...N]
Example: python3 create_csv.py /Users/jim/desktop/allie/train_dir/males
--> creates a file output.csv in ./train_dir
'''
import os, sys, time
import pandas as pd
def detect_files(listdir, directory):
    """Bucket *listdir* entries by sample type and keep the dominant one.

    Returns ({'data': [full paths], 'labels': [folder label, ...]}, count)
    for whichever media type (.wav / .png / .txt / .mp4, tie broken in that
    order) is most common. The label is the last path component of
    *directory*.
    """
    buckets = {'.wav': [], '.png': [], '.txt': [], '.mp4': []}
    for name in listdir:
        for ext in buckets:
            if name.endswith(ext):
                buckets[ext].append(directory + '/' + name)
                break
    # preserve the original audio/image/text/video tie-break order
    ordered = [buckets['.wav'], buckets['.png'], buckets['.txt'], buckets['.mp4']]
    counts = [len(bucket) for bucket in ordered]
    maxval = max(counts)
    label = directory.split('/')[-1]
    winner = ordered[counts.index(maxval)]
    data = {'data': winner, 'labels': [label] * maxval}
    return data, maxval
def get_ind(data, start):
    """Return the index list [start, start+1, ...] covering len(data) rows."""
    return [start + offset for offset in range(len(data))]
def get_dataframe(directory, start):
    # Build a labeled DataFrame for one class folder by delegating to
    # detect_files, then re-index it from 0.
    # NOTE(review): the incoming *start* is immediately overwritten with 0
    # below, so the parameter is effectively unused and the returned
    # `start` is detect_files' count -- confirm before relying on
    # cross-folder index offsets.
    os.chdir(directory)
    listdir=os.listdir()
    start=0
    data, start=detect_files(listdir, directory)
    data=pd.DataFrame(data)
    ind1=get_ind(data, 0)
    data=pd.DataFrame(data, index=ind1)
    return data, start
curdir=os.getcwd()
# Every CLI argument is a class folder to ingest.
directories=list()
i=1
while True:
    try:
        directories.append(sys.argv[i])
        i=i+1
    except:
        # IndexError: ran out of arguments
        break
start=0
datas=list()
for i in range(len(directories)):
    data, start=get_dataframe(directories[i], start)
    datas.append(data)
# Merge the per-folder frames. NOTE(review): after the loop above, `data`
# already holds the LAST frame, so this concatenates that last frame with
# datas[0..n-2]; every folder appears exactly once but the row order is
# unusual -- pd.concat(datas) is presumably what was intended; confirm.
for i in range(len(datas)-1):
    data=pd.concat([data, datas[i]])
# combine spreadsheets
os.chdir(curdir)
data.to_csv('output.csv', index=False)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/delete_json.py | train_dir/delete_json.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Deletes all .JSON files from all folders in the train_dir
(useful to re-featurize sets of files).
Usage: python3 delete_json.py
Example: python3 delete_json.py
'''
import os

# Class folders in train_dir are the entries with no file extension.
folders = [entry for entry in os.listdir() if entry.find('.') < 0]
print(folders)
# remove all json files (forces re-featurization on the next run)
curdir = os.getcwd()
for folder in folders:
    os.chdir(curdir)
    os.chdir(folder)
    for entry in os.listdir():
        if entry.endswith('.json'):
            print(entry)
            os.remove(entry)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/get_stats_folder.py | train_dir/get_stats_folder.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Makes a table in Microsoft Word for all the audio features present in a file in a particular folder.
This is useful for peer-reviewed publications (for supplementary tables).
Usage: python3 get_stats_folder.py
Example: python3 get_stats_folder.py
Following this tutorial with modifications: https://towardsdatascience.com/how-to-generate-ms-word-tables-with-python-6ca584df350e
'''
import os
def get_folders():
    """Return the current directory's entries that look like folders,
    i.e. names containing no dot."""
    return [entry for entry in os.listdir() if entry.find('.') == -1]
curdir=os.getcwd()
folders=get_folders()
# Run the single-folder stats script once per class folder, passing the
# folder's absolute path.
for i in range(len(folders)):
    os.system('python3 get_stats.py %s'%(curdir+'/'+folders[i]))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/combine_datasets.py | train_dir/combine_datasets.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
How to use:
python3 combine_datasets.py
Must edit the dir_1, dir_2, dir_1_folders, and dir_2_folders
where:
dir_1 = first directory to combine all datasets in class 1 (e.g. male)
dir_2 = second directory to combine all datasets into class 2 (e.g. female)
dir_1_folders = a list of all the folders in the current directory tied to class 1 (to combine - e.g. male)
dir_2_folders = a list of all the folders in the current directory tied to class 2 (to combine - e.g. female)
'''
import os, shutil
from tqdm import tqdm
def copy_files(directory_):
    """Copy every .wav/.json in the current directory into *directory_*,
    showing a progress bar labeled with the source folder's path."""
    src = os.getcwd()
    entries = os.listdir()
    for idx in tqdm(range(len(entries)), desc=src):
        name = entries[idx]
        if name.endswith('.json') or name.endswith('.wav'):
            shutil.copy(src + '/' + name, directory_ + '/' + name)
curdir=os.getcwd()
# Destination folders for the two combined classes.
# NOTE(review): these folders are assumed to already exist -- confirm, as
# copy_files does not create them.
dir_1=curdir+'/directory1_combined'
dir_2=curdir+'/directory2_combined'
# folders to combine into directory 1
dir_1_folders=['directory1_dataset1', 'directory1_dataset2']
for i in range(len(dir_1_folders)):
    os.chdir(curdir)
    os.chdir(dir_1_folders[i])
    copy_files(dir_1)
# folders to combine into directory 2
dir_2_folders=['directory2_dataset1', 'directory2_dataset2']
for i in range(len(dir_2_folders)):
    os.chdir(curdir)
    os.chdir(dir_2_folders[i])
    copy_files(dir_2)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/make_csv_regression.py | train_dir/make_csv_regression.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Make many CSV files for modeling,
good to combine with https://github.com/jim-schwoebel/allie/blob/master/training/regression_all.py
Usage: python3 make_csv_regression.py [csvfile] [targetcol]
Example: python3 make_csv_regression.py test2.csv urls
--> creates many spreadsheets in the ./train_dir for regression modeling
'''
import os, sys
import pandas as pd
def replace_nonstrings(string_):
    """Sanitize *string_* into a filesystem-friendly name: lowercase,
    spaces become underscores, every character outside [a-z0-9_ ] is
    dropped, and the result is capped at 50 characters."""
    allowed = set('abcdefghijklmnopqrstuvwxyz _0123456789')
    lowered = string_.lower().replace(' ', '_')
    cleaned = ''.join(ch for ch in lowered if ch in allowed)
    return cleaned[:50]
# CLI: the source spreadsheet and the target column name.
csvfile=sys.argv[1]
target=sys.argv[2]
data=pd.read_csv(csvfile)
cols=list(data)
# For every non-target column, write a two-column CSV (target + that
# column), named after the sanitized column name.
# NOTE(review): the loop walks cols[1:], which assumes the target is the
# first column -- confirm against the input spreadsheets.
for i in range(len(cols)-1):
    ind1=cols.index(target)
    ind2=cols.index(cols[i+1])
    # drop every column except the target and the current one
    delcols=list()
    for j in range(len(cols)):
        if j in [ind1,ind2]:
            pass
        else:
            delcols.append(cols[j])
    print(delcols)
    newdata=data.drop(delcols, axis=1)
    newstring=replace_nonstrings(cols[i+1])
    newdata.to_csv(newstring+'.csv', index=False)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/remove_unfeaturized.py | train_dir/remove_unfeaturized.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Deletes all audio files that are unfeaturized after featurization. This can be used a cleaning step.
Usage: python3 remove_unfeaturized.py
Example: python3 remove_unfeaturized.py
'''
import os
# Collect the class folders (entries with no file extension).
folders=list()
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i].find('.')<0:
        folders.append(listdir[i])
print(folders)
# Remove .wav files that have no companion .json, i.e. samples that were
# never featurized; the number of removals is printed per folder.
curdir=os.getcwd()
for i in range(len(folders)):
    os.chdir(curdir)
    os.chdir(folders[i])
    listdir=os.listdir()
    count=0
    for j in range(len(listdir)):
        if listdir[j].endswith('.wav') and listdir[j][0:-4]+'.json' not in listdir:
            print(folders[i])
            print(listdir[j])
            os.remove(listdir[j])
            count=count+1
    print(count)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/delete_features.py | train_dir/delete_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Deletes a specified set of features from a .JSON file (in all folders in train_dir),
as specified by the user.
Usage: python3 delete_features.py [sampletype] [feature_set]
Example: python3 delete_features.py audio librosa_features
'''
import os, json, sys
from tqdm import tqdm
# CLI: sample type (e.g. 'audio') and the feature set to strip from each
# sample's features dict.
sampletype=sys.argv[1]
feature_set=sys.argv[2]
# Collect the class folders (entries with no file extension).
folders=list()
listdir=os.listdir()
for i in range(len(listdir)):
    if listdir[i].find('.')<0:
        folders.append(listdir[i])
print(folders)
# Walk each folder and rewrite every .json without the chosen feature set.
curdir=os.getcwd()
for i in tqdm(range(len(folders))):
    os.chdir(curdir)
    os.chdir(folders[i])
    listdir=os.listdir()
    # NOTE(review): this inner loop reuses `i`, shadowing the folder index;
    # harmless because the outer for reassigns it each pass, but worth renaming.
    for i in range(len(listdir)):
        if listdir[i].endswith('.json'):
            # print(listdir[i])
            # os.remove(listdir[i])
            try:
                # drop the chosen feature set and rewrite the file in place
                data=json.load(open(listdir[i]))
                del data['features'][sampletype][feature_set]
                jsonfile=open(listdir[i],'w')
                json.dump(data,jsonfile)
                jsonfile.close()
            except:
                # file lacks that feature set (or is unreadable) -- skip it
                pass
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/create_dataset.py | train_dir/create_dataset.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
How to use:
python3 create_dataset.py [csvfile] [targetname]
python3 create_dataset.py What_is_your_total_household_income.csv 'What is your total household income?'
'''
import pandas as pd
import numpy as np
import os, shutil, sys
def determine_categorical(data_values):
    """Classify a target column: it counts as categorical when it has at
    most 10 distinct values. Returns (is_categorical, unique_values)."""
    uniquevals = list(set(list(data_values)))
    categorical = len(uniquevals) <= 10
    return categorical, uniquevals
def replace_nonstrings(string_):
    """Lowercase *string_*, turn spaces into underscores, drop every
    character outside [a-z0-9_ ], and truncate the result to 50 chars."""
    keep = 'abcdefghijklmnopqrstuvwxyz _0123456789'
    pieces = []
    for ch in string_.lower().replace(' ', '_'):
        if ch in keep:
            pieces.append(ch)
    return ''.join(pieces)[:50]
# CLI: the source spreadsheet and the target column to split on.
csvfile=sys.argv[1]
target=sys.argv[2]
data=pd.read_csv(csvfile)
urls=(data['url'])
data_values=list(data[target])
average=float(np.average(np.array(data_values)))
categorical, uniquevals = determine_categorical(data_values)
target=replace_nonstrings(target)

def _fresh_dir(path):
    # Create an empty class folder, replacing any previous contents.
    # (bug fix: the old `try: mkdir / except: rmtree` removed an existing
    # folder but never recreated it, so the copies below then failed.)
    try:
        os.mkdir(path)
    except Exception:
        shutil.rmtree(path)
        os.mkdir(path)

if categorical == False:
    # continuous target: split the samples above/below the mean
    _fresh_dir(target+'_above')
    _fresh_dir(target+'_below')
else:
    # categorical target: one folder per unique value
    for i in range(len(uniquevals)):
        newstring=replace_nonstrings(str(uniquevals[i]))
        _fresh_dir(target+'_'+newstring)

# Copy each sample (and its featurization .json, if present) into the
# folder matching its target value.
for i in range(len(data)):
    if categorical == False:
        if data_values[i] > average:
            shutil.copy(urls[i], os.getcwd()+'/'+target+'_above'+'/'+urls[i].split('/')[-1])
            try:
                shutil.copy(urls[i][0:-4]+'.json', os.getcwd()+'/'+target+'_above'+'/'+urls[i].split('/')[-1][0:-4]+'.json')
            except:
                pass
        else:
            shutil.copy(urls[i], os.getcwd()+'/'+target+'_below'+'/'+urls[i].split('/')[-1])
            try:
                shutil.copy(urls[i][0:-4]+'.json', os.getcwd()+'/'+target+'_below'+'/'+urls[i].split('/')[-1][0:-4]+'.json')
            except:
                pass
    else:
        for j in range(len(uniquevals)):
            if data_values[i] == uniquevals[j]:
                newstring=replace_nonstrings(str(uniquevals[j]))
                shutil.copy(urls[i], os.getcwd()+'/'+target+'_'+newstring+'/'+urls[i].split('/')[-1])
                try:
                    shutil.copy(urls[i][0:-4]+'.json', os.getcwd()+'/'+target+'_'+newstring+'/'+urls[i].split('/')[-1][0:-4]+'.json')
                except:
                    pass
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/make_new.py | train_dir/make_new.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Usage: python3 make_new.py [csvfile] [newdir] [targetvar]
Example: python3 make_new.py new.csv /Users/jim/desktop/train_dir/one url
'''
import pandas as pd
import os
import sys  # bug fix: sys.argv was used below without importing sys

# CLI: source spreadsheet, new base directory, and the URL column name.
csvfile=sys.argv[1]
newdir=sys.argv[2]
curdir=os.getcwd()
try:
    os.chdir(newdir)
except:
    # destination did not exist yet -- create it
    os.mkdir(newdir)
os.chdir(curdir)
target=sys.argv[3]
data=pd.read_csv(csvfile)
urls=data[target]
# Rebase each URL's basename onto newdir.
newurls=list()
for i in range(len(urls)):
    # bug fix: the original indexed the column *name* (target[i], a single
    # character) instead of the column value urls[i]
    newurls.append(newdir+'/'+urls[i].split('/')[-1])
data[target]=newurls
data.to_csv('new.csv', index=False)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/train_dir/get_stats.py | train_dir/get_stats.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Makes a table in Microsoft Word for all the audio features present in a file in a particular folder.
This is useful for peer-reviewed publications (for supplementary tables).
Usage: python3 get_stats.py [folder]
Example: python3 get_stats.py females
Following this tutorial with modifications: https://towardsdatascience.com/how-to-generate-ms-word-tables-with-python-6ca584df350e
'''
from docx import Document
from docx.shared import Cm, Pt
import numpy as np
import os, json, time, sys
def describe_text(jsonfile):
    '''Load an Allie feature .json file and flatten its audio feature sets
    into one {label: value} dict, tagging every label with the name of the
    feature set it came from.'''
    sample = json.load(open(jsonfile))
    audio_features = sample['features']['audio']
    featuretypes = list(audio_features)
    print(featuretypes)
    all_values = list()
    all_labels = list()
    for featuretype in featuretypes:
        # tag each label with its feature-set name; 'pause2_features' is
        # deliberately reported under the name 'pause_features'
        if featuretype == 'pause2_features':
            tag = ' (pause_features)'
        else:
            tag = ' (%s)'%(featuretype)
        renamed = [name + tag for name in audio_features[featuretype]['labels']]
        try:
            all_values = all_values + audio_features[featuretype]['features']
        except:
            # unreadable feature arrays collapse to a single zero
            all_values = all_values + [0]
        all_labels = all_labels + renamed
    return dict(zip(all_labels, all_values))
def get_descriptive_statistics(dict_, labels_):
    '''Collapse each list of per-file values in dict_ into a
    "mean (+/- std)" summary string, keyed by the names in labels_.

    Entries whose values cannot be averaged (e.g. lists of strings) are
    removed from the dict instead of being summarized.

    Bug fix: the body previously read the module-level `labels` variable
    instead of the `labels_` parameter, so the function only worked when a
    global with that exact name happened to exist.

    :param dict_: {label: [values...]} accumulator (mutated in place)
    :param labels_: label names to summarize
    :return: the same dict_ with value lists replaced by summary strings
    '''
    for j in range(len(labels_)):
        try:
            values = np.array(dict_[labels_[j]])
            dict_[labels_[j]] = str(np.mean(values)) + ' (+/- ' + str(np.std(values)) + ')'
        except:
            # drop entries that cannot be averaged
            dict_.pop(labels_[j])
    return dict_
# go to the right folder
directory=sys.argv[1]
os.chdir(directory)
listdir=os.listdir()
# collect every feature .json file in the target folder
jsonfiles=list()
for i in range(len(listdir)):
    if listdir[i].endswith('.json'):
        jsonfiles.append(listdir[i])
# got all the jsonfiles, now add to each feature
print(jsonfiles)
# use the first file to discover the full set of feature labels;
# assumes every file shares (a subset of) the same labels — TODO confirm
description=describe_text(jsonfiles[0])
labels=list(description)
dict_=dict()
for i in range(len(labels)):
    dict_[labels[i]]=[]
# now go through all the json files
for i in range(len(jsonfiles)):
    stats=describe_text(jsonfiles[i])
    print(stats)
    for j in range(len(labels)):
        try:
            dict_[labels[j]]=dict_[labels[j]]+[stats[labels[j]]]
        except:
            # files missing this label simply contribute no value
            pass
# collapse each value list into a "mean (+/- std)" summary string
dict_=get_descriptive_statistics(dict_, labels)
text_stats=dict_
# make the table! (alphabetized)
# 'A_Feature' sorts first, so it acts as the header row of the table
text_stats['A_Feature']='Average (+/- standard deviation)'
text_stats = dict(sorted(text_stats.items()))
# customizing the table
word_document = Document()
document_name = directory
table = word_document.add_table(0, 0) # we add rows iteratively
table.style = 'TableGrid'
first_column_width = 5
second_column_with = 10
table.add_column(Cm(first_column_width))
table.add_column(Cm(second_column_with))
# one row per statistic: feature name | summary string
for index, stat_item in enumerate(text_stats.items()):
    table.add_row()
    stat_name, stat_result = stat_item
    row = table.rows[index]
    row.cells[0].text = str(stat_name)
    row.cells[1].text = str(stat_result)
# word_document.add_page_break()
word_document.save(directory + '.docx')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/standard_array.py | features/standard_array.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
______ _ ___ ______ _____
| ___| | | / _ \ | ___ \_ _|
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | |
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/
This is the standard feature array for Allie (version 1.0).
Note this will be imported to get back data in all featurization methods
to ensure maximal code reusability.
'''
import os, time, psutil, json, platform
from datetime import datetime
def prev_dir(directory):
    '''Return the parent of a '/'-separated directory path
    (everything before the final path component).'''
    components = directory.split('/')
    return '/'.join(components[:-1])
def make_features(sampletype):
    '''Build the skeleton Allie feature dictionary for one sample.

    Returns a dict holding empty per-modality feature/transcript/model
    tables, empty labels/errors lists, and the contents of settings.json
    (normally one directory above the working directory; folders nested
    two layers deep in train_dir fall back to two directories up).
    '''
    modalities = ('audio', 'text', 'image', 'video', 'csv')
    features = {modality: dict() for modality in modalities}
    transcripts = {modality: dict() for modality in modalities}
    models = {modality: dict() for modality in modalities}

    # settings can be useful to record per-featurization, since some
    # featurizations rely on settings staying consistent across runs
    parent = '/'.join(os.getcwd().split('/')[:-1])
    try:
        settings = json.load(open(parent + '/settings.json'))
    except:
        # fall back for folders that are two layers deep in train_dir
        grandparent = '/'.join(parent.split('/')[:-1])
        settings = json.load(open(grandparent + '/settings.json'))

    return {'sampletype': sampletype,
            'transcripts': transcripts,
            'features': features,
            'models': models,
            'labels': [],
            'errors': [],
            'settings': settings}
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/multispeaker_features.py | features/audio_features/multispeaker_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['multispeaker_features']
Creates an output array of the number of speakers of an audio file with a deep learning model.
Note that this works well on short audio clips (<10 seconds) but can be inaccurate on longer audio clips (>10 seconds).
'''
import numpy as np
import soundfile as sf
import argparse, os, keras, sklearn, librosa, sys
def get_speakernum(filename, model, mean_, scale_):
    '''
    taken from https://github.com/faroit/CountNet
    (research paper - https://arxiv.org/abs/1712.04555).
    Note this is the number of concurrent speakers (in parallel),
    and can be used to detect ambient noise.
    Note also that it may be better to break up speech into 5 second
    segments here for better accuracy, as the model is biased for this
    particular case.

    :param filename: path to the audio file to analyze
    :param model: loaded keras CountNet model
    :param mean_: StandardScaler means for input standardization
    :param scale_: StandardScaler scales for input standardization
    :return: predicted concurrent-speaker count (argmax over model classes)
    '''
    print(filename)
    # bug fix: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement and yields the same eps
    eps = np.finfo(float).eps
    # load standardisation parameters
    scaler = sklearn.preprocessing.StandardScaler()
    scaler.mean_=mean_
    scaler.scale_=scale_
    # compute audio
    audio, rate = sf.read(filename, always_2d=True)
    # downmix to mono
    audio = np.mean(audio, axis=1)
    # compute STFT
    X = np.abs(librosa.stft(audio, n_fft=400, hop_length=160)).T
    # apply standardization
    X = scaler.transform(X)
    # cut to input shape length (500 frames x 201 STFT bins)
    X = X[:model.input_shape[1], :]
    # apply normalization
    Theta = np.linalg.norm(X, axis=1) + eps
    X /= np.mean(Theta)
    # add sample dimension
    Xs = X[np.newaxis, ...]
    # predict output
    ys = model.predict(Xs, verbose=0)
    print("Speaker Count Estimate: ", np.argmax(ys, axis=1)[0])
    return np.argmax(ys, axis=1)[0]
def get_wavfiles(listdir):
    '''Return only the .wav filenames from a directory listing,
    preserving their order.'''
    return [entry for entry in listdir if entry.endswith('.wav')]
def multispeaker_featurize(audiofile):
    '''Estimate the number of concurrent speakers in `audiofile`.

    Loads the pretrained CountNet RNN and its standardization parameters
    from ./helpers (relative to the current working directory), then
    delegates the prediction to get_speakernum().

    :param audiofile: path to the audio file to featurize
    :return: (features, labels) — a one-element feature list holding the
        predicted speaker count, and its matching label list
    '''
    curdir=os.getcwd()
    # pretrained CountNet model weights
    model = keras.models.load_model(curdir+'/helpers/RNN_keras2.h5')
    # scaler.npz holds the StandardScaler mean (arr_0) and scale (arr_1)
    with np.load(curdir+'/helpers/scaler.npz') as data:
        mean_ = data['arr_0']
        scale_ = data['arr_1']
    speaker_number=get_speakernum(audiofile, model, mean_,scale_)
    features=[speaker_number]
    labels=['speaker_number']
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/yamnet_features.py | features/audio_features/yamnet_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pspeech_features']
Python Speech Features is a library for fast extraction of speech features like mfcc coefficients and
log filter bank energies. Note that this library is much faster than LibROSA and other libraries,
so it is useful to featurize very large datasets.
For more information, check out the documentation: https://github.com/jameslyons/python_speech_features
'''
import tensorflow as tf
import tensorflow_hub as hub
import numpy as np
import io, os, shutil, csv, pyaudio, wave
import soundfile as sf
from tqdm import tqdm
'''
https://tfhub.dev/google/yamnet/1
'''
# Find the name of the class with the top score when mean-aggregated across frames.
def class_names_from_csv(class_map_csv_text):
    """Return the display-name column of the YAMNet class-map CSV text,
    skipping the header row."""
    reader = csv.reader(io.StringIO(class_map_csv_text))
    names = [display_name for (class_index, mid, display_name) in reader]
    # first row is the CSV header
    return names[1:]
# get labels for later
def get_labels(vector, label, label2):
    '''Build one "<label><1-based index>_<label2>" name per element of
    vector, e.g. ('f', 'mean') -> 'f1_mean', 'f2_mean', ...'''
    return [label + str(index + 1) + '_' + label2
            for index in range(len(vector))]
def yamnet_featurize(wavfile, help_dir):
    '''Score a .wav file with the pretrained YAMNet model (loaded from
    help_dir/yamnet_1) and summarize the 521 per-frame class scores with
    mean/std/max/min/median statistics.

    :param wavfile: path to the .wav file to featurize
    :param help_dir: directory containing the saved yamnet_1 model
    :return: (features, labels) — a flat numpy array of 5*521 summary
        statistics and the matching label list
    '''
    model = hub.load(help_dir + '/yamnet_1')
    waveform, sample_rate = sf.read(wavfile)

    # Run the model and sanity-check the output shapes.
    scores, embeddings, log_mel_spectrogram = model(waveform)
    scores.shape.assert_is_compatible_with([None, 521])
    embeddings.shape.assert_is_compatible_with([None, 1024])
    log_mel_spectrogram.shape.assert_is_compatible_with([None, 64])

    class_map_path = model.class_map_path().numpy()
    class_names = class_names_from_csv(tf.io.read_file(class_map_path).numpy().decode('utf-8'))

    # collapse the per-frame scores into one summary statistic per class
    frame_scores = scores.numpy()
    features = np.concatenate((frame_scores.mean(axis=0),
                               frame_scores.std(axis=0),
                               frame_scores.max(axis=0),
                               frame_scores.min(axis=0),
                               np.median(frame_scores, axis=0)), axis=0)

    # label order mirrors the feature order: all means, then stds,
    # maxes, mins and medians
    labels = []
    for suffix in ('_mean', '_std', '_max', '_min', '_medians'):
        for name in class_names:
            labels.append(name + suffix)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/audioset_features.py | features/audio_features/audioset_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['audioset_features']
Simple script to extract features using the VGGish model released by Google.
Note that these are time-independent and the embedding has been adjusted so that
the length is constant. Follows instructions from
https://github.com/tensorflow/models/tree/master/research/audioset
'''
################################################################################
## IMPORT STATEMENTS ##
################################################################################
import os, shutil, json, time
import sounddevice as sd
import soundfile as sf
import numpy as np
import tensorflow as tf
################################################################################
## HELPER FUNCTIONS ##
################################################################################
def setup_audioset(curdir):
    '''Install Google's VGGish/AudioSet code and pretrained checkpoints
    into curdir/audioset.

    Side effects: deletes any existing ./models checkout, shells out to
    git/curl/python3, and finishes with the working directory back at
    curdir.

    :param curdir: absolute path of the directory to install into; must
        contain helpers/vggish_inference_demo.py
    '''
    # Clone TensorFlow models repo into a 'models' directory.
    if 'models' in os.listdir():
        shutil.rmtree('models')
    os.system('git clone https://github.com/tensorflow/models.git')
    # give the clone a moment to finish before changing into it
    time.sleep(5)
    os.chdir(curdir+'/models/research/audioset/vggish')
    # add modified file in the current folder
    os.remove('vggish_inference_demo.py')
    shutil.copy(curdir+'/helpers/vggish_inference_demo.py', os.getcwd()+'/vggish_inference_demo.py')
    # Download data files into same directory as code.
    os.system('curl -O https://storage.googleapis.com/audioset/vggish_model.ckpt')
    os.system('curl -O https://storage.googleapis.com/audioset/vggish_pca_params.npz')
    # Installation ready, let's test it.
    # If we see "Looks Good To Me", then we're all set.
    os.system('python3 vggish_smoke_test.py')
    # copy back into main directory and delete unnecessary models
    shutil.copytree(curdir+'/models/research/audioset/vggish', curdir+'/audioset')
    shutil.rmtree(curdir+'/models')
    # go back to main directory
    os.chdir(curdir)
def audioset_featurize(filename, audioset_dir, process_dir):
    '''Extract VGGish (AudioSet) embedding features for one audio file.

    Runs Google's vggish_inference_demo.py as a subprocess (installing the
    AudioSet code first if needed) and reads the features back from the
    .json the helper script writes into audioset/processdir.

    :param filename: audio file name (.wav or .mp3) located in process_dir
    :param audioset_dir: directory that holds (or will receive) ./audioset
    :param process_dir: directory containing the audio file; also the
        working directory on return
    :return: (features, labels) — VGGish embedding values and generated
        'audioset_feature_N' names
    '''
    # get current directory
    os.chdir(audioset_dir)
    curdir=os.getcwd()
    # download audioset files if audioset not in current directory
    if 'audioset' not in os.listdir():
        #try:
        setup_audioset(curdir)
        #except:
        #print('there was an error installing audioset')
    # textfile definition to dump terminal outputs
    jsonfile=filename[0:-4]+'.json'
    # audioset folder
    curdir=os.getcwd()
    os.chdir(curdir+'/audioset')
    if 'processdir' not in os.listdir():
        os.mkdir('processdir')
    # need a .wav file here
    if filename[-4:]=='.mp3':
        os.system('python3 vggish_inference_demo.py --mp3_file %s/%s'%(process_dir, filename))
    elif filename[-4:]=='.wav':
        os.system('python3 vggish_inference_demo.py --wav_file %s/%s'%(process_dir, filename))
    # now reference this .JSON file
    # (the helper script is expected to write it into ./processdir —
    # NOTE(review): confirm against helpers/vggish_inference_demo.py)
    os.chdir(os.getcwd()+'/processdir')
    datafile=json.load(open(jsonfile))
    print(list(datafile))
    features=datafile['features']
    # output VGGish feature array and compressed means/stds
    labels=list()
    for i in range(len(features)):
        labels.append('audioset_feature_%s'%(str(i+1)))
    os.chdir(process_dir)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/librosa_features.py | features/audio_features/librosa_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['librosa_features']
Extracts acoustic features using the LibROSA library;
saves them as mean, standard devaition, amx, min, and median
in different classes: onset, rhythm, spectral, and power categories.
Note this is quite a powerful audio feature set that can be used
for a variety of purposes.
For more information, check out libROSA's documentation: https://librosa.org/
'''
import librosa, os
# pin librosa at 0.6.2: the calls below (e.g. librosa.feature.rmse and the
# positional-audio call style) were renamed or removed in later releases
if librosa.__version__ != '0.6.2':
    os.system('pip3 install librosa==0.6.2')
    # NOTE(review): re-importing does not reload an already-imported module,
    # so the pinned version only takes effect on the next interpreter start
    import librosa
import numpy as np
# get statistical features in numpy
def stats(matrix):
    '''Summarize a numeric array as the five-element numpy vector
    [mean, std, max, min, median].'''
    summary = [np.mean(matrix),
               np.std(matrix),
               np.amax(matrix),
               np.amin(matrix),
               np.median(matrix)]
    return np.array(summary)
# get labels for later
def stats_labels(label, sample_list):
    '''Append the five summary-statistic label variants for `label`
    (mean/std/maxv/minv/median) onto sample_list, mutating it in place,
    and return the same list.'''
    for suffix in ('_mean', '_std', '_maxv', '_minv', '_median'):
        sample_list.append(label + suffix)
    return sample_list
# featurize with librosa following documentation
# https://librosa.github.io/librosa/feature.html
def librosa_featurize(filename, categorize):
    '''Featurize an audio file with LibROSA (pinned at 0.6.2).

    Each raw descriptor is summarized with stats() as
    [mean, std, max, min, median] and grouped into four families:
    onset, rhythm, spectral, power.

    :param filename: path to the audio file
    :param categorize: if True, return ({family: features}, {family: labels})
        dicts; otherwise return a flat numpy feature array and flat label list
    :return: (features, labels)
    '''
    print('librosa featurizing: %s'%(filename))
    y, sr = librosa.load(filename)

    # ---- raw librosa descriptors ----
    mfcc = librosa.feature.mfcc(y)
    poly_features = librosa.feature.poly_features(y)
    # the chroma representations are computed (as in the original pipeline)
    # but are not folded into the output feature set
    chroma_cens = librosa.feature.chroma_cens(y)
    chroma_cqt = librosa.feature.chroma_cqt(y)
    chroma_stft = librosa.feature.chroma_stft(y)
    tempogram = librosa.feature.tempogram(y)
    spectral_centroid = librosa.feature.spectral_centroid(y)[0]
    spectral_bandwidth = librosa.feature.spectral_bandwidth(y)[0]
    spectral_contrast = librosa.feature.spectral_contrast(y)[0]
    spectral_flatness = librosa.feature.spectral_flatness(y)[0]
    spectral_rolloff = librosa.feature.spectral_rolloff(y)[0]

    # ---- onset family: count + onset stats + tempo + onset-strength stats ----
    onsets = librosa.onset.onset_detect(y)
    tempo = librosa.beat.tempo(y)[0]
    onset_strength = librosa.onset.onset_strength(y)
    onset_features = np.append(len(onsets), stats(onsets))
    onset_features = np.append(onset_features, tempo)
    onset_features = np.append(onset_features, stats(onset_strength))
    onset_labels = ['onset_length']
    onset_labels = stats_labels('onset_detect', onset_labels)
    onset_labels.append('tempo')
    onset_labels = stats_labels('onset_strength', onset_labels)

    # ---- rhythm family: stats over the first 13 tempogram rows ----
    rhythm_features = np.concatenate([stats(tempogram[row]) for row in range(13)])
    rhythm_labels = []
    for row in range(13):
        rhythm_labels = stats_labels('rhythm_'+str(row), rhythm_labels)

    # ---- spectral family: 13 mfccs, 2 poly coefficients, 5 spectral shapes ----
    spectral_sources = [mfcc[i] for i in range(13)]
    spectral_sources += [poly_features[0], poly_features[1],
                         spectral_centroid, spectral_bandwidth,
                         spectral_contrast, spectral_flatness,
                         spectral_rolloff]
    spectral_features = np.concatenate([stats(source) for source in spectral_sources])
    spectral_labels = []
    for i in range(13):
        spectral_labels = stats_labels('mfcc_'+str(i), spectral_labels)
    for i in range(2):
        spectral_labels = stats_labels('poly_'+str(i), spectral_labels)
    for name in ('spectral_centroid', 'spectral_bandwidth', 'spectral_contrast',
                 'spectral_flatness', 'spectral_rolloff'):
        spectral_labels = stats_labels(name, spectral_labels)

    # ---- power family ----
    zero_crossings = librosa.feature.zero_crossing_rate(y)[0]
    rmse = librosa.feature.rmse(y)[0]
    power_features = np.concatenate([stats(zero_crossings), stats(rmse)])
    power_labels = stats_labels('zero_crossings', [])
    power_labels = stats_labels('RMSE', power_labels)

    if categorize == True:
        # grouped output, keyed by feature family
        features = {'onset': onset_features,
                    'rhythm': rhythm_features,
                    'spectral': spectral_features,
                    'power': power_features}
        labels = {'onset': onset_labels,
                  'rhythm': rhythm_labels,
                  'spectral': spectral_labels,
                  'power': power_labels}
    else:
        # flat output, families concatenated in a fixed order
        features = np.concatenate([onset_features, rhythm_features,
                                   spectral_features, power_features])
        labels = onset_labels+rhythm_labels+spectral_labels+power_labels
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pyaudio_features.py | features/audio_features/pyaudio_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pyaudio_features']
Extract 170 pyaudioanalysis features
https://github.com/tyiannak/pyAudioAnalysis
'''
import os,json, shutil
import numpy as np
def stats(matrix):
    '''Return the mean, std, max, min and median of `matrix` as one
    numpy array (in that order).'''
    return np.array([np.mean(matrix),
                     np.std(matrix),
                     np.amax(matrix),
                     np.amin(matrix),
                     np.median(matrix)])
def pyaudio_featurize(file, basedir):
    '''Extract pyAudioAnalysis features for one audio file.

    Copies the file into basedir/helpers, runs pyaudio_help.py there
    (which writes <file>.json), reads features/labels back out, deletes
    the .json and restores the original working directory.

    NOTE(review): the copied audio file is left behind in basedir/helpers —
    confirm whether pyaudio_help.py is expected to clean it up.

    :param file: audio file name in the current working directory
    :param basedir: directory containing the helpers/ folder
    :return: (features, labels) lists from the helper's JSON output
    '''
    # use pyaudioanalysis library to export features
    # exported as file[0:-4].json
    curdir=os.getcwd()
    shutil.copy(curdir+'/'+file, basedir+'/helpers/'+file)
    os.chdir(basedir+'/helpers/')
    os.system('python3 %s/helpers/pyaudio_help.py %s'%(basedir, file))
    jsonfile=file[0:-4]+'.json'
    g=json.load(open(jsonfile))
    features=g['features']
    labels=g['labels']
    os.remove(jsonfile)
    os.chdir(curdir)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/praat_features.py | features/audio_features/praat_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['praat_features']
Inspired by https://github.com/drfeinberg/genderless -
Praat features that are not affected by changing genders.
'''
import glob, os, json
import parselmouth
from parselmouth.praat import call
def praat_featurize(voiceID):
    """Extract 19 gender-robust Praat acoustic measurements from an audio file.

    Parameters
    ----------
    voiceID : str
        Path to an audio file readable by parselmouth.

    Returns
    -------
    (measurements, labels) : (list, list)
        19 scalar features (duration, pitch stats, HNR, jitter/shimmer
        variants, mean F1-F4) and their matching label strings.
    """
    sound = parselmouth.Sound(voiceID)  # read the sound
    # coarse pitch pass (50-600 Hz) just to bound the speaker's range
    broad_pitch = call(sound, "To Pitch", 0.0, 50, 600)
    minF0 = call(broad_pitch, "Get minimum", 0, 0, "hertz", "Parabolic")
    maxF0 = call(broad_pitch, "Get maximum", 0, 0, "hertz", "Parabolic")
    # refined pitch pass with a floor/ceiling fitted to the observed range
    floor = minF0 * 0.9
    ceiling = maxF0 * 1.1
    pitch = call(sound, "To Pitch", 0.0, floor, ceiling)
    duration = call(sound, "Get total duration")
    meanF0 = call(pitch, "Get mean", 0, 0, "hertz")
    stdevF0 = call(pitch, "Get standard deviation", 0, 0, "hertz")
    harmonicity = call(sound, "To Harmonicity (cc)", 0.01, minF0, 0.1, 1.0)
    hnr = call(harmonicity, "Get mean", 0, 0)
    pointProcess = call(sound, "To PointProcess (periodic, cc)", minF0, maxF0)
    # jitter (frequency perturbation) and shimmer (amplitude perturbation)
    localJitter = call(pointProcess, "Get jitter (local)", 0, 0, 0.0001, 0.02, 1.3)
    localabsoluteJitter = call(pointProcess, "Get jitter (local, absolute)", 0, 0, 0.0001, 0.02, 1.3)
    rapJitter = call(pointProcess, "Get jitter (rap)", 0, 0, 0.0001, 0.02, 1.3)
    ppq5Jitter = call(pointProcess, "Get jitter (ppq5)", 0, 0, 0.0001, 0.02, 1.3)
    ddpJitter = call(pointProcess, "Get jitter (ddp)", 0, 0, 0.0001, 0.02, 1.3)
    localShimmer = call([sound, pointProcess], "Get shimmer (local)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    localdbShimmer = call([sound, pointProcess], "Get shimmer (local_dB)",
                          0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq3Shimmer = call([sound, pointProcess], "Get shimmer (apq3)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    aqpq5Shimmer = call([sound, pointProcess], "Get shimmer (apq5)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq11Shimmer = call([sound, pointProcess], "Get shimmer (apq11)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    ddaShimmer = call([sound, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    # choose a formant ceiling appropriate for the pitch range
    if 170 < meanF0 < 300:
        max_formant = 5500
    elif meanF0 <= 170:
        max_formant = 5000
    elif meanF0 >= 300:
        max_formant = 8000
    else:
        # FIX: meanF0 can be NaN (e.g. unvoiced audio), in which case every
        # comparison above is False and max_formant was previously unbound.
        max_formant = 5500
    formants = call(sound, "To Formant (burg)", 0.0025, 5, max_formant, 0.025, 50)
    numPoints = call(pointProcess, "Get number of points")
    f1_list = []
    f2_list = []
    f3_list = []
    f4_list = []
    # Measure formants only at glottal pulses.
    # FIX: each pulse was previously appended twice to every list.
    for point in range(1, numPoints + 1):
        t = call(pointProcess, "Get time from index", point)
        f1_list.append(call(formants, "Get value at time", 1, t, 'Hertz', 'Linear'))
        f2_list.append(call(formants, "Get value at time", 2, t, 'Hertz', 'Linear'))
        f3_list.append(call(formants, "Get value at time", 3, t, 'Hertz', 'Linear'))
        f4_list.append(call(formants, "Get value at time", 4, t, 'Hertz', 'Linear'))
    # drop NaN formant samples (unmeasurable pulses)
    f1_list = [f1 for f1 in f1_list if str(f1) != 'nan']
    f2_list = [f2 for f2 in f2_list if str(f2) != 'nan']
    f3_list = [f3 for f3 in f3_list if str(f3) != 'nan']
    f4_list = [f4 for f4 in f4_list if str(f4) != 'nan']
    # mean formant across pulses; 0 when no valid pulse was found
    f1_mean = sum(f1_list) / len(f1_list) if f1_list else 0
    f2_mean = sum(f2_list) / len(f2_list) if f2_list else 0
    f3_mean = sum(f3_list) / len(f3_list) if f3_list else 0
    f4_mean = sum(f4_list) / len(f4_list) if f4_list else 0
    measurements = [duration, meanF0, stdevF0, hnr, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter,
                    localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer,
                    f1_mean, f2_mean, f3_mean, f4_mean]
    labels = ['duration', 'meanF0', 'stdevF0', 'hnr', 'localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter', 'ddpJitter',
              'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'aqpq5Shimmer', 'apq11Shimmer', 'ddaShimmer',
              'f1_mean', 'f2_mean', 'f3_mean', 'f4_mean']
    return measurements, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/gemaps_time_features.py | features/audio_features/gemaps_time_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['gemaps_time_features']
This is the time series features for eGeMAPS.
This is using OpenSMILE's new python library: https://github.com/audeering/opensmile-python
'''
import opensmile, json
def gemaps_time_featurize(wav_file):
    """Extract eGeMAPSv01b time-series features from a wav file via openSMILE.

    Processes the file at three feature levels (low-level descriptors, their
    deltas, and functionals) and concatenates the resulting columns, in that
    order, exactly as before.

    Parameters
    ----------
    wav_file : str
        Path to the audio file.

    Returns
    -------
    (features, labels) : (list, list)
        One list of per-frame values per extracted column, plus the column
        names in matching order.
    """
    features = list()
    labels = list()
    # FIX (maintainability): the original repeated this stanza three times,
    # once per feature level, differing only in the level constant.
    feature_levels = (
        opensmile.FeatureLevel.LowLevelDescriptors,
        opensmile.FeatureLevel.LowLevelDescriptors_Deltas,
        opensmile.FeatureLevel.Functionals,
    )
    for feature_level in feature_levels:
        smile = opensmile.Smile(
            feature_set=opensmile.FeatureSet.eGeMAPSv01b,
            feature_level=feature_level,
        )
        frame = smile.process_file(wav_file)
        # iterating the DataFrame yields its column names
        for column in list(frame):
            features.append(list(frame[column]))
            labels.append(column)
    return features, labels
# features, labels = gemaps_time_featurize('test.wav')
# print(labels)
# data=dict()
# for i in range(len(labels)):
# data[labels[i]]=features[i]
# g=open('test.json','w')
# json.dump(data,g)
# g.close()
# print(features)
# print(labels)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/sox_features.py | features/audio_features/sox_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['sox_features']
Get features using SoX library, a workaround by outputting CLI in txt file and
uses a function to extract these features.
'''
import os
import numpy as np
def clean_text(text):
    """Pull the numeric values out of SoX textual output.

    Lower-cases *text*, strips letters (except 'n', handled later), spaces
    and punctuation, then converts each remaining line to float. Lines that
    do not parse as a number are silently dropped.
    """
    lowered = text.lower()
    # every lower-case letter except 'n', plus separators; '.' is kept so
    # decimals survive ("'.'" removes the literal quoted-dot sequence only)
    removals = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ' ',
                ':', '(', ')', '-', '=', "'.'"]
    for token in removals:
        lowered = lowered.replace(token, '')
    values = list()
    for line in lowered.split('\n'):
        try:
            # 'n' is stripped here so tokens like "nn2" parse as 2.0
            values.append(float(line.replace('\n', '').replace('n', '')))
        except:
            pass
    return values
def sox_featurize(filename):
    """Featurize an audio file with the SoX command-line tools.

    Redirects `soxi` and `sox -n stat` output into temporary text files,
    parses the numeric values out of them with clean_text(), removes the
    temp files, and returns (features, labels). mp3 input is converted to
    wav via ffmpeg first (and the temporary wav is deleted afterwards).

    NOTE(review): the number of parsed values depends on the installed SoX
    version's output format and is not guaranteed to equal len(labels).
    """
    stem = filename[0:-4].replace(' ', '_')
    soxifile = stem + '_soxi.txt'
    statfile = stem + '_stats.txt'
    if filename.endswith('.mp3'):
        wavfile = filename[0:-4] + '.wav'
        os.system('ffmpeg -i %s %s' % (filename, wavfile))
        os.system('soxi %s > %s' % (wavfile, soxifile))
        os.system('sox %s -n stat > %s 2>&1' % (wavfile, statfile))
        os.remove(wavfile)
    else:
        os.system('soxi %s > %s' % (filename, soxifile))
        os.system('sox %s -n stat > %s 2>&1' % (filename, statfile))
    # parse the captured CLI output
    with open(soxifile) as handle:
        soxi_values = clean_text(handle.read())
    with open(statfile) as handle:
        stat_values = clean_text(handle.read())
    soxi_labels = ['channels', 'samplerate', 'precision',
                   'filesize', 'bitrate', 'sample encoding']
    stat_labels = ['samples read', 'length', 'scaled by', 'maximum amplitude',
                   'minimum amplitude', 'midline amplitude', 'mean norm', 'mean amplitude',
                   'rms amplitude', 'max delta', 'min delta', 'mean delta',
                   'rms delta', 'rough freq', 'vol adj']
    labels = soxi_labels + stat_labels
    features = np.array(soxi_values + stat_values)
    os.remove(soxifile)
    os.remove(statfile)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/mixed_features.py | features/audio_features/mixed_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['mixed_features']
Creates a mixed feature array according to the schema provided @ mixed_feature_0.json
'''
import librosa_features as lf
import helpers.transcribe as ts
import random, math, os, sys, json
def prev_dir(directory):
    """Return the parent path of *directory* (everything before the last '/').

    Splits on '/' and rejoins all components but the last, so there is no
    trailing slash; a path with no '/' yields ''.
    """
    components = directory.split('/')
    return '/'.join(components[:-1])
# Locate the sibling text_features package relative to the current working
# directory so nltk_features can be imported from it.
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/text_features')  # makes nltk_features importable
import nltk_features as nf
def mixed_featurize(wavfile, transcript, help_dir):
    """Build mixed (text/audio ratio) features per the mixed_feature_0 schema.

    Loads index pairs from help_dir/mixed/mixed_feature_0.json, featurizes
    the audio (librosa) and the transcript (nltk), and returns one ratio
    feature per schema pair.

    Parameters
    ----------
    wavfile : str
        Path to the audio file.
    transcript : str
        Transcript text of the audio.
    help_dir : str
        Directory containing the mixed/ schema folder.

    Returns
    -------
    (features, labels) : (list, list)
    """
    schema = json.load(open(help_dir + '/mixed/mixed_feature_0.json'))
    labels = schema['labels']
    inds = schema['mixed_inds']
    # featurize both modalities
    librosa_features, librosa_labels = lf.librosa_featurize(wavfile, False)
    nltk_features, nltk_labels = nf.nltk_featurize(transcript)
    features = list()
    for pair in inds:
        nltk_ind = pair[0]
        librosa_ind = pair[1]
        try:
            # FIX: the original divided the schema *indices* themselves
            # (pair[0]/pair[1]), leaving the computed feature vectors unused
            # and producing identical output for every input file. Use the
            # indices to look up the actual feature values instead.
            feature = nltk_features[nltk_ind] / librosa_features[librosa_ind]
        except:
            # zero value if the feature is unavailable or division fails
            feature = 0
        features.append(feature)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/specimage2_features.py | features/audio_features/specimage2_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['specimage2_features']
Uses a spectrogram and features extracted from the spectrogram as feature vectors.
'''
import parselmouth, sys, os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
def prev_dir(directory):
    """Return *directory* with its final path component removed.

    The trailing '/' is kept (e.g. 'a/b/c' -> 'a/b/'), matching how callers
    concatenate further path segments onto the result.

    FIX: the original used directory.find(lastdir), which matches the FIRST
    occurrence of the last component — wrong for paths whose last component
    repeats earlier (e.g. '/home/ab/ab' previously returned '/home/').
    Trimming by length from the end handles that correctly.
    """
    last = directory.split('/')[-1]
    # drop exactly the final component's characters from the end
    return directory[:len(directory) - len(last)]
# import to get image feature script
# Locate the sibling image_features package so image_features.py can be
# imported; prev_dir() returns a path WITH a trailing '/', which is why
# haar_dir is concatenated without a separator below.
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/image_features')  # yields 'a//image_features' (double slash is harmless)
haar_dir=prevdir+'image_features/helpers/haarcascades'
import image_features as imf
os.chdir(directory)  # restore cwd in case the import changed it
def specimage2_featurize(wavfile, cur_dir, haar_dir):
    """Render a spectrogram+pitch plot of *wavfile* to a grayscale PNG and
    featurize that image with image_features.image_featurize().

    Saves the plot as <wavfile stem>.png (left on disk — the cleanup call at
    the bottom is commented out) and returns (features, labels) from the
    image featurizer.
    """
    sns.set() # Use seaborn's default style to make attractive graphs
    # Plot nice figures using Python's "standard" matplotlib library
    snd = parselmouth.Sound(wavfile)
    def draw_pitch(pitch):
        # Overlay the pitch contour on the current axes.
        # Extract selected pitch contour, and
        # replace unvoiced samples by NaN to not plot
        pitch_values = pitch.selected_array['frequency']
        pitch_values[pitch_values==0] = np.nan
        # white halo under the colored markers for visibility
        plt.plot(pitch.xs(), pitch_values, 'o', markersize=5, color='w')
        plt.plot(pitch.xs(), pitch_values, 'o', markersize=2)
        plt.grid(False)
        plt.ylim(0, pitch.ceiling)
        plt.ylabel("fundamental frequency [Hz]")
    def draw_spectrogram(spectrogram, dynamic_range=70):
        # Draw the spectrogram in dB with a floor dynamic_range dB below peak.
        X, Y = spectrogram.x_grid(), spectrogram.y_grid()
        sg_db = 10 * np.log10(spectrogram.values)
        plt.pcolormesh(X, Y, sg_db, vmin=sg_db.max() - dynamic_range, cmap='afmhot')
        plt.ylim([spectrogram.ymin, spectrogram.ymax])
        plt.xlabel("time [s]")
        plt.ylabel("frequency [Hz]")
    def draw_intensity(intensity):
        # NOTE(review): defined but never called in this function.
        plt.plot(intensity.xs(), intensity.values.T, linewidth=3, color='w')
        plt.plot(intensity.xs(), intensity.values.T, linewidth=1)
        plt.grid(False)
        plt.ylim(0)
        plt.ylabel("intensity [dB]")
    pitch = snd.to_pitch()
    # If desired, pre-emphasize the sound fragment before calculating the spectrogram
    pre_emphasized_snd = snd.copy()
    pre_emphasized_snd.pre_emphasize()
    spectrogram = pre_emphasized_snd.to_spectrogram(window_length=0.03, maximum_frequency=8000)
    # compose: spectrogram with pitch contour on a twinned y-axis
    plt.figure()
    draw_spectrogram(spectrogram)
    plt.twinx()
    draw_pitch(pitch)
    plt.xlim([snd.xmin, snd.xmax])
    # plt.show() # or plt.savefig("spectrogram_0.03.pdf")
    imgfile=wavfile[0:-4]+'.png'
    plt.savefig(imgfile)
    plt.close()
    # convert to grayscale ('LA' = luminance + alpha) in place
    img = Image.open(wavfile[0:-4]+'.png').convert('LA')
    img.save(wavfile[0:-4]+'.png')
    features, labels=imf.image_featurize(cur_dir, haar_dir, imgfile)
    # remove temporary image file
    # os.remove(wavfile[0:-4]+'.png')
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/hubert_features.py | features/audio_features/hubert_features.py | import torch, sys
from transformers import HubertModel, HubertConfig
from transformers import Wav2Vec2Processor, HubertForCTC
import soundfile as sf
import numpy as np
def hubert_featurize(file, hubert_model, hubert_processor, size):
    """Featurize an audio file with a HuBERT CTC model.

    Runs the model on the (16 kHz) audio, takes the argmax token id per
    frame, and pads with zeros or truncates so exactly *size* features are
    returned.

    Parameters
    ----------
    file : str
        Path to the audio file (read with soundfile; assumed 16 kHz —
        TODO confirm callers resample first).
    hubert_model : HubertForCTC
        Loaded model.
    hubert_processor : Wav2Vec2Processor
        Matching processor.
    size : int
        Fixed output length.

    Returns
    -------
    (features, labels) : (list, list)
        *size* predicted token ids (zero-padded) and labels
        'hubert_0' .. 'hubert_<size-1>'.
    """
    audio_input, _ = sf.read(file)
    input_values = hubert_processor(audio_input, return_tensors="pt", sampling_rate=16000).input_values  # Batch size 1
    logits = hubert_model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    features = list(np.array(predicted_ids).flatten())
    # Normalize to exactly `size` entries.
    # FIX: the pad branch previously computed difference = len(features) - size
    # (negative) and then called range(len(difference)) — len() of an int
    # raises TypeError; also truncation left labels longer than features.
    if len(features) > size:
        features = features[0:size]
    elif len(features) < size:
        features = features + [0] * (size - len(features))
    labels = ['hubert_%s' % (str(i)) for i in range(size)]
    return features, labels
# features, labels= featurize_hubert(sys.argv[1], hubert_model, hubert_processor, 100)
# print(dict(zip(labels, features)))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/allosaurus_features.py | features/audio_features/allosaurus_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['allosaurus_features']
Extracts acoustic features using the Allosaurus library.
For more information, check out Allosaurus's documentation: https://github.com/xinjli/allosaurus
'''
import os
try:
from allosaurus.app import read_recognizer
except:
os.system('pip3 install allosaurus==0.3.1')
from allosaurus.app import read_recognizer
import numpy as np
'''
sample features:
[0, 4, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 3, 1, 0, 0, 0, 6, 0, 0, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 2, 0, 0, 0, 0, 0, 8, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
sample labels
['I', 'a', 'aː', 'ã', 'ă', 'b', 'bʲ', 'bʲj', 'bʷ', 'bʼ', 'bː', 'b̞', 'b̤', 'b̥', 'c', 'd', 'dʒ', 'dʲ', 'dː', 'd̚', 'd̥', 'd̪', 'd̯', 'd͡z', 'd͡ʑ', 'd͡ʒ', 'd͡ʒː', 'd͡ʒ̤', 'e', 'eː', 'e̞', 'f', 'fʲ', 'fʷ', 'fː', 'g', 'gʲ', 'gʲj', 'gʷ', 'gː', 'h', 'hʷ', 'i', 'ij', 'iː', 'i̞', 'i̥', 'i̯', 'j', 'k', 'kx', 'kʰ', 'kʲ', 'kʲj', 'kʷ', 'kʷʼ', 'kʼ', 'kː', 'k̟ʲ', 'k̟̚', 'k͡p̚', 'l', 'lʲ', 'lː', 'l̪', 'm', 'mʲ', 'mʲj', 'mʷ', 'mː', 'n', 'nj', 'nʲ', 'nː', 'n̪', 'n̺', 'o', 'oː', 'o̞', 'o̥', 'p', 'pf', 'pʰ', 'pʲ', 'pʲj', 'pʷ', 'pʷʼ', 'pʼ', 'pː', 'p̚', 'q', 'r', 'rː', 's', 'sʲ', 'sʼ', 'sː', 's̪', 't', 'ts', 'tsʰ', 'tɕ', 'tɕʰ', 'tʂ', 'tʂʰ', 'tʃ', 'tʰ', 'tʲ', 'tʷʼ', 'tʼ', 'tː', 't̚', 't̪', 't̪ʰ', 't̪̚', 't͡s', 't͡sʼ', 't͡ɕ', 't͡ɬ', 't͡ʃ', 't͡ʃʲ', 't͡ʃʼ', 't͡ʃː', 'u', 'uə', 'uː', 'u͡w', 'v', 'vʲ', 'vʷ', 'vː', 'v̞', 'v̞ʲ', 'w', 'x', 'x̟ʲ', 'y', 'z', 'zj', 'zʲ', 'z̪', 'ä', 'æ', 'ç', 'çj', 'ð', 'ø', 'ŋ', 'ŋ̟', 'ŋ͡m', 'œ', 'œ̃', 'ɐ', 'ɐ̞', 'ɑ', 'ɑ̱', 'ɒ', 'ɓ', 'ɔ', 'ɔ̃', 'ɕ', 'ɕː', 'ɖ̤', 'ɗ', 'ə', 'ɛ', 'ɛ̃', 'ɟ', 'ɡ', 'ɡʲ', 'ɡ̤', 'ɡ̥', 'ɣ', 'ɣj', 'ɤ', 'ɤɐ̞', 'ɤ̆', 'ɥ', 'ɦ', 'ɨ', 'ɪ', 'ɫ', 'ɯ', 'ɯ̟', 'ɯ̥', 'ɰ', 'ɱ', 'ɲ', 'ɳ', 'ɴ', 'ɵ', 'ɸ', 'ɹ', 'ɹ̩', 'ɻ', 'ɻ̩', 'ɽ', 'ɾ', 'ɾj', 'ɾʲ', 'ɾ̠', 'ʀ', 'ʁ', 'ʁ̝', 'ʂ', 'ʃ', 'ʃʲː', 'ʃ͡ɣ', 'ʈ', 'ʉ̞', 'ʊ', 'ʋ', 'ʋʲ', 'ʌ', 'ʎ', 'ʏ', 'ʐ', 'ʑ', 'ʒ', 'ʒ͡ɣ', 'ʔ', 'ʝ', 'ː', 'β', 'β̞', 'θ', 'χ', 'ә', 'ḁ']
'''
def allosaurus_featurize(wavfile):
    """Count each known phone's occurrences in an Allosaurus transcription.

    Recognizes *wavfile* with the default Allosaurus model and returns, for
    every phone in the fixed label inventory below, how many times it
    appears in the recognized phone sequence.
    """
    # load your model
    model = read_recognizer()
    labels=['I', 'a', 'aː', 'ã', 'ă', 'b', 'bʲ', 'bʲj', 'bʷ', 'bʼ', 'bː', 'b̞', 'b̤', 'b̥', 'c', 'd', 'dʒ', 'dʲ', 'dː', 'd̚', 'd̥', 'd̪', 'd̯', 'd͡z', 'd͡ʑ', 'd͡ʒ', 'd͡ʒː', 'd͡ʒ̤', 'e', 'eː', 'e̞', 'f', 'fʲ', 'fʷ', 'fː', 'g', 'gʲ', 'gʲj', 'gʷ', 'gː', 'h', 'hʷ', 'i', 'ij', 'iː', 'i̞', 'i̥', 'i̯', 'j', 'k', 'kx', 'kʰ', 'kʲ', 'kʲj', 'kʷ', 'kʷʼ', 'kʼ', 'kː', 'k̟ʲ', 'k̟̚', 'k͡p̚', 'l', 'lʲ', 'lː', 'l̪', 'm', 'mʲ', 'mʲj', 'mʷ', 'mː', 'n', 'nj', 'nʲ', 'nː', 'n̪', 'n̺', 'o', 'oː', 'o̞', 'o̥', 'p', 'pf', 'pʰ', 'pʲ', 'pʲj', 'pʷ', 'pʷʼ', 'pʼ', 'pː', 'p̚', 'q', 'r', 'rː', 's', 'sʲ', 'sʼ', 'sː', 's̪', 't', 'ts', 'tsʰ', 'tɕ', 'tɕʰ', 'tʂ', 'tʂʰ', 'tʃ', 'tʰ', 'tʲ', 'tʷʼ', 'tʼ', 'tː', 't̚', 't̪', 't̪ʰ', 't̪̚', 't͡s', 't͡sʼ', 't͡ɕ', 't͡ɬ', 't͡ʃ', 't͡ʃʲ', 't͡ʃʼ', 't͡ʃː', 'u', 'uə', 'uː', 'u͡w', 'v', 'vʲ', 'vʷ', 'vː', 'v̞', 'v̞ʲ', 'w', 'x', 'x̟ʲ', 'y', 'z', 'zj', 'zʲ', 'z̪', 'ä', 'æ', 'ç', 'çj', 'ð', 'ø', 'ŋ', 'ŋ̟', 'ŋ͡m', 'œ', 'œ̃', 'ɐ', 'ɐ̞', 'ɑ', 'ɑ̱', 'ɒ', 'ɓ', 'ɔ', 'ɔ̃', 'ɕ', 'ɕː', 'ɖ̤', 'ɗ', 'ə', 'ɛ', 'ɛ̃', 'ɟ', 'ɡ', 'ɡʲ', 'ɡ̤', 'ɡ̥', 'ɣ', 'ɣj', 'ɤ', 'ɤɐ̞', 'ɤ̆', 'ɥ', 'ɦ', 'ɨ', 'ɪ', 'ɫ', 'ɯ', 'ɯ̟', 'ɯ̥', 'ɰ', 'ɱ', 'ɲ', 'ɳ', 'ɴ', 'ɵ', 'ɸ', 'ɹ', 'ɹ̩', 'ɻ', 'ɻ̩', 'ɽ', 'ɾ', 'ɾj', 'ɾʲ', 'ɾ̠', 'ʀ', 'ʁ', 'ʁ̝', 'ʂ', 'ʃ', 'ʃʲː', 'ʃ͡ɣ', 'ʈ', 'ʉ̞', 'ʊ', 'ʋ', 'ʋʲ', 'ʌ', 'ʎ', 'ʏ', 'ʐ', 'ʑ', 'ʒ', 'ʒ͡ɣ', 'ʔ', 'ʝ', 'ː', 'β', 'β̞', 'θ', 'χ', 'ә', 'ḁ']
    phone_transcript = model.recognize(wavfile).split()
    print(phone_transcript)
    # one count per phone, in inventory order
    features = [phone_transcript.count(phone) for phone in labels]
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/prosody_features.py | features/audio_features/prosody_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['prosody_features']
This uses a voice activity detector to extract out prosody features like pause length and
other variables.
'''
import argparse, json, os, sys
sys.path.append(os.getcwd()+'/helpers/DigiPsych_Prosody')
from helpers.DigiPsych_Prosody.prosody import Voice_Prosody
import pandas as pd
from datetime import datetime
def prosody_featurize(audiofile, fsize):
    """Extract prosody features (pause lengths etc.) via Voice_Prosody.

    Parameters
    ----------
    audiofile : str
        Path to a .wav file; other extensions are skipped.
    fsize : int or str
        Frame size passed through to Voice_Prosody.featurize_audio.

    Returns
    -------
    (features, labels) : (list, list)
        Feature values and names (the featurizer's final dict entry is
        dropped); empty lists when *audiofile* is not a .wav.
    """
    # FIX: the original returned undefined names (UnboundLocalError) for
    # non-.wav input; it also created an unused pandas DataFrame.
    features = list()
    labels = list()
    vp = Voice_Prosody()
    if audiofile.endswith('.wav'):
        print('Prosody featurizing:', audiofile)
        feat_dict = vp.featurize_audio(audiofile, int(fsize))
        # last dict entry is dropped, as before (presumably metadata —
        # TODO confirm against Voice_Prosody)
        features = list(feat_dict.values())[0:-1]
        labels = list(feat_dict)[0:-1]
        print(features)
        print(labels)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/praat_time_features.py | features/audio_features/praat_time_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['praat_time_features']
These are the time series features for Praat here.
Inspired by https://github.com/drfeinberg/genderless -
Praat features that are not affected by changing genders.
'''
import glob, os, json
import parselmouth
from parselmouth.praat import call
import numpy as np
def praat_time_featurize(wav_file):
    """Extract Praat summary features plus raw time series from a wav file.

    Returns the 19 scalar measurements of praat_featurize() followed by the
    pitch, harmonicity, formant, bandwidth and intensity time series (each
    appended as a list), with labels in matching order.

    Parameters
    ----------
    wav_file : str
        Path to an audio file readable by parselmouth.

    Returns
    -------
    (features, labels) : (list, list)
    """
    sound = parselmouth.Sound(wav_file)  # read the sound
    # coarse pitch pass (50-600 Hz) to bound the speaker's range
    broad_pitch = call(sound, "To Pitch", 0.0, 50, 600)
    minF0 = call(broad_pitch, "Get minimum", 0, 0, "hertz", "Parabolic")
    maxF0 = call(broad_pitch, "Get maximum", 0, 0, "hertz", "Parabolic")
    floor = minF0 * 0.9
    ceiling = maxF0 * 1.1
    pitch = call(sound, "To Pitch", 0.0, floor, ceiling)
    duration = call(sound, "Get total duration")
    meanF0 = call(pitch, "Get mean", 0, 0, "hertz")
    stdevF0 = call(pitch, "Get standard deviation", 0, 0, "hertz")
    harmonicity = call(sound, "To Harmonicity (cc)", 0.01, minF0, 0.1, 1.0)
    hnr = call(harmonicity, "Get mean", 0, 0)
    pointProcess = call(sound, "To PointProcess (periodic, cc)", minF0, maxF0)
    # jitter / shimmer perturbation measures
    localJitter = call(pointProcess, "Get jitter (local)", 0, 0, 0.0001, 0.02, 1.3)
    localabsoluteJitter = call(pointProcess, "Get jitter (local, absolute)", 0, 0, 0.0001, 0.02, 1.3)
    rapJitter = call(pointProcess, "Get jitter (rap)", 0, 0, 0.0001, 0.02, 1.3)
    ppq5Jitter = call(pointProcess, "Get jitter (ppq5)", 0, 0, 0.0001, 0.02, 1.3)
    ddpJitter = call(pointProcess, "Get jitter (ddp)", 0, 0, 0.0001, 0.02, 1.3)
    localShimmer = call([sound, pointProcess], "Get shimmer (local)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    localdbShimmer = call([sound, pointProcess], "Get shimmer (local_dB)",
                          0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq3Shimmer = call([sound, pointProcess], "Get shimmer (apq3)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    aqpq5Shimmer = call([sound, pointProcess], "Get shimmer (apq5)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    apq11Shimmer = call([sound, pointProcess], "Get shimmer (apq11)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    ddaShimmer = call([sound, pointProcess], "Get shimmer (dda)", 0, 0, 0.0001, 0.02, 1.3, 1.6)
    # formant ceiling appropriate for the pitch range
    if 170 < meanF0 < 300:
        max_formant = 5500
    elif meanF0 <= 170:
        max_formant = 5000
    elif meanF0 >= 300:
        max_formant = 8000
    else:
        # FIX: meanF0 can be NaN (unvoiced audio); previously max_formant
        # was left unbound and the next call raised NameError.
        max_formant = 5500
    formants = call(sound, "To Formant (burg)", 0.0025, 5, max_formant, 0.025, 50)
    numPoints = call(pointProcess, "Get number of points")
    f1_list = []
    f2_list = []
    f3_list = []
    f4_list = []
    b1_list = []  # ER 202012
    b2_list = []  # ER 202012
    b3_list = []  # ER 202012
    b4_list = []  # ER 202012
    # Measure formants (and bandwidths) only at glottal pulses.
    # FIX: formant values were previously appended TWICE per pulse while
    # bandwidths were appended once, so the f and b time series disagreed
    # in length and each pulse was double-counted in the formant means.
    for point in range(1, numPoints + 1):
        t = call(pointProcess, "Get time from index", point)
        f1_list.append(call(formants, "Get value at time", 1, t, 'Hertz', 'Linear'))
        f2_list.append(call(formants, "Get value at time", 2, t, 'Hertz', 'Linear'))
        f3_list.append(call(formants, "Get value at time", 3, t, 'Hertz', 'Linear'))
        f4_list.append(call(formants, "Get value at time", 4, t, 'Hertz', 'Linear'))
        b1_list.append(call(formants, "Get bandwidth at time", 1, t, 'Hertz', 'Linear'))  # ER 202012
        b2_list.append(call(formants, "Get bandwidth at time", 2, t, 'Hertz', 'Linear'))  # ER 202012
        b3_list.append(call(formants, "Get bandwidth at time", 3, t, 'Hertz', 'Linear'))  # ER 202012
        b4_list.append(call(formants, "Get bandwidth at time", 4, t, 'Hertz', 'Linear'))  # ER 202012
    # raw (NaN-containing) per-pulse series, kept for the time-series output
    f1_all = np.asarray(f1_list)  # ER 202012
    f2_all = np.asarray(f2_list)  # ER 202012
    f3_all = np.asarray(f3_list)  # ER 202012
    f4_all = np.asarray(f4_list)  # ER 202012
    b1_all = np.asarray(b1_list)  # ER 202012
    b2_all = np.asarray(b2_list)  # ER 202012
    b3_all = np.asarray(b3_list)  # ER 202012
    b4_all = np.asarray(b4_list)  # ER 202012
    # drop NaN samples before averaging
    f1_list = [f1 for f1 in f1_list if str(f1) != 'nan']
    f2_list = [f2 for f2 in f2_list if str(f2) != 'nan']
    f3_list = [f3 for f3 in f3_list if str(f3) != 'nan']
    f4_list = [f4 for f4 in f4_list if str(f4) != 'nan']
    # mean formant across pulses; 0 when no valid pulse was found
    f1_mean = sum(f1_list) / len(f1_list) if f1_list else 0
    f2_mean = sum(f2_list) / len(f2_list) if f2_list else 0
    f3_mean = sum(f3_list) / len(f3_list) if f3_list else 0
    f4_mean = sum(f4_list) / len(f4_list) if f4_list else 0
    intensity = sound.to_intensity()  # ER 202012
    measurements = [duration, meanF0, stdevF0, hnr, localJitter, localabsoluteJitter, rapJitter, ppq5Jitter, ddpJitter,
                    localShimmer, localdbShimmer, apq3Shimmer, aqpq5Shimmer, apq11Shimmer, ddaShimmer,
                    f1_mean, f2_mean, f3_mean, f4_mean]
    labels = ['duration', 'meanF0', 'stdevF0', 'hnr', 'localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter',
              'ddpJitter',
              'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'aqpq5Shimmer', 'apq11Shimmer', 'ddaShimmer', 'f1_mean',
              'f2_mean', 'f3_mean', 'f4_mean']
    measurements_time_series = [pitch.selected_array['frequency'], pitch.selected_array['strength'], pitch.ts(),
                                harmonicity.as_array()[0], harmonicity.ts(),
                                f1_all, f2_all, f3_all, f4_all, b1_all, b2_all, b3_all, b4_all, formants.ts(),
                                intensity.as_array()[0], intensity.ts()]  # ER 202012
    labels_time_series = ['pitch_time_series', 'pitch_strength_time_series', 'pitch_t_time_series',
                          'harmonicity_time_series', 'harmonicity_t_time_series',
                          'formant1_time_series', 'f2_time_series', 'f3_time_series', 'f4_time_series',
                          'bandwidth1_time_series', 'b2_time_series', 'b3_time_series', 'b4_time_series', 'formants_t_time_series',
                          'intensity_time_series', 'intensity_t_time_series']  # ER 202012
    features = measurements
    for series in measurements_time_series:
        features.append(list(series))
    labels = labels + labels_time_series
    return features, labels
# features, labels = praat_featurize('test.wav')
# print(labels)
# data=dict()
# for i in range(len(labels)):
# data[labels[i]]=features[i]
# g=open('test.json','w')
# json.dump(data,g)
# g.close()
# print(features)
# print(labels)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pspeech_features.py | features/audio_features/pspeech_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pspeech_features']
Python Speech Features is a library for fast extraction of speech features like mfcc coefficients and
log filter bank energies. Note that this library is much faster than LibROSA and other libraries,
so it is useful to featurize very large datasets.
For more information, check out the documentation: https://github.com/jameslyons/python_speech_features
'''
import numpy as np
from python_speech_features import mfcc
from python_speech_features import logfbank
from python_speech_features import ssc
import scipy.io.wavfile as wav
import os
# get labels for later
def get_labels(vector, label, label2):
    """Build one feature name per element of *vector*.

    Names have the form '<label><index>_<label2>' with a 1-based index,
    e.g. get_labels(v, 'mfcc_', 'means') -> ['mfcc_1_means', 'mfcc_2_means', ...].
    Only the length of *vector* matters; its values are ignored.
    """
    return [label + str(idx) + '_' + label2 for idx in range(1, len(vector) + 1)]
def pspeech_featurize(file):
    """Featurize one audio file with python_speech_features.

    Computes MFCC, log filterbank, and spectral-subband-centroid matrices,
    then summarizes each coefficient column with five statistics
    (mean, std, max, min, median).

    Returns:
        (features, labels): a 1-D numpy array of statistics and a parallel
        list of human-readable feature names.
    """
    # wav.read cannot parse mp3, so transcode first and clean up afterwards
    converted = file[-4:] == '.mp3'
    if converted:
        os.system('ffmpeg -i %s %s' % (file, file[0:-4] + '.wav'))
        file = file[0:-4] + '.wav'
    (rate, sig) = wav.read(file)
    # (label prefix, per-frame feature matrix) pairs, in output order
    feature_sets = [('mfcc_', mfcc(sig, rate)),
                    ('fbank_', logfbank(sig, rate)),
                    ('spectral_centroid_', ssc(sig, rate))]
    # (label suffix, column-statistic function) pairs, in output order
    stat_funcs = [('means', np.mean),
                  ('stds', np.std),
                  ('max', np.amax),
                  ('min', np.amin),
                  ('medians', np.median)]
    features = np.array([])
    labels = []
    for prefix, matrix in feature_sets:
        for suffix, func in stat_funcs:
            column_stats = func(matrix, axis=0)
            features = np.append(features, column_stats)
            labels = labels + get_labels(column_stats, prefix, suffix)
    if converted:
        os.remove(file)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pspeechtime_features.py | features/audio_features/pspeechtime_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pspeechtime_features']
This collects the time series features from python_speech_features.
See the documentation for more information: https://github.com/jameslyons/python_speech_features
'''
import numpy as np
from python_speech_features import mfcc
from python_speech_features import logfbank
from python_speech_features import ssc
import scipy.io.wavfile as wav
import os
# get labels for later
def get_labels(vector, label, label2):
    """Return a 1-based feature-name list matching *vector*'s length.

    Each entry is label + str(i) + '_' + label2 for i = 1..len(vector);
    the contents of *vector* are not inspected.
    """
    names = []
    index = 1
    for _ in vector:
        names.append(label + str(index) + '_' + label2)
        index += 1
    return names
def pspeech_featurize(file):
    """Extract a fixed-length, flattened MFCC time series for one audio file.

    The per-frame MFCC matrix is flattened frame-by-frame and zero-padded
    to at least 25948 values so that every sample yields the same feature
    dimensionality (longer files produce longer vectors unchanged, matching
    the original behavior).

    Returns:
        (features, labels): the (possibly padded) value list and a parallel
        list of 'mfcc_<i>_time_25ms' names.
    """
    # wav.read cannot parse mp3, so transcode first and clean up afterwards
    convert = False
    if file[-4:] == '.mp3':
        convert = True
        os.system('ffmpeg -i %s %s' % (file, file[0:-4] + '.wav'))
        file = file[0:-4] + '.wav'
    (rate, sig) = wav.read(file)
    mfcc_feat = mfcc(sig, rate).flatten().tolist()
    # pad in one step instead of appending zeros one at a time
    if len(mfcc_feat) < 25948:
        mfcc_feat.extend([0] * (25948 - len(mfcc_feat)))
    features = mfcc_feat
    labels = get_labels(mfcc_feat, 'mfcc_', 'time_25ms')
    if convert:
        os.remove(file)
    # BUGFIX: removed leftover debug output `print(len(labels))` that wrote
    # to stdout on every featurization call.
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/speechmetrics_features.py | features/audio_features/speechmetrics_features.py | # the case of absolute metrics
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['speechmetrics_features']
A wide array of open source audio quality measures to assess the quality of audio files. Note there
are no audio file references necessary to extract these metrics.
taken from https://github.com/aliutkus/speechmetrics
'''
import os
# adding this in because some installations may not include speechmetrics
try:
import speechmetrics
except:
curdir=os.getcwd()
os.system('pip3 install git+https://github.com/aliutkus/speechmetrics#egg=speechmetrics[cpu]')
os.system('git clone https://github.com/jfsantos/SRMRpy')
os.chdir('SRMRpy')
os.system('python3 setup.py install')
os.chdir(curdir)
import speechmetrics
def speechmetrics_featurize(wavfile):
    """Score *wavfile* with speechmetrics' reference-free ('absolute') metrics.

    Uses a 5-second analysis window and coerces the mosnet/srmr scores from
    numpy scalars to plain Python floats.

    Returns:
        (features, labels): metric values and their corresponding names.
    """
    seconds_per_window = 5
    scorer = speechmetrics.load('absolute', seconds_per_window)
    scores = scorer(wavfile)
    # these two come back as numpy scalar types; normalize to float
    for key in ('mosnet', 'srmr'):
        scores[key] = float(scores[key])
    return list(scores.values()), list(scores)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/standard_features.py | features/audio_features/standard_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['standard_features']
A standard feature array extracted using LibROSA's library.
'''
import librosa, os, uuid
import numpy as np
from pydub import AudioSegment
def audio_featurize(wavfile):
    """Summarize 13 MFCCs and their deltas for one audio file.

    For each of the 13 MFCC rows and each of the 13 delta rows, collects
    (mean, std, min, max) in that order, yielding a 104-element vector
    identical in layout to the hand-enumerated original.
    """
    # analysis parameters
    hop_length = 512
    n_fft = 2048
    # decode audio at librosa's default sample rate
    y, sr = librosa.load(wavfile)
    # 13 MFCC coefficients plus their first-order deltas
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
    mfcc_delta = librosa.feature.delta(mfcc)
    stats = []
    for coeff_matrix in (mfcc, mfcc_delta):
        for row in coeff_matrix:
            stats.extend([np.mean(row), np.std(row), np.amin(row), np.amax(row)])
    return np.array(stats)
def exportfile(newAudio, time1, time2, filename, i):
    """Export the [time1:time2] (ms) slice of *newAudio* to a new wav file.

    The output name is a fresh uuid4; a '_segment' suffix is added when
    '<filename-stem>_<i>.wav' already exists in the current directory
    (preserving the original collision-avoidance quirk).

    Returns the name of the file that was written.
    """
    segment = newAudio[time1:time2]
    existing = os.listdir()
    if filename[0:-4] + '_' + str(i) + '.wav' in existing:
        out_name = str(uuid.uuid4()) + '_segment' + '.wav'
    else:
        out_name = str(uuid.uuid4()) + '.wav'
    print('making %s' % (out_name))
    segment.export(out_name, format="wav")
    return out_name
def audio_time_features(filename):
    """Average audio_featurize() output over ~0.5 s slices of *filename*.

    Splits the recording into equal segments of roughly `timesplit` seconds,
    exports each slice to a temporary wav, featurizes it, and returns the
    per-segment mean of the 104-dimensional MFCC statistic vector.
    """
    # recommend >0.50 seconds for timesplit
    timesplit = 0.50
    hop_length = 512
    n_fft = 2048
    y, sr = librosa.load(filename)
    duration = float(librosa.core.get_duration(y))
    # slice the signal into segnum equal windows; boundaries in milliseconds
    # because pydub's AudioSegment slicing is millisecond-based
    segnum = round(duration / timesplit)
    deltat = duration / segnum
    timesegment = list()
    time = 0
    for i in range(segnum):
        # milliseconds
        timesegment.append(time)
        time = time + deltat * 1000
    if filename[-4:] == '.wav':
        newAudio = AudioSegment.from_wav(filename)
    elif filename[-4:] == '.mp3':
        newAudio = AudioSegment.from_mp3(filename)
    filelist = list()
    for i in range(len(timesegment) - 1):
        filename = exportfile(newAudio, timesegment[i], timesegment[i + 1], filename, i)
        filelist.append(filename)
    # accumulator: 26 coefficient rows x 4 statistics = 104 features
    featureslist = np.zeros(104)
    # featurize each temporary segment, then delete it
    for j in range(len(filelist)):
        try:
            # BUGFIX: previously indexed filelist[i] inside this j-loop,
            # re-featurizing the same segment on every iteration.
            features = audio_featurize(filelist[j])
            featureslist = featureslist + features
        except Exception:
            # BUGFIX: the old handler called featureslist.append('silence'),
            # but numpy arrays have no append method, so any splice error
            # crashed with AttributeError; now the bad segment is skipped.
            print('error splicing')
        finally:
            # always clean up the temporary slice file
            if os.path.isfile(filelist[j]):
                os.remove(filelist[j])
    # scale by the segment count to get the mean in each category
    featureslist = featureslist / segnum
    return featureslist
def standard_featurize(filename):
    """Combine frame-level (20 ms) and windowed (500 ms) MFCC statistics.

    Concatenates audio_featurize() (whole-file MFCC + delta stats) with
    audio_time_features() (the same stats averaged over ~0.5 s slices).

    Returns:
        (features, labels): a 208-element vector and matching names of the
        form 'mfcc_<i>[_delta]_<stat>_<window>'.
    """
    features = np.append(audio_featurize(filename), audio_time_features(filename))
    # generate the label list instead of hand-writing all 208 strings;
    # ordering: 20ms plain 1-13, 20ms delta 1-13, then the same for 500ms,
    # each coefficient contributing (mean, std, min, max) in that order
    labels = []
    for window in ('20ms', '500ms'):
        for delta in ('', '_delta'):
            for coeff in range(1, 14):
                for stat in ('mean', 'std', 'min', 'max'):
                    labels.append('mfcc_%d%s_%s_%s' % (coeff, delta, stat, window))
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/surfboard_features.py | features/audio_features/surfboard_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['surfboard_features']
Extracts acoustic features using the Surfboard library.
Note this is quite a powerful audio feature set that can be used
for a variety of purposes.
For more information, check out Surfboard's documentation: https://github.com/novoic/surfboard
'''
import os, pandas
import numpy as np
import pandas as pd
import uuid, shutil
# https://github.com/novoic/surfboard
def surfboard_featurize(wavfile, helpdir):
# if categorize == True, output feature categories
# create a temporary folder
curdir=os.getcwd()
wav_folder=str(uuid.uuid4())
# copy one file over
os.mkdir(wav_folder)
shutil.copy(os.getcwd()+'/'+wavfile, os.getcwd()+'/'+wav_folder+'/'+wavfile)
wav_folderpath=os.getcwd()+'/'+wav_folder
# options are ['all_features.yaml', 'chroma_components.yaml', 'parkinsons_features.yaml','spectral_features.yaml']
config='parkinsons_features.yaml'
os.system('surfboard compute-features -i %s -o %s/surfboard_features.csv -F %s/surfboard/example_configs/%s -j 4'%(wav_folderpath, wav_folderpath, helpdir, config))
os.chdir(wav_folderpath)
g=pd.read_csv('surfboard_features.csv')
features=list(g.iloc[0,:][0:-1])
labels=['f0_contour_mean', 'f0_contour_std', 'f0_contour_skewness', 'f0_contour_kurtosis', 'f0_contour_first_derivative_mean', 'f0_contour_first_derivative_std', 'f0_contour_first_derivative_skewness', 'f0_contour_first_derivative_kurtosis', 'f0_contour_second_derivative_mean', 'f0_contour_second_derivative_std', 'f0_contour_second_derivative_skewness', 'f0_contour_second_derivative_kurtosis', 'f0_contour_first_quartile', 'f0_contour_second_quartile', 'f0_contour_third_quartile', 'f0_contour_q2_q1_range', 'f0_contour_q3_q2_range', 'f0_contour_q3_q1_range', 'f0_contour_percentile_1', 'f0_contour_percentile_99', 'f0_contour_percentile_1_99_range', 'f0_contour_linear_regression_offset', 'f0_contour_linear_regression_slope', 'f0_contour_linear_regression_mse', 'f0_mean', 'f0_std', 'log_energy', 'log_energy_slidingwindow_mean', 'log_energy_slidingwindow_std', 'log_energy_slidingwindow_skewness', 'log_energy_slidingwindow_kurtosis', 'log_energy_slidingwindow_first_derivative_mean', 'log_energy_slidingwindow_first_derivative_std', 'log_energy_slidingwindow_first_derivative_skewness', 'log_energy_slidingwindow_first_derivative_kurtosis', 'log_energy_slidingwindow_second_derivative_mean', 'log_energy_slidingwindow_second_derivative_std', 'log_energy_slidingwindow_second_derivative_skewness', 'log_energy_slidingwindow_second_derivative_kurtosis', 'log_energy_slidingwindow_first_quartile', 'log_energy_slidingwindow_second_quartile', 'log_energy_slidingwindow_third_quartile', 'log_energy_slidingwindow_q2_q1_range', 'log_energy_slidingwindow_q3_q2_range', 'log_energy_slidingwindow_q3_q1_range', 'log_energy_slidingwindow_percentile_1', 'log_energy_slidingwindow_percentile_99', 'log_energy_slidingwindow_percentile_1_99_range', 'log_energy_slidingwindow_linear_regression_offset', 'log_energy_slidingwindow_linear_regression_slope', 'log_energy_slidingwindow_linear_regression_mse', 'f1', 'f2', 'f3', 'f4', 'loudness', 'rms_mean', 'rms_std', 'rms_skewness', 'rms_kurtosis', 
'rms_first_derivative_mean', 'rms_first_derivative_std', 'rms_first_derivative_skewness', 'rms_first_derivative_kurtosis', 'rms_second_derivative_mean', 'rms_second_derivative_std', 'rms_second_derivative_skewness', 'rms_second_derivative_kurtosis', 'rms_first_quartile', 'rms_second_quartile', 'rms_third_quartile', 'rms_q2_q1_range', 'rms_q3_q2_range', 'rms_q3_q1_range', 'rms_percentile_1', 'rms_percentile_99', 'rms_percentile_1_99_range', 'rms_linear_regression_offset', 'rms_linear_regression_slope', 'rms_linear_regression_mse', 'mfcc_mean_1', 'mfcc_mean_2', 'mfcc_mean_3', 'mfcc_mean_4', 'mfcc_mean_5', 'mfcc_mean_6', 'mfcc_mean_7', 'mfcc_mean_8', 'mfcc_mean_9', 'mfcc_mean_10', 'mfcc_mean_11', 'mfcc_mean_12', 'mfcc_mean_13', 'mfcc_std_1', 'mfcc_std_2', 'mfcc_std_3', 'mfcc_std_4', 'mfcc_std_5', 'mfcc_std_6', 'mfcc_std_7', 'mfcc_std_8', 'mfcc_std_9', 'mfcc_std_10', 'mfcc_std_11', 'mfcc_std_12', 'mfcc_std_13', 'mfcc_skewness_1', 'mfcc_skewness_2', 'mfcc_skewness_3', 'mfcc_skewness_4', 'mfcc_skewness_5', 'mfcc_skewness_6', 'mfcc_skewness_7', 'mfcc_skewness_8', 'mfcc_skewness_9', 'mfcc_skewness_10', 'mfcc_skewness_11', 'mfcc_skewness_12', 'mfcc_skewness_13', 'mfcc_kurtosis_1', 'mfcc_kurtosis_2', 'mfcc_kurtosis_3', 'mfcc_kurtosis_4', 'mfcc_kurtosis_5', 'mfcc_kurtosis_6', 'mfcc_kurtosis_7', 'mfcc_kurtosis_8', 'mfcc_kurtosis_9', 'mfcc_kurtosis_10', 'mfcc_kurtosis_11', 'mfcc_kurtosis_12', 'mfcc_kurtosis_13', 'mfcc_first_derivative_mean_1', 'mfcc_first_derivative_mean_2', 'mfcc_first_derivative_mean_3', 'mfcc_first_derivative_mean_4', 'mfcc_first_derivative_mean_5', 'mfcc_first_derivative_mean_6', 'mfcc_first_derivative_mean_7', 'mfcc_first_derivative_mean_8', 'mfcc_first_derivative_mean_9', 'mfcc_first_derivative_mean_10', 'mfcc_first_derivative_mean_11', 'mfcc_first_derivative_mean_12', 'mfcc_first_derivative_mean_13', 'mfcc_first_derivative_std_1', 'mfcc_first_derivative_std_2', 'mfcc_first_derivative_std_3', 'mfcc_first_derivative_std_4', 'mfcc_first_derivative_std_5', 
'mfcc_first_derivative_std_6', 'mfcc_first_derivative_std_7', 'mfcc_first_derivative_std_8', 'mfcc_first_derivative_std_9', 'mfcc_first_derivative_std_10', 'mfcc_first_derivative_std_11', 'mfcc_first_derivative_std_12', 'mfcc_first_derivative_std_13', 'mfcc_first_derivative_skewness_1', 'mfcc_first_derivative_skewness_2', 'mfcc_first_derivative_skewness_3', 'mfcc_first_derivative_skewness_4', 'mfcc_first_derivative_skewness_5', 'mfcc_first_derivative_skewness_6', 'mfcc_first_derivative_skewness_7', 'mfcc_first_derivative_skewness_8', 'mfcc_first_derivative_skewness_9', 'mfcc_first_derivative_skewness_10', 'mfcc_first_derivative_skewness_11', 'mfcc_first_derivative_skewness_12', 'mfcc_first_derivative_skewness_13', 'mfcc_first_derivative_kurtosis_1', 'mfcc_first_derivative_kurtosis_2', 'mfcc_first_derivative_kurtosis_3', 'mfcc_first_derivative_kurtosis_4', 'mfcc_first_derivative_kurtosis_5', 'mfcc_first_derivative_kurtosis_6', 'mfcc_first_derivative_kurtosis_7', 'mfcc_first_derivative_kurtosis_8', 'mfcc_first_derivative_kurtosis_9', 'mfcc_first_derivative_kurtosis_10', 'mfcc_first_derivative_kurtosis_11', 'mfcc_first_derivative_kurtosis_12', 'mfcc_first_derivative_kurtosis_13', 'mfcc_second_derivative_mean_1', 'mfcc_second_derivative_mean_2', 'mfcc_second_derivative_mean_3', 'mfcc_second_derivative_mean_4', 'mfcc_second_derivative_mean_5', 'mfcc_second_derivative_mean_6', 'mfcc_second_derivative_mean_7', 'mfcc_second_derivative_mean_8', 'mfcc_second_derivative_mean_9', 'mfcc_second_derivative_mean_10', 'mfcc_second_derivative_mean_11', 'mfcc_second_derivative_mean_12', 'mfcc_second_derivative_mean_13', 'mfcc_second_derivative_std_1', 'mfcc_second_derivative_std_2', 'mfcc_second_derivative_std_3', 'mfcc_second_derivative_std_4', 'mfcc_second_derivative_std_5', 'mfcc_second_derivative_std_6', 'mfcc_second_derivative_std_7', 'mfcc_second_derivative_std_8', 'mfcc_second_derivative_std_9', 'mfcc_second_derivative_std_10', 'mfcc_second_derivative_std_11', 
'mfcc_second_derivative_std_12', 'mfcc_second_derivative_std_13', 'mfcc_second_derivative_skewness_1', 'mfcc_second_derivative_skewness_2', 'mfcc_second_derivative_skewness_3', 'mfcc_second_derivative_skewness_4', 'mfcc_second_derivative_skewness_5', 'mfcc_second_derivative_skewness_6', 'mfcc_second_derivative_skewness_7', 'mfcc_second_derivative_skewness_8', 'mfcc_second_derivative_skewness_9', 'mfcc_second_derivative_skewness_10', 'mfcc_second_derivative_skewness_11', 'mfcc_second_derivative_skewness_12', 'mfcc_second_derivative_skewness_13', 'mfcc_second_derivative_kurtosis_1', 'mfcc_second_derivative_kurtosis_2', 'mfcc_second_derivative_kurtosis_3', 'mfcc_second_derivative_kurtosis_4', 'mfcc_second_derivative_kurtosis_5', 'mfcc_second_derivative_kurtosis_6', 'mfcc_second_derivative_kurtosis_7', 'mfcc_second_derivative_kurtosis_8', 'mfcc_second_derivative_kurtosis_9', 'mfcc_second_derivative_kurtosis_10', 'mfcc_second_derivative_kurtosis_11', 'mfcc_second_derivative_kurtosis_12', 'mfcc_second_derivative_kurtosis_13', 'mfcc_first_quartile_1', 'mfcc_first_quartile_2', 'mfcc_first_quartile_3', 'mfcc_first_quartile_4', 'mfcc_first_quartile_5', 'mfcc_first_quartile_6', 'mfcc_first_quartile_7', 'mfcc_first_quartile_8', 'mfcc_first_quartile_9', 'mfcc_first_quartile_10', 'mfcc_first_quartile_11', 'mfcc_first_quartile_12', 'mfcc_first_quartile_13', 'mfcc_second_quartile_1', 'mfcc_second_quartile_2', 'mfcc_second_quartile_3', 'mfcc_second_quartile_4', 'mfcc_second_quartile_5', 'mfcc_second_quartile_6', 'mfcc_second_quartile_7', 'mfcc_second_quartile_8', 'mfcc_second_quartile_9', 'mfcc_second_quartile_10', 'mfcc_second_quartile_11', 'mfcc_second_quartile_12', 'mfcc_second_quartile_13', 'mfcc_third_quartile_1', 'mfcc_third_quartile_2', 'mfcc_third_quartile_3', 'mfcc_third_quartile_4', 'mfcc_third_quartile_5', 'mfcc_third_quartile_6', 'mfcc_third_quartile_7', 'mfcc_third_quartile_8', 'mfcc_third_quartile_9', 'mfcc_third_quartile_10', 'mfcc_third_quartile_11', 
'mfcc_third_quartile_12', 'mfcc_third_quartile_13', 'mfcc_q2_q1_range_1', 'mfcc_q2_q1_range_2', 'mfcc_q2_q1_range_3', 'mfcc_q2_q1_range_4', 'mfcc_q2_q1_range_5', 'mfcc_q2_q1_range_6', 'mfcc_q2_q1_range_7', 'mfcc_q2_q1_range_8', 'mfcc_q2_q1_range_9', 'mfcc_q2_q1_range_10', 'mfcc_q2_q1_range_11', 'mfcc_q2_q1_range_12', 'mfcc_q2_q1_range_13', 'mfcc_q3_q2_range_1', 'mfcc_q3_q2_range_2', 'mfcc_q3_q2_range_3', 'mfcc_q3_q2_range_4', 'mfcc_q3_q2_range_5', 'mfcc_q3_q2_range_6', 'mfcc_q3_q2_range_7', 'mfcc_q3_q2_range_8', 'mfcc_q3_q2_range_9', 'mfcc_q3_q2_range_10', 'mfcc_q3_q2_range_11', 'mfcc_q3_q2_range_12', 'mfcc_q3_q2_range_13', 'mfcc_q3_q1_range_1', 'mfcc_q3_q1_range_2', 'mfcc_q3_q1_range_3', 'mfcc_q3_q1_range_4', 'mfcc_q3_q1_range_5', 'mfcc_q3_q1_range_6', 'mfcc_q3_q1_range_7', 'mfcc_q3_q1_range_8', 'mfcc_q3_q1_range_9', 'mfcc_q3_q1_range_10', 'mfcc_q3_q1_range_11', 'mfcc_q3_q1_range_12', 'mfcc_q3_q1_range_13', 'mfcc_percentile_1_1', 'mfcc_percentile_1_2', 'mfcc_percentile_1_3', 'mfcc_percentile_1_4', 'mfcc_percentile_1_5', 'mfcc_percentile_1_6', 'mfcc_percentile_1_7', 'mfcc_percentile_1_8', 'mfcc_percentile_1_9', 'mfcc_percentile_1_10', 'mfcc_percentile_1_11', 'mfcc_percentile_1_12', 'mfcc_percentile_1_13', 'mfcc_percentile_99_1', 'mfcc_percentile_99_2', 'mfcc_percentile_99_3', 'mfcc_percentile_99_4', 'mfcc_percentile_99_5', 'mfcc_percentile_99_6', 'mfcc_percentile_99_7', 'mfcc_percentile_99_8', 'mfcc_percentile_99_9', 'mfcc_percentile_99_10', 'mfcc_percentile_99_11', 'mfcc_percentile_99_12', 'mfcc_percentile_99_13', 'mfcc_percentile_1_99_range_1', 'mfcc_percentile_1_99_range_2', 'mfcc_percentile_1_99_range_3', 'mfcc_percentile_1_99_range_4', 'mfcc_percentile_1_99_range_5', 'mfcc_percentile_1_99_range_6', 'mfcc_percentile_1_99_range_7', 'mfcc_percentile_1_99_range_8', 'mfcc_percentile_1_99_range_9', 'mfcc_percentile_1_99_range_10', 'mfcc_percentile_1_99_range_11', 'mfcc_percentile_1_99_range_12', 'mfcc_percentile_1_99_range_13', 'mfcc_linear_regression_offset_1', 
'mfcc_linear_regression_offset_2', 'mfcc_linear_regression_offset_3', 'mfcc_linear_regression_offset_4', 'mfcc_linear_regression_offset_5', 'mfcc_linear_regression_offset_6', 'mfcc_linear_regression_offset_7', 'mfcc_linear_regression_offset_8', 'mfcc_linear_regression_offset_9', 'mfcc_linear_regression_offset_10', 'mfcc_linear_regression_offset_11', 'mfcc_linear_regression_offset_12', 'mfcc_linear_regression_offset_13', 'mfcc_linear_regression_slope_1', 'mfcc_linear_regression_slope_2', 'mfcc_linear_regression_slope_3', 'mfcc_linear_regression_slope_4', 'mfcc_linear_regression_slope_5', 'mfcc_linear_regression_slope_6', 'mfcc_linear_regression_slope_7', 'mfcc_linear_regression_slope_8', 'mfcc_linear_regression_slope_9', 'mfcc_linear_regression_slope_10', 'mfcc_linear_regression_slope_11', 'mfcc_linear_regression_slope_12', 'mfcc_linear_regression_slope_13', 'mfcc_linear_regression_mse_1', 'mfcc_linear_regression_mse_2', 'mfcc_linear_regression_mse_3', 'mfcc_linear_regression_mse_4', 'mfcc_linear_regression_mse_5', 'mfcc_linear_regression_mse_6', 'mfcc_linear_regression_mse_7', 'mfcc_linear_regression_mse_8', 'mfcc_linear_regression_mse_9', 'mfcc_linear_regression_mse_10', 'mfcc_linear_regression_mse_11', 'mfcc_linear_regression_mse_12', 'mfcc_linear_regression_mse_13', 'localJitter', 'localabsoluteJitter', 'rapJitter', 'ppq5Jitter', 'ddpJitter', 'localShimmer', 'localdbShimmer', 'apq3Shimmer', 'apq5Shimmer', 'apq11Shimmer', 'hnr', 'ppe', 'dfa']
os.chdir(curdir)
shutil.rmtree(wav_folder)
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/loudness_features.py | features/audio_features/loudness_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['loudness_features']
Extracts loudness features.
For more information, check out PyLoudNorm documentation: https://github.com/csteinmetz1/pyloudnorm
'''
import soundfile as sf
import os
try:
import pyloudnorm as pyln
except:
os.system('pip3 install pyloudnorm==0.1.0')
import pyloudnorm as pyln
def loudness_featurize(audiofile):
'''
from the docs
https://github.com/danilobellini/audiolazy/blob/master/examples/formants.py
'''
data, rate = sf.read(audiofile) # load audio (with shape (samples, channels))
meter = pyln.Meter(rate) # create BS.1770 meter
loudness = meter.integrated_loudness(data) # measure loudness
# units in dB
features=[loudness]
labels=['Loudness']
print(dict(zip(labels,features)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/parallel_featurize.py | features/audio_features/parallel_featurize.py | '''
Import all the featurization scripts and allow the user to customize what embedding that
they would like to use for modeling purposes.
AudioSet is the only embedding that is a little bit wierd, as it is normalized to the length
of each audio file. There are many ways around this issue (such as normalizing to the length
of each second), however, I included all the original embeddings here in case the time series
information is useful to you.
'''
import json, os, sys
import numpy as np
import helpers.transcribe as ts
from tqdm import tqdm
import ray
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
# import to get image feature script
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/image_features')
haar_dir=prevdir+'/image_features/helpers/haarcascades'
import image_features as imf
sys.path.append(prevdir+'/text_features')
import nltk_features as nf
os.chdir(directory)
################################################
## Helper functions ##
################################################
def transcribe(file, default_audio_transcriber):
# get transcript
try:
if file[-4:]=='.wav':
transcript=ts.transcribe_sphinx(file)
elif file[-4] == '.mp3':
os.system('ffmpeg -i %s %s'%(file, file[0:-4]+'.wav'))
transcript=ts.transcribe_sphinx(file)
os.remove(file[-4:]+'.wav')
else:
transcript=file
except:
transcript=''
return transcript
def audio_featurize(feature_set, audiofile, transcript):
# long conditional on all the types of features that can happen and featurizes accordingly.
if feature_set == 'librosa_features':
features, labels = lf.librosa_featurize(audiofile, False)
elif feature_set == 'standard_features':
features, labels = sf.standard_featurize(audiofile)
elif feature_set == 'audioset_features':
features, labels = af.audioset_featurize(audiofile, basedir, foldername)
elif feature_set == 'sox_features':
features, labels = soxf.sox_featurize(audiofile)
elif feature_set == 'sa_features':
features, labels = saf.sa_featurize(audiofile)
elif feature_set == 'pyaudio_features':
features, labels = pf.pyaudio_featurize(audiofile, basedir)
elif feature_set == 'spectrogram_features':
features, labels= specf.spectrogram_featurize(audiofile)
elif feature_set == 'meta_features':
features, labels = mf.meta_featurize(audiofile, cur_dir, help_dir)
elif feature_set == 'opensmile_features':
features, labels = osm.opensmile_featurize(audiofile, basedir, 'GeMAPSv01a.conf')
elif feature_set == 'praat_features':
features, labels = prf.praat_featurize(audiofile)
elif feature_set == 'pspeech_features':
features, labels = psf.pspeech_featurize(audiofile)
elif feature_set == 'specimage_features':
features, labels = sif.specimage_featurize(audiofile,cur_dir, haar_dir)
elif feature_set == 'specimage2_features':
features, labels = sif2.specimage2_featurize(audiofile, cur_dir, haar_dir)
elif feature_set == 'myprosody_features':
print('Myprosody features are coming soon!! Currently debugging this feature set.')
# features, labels = mpf.myprosody_featurize(audiofile, cur_dir, help_dir)
elif feature_set == 'nltk_features':
features, labels = nf.nltk_featurize(transcript)
elif feature_set == 'mixed_features':
features, labels = mixf.mixed_featurize(audiofile, transcript, help_dir)
elif feature_set == 'audiotext_features':
features, labels = atf.audiotext_featurize(audiofile, transcript)
elif feature_set == 'prosody_features':
features, labels = prosf.prosody_featurize(audiofile, 20)
elif feature_set == 'pyworld_features':
features, labels = pywf.pyworld_featurize(audiofile)
# make sure all the features do not have any infinity or NaN
features=np.nan_to_num(np.array(features))
features=features.tolist()
return features, labels
@ray.remote
def audio_parallel_featurize(ifilename, foldername, default_audio_transcriber, audio_transcribe, feature_sets):
if ifilename[-4:]=='.m4a':
os.system('ffmpeg -i %s %s'%(listdir[i], listdir[i][0:-4]+'.wav'))
filename=ifilename[0:-4]+'.wav'
os.remove(ifilename)
try:
os.chdir(foldername)
sampletype='audio'
if ifilename[0:-4]+'.json' not in listdir:
# make new .JSON if it is not there with base array schema.
basearray=make_features(sampletype)
# get the audio transcript
if audio_transcribe==True:
transcript = transcribe(filename, default_audio_transcriber)
transcript_list=basearray['transcripts']
transcript_list['audio'][default_audio_transcriber]=transcript
basearray['transcripts']=transcript_list
else:
transcript=''
# featurize the audio file
for j in range(len(feature_sets)):
feature_set=feature_sets[j]
features, labels = audio_featurize(feature_set, filename, transcript)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
print(features)
audio_features=basearray['features']['audio']
audio_features[feature_set]=data
basearray['features']['audio']=audio_features
basearray['labels']=[labelname]
# write to .JSON
jsonfile=open(ifilename[0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
elif ifilename[0:-4]+'.json' in listdir:
# load the .JSON file if it is there
basearray=json.load(open(ifilename[0:-4]+'.json'))
transcript_list=basearray['transcripts']
# only transcribe if you need to (checks within schema)
if audio_transcribe==True and default_audio_transcriber not in list(transcript_list['audio']):
transcript = transcribe(filename, default_audio_transcriber)
transcript_list['audio'][default_audio_transcriber]=transcript
basearray['transcripts']=transcript_list
elif audio_transcribe==True and default_audio_transcriber in list(transcript_list['audio']):
transcript = transcript_list['audio'][default_audio_transcriber]
else:
transcript=''
# only re-featurize if necessary (checks if relevant feature embedding exists)
for j in range(len(feature_sets)):
feature_set=feature_sets[j]
if feature_set not in list(basearray['features']['audio']):
features, labels = audio_featurize(feature_set, filename, transcript)
print(features)
try:
data={'features':features.tolist(),
'labels': labels}
except:
data={'features':features,
'labels': labels}
basearray['features']['audio'][feature_set]=data
# only add the label if necessary
label_list=basearray['labels']
if labelname not in label_list:
label_list.append(labelname)
basearray['labels']=label_list
transcript_list=basearray['transcripts']
# overwrite .JSON
jsonfile=open(ifilename[0:-4]+'.json','w')
json.dump(basearray, jsonfile)
jsonfile.close()
except:
print('error')
def make_features(sampletype):
# only add labels when we have actual labels.
features={'audio':dict(),
'text': dict(),
'image':dict(),
'video':dict(),
'csv': dict(),
}
transcripts={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()}
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()}
data={'sampletype': sampletype,
'transcripts': transcripts,
'features': features,
'models': models,
'labels': []}
return data
################################################
## Load main settings ##
################################################
# directory=sys.argv[1]
basedir=os.getcwd()
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
audio_transcribe=settings['transcribe_audio']
default_audio_transcriber=settings['default_audio_transcriber']
try:
# assume 1 type of feature_set
feature_sets=[sys.argv[2]]
except:
# if none provided in command line, then load deafult features
feature_sets=settings['default_audio_features']
# ^^ feature set set by settings.json above ^^
# here are some options below to give an example of what is possible
# feature_sets=['librosa_features']
# feature_sets=['standard_features']
# feature_sets=['audioset_features']
# feature_sets=['sox_features']
# feature_sets=['sa_features']
# feature_sets=['pyaudio_features']
# feature_sets=['spectrogram_features']
# feature_sets= ['meta_features']
# feature_sets=['praat_features']
# feature_sets=['pspeech_features']
# feature_sets=['specimage_features']
# feature_sets=['specimage2_features']
# feature_sets=['myprosody_features']
# feature_sets= ['nltk_features']
# feature_sets= ['mixed_features']
# feature_sets= ['audiotext_features']
# feature_sets=['librosa_features', 'standard_features', 'audioset_features', 'sox_features',
# 'sa_features', 'pyaudio_features', 'spectrogram_features', 'meta_features',
# 'praat_features', 'pspeech_features', 'specimage_features', 'specimage2_features',
# 'myprosody_features', 'nltk_features', 'mixed_features', 'audiotext_features']
################################################
## Import According to settings ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'librosa_features' in feature_sets:
import librosa_features as lf
if 'standard_features' in feature_sets:
import standard_features as sf
if 'audioset_features' in feature_sets:
import audioset_features as af
if 'sox_features' in feature_sets:
import sox_features as soxf
if 'pyaudio_features' in feature_sets:
import pyaudio_features as pf
if 'sa_features' in feature_sets:
import sa_features as saf
if 'spectrogram_features' in feature_sets:
import spectrogram_features as specf
if 'meta_features' in feature_sets:
import meta_features as mf
if 'opensmile_features' in feature_sets:
import opensmile_features as osm
if 'praat_features' in feature_sets:
import praat_features as prf
if 'prosody_features' in feature_sets:
import prosody_features as prosf
if 'pspeech_features' in feature_sets:
import pspeech_features as psf
if 'specimage_features' in feature_sets:
import specimage_features as sif
if 'specimage2_features' in feature_sets:
import specimage2_features as sif2
if 'myprosody_features' in feature_sets:
pass
# import myprosody_features as mpf
if 'mixed_features' in feature_sets:
import mixed_features as mixf
if 'audiotext_features' in feature_sets:
import audiotext_features as atf
if 'pyworld_features' in feature_sets:
import pyworld_features as pywf
################################################
## Get featurization folder ##
################################################
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
labelname=labelname[-2]
else:
labelname=labelname[-1]
################################################
## Now go featurize! ##
################################################
ray.init()
# featurize all files accoridng to librosa featurize
for i in tqdm(range(len(listdir)), desc=labelname):
if listdir[i][-4:] in ['.wav', '.mp3', '.m4a']:
filename=listdir[i]
audio_parallel_featurize.remote(ifilename, foldername, default_audio_transcriber, audio_transcribe, feature_sets)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/sa_features.py | features/audio_features/sa_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['sa_features']
Uses the Signal Analysis library: https://brookemosby.github.io/Signal_Analysis/Signal_Analysis.features.html#module-Signal_Analysis.features.signal
'''
import os, librosa
try:
from Signal_Analysis.features.signal import get_F_0, get_HNR, get_Jitter, get_Pulses
except:
os.system('pip3 install Signal_Analysis==0.1.26')
from Signal_Analysis.features.signal import get_F_0, get_HNR, get_Jitter, get_Pulses
import numpy as np
def sa_featurize(audiofile):
'''
from the docs
https://brookemosby.github.io/Signal_Analysis/Signal_Analysis.features.html#module-Signal_Analysis.features.signal
'''
y, sr = librosa.core.load(audiofile)
duration = len(y)/sr
print(duration)
f0=get_F_0(y,sr)[0]
hnr=get_HNR(y,sr)
jitter=get_Jitter(y,sr)
jitter_features=list(jitter.values())
jitter_labels=list(jitter)
pulses=get_Pulses(y,sr)
pulses=len(pulses) / duration
features=[f0,hnr,pulses]+jitter_features
labels=['FundamentalFrequency','HarmonicstoNoiseRatio','PulsesPerSec']+jitter_labels
print(dict(zip(labels,features)))
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/spectrogram_features.py | features/audio_features/spectrogram_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['spectrogram_features']
140416 features in frequency domain for spectrograms. Resampled down to 32000 Hz and mono signal.
On Log mel spectrogram scale.
'''
import os
import librosa
import numpy as np
from sklearn.decomposition import PCA
def spectrogram_featurize(file_path):
n_fft = 1024
sr = 32000
mono = True
log_spec = False
n_mels = 128
hop_length = 192
fmax = None
if mono:
sig, sr = librosa.load(file_path, sr=sr, mono=True)
sig = sig[np.newaxis]
else:
sig, sr = librosa.load(file_path, sr=sr, mono=False)
spectrograms = []
for y in sig:
# compute stft
stft = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=None, window='hann', center=True, pad_mode='reflect')
# keep only amplitures
stft = np.abs(stft)
# log spectrogram
stft = np.log10(stft + 1)
# apply mel filterbank
spectrogram = librosa.feature.melspectrogram(S=stft, sr=sr, n_mels=n_mels, fmax=fmax)
# keep spectrogram
spectrograms.append(np.asarray(spectrogram))
labels=list()
mean_features=np.mean(np.array(spectrograms), axis=2)[0]
for i in range(len(mean_features)):
labels.append('log_spectrogram_mean_feature_%s'%(str(i+1)))
std_features=np.std(np.array(spectrograms), axis=2)[0]
for i in range(len(std_features)):
labels.append('log_spectrogram_std_feature_%s'%(str(i+1)))
# np.ndarray.flatten
features=np.append(mean_features, std_features)
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/opensmile_features.py | features/audio_features/opensmile_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['opensmile_features']
Featurizes data with the OpenSMILE Toolkit: https://www.audeering.com/opensmile/
Note that this is a proprietary feature set and can be only used for research purposes.
Also note that you can specify a range of feature extractors within the script itself:
feature_extractors=['avec2013.conf', 'emobase2010.conf', 'IS10_paraling.conf', 'IS13_ComParE.conf', 'IS10_paraling_compat.conf', 'emobase.conf',
'emo_large.conf', 'IS11_speaker_state.conf', 'IS12_speaker_trait_compat.conf', 'IS09_emotion.conf', 'IS12_speaker_trait.conf',
'prosodyShsViterbiLoudness.conf', 'ComParE_2016.conf', 'GeMAPSv01a.conf']
The default setting is "GeMAPSv01a.conf" as this is the standard array used for vocal biomarker research studies.
'''
import numpy as np
import json, os, time, shutil
def parseArff(arff_file):
'''
Parses Arff File created by OpenSmile Feature Extraction
'''
f = open(arff_file,'r', encoding='utf-8')
data = []
labels = []
for line in f:
if '@attribute' in line:
temp = line.split(" ")
feature = temp[1]
labels.append(feature)
if ',' in line:
temp = line.split(",")
for item in temp:
data.append(item)
temp = arff_file.split('/')
temp = temp[-1]
data[0] = temp[:-5] + '.wav'
newdata=list()
newlabels=list()
for i in range(len(data)):
try:
newdata.append(float(data[i]))
newlabels.append(labels[i])
except:
pass
return newdata,newlabels
def opensmile_featurize(audiofile, basedir, feature_extractor):
# options
feature_extractors=['avec2013.conf', 'emobase2010.conf', 'IS10_paraling.conf', 'IS13_ComParE.conf', 'IS10_paraling_compat.conf', 'emobase.conf',
'emo_large.conf', 'IS11_speaker_state.conf', 'IS12_speaker_trait_compat.conf', 'IS09_emotion.conf', 'IS12_speaker_trait.conf',
'prosodyShsViterbiLoudness.conf', 'ComParE_2016.conf', 'GeMAPSv01a.conf']
os.rename(audiofile,audiofile.replace(' ','_'))
audiofile=audiofile.replace(' ','_')
arff_file=audiofile[0:-4]+'.arff'
curdir=os.getcwd()
opensmile_folder=basedir+'/helpers/opensmile/opensmile-2.3.0'
print(opensmile_folder)
print(feature_extractor)
print(audiofile)
print(arff_file)
if feature_extractor== 'GeMAPSv01a.conf':
command='SMILExtract -C %s/config/gemaps/%s -I %s -O %s'%(opensmile_folder, feature_extractor, audiofile, arff_file)
print(command)
os.system(command)
else:
os.system('SMILExtract -C %s/config/%s -I %s -O %s'%(opensmile_folder, feature_extractor, audiofile, arff_file))
features, labels = parseArff(arff_file)
# remove temporary arff_file
os.remove(arff_file)
os.chdir(curdir)
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pause_features.py | features/audio_features/pause_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pause_features']
This extracts utterance times, pause numbers, time to first phonation,
and time to last phonation as features.
'''
import sys, os
from pyvad import vad, trim, split
import librosa
import matplotlib.pyplot as plt
import numpy as np
# make sure the right version of numba is installed
os.system('pip3 install numba==0.48')
def pause_featurize(wavfile, transcript):
'''
taken from https://github.com/F-Tag/python-vad/blob/master/example.ipynb
'''
data, fs = librosa.core.load(wavfile)
duration=librosa.get_duration(y=data, sr=fs)
time = np.linspace(0, len(data)/fs, len(data))
newdata=list()
for i in range(len(data)):
if data[i] > 1:
newdata.append(1.0)
elif data[i] < -1:
newdata.append(-1.0)
else:
newdata.append(data[i])
vact = vad(np.array(newdata), fs, fs_vad = 16000, hop_length = 30, vad_mode=2)
vact = list(vact)
while len(time) > len(vact):
vact.append(0.0)
utterances=list()
for i in range(len(vact)):
if vact[i] != vact[i-1]:
# voice shift
if vact[i] == 1:
start = i
else:
# this means it is end
end = i
try:
utterances.append([start/fs,end/fs])
except:
pass
pauses=list()
pause_lengths=list()
for i in range(len(utterances)-1):
pauses.append([utterances[i][1], utterances[i+1][0]])
pause_length=utterances[i+1][0] - utterances[i][1]
pause_lengths.append(pause_length)
# get descriptive stats of pause leengths
average_pause = np.mean(np.array(pause_lengths))
std_pause = np.std(np.array(pause_lengths))
if len(utterances) > 0:
first_phonation=utterances[0][0]
last_phonation=utterances[len(utterances)-1][1]
else:
first_phonation=0
last_phonation=0
words=transcript.lower().split()
if len(utterances)-1 != -1:
features = [utterances, pauses, len(utterances), len(pauses), average_pause, std_pause, first_phonation, last_phonation, (len(utterances)/duration)*60, (len(words)/duration)*60, duration]
else:
features = [utterances, pauses, len(utterances), 0, 0, 0, first_phonation, last_phonation, (len(utterances)/duration)*60, (len(words)/duration)*60, duration]
labels = ['UtteranceTimes', 'PauseTimes', 'UtteranceNumber', 'PauseNumber', 'AveragePauseLength', 'StdPauseLength', 'TimeToFirstPhonation','TimeToLastPhonation', 'UtterancePerMin', 'WordsPerMin', 'Duration']
# print(features)
# print(labels)
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pyaudiolex_features.py | features/audio_features/pyaudiolex_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pyaudio_features']
These are the time series features of the PyAudioAnalysis library.
For more information, see the documentation: https://github.com/tyiannak/pyAudioAnalysis
'''
import argparse, json, os, sys
sys.path.append(os.getcwd()+'/helpers/pyAudioLex')
from helpers.pyAudioLex import audio_ as audio
import pandas as pd
from datetime import datetime
def pyaudiolex_featurize(filename):
# features
results = audio.audio_featurize(filename)
labels=list(results)
features=list(results.values())
# combine all features and values into proper format for Allie
new_features=list()
new_labels=list()
for i in range(len(labels)):
# print(labels[i])
for j in range(len(features[i])):
new_labels.append(labels[i]+'_window_'+str(j))
new_features.append(features[i][j])
features=new_features
labels=new_labels
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/specimage_features.py | features/audio_features/specimage_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['specimage_features']
Uses a spectrogram and features extracted from the spectrogram as feature vectors.
'''
import os, sys
import helpers.audio_plot as ap
def prev_dir(directory):
g=directory.split('/')
# print(g)
lastdir=g[len(g)-1]
i1=directory.find(lastdir)
directory=directory[0:i1]
return directory
# import to get image feature script
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/image_features')
haar_dir=prevdir+'image_features/helpers/haarcascades'
import image_features as imf
os.chdir(directory)
def specimage_featurize(wavfile, cur_dir, haar_dir):
# create temporary image
imgfile=ap.plot_spectrogram(wavfile)
features, labels=imf.image_featurize(cur_dir, haar_dir, imgfile)
# remove temporary image file
os.remove(wavfile[0:-4]+'.png')
return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/audiotext_features.py | features/audio_features/audiotext_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['audiotext_features']
Featurizes data with text feautures extracted from the transcript.
These text features include nltk_features, textacy_features, spacy_features, and text_features.
'''
import librosa_features as lf
import helpers.transcribe as ts
import numpy as np
import random, math, os, sys, json, time
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/text_features')
import nltk_features as nf
import textacy_features as tfe
import spacy_features as spf
import text_features as tfea
def audiotext_featurize(wavfile, transcript):
try:
# get features
try:
nltk_features, nltk_labels=nf.nltk_featurize(transcript)
except:
nltk_labels=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'space', 'numbers', 'capletters', 'cc', 'cd', 'dt', 'ex', 'in', 'jj', 'jjr', 'jjs', 'ls', 'md', 'nn', 'nnp', 'nns', 'pdt', 'pos', 'prp', 'prp2', 'rbr', 'rbs', 'rp', 'to', 'uh', 'vb', 'vbd', 'vbg', 'vbn', 'vbp', 'vbz', 'wdt', 'wp', 'wrb', 'polarity', 'subjectivity', 'repeat']
nltk_features=list(np.zeros(len(nltk_labels)))
try:
textacy_features, textacy_labels=tfe.textacy_featurize(transcript)
except:
textacy_labels=['uniquewords', 'n_sents', 'n_words', 'n_chars', 'n_syllables', 'n_unique_words', 'n_long_words', 'n_monosyllable_words', 'n_polysyllable_words', 'flesch_kincaid_grade_level', 'flesch_kincaid_grade_level', 'flesch_reading_ease', 'smog_index', 'gunning_fog_index', 'coleman_liau_index', 'automated_readability_index', 'lix', 'gulpease_index', 'wiener_sachtextformel']
textacy_features=list(np.zeros(len(textacy_labels)))
try:
spacy_features, spacy_labels=spf.spacy_featurize(transcript)
except:
spacy_labels=['PROPN', 'ADP', 'DET', 'NUM', 'PUNCT', 'SPACE', 'VERB', 'NOUN', 'ADV', 'CCONJ', 'PRON', 'ADJ', 'SYM', 'PART', 'INTJ', 'X', 'pos_other', 'NNP', 'IN', 'DT', 'CD', 'NNPS', ',', '_SP', 'VBZ', 'NN', 'RB', 'CC', '', 'NNS', '.', 'PRP', 'MD', 'VB', 'HYPH', 'VBD', 'JJ', ':', '-LRB-', '$', '-RRB-', 'VBG', 'VBN', 'NFP', 'RBR', 'POS', 'VBP', 'RP', 'JJS', 'PRP$', 'EX', 'JJR', 'WP', 'WDT', 'TO', 'WRB', "''", '``', 'PDT', 'AFX', 'RBS', 'UH', 'WP$', 'FW', 'XX', 'SYM', 'LS', 'ADD', 'tag_other', 'compound', 'ROOT', 'prep', 'det', 'pobj', 'nummod', 'punct', '', 'nsubj', 'advmod', 'cc', 'conj', 'aux', 'dobj', 'nmod', 'acl', 'appos', 'npadvmod', 'amod', 'agent', 'case', 'intj', 'prt', 'pcomp', 'ccomp', 'attr', 'dep', 'acomp', 'poss', 'auxpass', 'expl', 'mark', 'nsubjpass', 'quantmod', 'advcl', 'relcl', 'oprd', 'neg', 'xcomp', 'csubj', 'predet', 'parataxis', 'dative', 'preconj', 'csubjpass', 'meta', 'dep_other', '\ufeffXxx', 'Xxxxx', 'XXxxx', 'xx', 'X', 'Xxxx', 'Xxx', ',', '\n\n', 'xXxxx', 'xxx', 'xxxx', '\n', '.', ' ', '-', 'xxx.xxxx.xxx', '\n\n\n', ':', '\n ', 'dddd', '[', '#', 'dd', ']', 'd', 'XXX-d', '*', 'XXXX', 'XX', 'XXX', '\n\n\n\n', 'Xx', '\n\n\n ', '--', '\n\n ', ' ', ' ', ' ', "'x", 'x', 'X.', 'xxx--', ';', 'Xxx.', '(', ')', "'", '“', '”', 'Xx.', '!', "'xx", 'xx!--Xxx', "x'xxxx", '?', '_', "x'x", "x'xx", "Xxx'xxxx", 'Xxxxx--', 'xxxx--', '--xxxx', 'X--', 'xx--', 'xxxx”--xxx', 'xxx--“xxxx', "Xxx'x", ';--', 'xxx--_xxx', "xxx'x", 'xxx!--xxxx', 'xxxx?--_Xxx', "Xxxxx'x", 'xxxx--“xxxx', "xxxx'xxx", '--Xxxxx', ',--', '?--', 'xx--“xx', 'xx!--X', '.--', 'xxx--“xxx', ':--', 'Xxxxx--“xxxx', 'xxxx!--xxxx', 'xx”--xxx', 'xxxx--_xxx', 'xxxx--“xxx', '--xx', '--X', 'xxxx!--Xxx', '--xxx', 'xxx_.', 'xxxx--_xx', 'xxxx--_xx_xxxx', 'xx!--xxxx', 'xxxx!--xx', "X'xx", "xxxx'x", "X_'x", "xxx'xxx", '--Xxxx', "X'Xxxxx", "Xx'xxxx", '--Xxx', 'xxxx”--xxxx', 'xxxx!--', 'xxxx--“x', 'Xxxx!--Xxxx', 'xxx!--Xxx', 'Xxxxx.', 'xxxx_.', 'xx--“Xxxx', '\n\n ', 'Xxxxx”--xxx', 'xxxx”--xx', 'xxxx--“xx', 
"Xxxxx!--Xxx'x", "X'xxxx", 'Xxxxx?--', '--Xx', 'xxxx!”--Xx', "xxxx--“X'x", "xxxx'", 'xxx.--“Xxxx', 'xxxx--“X', 'xxxx!--X', 'Xxx”--xx', 'xxx”--xxx', 'xxx-_xxx', "x'Xxxxx", 'Xxxxx!--X', 'Xxxxx!--Xxx', 'dd-d.xxx', 'xxxx://xxx.xxxx.xxx/d/dd/', 'xXxxxx', 'xxxx://xxxx.xxx/xxxx', 'd.X.', '/', 'd.X.d', 'd.X', '%', 'Xd', 'xxxx://xxx.xxxx.xxx', 'ddd(x)(d', 'X.X.', 'ddd', 'xxxx@xxxx.xxx', 'xxxx://xxxx.xxx', '$', 'd,ddd', 'shape_other', 'mean sentence polarity', 'std sentence polarity', 'max sentence polarity', 'min sentence polarity', 'median sentence polarity', 'mean sentence subjectivity', 'std sentence subjectivity', 'max sentence subjectivity', 'min sentence subjectivity', 'median sentence subjectivity', 'character count', 'word count', 'sentence number', 'words per sentence', 'unique chunk noun text', 'unique chunk root text', 'unique chunk root head text', 'chunkdep ROOT', 'chunkdep pobj', 'chunkdep nsubj', 'chunkdep dobj', 'chunkdep conj', 'chunkdep appos', 'chunkdep attr', 'chunkdep nsubjpass', 'chunkdep dative', 'chunkdep pcomp', 'number of named entities', 'PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LAW', 'LANGUAGE', 'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL']
spacy_features=list(np.zeros(len(spacy_labels)))
try:
text_features,text_labels=tfea.text_featurize(transcript)
except:
text_labels=['filler ratio', 'type token ratio', 'standardized word entropy', 'question ratio', 'number ratio', 'Brunets Index', 'Honores statistic', 'datewords freq', 'word number', 'five word count', 'max word length', 'min word length', 'variance of vocabulary', 'std of vocabulary', 'sentencenum', 'periods', 'questions', 'interjections', 'repeatavg']
text_features=list(np.zeros(len(text_labels)))
# concatenate feature arrays
features=np.append(np.array(nltk_features),np.array(textacy_features))
features=np.append(features,np.array(spacy_features))
features=np.append(features, np.array(text_features))
# concatenate labels
labels=nltk_labels+textacy_labels+spacy_labels+text_labels
except:
labels=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',
'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'space', 'numbers', 'capletters',
'cc', 'cd', 'dt', 'ex', 'in', 'jj', 'jjr', 'jjs', 'ls', 'md', 'nn', 'nnp', 'nns', 'pdt',
'pos', 'prp', 'prp2', 'rbr', 'rbs', 'rp', 'to', 'uh', 'vb', 'vbd', 'vbg', 'vbn',
'vbp', 'vbz', 'wdt', 'wp', 'wrb', 'polarity', 'subjectivity', 'repeat',
'uniquewords', 'n_sents', 'n_words', 'n_chars', 'n_syllables', 'n_unique_words',
'n_long_words', 'n_monosyllable_words', 'n_polysyllable_words', 'flesch_kincaid_grade_level',
'flesch_kincaid_grade_level', 'flesch_reading_ease', 'smog_index', 'gunning_fog_index', 'coleman_liau_index',
'automated_readability_index', 'lix', 'gulpease_index', 'wiener_sachtextformel', 'PROPN', 'ADP',
'DET', 'NUM', 'PUNCT', 'SPACE', 'VERB', 'NOUN', 'ADV', 'CCONJ', 'PRON', 'ADJ', 'SYM', 'PART',
'INTJ', 'X', 'pos_other', 'NNP', 'IN', 'DT', 'CD', 'NNPS', ',', '_SP', 'VBZ', 'NN', 'RB', 'CC',
'', 'NNS', '.', 'PRP', 'MD', 'VB', 'HYPH', 'VBD', 'JJ', ':', '-LRB-', '$', '-RRB-', 'VBG',
'VBN', 'NFP', 'RBR', 'POS', 'VBP', 'RP', 'JJS', 'PRP$', 'EX', 'JJR', 'WP', 'WDT', 'TO', 'WRB',
"''", '``', 'PDT', 'AFX', 'RBS', 'UH', 'WP$', 'FW', 'XX', 'SYM', 'LS', 'ADD', 'tag_other',
'compound', 'ROOT', 'prep', 'det', 'pobj', 'nummod', 'punct', '', 'nsubj', 'advmod', 'cc',
'conj', 'aux', 'dobj', 'nmod', 'acl', 'appos', 'npadvmod', 'amod', 'agent', 'case', 'intj',
'prt', 'pcomp', 'ccomp', 'attr', 'dep', 'acomp', 'poss', 'auxpass', 'expl', 'mark', 'nsubjpass',
'quantmod', 'advcl', 'relcl', 'oprd', 'neg', 'xcomp', 'csubj', 'predet', 'parataxis', 'dative',
'preconj', 'csubjpass', 'meta', 'dep_other', '\ufeffXxx', 'Xxxxx', 'XXxxx', 'xx', 'X', 'Xxxx', 'Xxx',
',', '\n\n', 'xXxxx', 'xxx', 'xxxx', '\n', '.', ' ', '-', 'xxx.xxxx.xxx', '\n\n\n', ':', '\n ',
'dddd', '[', '#', 'dd', ']', 'd', 'XXX-d', '*', 'XXXX', 'XX', 'XXX', '\n\n\n\n', 'Xx', '\n\n\n ',
'--', '\n\n ', ' ', ' ', ' ', "'x", 'x', 'X.', 'xxx--', ';', 'Xxx.', '(', ')', "'", '“', '”',
'Xx.', '!', "'xx", 'xx!--Xxx', "x'xxxx", '?', '_', "x'x", "x'xx", "Xxx'xxxx", 'Xxxxx--', 'xxxx--',
'--xxxx', 'X--', 'xx--', 'xxxx”--xxx', 'xxx--“xxxx', "Xxx'x", ';--', 'xxx--_xxx', "xxx'x", 'xxx!--xxxx',
'xxxx?--_Xxx', "Xxxxx'x", 'xxxx--“xxxx', "xxxx'xxx", '--Xxxxx', ',--', '?--', 'xx--“xx', 'xx!--X',
'.--', 'xxx--“xxx', ':--', 'Xxxxx--“xxxx', 'xxxx!--xxxx', 'xx”--xxx', 'xxxx--_xxx', 'xxxx--“xxx',
'--xx', '--X', 'xxxx!--Xxx', '--xxx', 'xxx_.', 'xxxx--_xx', 'xxxx--_xx_xxxx', 'xx!--xxxx',
'xxxx!--xx', "X'xx", "xxxx'x", "X_'x", "xxx'xxx", '--Xxxx', "X'Xxxxx", "Xx'xxxx", '--Xxx',
'xxxx”--xxxx', 'xxxx!--', 'xxxx--“x', 'Xxxx!--Xxxx', 'xxx!--Xxx', 'Xxxxx.', 'xxxx_.',
'xx--“Xxxx', '\n\n ', 'Xxxxx”--xxx', 'xxxx”--xx', 'xxxx--“xx', "Xxxxx!--Xxx'x", "X'xxxx",
'Xxxxx?--', '--Xx', 'xxxx!”--Xx', "xxxx--“X'x", "xxxx'", 'xxx.--“Xxxx', 'xxxx--“X', 'xxxx!--X',
'Xxx”--xx', 'xxx”--xxx', 'xxx-_xxx', "x'Xxxxx", 'Xxxxx!--X', 'Xxxxx!--Xxx', 'dd-d.xxx',
'xxxx://xxx.xxxx.xxx/d/dd/', 'xXxxxx', 'xxxx://xxxx.xxx/xxxx', 'd.X.', '/', 'd.X.d',
'd.X', '%', 'Xd', 'xxxx://xxx.xxxx.xxx', 'ddd(x)(d', 'X.X.', 'ddd', 'xxxx@xxxx.xxx',
'xxxx://xxxx.xxx', '$', 'd,ddd', 'shape_other', 'mean sentence polarity', 'std sentence polarity',
'max sentence polarity', 'min sentence polarity', 'median sentence polarity', 'mean sentence subjectivity',
'std sentence subjectivity', 'max sentence subjectivity', 'min sentence subjectivity', 'median sentence subjectivity',
'character count', 'word count', 'sentence number', 'words per sentence', 'unique chunk noun text',
'unique chunk root text', 'unique chunk root head text', 'chunkdep ROOT', 'chunkdep pobj',
'chunkdep nsubj', 'chunkdep dobj', 'chunkdep conj', 'chunkdep appos', 'chunkdep attr',
'chunkdep nsubjpass', 'chunkdep dative', 'chunkdep pcomp', 'number of named entities',
'PERSON', 'NORP', 'FAC', 'ORG', 'GPE', 'LOC', 'PRODUCT', 'EVENT', 'WORK_OF_ART', 'LAW', 'LANGUAGE',
'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL', 'filler ratio', 'type token ratio',
'standardized word entropy', 'question ratio', 'number ratio', 'Brunets Index', 'Honores statistic',
'datewords freq', 'word number', 'five word count', 'max word length', 'min word length', 'variance of vocabulary',
'std of vocabulary', 'sentencenum', 'periods', 'questions', 'interjections', 'repeatavg']
features=list(np.zeros(len(labels)))
return features, labels | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/featurize.py | features/audio_features/featurize.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
Featurize folders of audio files with the default_audio_features.
Usage: python3 featurize.py [folder] [featuretype]
All featuretype options include:
["audioset_features", "audiotext_features", "librosa_features", "meta_features",
"mixed_features", "opensmile_features", "pause_features, "praat_features", "prosody_features",
"pspeech_features", "pyaudio_features", "pyaudiolex_features", "sa_features",
"sox_features", "specimage_features", "specimage2_features", "spectrogram_features",
"speechmetrics_features", "standard_features"]
Read more @ https://github.com/jim-schwoebel/allie/tree/master/features/audio_features
'''
################################################
## IMPORT STATEMENTS ##
################################################
import json, os, sys, time, random
import numpy as np
import helpers.transcribe as ts
import speech_recognition as sr
from tqdm import tqdm
from typing import Tuple
import wave
def read_wav_file(filename) -> Tuple[bytes, int]:
# from transcription docs:
# --> https://colab.research.google.com/github/scgupta/yearn2learn/blob/master/speech/asr/python_speech_recognition_notebook.ipynb#scrollTo=Ujeuvj35Ksv8
with wave.open(filename, 'rb') as w:
rate = w.getframerate()
frames = w.getnframes()
buffer = w.readframes(frames)
return buffer, rate
def simulate_stream(buffer: bytes, batch_size: int = 4096):
buffer_len = len(buffer)
offset = 0
while offset < buffer_len:
end_offset = offset + batch_size
buf = buffer[offset:end_offset]
yield buf
offset = end_offset
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
# import to get image feature script
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir)
from standard_array import make_features
os.chdir(directory)
################################################
## Helper functions ##
################################################
def transcribe(file, default_audio_transcriber, settingsdir, tokenizer, wav_model, processor, hubert_model, whisper_model):
# create all transcription methods here
print('%s transcribing: %s'%(default_audio_transcriber, file))
# use the audio file as the audio source
r = sr.Recognizer()
transcript_engine = default_audio_transcriber
with sr.AudioFile(file) as source:
audio = r.record(source) # read the entire audio file
if transcript_engine == 'pocketsphinx':
# recognize speech using Sphinx
try:
transcript= r.recognize_sphinx(audio)
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
elif transcript_engine == "whisper":
result = whisper_model.transcribe(file)
transcript=result['text']
elif transcript_engine == 'deepspeech_nodict':
curdir=os.getcwd()
os.chdir(settingsdir+'/features/audio_features/helpers')
listdir=os.listdir()
deepspeech_dir=os.getcwd()
# download models if not in helper directory
if 'deepspeech-0.7.0-models.pbmm' not in listdir:
os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm --no-check-certificate')
# initialize filenames
textfile=file[0:-4]+'.txt'
newaudio=file[0:-4]+'_newaudio.wav'
if deepspeech_dir.endswith('/'):
deepspeech_dir=deepspeech_dir[0:-1]
# go back to main directory
os.chdir(curdir)
# convert audio file to 16000 Hz mono audio
os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --audio "%s" >> "%s"'%(deepspeech_dir, newaudio, textfile)
print(command)
os.system(command)
# get transcript
transcript=open(textfile).read().replace('\n','')
# remove temporary files
os.remove(textfile)
os.remove(newaudio)
elif transcript_engine == 'deepspeech_dict':
curdir=os.getcwd()
os.chdir(settingsdir+'/features/audio_features/helpers')
listdir=os.listdir()
deepspeech_dir=os.getcwd()
# download models if not in helper directory
if 'deepspeech-0.7.0-models.pbmm' not in listdir:
os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.pbmm --no-check-certificate')
if 'deepspeech-0.7.0-models.scorer' not in listdir:
os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.0/deepspeech-0.7.0-models.scorer --no-check-certificate')
# initialize filenames
textfile=file[0:-4]+'.txt'
newaudio=file[0:-4]+'_newaudio.wav'
if deepspeech_dir.endswith('/'):
deepspeech_dir=deepspeech_dir[0:-1]
# go back to main directory
os.chdir(curdir)
# convert audio file to 16000 Hz mono audio
os.system('ffmpeg -i "%s" -acodec pcm_s16le -ac 1 -ar 16000 "%s" -y'%(file, newaudio))
command='deepspeech --model %s/deepspeech-0.7.0-models.pbmm --scorer %s/deepspeech-0.7.0-models.scorer --audio "%s" >> "%s"'%(deepspeech_dir, deepspeech_dir, newaudio, textfile)
print(command)
os.system(command)
# get transcript
transcript=open(textfile).read().replace('\n','')
# remove temporary files
os.remove(textfile)
os.remove(newaudio)
elif transcript_engine == 'wav2vec':
# load pretrained model
audio_input, _ = sf.read(file)
# transcribe
input_values = tokenizer(audio_input, return_tensors="pt").input_values
logits = wav_model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcript = tokenizer.batch_decode(predicted_ids)[0].lower()
elif transcript_engine == 'hubert':
audio_input, _ = sf.read(file)
input_values = processor(audio_input, return_tensors="pt", sampling_rate=16000).input_values # Batch size 1
logits = hubert_model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
transcript = processor.decode(predicted_ids[0]).lower()
elif transcript_engine == 'google':
# recognize speech using Google Speech Recognition
# for testing purposes, we're just using the default API key
# to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
# instead of `r.recognize_google(audio)`
# recognize speech using Google Cloud Speech
GOOGLE_CLOUD_SPEECH_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
print(GOOGLE_CLOUD_SPEECH_CREDENTIALS)
try:
transcript=r.recognize_google_cloud(audio, credentials_json=open(GOOGLE_CLOUD_SPEECH_CREDENTIALS).read())
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
elif transcript_engine == 'wit':
# recognize speech using Wit.ai
WIT_AI_KEY = os.environ['WIT_AI_KEY']
try:
transcript=r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
elif transcript_engine == 'azure':
# https://colab.research.google.com/github/scgupta/yearn2learn/blob/master/speech/asr/python_speech_recognition_notebook.ipynb#scrollTo=IzfBW4kczY9l
"""performs continuous speech recognition with input from an audio file"""
# <SpeechContinuousRecognitionWithFile>
transcript=''
done=False
def stop_cb(evt):
print('CLOSING on {}'.format(evt))
nonlocal done
done = True
def get_val(evt):
nonlocal transcript
transcript = transcript+ ' ' +evt.result.text
return transcript
speech_config = speechsdk.SpeechConfig(subscription=os.environ['AZURE_SPEECH_KEY'], region=os.environ['AZURE_REGION'])
speech_config.speech_recognition_language=os.environ['AZURE_SPEECH_RECOGNITION_LANGUAGE']
audio_config = speechsdk.audio.AudioConfig(filename=file)
speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config)
stream = speechsdk.audio.PushAudioInputStream()
# Connect callbacks to the events fired by the speech recognizer
speech_recognizer.recognizing.connect(lambda evt: print('interim text: "{}"'.format(evt.result.text)))
speech_recognizer.recognized.connect(lambda evt: print('azure-streaming-stt: "{}"'.format(get_val(evt))))
speech_recognizer.session_stopped.connect(lambda evt: print('SESSION STOPPED {}'.format(evt)))
speech_recognizer.canceled.connect(lambda evt: print('CANCELED {}'.format(evt)))
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# start continuous speech recognition
speech_recognizer.start_continuous_recognition()
# push buffer chunks to stream
buffer, rate = read_wav_file(file)
audio_generator = simulate_stream(buffer)
for chunk in audio_generator:
stream.write(chunk)
time.sleep(0.1) # to give callback a chance against this fast loop
# stop continuous speech recognition
stream.close()
while not done:
time.sleep(0.5)
speech_recognizer.stop_continuous_recognition()
time.sleep(0.5) # Let all callback run
elif transcript_engine == 'bing':
# recognize speech using Microsoft Bing Voice Recognition
BING_KEY = os.environ['BING_KEY']
try:
transcript=r.recognize_bing(audio, key=BING_KEY)
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
elif transcript_engine == 'houndify':
# recognize speech using Houndify
HOUNDIFY_CLIENT_ID = os.environ['HOUNDIFY_CLIENT_ID']
HOUNDIFY_CLIENT_KEY = os.environ['HOUNDIFY_CLIENT_KEY']
try:
transcript=r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID, client_key=HOUNDIFY_CLIENT_KEY)
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
elif transcript_engine == 'ibm':
# recognize speech using IBM Speech to Text
IBM_USERNAME = os.environ['IBM_USERNAME']
IBM_PASSWORD = os.environ['IBM_PASSWORD']
try:
transcript=r.recognize_ibm(audio, username=IBM_USERNAME, password=IBM_PASSWORD)
except sr.UnknownValueError:
transcript=''
except sr.RequestError as e:
transcript=''
else:
print('no transcription engine specified')
transcript=''
# show transcript
print(transcript_engine.upper())
print('--> '+ transcript)
return transcript
def audio_featurize(feature_set, audiofile, transcript, hubert_processor, hubert_model):
    """Featurize one audio file with the requested feature set.

    Dispatches to the feature-extraction module matching *feature_set*; each
    module is imported conditionally earlier in this script, so only feature
    sets listed in settings.json are callable. Also relies on the script-level
    globals basedir, foldername, cur_dir, help_dir and haar_dir.

    Returns (features list, labels list). NaN/inf values in the features are
    replaced with finite numbers before returning. Unknown feature sets (and
    the currently-disabled 'myprosody_features') return empty lists.
    """
    # bug fix: previously `features`/`labels` were left unbound when no branch
    # assigned them (e.g. 'myprosody_features' or an unknown set), raising
    # UnboundLocalError at the nan_to_num call below.
    features = []
    labels = []
    # long conditional on all the types of features that can happen and featurizes accordingly.
    if feature_set == 'allosaurus_features':
        features, labels = allosaurus_features.allosaurus_featurize(audiofile)
    elif feature_set == 'audioset_features':
        features, labels = audioset_features.audioset_featurize(audiofile, basedir, foldername)
    elif feature_set == 'audiotext_features':
        features, labels = audiotext_features.audiotext_featurize(audiofile, transcript)
    elif feature_set == 'hubert_features':
        features, labels = hubert_features.hubert_featurize(audiofile, hubert_model, hubert_processor, 500)
    elif feature_set == 'librosa_features':
        features, labels = librosa_features.librosa_featurize(audiofile, False)
    elif feature_set == 'loudness_features':
        features, labels = loudness_features.loudness_featurize(audiofile)
    elif feature_set == 'meta_features':
        features, labels = meta_features.meta_featurize(audiofile, cur_dir, help_dir)
    elif feature_set == 'mixed_features':
        features, labels = mixed_features.mixed_featurize(audiofile, transcript, help_dir)
    elif feature_set == 'myprosody_features':
        # myprosody support is disabled while it is being debugged; returns empty features
        print('Myprosody features are coming soon!! Currently debugging this feature set.')
        # features, labels = myprosody_features.myprosody_featurize(audiofile, cur_dir, help_dir)
    elif feature_set == 'multispeaker_features':
        features, labels = multispeaker_features.multispeaker_featurize(audiofile)
    elif feature_set == 'nltk_features':
        # text-based features: operates on the transcript, not the audio
        features, labels = nltk_features.nltk_featurize(transcript)
    elif feature_set == 'opensmile_features':
        features, labels = opensmile_features.opensmile_featurize(audiofile, basedir, 'GeMAPSv01a.conf')
    elif feature_set == 'pause_features':
        features, labels = pause_features.pause_featurize(audiofile, transcript)
    elif feature_set == 'praat_features':
        features, labels = praat_features.praat_featurize(audiofile)
    elif feature_set == 'prosody_features':
        features, labels = prosody_features.prosody_featurize(audiofile, 20)
    elif feature_set == 'pspeech_features':
        features, labels = pspeech_features.pspeech_featurize(audiofile)
    elif feature_set == 'pspeechtime_features':
        features, labels = pspeechtime_features.pspeech_featurize(audiofile)
    elif feature_set == 'pyaudio_features':
        features, labels = pyaudio_features.pyaudio_featurize(audiofile, basedir)
    elif feature_set == 'pyaudiolex_features':
        features, labels = pyaudiolex_features.pyaudiolex_featurize(audiofile)
    elif feature_set == 'pyworld_features':
        features, labels = pyworld_features.pyworld_featurize(audiofile)
    elif feature_set == 'sa_features':
        features, labels = sa_features.sa_featurize(audiofile)
    elif feature_set == 'sox_features':
        features, labels = sox_features.sox_featurize(audiofile)
    elif feature_set == 'specimage_features':
        features, labels = specimage_features.specimage_featurize(audiofile, cur_dir, haar_dir)
    elif feature_set == 'specimage2_features':
        features, labels = specimage2_features.specimage2_featurize(audiofile, cur_dir, haar_dir)
    elif feature_set == 'spectrogram_features':
        features, labels = spectrogram_features.spectrogram_featurize(audiofile)
    elif feature_set == 'speechmetrics_features':
        features, labels = speechmetrics_features.speechmetrics_featurize(audiofile)
    elif feature_set == 'standard_features':
        features, labels = standard_features.standard_featurize(audiofile)
    elif feature_set == 'surfboard_features':
        features, labels = surfboard_features.surfboard_featurize(audiofile, help_dir)
    elif feature_set == 'voxceleb_features':
        features, labels = voxceleb_features.voxceleb_featurize(audiofile)
    elif feature_set == 'yamnet_features':
        features, labels = yamnet_features.yamnet_featurize(audiofile, help_dir)
    # make sure all the features do not have any infinity or NaN
    features = np.nan_to_num(np.array(features))
    features = features.tolist()
    return features, labels
################################################
##            Load main settings              ##
################################################
# Script bootstrap: resolve the repo root two directories up, load the shared
# settings.json that drives transcription/featurization, then return to the
# starting directory.
# directory=sys.argv[1]
basedir=os.getcwd()
# NOTE(review): prev_dir is defined elsewhere in this script; presumably it
# returns the parent directory of the given path -- confirm.
settingsdir=prev_dir(basedir)
settingsdir=prev_dir(settingsdir)
settings=json.load(open(settingsdir+'/settings.json'))
os.chdir(basedir)
audio_transcribe=settings['transcribe_audio']
default_audio_transcribers=settings['default_audio_transcriber']
try:
    # assume 1 type of feature_set
    feature_sets=[sys.argv[2]]
except:
    # if none provided in command line, then load default features
    feature_sets=settings['default_audio_features']
################################################
##       Import According to settings         ##
################################################
# only load the relevant featuresets for featurization to save memory
if 'allosaurus_features' in feature_sets:
    import allosaurus_features
if 'audioset_features' in feature_sets:
    import audioset_features
if 'audiotext_features' in feature_sets:
    import audiotext_features
if 'hubert_features' in feature_sets:
    import hubert_features
    import torch
    from transformers import HubertModel, HubertConfig
    from transformers import Wav2Vec2Processor, HubertForCTC
    # HuBERT checkpoint doubles for featurization and (optionally) transcription
    import soundfile as sf
    hubert_processor_ = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
    hubert_model_ = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
else:
    # placeholders so the names always exist when passed to transcribe()/audio_featurize()
    hubert_model_ = ''
    hubert_processor_ =''
if 'librosa_features' in feature_sets:
    import librosa_features
if 'loudness_features' in feature_sets:
    import loudness_features
if 'meta_features' in feature_sets:
    import meta_features
    # meta models were pickled under an older scikit-learn; pin it for loading
    # (restored to 0.22.2.post1 at the bottom of this script)
    os.system('pip3 install scikit-learn==0.19.1')
if 'mixed_features' in feature_sets:
    import mixed_features
if 'multispeaker_features' in feature_sets:
    import multispeaker_features
if 'myprosody_features' in feature_sets:
    # myprosody support is currently disabled (still being debugged)
    pass
    # import myprosody_features as mpf
if 'opensmile_features' in feature_sets:
    import opensmile_features
if 'pause_features' in feature_sets:
    import pause_features
if 'pyaudio_features' in feature_sets:
    import pyaudio_features
if 'pyaudiolex_features' in feature_sets:
    import pyaudiolex_features
if 'praat_features' in feature_sets:
    import praat_features
if 'prosody_features' in feature_sets:
    import prosody_features
if 'pspeech_features' in feature_sets:
    import pspeech_features
if 'pspeechtime_features' in feature_sets:
    import pspeechtime_features
if 'pyworld_features' in feature_sets:
    import pyworld_features
if 'sa_features' in feature_sets:
    import sa_features
if 'sox_features' in feature_sets:
    import sox_features
if 'specimage_features' in feature_sets:
    import specimage_features
if 'specimage2_features' in feature_sets:
    import specimage2_features
if 'spectrogram_features' in feature_sets:
    import spectrogram_features
if 'speechmetrics_features' in feature_sets:
    import speechmetrics_features
if 'standard_features' in feature_sets:
    import standard_features
if 'surfboard_features' in feature_sets:
    import surfboard_features
if 'voxceleb_features' in feature_sets:
    import voxceleb_features
if 'yamnet_features' in feature_sets:
    import yamnet_features
# transcription imports
if 'azure' in default_audio_transcribers:
    import azure.cognitiveservices.speech as speechsdk
if 'wav2vec' in default_audio_transcribers:
    import os, pandas as pd, soundfile as sf, torch, glob
    from pathlib import Path
    from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer
    tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
    wav_model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-base-960h")
else:
    # placeholders so transcribe() can always be handed these names
    tokenizer=''
    wav_model=''
# HuBERT transcription models: reuse the checkpoint already loaded above when
# 'hubert_features' is being extracted; otherwise load it only if the hubert
# transcriber was requested in settings.
if 'hubert' in default_audio_transcribers and 'hubert_features' not in feature_sets:
    import torch
    from transformers import HubertModel, HubertConfig
    from transformers import Wav2Vec2Processor, HubertForCTC
    import soundfile as sf
    # Hubert transcript
    hubert_processor_ = Wav2Vec2Processor.from_pretrained("facebook/hubert-large-ls960-ft")
    hubert_model_ = HubertForCTC.from_pretrained("facebook/hubert-large-ls960-ft")
elif 'hubert_features' not in feature_sets:
    # bug fix: the original else-branch also ran when the models had already
    # been loaded for hubert_features, clobbering them with empty strings.
    hubert_processor_=''
    hubert_model_=''
# bug fix: whisper was chained as an elif of the hubert condition, so taking
# the hubert branch left whisper_model_ undefined (NameError inside
# transcribe()). The whisper model is independent and gets its own if/else.
if 'whisper' in default_audio_transcribers:
    import whisper
    whisper_model_ = whisper.load_model("medium")
else:
    whisper_model_=''
################################################
##          Get featurization folder          ##
################################################
# The folder to featurize is the first CLI argument; files are processed in
# random order (helps when several workers run on the same folder).
foldername=sys.argv[1]
os.chdir(foldername)
listdir=os.listdir()
random.shuffle(listdir)
cur_dir=os.getcwd()
help_dir=basedir+'/helpers/'
# get class label from folder name
labelname=foldername.split('/')
if labelname[-1]=='':
    # trailing slash: the label is the second-to-last path component
    labelname=labelname[-2]
else:
    labelname=labelname[-1]
################################################
##              Now go featurize!             ##
################################################
# Main loop: for every audio file in the target folder, create or update the
# sibling .json sample (same filename stem) with transcripts and feature
# embeddings according to the settings loaded above.
for i in tqdm(range(len(listdir)), desc=labelname):
    if listdir[i][-4:] in ['.wav', '.mp3', '.m4a']:
        filename=listdir[i]
        if listdir[i][-4:]=='.m4a':
            # convert .m4a to .wav with ffmpeg and drop the original
            os.system('ffmpeg -i %s %s'%(listdir[i], listdir[i][0:-4]+'.wav'))
            filename=listdir[i][0:-4]+'.wav'
            os.remove(listdir[i])
        try:
            os.chdir(foldername)
            sampletype='audio'
            if listdir[i][0:-4]+'.json' not in listdir:
                # make new .JSON if it is not there with base array schema.
                # NOTE(review): make_features() is defined elsewhere in this
                # script; presumably it returns the empty Allie sample schema.
                basearray=make_features(sampletype)
                # get the first audio transcriber and loop through transcript list
                if audio_transcribe==True:
                    for j in range(len(default_audio_transcribers)):
                        default_audio_transcriber=default_audio_transcribers[j]
                        transcript = transcribe(filename, default_audio_transcriber, settingsdir, tokenizer, wav_model, hubert_processor_, hubert_model_, whisper_model_)
                        transcript_list=basearray['transcripts']
                        transcript_list['audio'][default_audio_transcriber]=transcript
                        basearray['transcripts']=transcript_list
                else:
                    transcript=''
                # featurize the audio file
                for j in range(len(feature_sets)):
                    feature_set=feature_sets[j]
                    features, labels = audio_featurize(feature_set, filename, transcript, hubert_processor_, hubert_model_)
                    # features may be an ndarray or a plain list; tolist() only
                    # exists on ndarrays, hence the fallback branch
                    try:
                        data={'features':features.tolist(),
                        'labels': labels}
                    except:
                        data={'features':features,
                        'labels': labels}
                    # print(features)
                    audio_features=basearray['features']['audio']
                    audio_features[feature_set]=data
                    basearray['features']['audio']=audio_features
                    basearray['labels']=[labelname]
                # write to .JSON
                jsonfile=open(listdir[i][0:-4]+'.json','w')
                json.dump(basearray, jsonfile)
                jsonfile.close()
            elif listdir[i][0:-4]+'.json' in listdir:
                # load the .JSON file if it is there
                basearray=json.load(open(listdir[i][0:-4]+'.json'))
                transcript_list=basearray['transcripts']
                # only transcribe if you need to (checks within schema)
                if audio_transcribe==True:
                    for j in range(len(default_audio_transcribers)):
                        # get the first audio transcriber and loop through transcript list
                        default_audio_transcriber=default_audio_transcribers[j]
                        if audio_transcribe==True and default_audio_transcriber not in list(transcript_list['audio']):
                            transcript = transcribe(filename, default_audio_transcriber, settingsdir, tokenizer, wav_model, hubert_processor_, hubert_model_, whisper_model_)
                            transcript_list['audio'][default_audio_transcriber]=transcript
                            basearray['transcripts']=transcript_list
                        elif audio_transcribe==True and default_audio_transcriber in list(transcript_list['audio']):
                            # transcript already cached in the schema; reuse it
                            transcript = transcript_list['audio'][default_audio_transcriber]
                        else:
                            transcript=''
                else:
                    transcript=''
                # only re-featurize if necessary (checks if relevant feature embedding exists)
                for j in range(len(feature_sets)):
                    feature_set=feature_sets[j]
                    if feature_set not in list(basearray['features']['audio']):
                        features, labels = audio_featurize(feature_set, filename, transcript, hubert_processor_, hubert_model_)
                        # print(features)
                        try:
                            data={'features':features.tolist(),
                            'labels': labels}
                        except:
                            data={'features':features,
                            'labels': labels}
                        basearray['features']['audio'][feature_set]=data
                # only add the label if necessary
                label_list=basearray['labels']
                if labelname not in label_list:
                    label_list.append(labelname)
                basearray['labels']=label_list
                transcript_list=basearray['transcripts']
                # overwrite .JSON
                jsonfile=open(listdir[i][0:-4]+'.json','w')
                json.dump(basearray, jsonfile)
                jsonfile.close()
        except:
            # NOTE(review): bare except hides the real failure for this file;
            # consider logging the exception and filename here.
            print('error')
# now reload the old scikit-learn
# (meta_features pinned scikit-learn==0.19.1 above to unpickle its models)
if 'meta_features' in feature_sets:
    import meta_features as mf
    os.system('pip3 install scikit-learn==0.22.2.post1')
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/voxceleb_features.py | features/audio_features/voxceleb_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['voxceleb_features']
This helps to detect speaker identities.
'''
import os
import torchaudio
import numpy as np
from speechbrain.pretrained import EncoderClassifier

# Load the pretrained ECAPA-TDNN VoxCeleb speaker-embedding model once at
# import time so voxceleb_featurize() can reuse it across files.
classifier = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb")
def remove_empty_strings(input_list):
    """Remove all empty-string entries from *input_list* in place.

    The list is mutated (callers relying on in-place removal keep working)
    and also returned for convenience.
    """
    # Single O(n) pass; the original `while '' in ...: remove('')` was O(n^2).
    # Slice assignment keeps the mutation in place.
    input_list[:] = [item for item in input_list if item != '']
    return input_list
def detect_speaker(wavfile_1: str, wavfile_2: str):
    """Verify whether two wav files contain the same speaker.

    Uses SpeechBrain's pretrained ECAPA VoxCeleb speaker verifier and returns
    its (score, prediction) pair.
    """
    # bug fix: the original referenced SpeakerRecognition without importing it
    # anywhere in this module, so every call raised NameError. It lives in the
    # same package this module already imports EncoderClassifier from.
    from speechbrain.pretrained import SpeakerRecognition
    verification = SpeakerRecognition.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", savedir="pretrained_models/spkrec-ecapa-voxceleb")
    score, prediction = verification.verify_files(wavfile_1, wavfile_2)
    print(score)
    print(prediction)
    return score, prediction
def voxceleb_featurize(wavfile, classifier=classifier):
    """Embed *wavfile* with the module-level VoxCeleb ECAPA encoder.

    Returns the flattened speaker embedding together with one
    'voxceleb_<i>' label per embedding dimension.
    """
    signal, sample_rate = torchaudio.load(wavfile)
    features = classifier.encode_batch(signal).flatten()
    labels = ['voxceleb_' + str(idx) for idx in range(len(features))]
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/meta_features.py | features/audio_features/meta_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['meta_features']
Extracts meta features from models trained on the audioset dataset. This is useful for
detecting a wide array of acoustic events.
For more information, check out https://github.com/jim-schwoebel/audioset_models
'''
import numpy as np
import librosa, pickle, time
from pydub import AudioSegment
import os, random, json
def featurize(wavfile):
    """Extract 104 MFCC summary statistics from *wavfile*.

    For each of the 13 MFCC coefficients and each of the 13 delta-MFCC
    coefficients, records [mean, std, min, max] across frames, giving a
    26 * 4 = 104 element feature vector.
    """
    # initialize features
    hop_length = 512
    n_fft = 2048
    # load file
    y, sr = librosa.load(wavfile)
    # extract mfcc coefficients and their deltas
    mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=hop_length, n_mfcc=13)
    mfcc_delta = librosa.feature.delta(mfcc)
    # same ordering as before: all 13 MFCC rows first, then the 13 delta rows,
    # with [mean, std, min, max] recorded per row
    summary = []
    for row in list(mfcc) + list(mfcc_delta):
        summary.extend([np.mean(row), np.std(row), np.amin(row), np.amax(row)])
    return np.array(summary)
def exportfile(newAudio, time1, time2, filename, i):
    """Write the [time1:time2] slice of *newAudio* to a wav file in the cwd.

    Uses '<i>_segment.wav' when '<stem>_<i>.wav' already exists in the
    directory, '<i>.wav' otherwise; returns the name written.
    """
    segment = newAudio[time1:time2]
    existing = os.listdir()
    if filename[0:-4] + '_' + str(i) + '.wav' in existing:
        outname = str(i) + '_segment' + '.wav'
    else:
        outname = str(i) + '.wav'
    print('making %s' % (outname))
    segment.export(outname, format="wav")
    return outname
def audio_time_features(filename):
    """Average 104 MFCC summary stats over ~0.5 s slices of *filename*.

    The file is cut into round(duration / 0.5) equal segments; each segment
    is written to a temporary wav, featurized with featurize(), and deleted.
    Returns the per-segment feature vectors summed and divided by the segment
    count (i.e. the mean across segments).
    """
    # recommend >0.50 seconds for timesplit
    timesplit = 0.50
    hop_length = 512
    n_fft = 2048
    y, sr = librosa.load(filename)
    duration = float(librosa.core.get_duration(y))
    # Now splice the audio signal into equal segments: collect the start time
    # of each segment in milliseconds
    segnum = round(duration / timesplit)
    deltat = duration / segnum
    timesegment = list()
    time = 0
    for i in range(segnum):
        # milliseconds
        timesegment.append(time)
        time = time + deltat * 1000
    newAudio = AudioSegment.from_wav(filename)
    filelist = list()
    for i in range(len(timesegment) - 1):
        filename = exportfile(newAudio, timesegment[i], timesegment[i + 1], filename, i)
        filelist.append(filename)
    # accumulator matching featurize()'s 104-element output
    featureslist = np.zeros(104)
    # featurize each temporary segment in the current folder, then delete it
    for j in range(len(filelist)):
        try:
            # bug fix: the original indexed filelist[i] (the stale loop
            # variable left over from the export loop above), so every
            # iteration re-featurized the same last segment; index with j.
            features = featurize(filelist[j])
            featureslist = featureslist + features
            os.remove(filelist[j])
        except Exception:
            # bug fix: the original called featureslist.append('silence'),
            # which does not exist on an ndarray and raised a second error
            # inside the handler; just skip the unreadable segment.
            print('error splicing')
            try:
                os.remove(filelist[j])
            except OSError:
                pass
    # now scale the featureslist array by the length to get mean in each category
    featureslist = featureslist / segnum
    return featureslist
def convert(file):
    """Ensure *file* is a .wav file, converting via ffmpeg if necessary.

    Non-wav inputs are converted (dropping any video stream with -an) and the
    original file is deleted. Returns the name of the wav file to use.
    """
    if file[-4:] != '.wav':
        filename = file[0:-4] + '.wav'
        os.system('ffmpeg -i %s -an %s' % (file, filename))
        os.remove(file)
    else:
        filename = file
    # note: the original had a third, unreachable branch (the two conditions
    # above cover every input) that deleted the file; removed as dead code.
    return filename
def meta_featurize(filename, file_dir, help_dir):
    """Run every pickled model in <help_dir>/models on *filename*'s MFCC stats.

    Each model's string prediction becomes a label; the corresponding feature
    value is 0 when the predicted class name contains 'controls' (at a
    position > 0, matching the original find() logic) and 1 otherwise.
    Returns (features ndarray, labels list) and leaves the process cwd at
    *file_dir*.
    """
    # collect the available pickled models
    os.chdir(help_dir)
    model_dir = os.getcwd() + '/models'
    os.chdir(model_dir)
    model_list = [entry for entry in os.listdir() if entry[-7:] == '.pickle']
    # featurize the audio file from its own directory
    os.chdir(file_dir)
    features = featurize(filename)
    features = features.reshape(1, -1)
    # run each model; record 'error' for any model that fails to load/predict
    os.chdir(model_dir)
    class_list = list()
    for modelname in model_list:
        try:
            # with-block fixes the original's leaked file handle on error
            with open(modelname, 'rb') as loadmodel:
                model = pickle.load(loadmodel)
            class_list.append(str(model.predict(features)[0]))
        except Exception:
            class_list.append('error')
    # binarize: predicted class mentioning 'controls' -> 0, otherwise 1
    # (find(...) > 0 means a name *starting* with 'controls' still maps to 1,
    # preserving the original behavior)
    features = np.array([0 if c.find('controls') > 0 else 1 for c in class_list])
    labels = class_list
    os.chdir(file_dir)
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/pyworld_features.py | features/audio_features/pyworld_features.py | '''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This will featurize folders of audio files if the default_audio_features = ['pyworld_features']
Extracts a range of pitch-based features.
For more information, check out the documentation: https://github.com/JeremyCCHsu/Python-Wrapper-for-World-Vocoder
'''
import os, sys
import scipy.io.wavfile as wav
import numpy as np
# adding this in because some installations may not have pyworld features installed
try:
import pyworld as pw
except:
os.system('pip3 install pyworld==0.2.10')
import pyworld as pw
import numpy as np
# get statistical features in numpy
def stats(matrix, label):
    """Summary statistics of *matrix* with matching label strings.

    For the 2-D descriptors ('smoothed_spectrogram', 'aperiodicity') the
    statistics are taken per column (axis=0) and labelled per index; for 1-D
    pitch tracks a single scalar is produced per statistic. Returns
    (features ndarray, labels list), ordered mean, std, max, min, median.
    """
    labels=list()
    # extract features
    if label in ['smoothed_spectrogram', 'aperiodicity']:
        mean=np.mean(matrix, axis=0)
        for i in range(len(mean)):
            labels.append(label+'_mean_'+str(i))
        std=np.std(matrix, axis=0)
        for i in range(len(std)):
            labels.append(label+'_std_'+str(i))
        maxv=np.amax(matrix, axis=0)
        for i in range(len(maxv)):
            labels.append(label+'_max_'+str(i))
        minv=np.amin(matrix, axis=0)
        for i in range(len(minv)):
            labels.append(label+'_min_'+str(i))
        median=np.median(matrix, axis=0)
        for i in range(len(median)):
            labels.append(label+'_median_'+str(i))
    else:
        mean=np.mean(matrix)
        std=np.std(matrix)
        maxv=np.amax(matrix)
        minv=np.amin(matrix)
        median=np.median(matrix)
        # bug fix: the original list fused '_max'+label+'_min' into one label
        # and split label, '_median' into two, so the labels did not line up
        # with the five feature values appended below.
        labels=[label+'_mean', label+'_std', label+'_max', label+'_min', label+'_median']
    features=np.append(mean, std)
    features=np.append(features, maxv)
    features=np.append(features, minv)
    features=np.append(features, median)
    return features, labels
def pyworld_featurize(audiofile):
    """Extract WORLD-vocoder pitch/spectral features from *audiofile*.

    Runs pyworld's dio (raw pitch), stonemask (refined pitch), cheaptrick
    (smoothed spectrogram) and d4c (aperiodicity), then summarizes each with
    stats(). Returns (features list, labels list).
    """
    fs, x = wav.read(audiofile)
    # corrects for 2 channel audio by keeping the first channel
    try:
        x = x[:, 0]
    except Exception:
        pass
    # pyworld requires a contiguous float64 signal
    x = np.array(np.ascontiguousarray(x), dtype=np.double)
    _f0, t = pw.dio(x, fs)            # raw pitch extractor
    f0 = pw.stonemask(x, _f0, t, fs)  # pitch refinement
    sp = pw.cheaptrick(x, f0, t, fs)  # extract smoothed spectrogram
    ap = pw.d4c(x, f0, t, fs)         # extract aperiodicity
    features_0, labels_0 = stats(_f0, 'pitch')
    # bug fix: the original summarized _f0 again here, so the
    # 'pitch_refinement' features duplicated the raw pitch instead of using
    # the stonemask-refined track.
    features_1, labels_1 = stats(f0, 'pitch_refinement')
    features_2, labels_2 = stats(sp, 'smoothed_spectrogram')
    features_3, labels_3 = stats(ap, 'aperiodicity')
    features = list(features_0) + list(features_1) + list(features_2) + list(features_3)
    labels = labels_0 + labels_1 + labels_2 + labels_3
    return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/transcribe.py | features/audio_features/helpers/transcribe.py | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## TRANSCRIBE.PY ##
================================================
Overview of how to implement various transcriptions for offline or
online applications.
Note some of these transcription methods require environment variables
to be setup (e.g. Google).
'''
import os, json, time, datetime
import speech_recognition as sr_audio
def sync_record(filename, duration, fs, channels):
    # Record `duration` seconds of audio from the default input device at
    # sample rate `fs` with `channels` channels, blocking until the recording
    # finishes, then write the result to `filename`.
    # NOTE(review): depends on `sd` (sounddevice) and `sf` (soundfile), which
    # are never imported in this module -- calling this as-is raises
    # NameError; confirm the intended imports.
    print('recording')
    myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
    sd.wait()
    sf.write(filename, myrecording, fs)
    print('done recording')
def convert_audio(file):
    """Re-encode *file* to 16 kHz mono 16-bit PCM wav via an ffmpeg shell call.

    Returns the new temporary filename '<stem>_temp.wav'.
    """
    tempname = file[0:-4] + '_temp.wav'
    os.system('ffmpeg -i %s -acodec pcm_s16le -ac 1 -ar 16000 %s' % (file, tempname))
    return tempname
def transcribe_google(file):
    """Transcribe *file* with the Google Cloud Speech API (~$0.024/minute)."""
    recognizer = sr_audio.Recognizer()
    with sr_audio.AudioFile(file) as source:
        audio = recognizer.record(source)
    transcript = recognizer.recognize_google_cloud(audio)
    print('google transcript: ' + transcript)
    return transcript
# transcribe with pocketsphinx (open-source)
def transcribe_sphinx(file):
    """Transcribe *file* offline with CMU PocketSphinx and return the text."""
    recognizer = sr_audio.Recognizer()
    with sr_audio.AudioFile(file) as source:
        audio = recognizer.record(source)
    transcript = recognizer.recognize_sphinx(audio)
    print('sphinx transcript: ' + transcript)
    return transcript
# transcribe with deepspeech (open-source, but can be CPU-intensive)
def transcribe_deepspeech(file):
    """Transcribe `file` with Mozilla DeepSpeech (v0.1.1 model).

    Downloads and unpacks the 1.6 GB model on first use; runs the
    `deepspeech` CLI and reads its stdout from a temp text file.
    """
    # get the deepspeech model installed if you don't already have it (1.6 GB model)
    # can be computationally-intensive, so make sure it works on your CPU
    if 'models' not in os.listdir():
        os.system('brew install wget')
        os.system('pip3 install deepspeech')
        os.system('wget https://github.com/mozilla/DeepSpeech/releases/download/v0.1.1/deepspeech-0.1.1-models.tar.gz')
        os.system('tar -xvzf deepspeech-0.1.1-models.tar.gz')
    # make intermediate text file and fetch transcript
    textfile = file[0:-4] + '.txt'
    command = 'deepspeech models/output_graph.pb %s models/alphabet.txt models/lm.binary models/trie >> %s' % (file, textfile)
    os.system(command)
    # read via context manager so the handle is closed deterministically
    # (the original leaked the file object from open().read())
    with open(textfile) as fh:
        transcript = fh.read()
    print('deepspeech transcript: ' + transcript)
    # remove text file
    os.remove(textfile)
    return transcript
def transcribe_all(file):
    """Run every transcription backend on `file` and dump results to JSON.

    Converts the audio with ffmpeg, tries Google / Sphinx / DeepSpeech
    (each best-effort: a failure yields an empty transcript), deletes the
    temp audio, and writes <name>.json with all three transcripts.
    Returns the JSON file name.
    """
    filename = convert_audio(file)
    # narrowed from bare `except:` so KeyboardInterrupt/SystemExit still
    # propagate; any backend failure still degrades to an empty string
    try:
        google_transcript = transcribe_google(filename)
    except Exception:
        google_transcript = ''
    try:
        sphinx_transcript = transcribe_sphinx(filename)
    except Exception:
        sphinx_transcript = ''
    try:
        deepspeech_transcript = transcribe_deepspeech(filename)
    except Exception:
        deepspeech_transcript = ''
    os.remove(filename)
    # write to .json — the context manager guarantees flush/close
    # (the original never closed the handle)
    jsonfilename = file[0:-4] + '.json'
    data = {
        'filename': file,
        'date': str(datetime.datetime.now()),
        'transcripts': {
            'google': google_transcript,
            'sphinx': sphinx_transcript,
            'deepspeech': deepspeech_transcript}
    }
    with open(jsonfilename, 'w') as jsonfile:
        json.dump(data, jsonfile)
    return jsonfilename
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/audio_plot.py | features/audio_features/helpers/audio_plot.py | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of organizations - small businesses,
researchers, enterprises, and independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## AUDIO_PLOT.PY ##
================================================
A simple function to plot a spectrogram with librosa.
'''
import librosa, os
import matplotlib.pyplot as plt
import numpy as np
import librosa.display
from PIL import Image
# now begin plotting linear-frequency power spectrum
def plot_spectrogram(filename):
    """Render an 8-panel spectrogram summary of `filename` with librosa.

    Panels: linear/log power, constant-Q (note/Hz), chromagram, grayscale
    linear power, log power with time axis, and a tempogram.  Saves
    <name>.png next to the input (converted to grayscale+alpha) and
    returns its path.
    """
    y, sr = librosa.load(filename)
    plt.figure(figsize=(12, 8))
    D = librosa.amplitude_to_db(librosa.stft(y), ref=np.max)
    # 1) linear-frequency power spectrogram
    plt.subplot(4, 2, 1)
    librosa.display.specshow(D, y_axis='linear')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Linear-frequency power spectrogram')
    # 2) on logarithmic scale
    plt.subplot(4, 2, 2)
    librosa.display.specshow(D, y_axis='log')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Log-frequency power spectrogram')
    # 3+4) constant-Q transform, note and Hz axes
    CQT = librosa.amplitude_to_db(librosa.cqt(y, sr=sr), ref=np.max)
    plt.subplot(4, 2, 3)
    librosa.display.specshow(CQT, y_axis='cqt_note')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Constant-Q power spectrogram (note)')
    plt.subplot(4, 2, 4)
    librosa.display.specshow(CQT, y_axis='cqt_hz')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Constant-Q power spectrogram (Hz)')
    # 5) chromagram with pitch classes
    C = librosa.feature.chroma_cqt(y=y, sr=sr)
    plt.subplot(4, 2, 5)
    librosa.display.specshow(C, y_axis='chroma')
    plt.colorbar()
    plt.title('Chromagram')
    # 6) force a grayscale colormap (white -> black)
    plt.subplot(4, 2, 6)
    librosa.display.specshow(D, cmap='gray_r', y_axis='linear')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Linear power spectrogram (grayscale)')
    # 7) draw time markers automatically
    plt.subplot(4, 2, 7)
    librosa.display.specshow(D, x_axis='time', y_axis='log')
    plt.colorbar(format='%+2.0f dB')
    plt.title('Log power spectrogram')
    # 8) tempogram with BPM markers
    plt.subplot(4, 2, 8)
    Tgram = librosa.feature.tempogram(y=y, sr=sr)
    librosa.display.specshow(Tgram, x_axis='time', y_axis='tempo')
    plt.colorbar()
    plt.title('Tempogram')
    plt.tight_layout()
    # image file save
    imgfile = filename[0:-4] + '.png'
    plt.savefig(imgfile)
    # close the figure: matplotlib otherwise keeps every figure alive,
    # a memory leak when this is called in a batch loop
    plt.close()
    # re-open and convert to grayscale+alpha; the context manager closes
    # the handle (Image.open was previously leaked)
    with Image.open(imgfile) as img:
        img.convert('LA').save(imgfile)
    # os.system('open %s'%(imgfile))
    return imgfile
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyaudio_help.py | features/audio_features/helpers/pyaudio_help.py | # import required modules
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import sys,json, os
import numpy as np
def stats(matrix):
    """Return [mean, std, max, min, median] of `matrix` as a numpy array."""
    summary = [
        np.mean(matrix),
        np.std(matrix),
        np.amax(matrix),
        np.amin(matrix),
        np.median(matrix),
    ]
    return np.array(summary)
def convert_mono(filename):
    """Downmix `filename` to single-channel wav via ffmpeg.

    Returns the path of the new file (<name>_mono.wav).
    """
    import shlex
    mono = filename[0:-4] + '_mono.wav'
    # shell-quote the paths so spaces/metacharacters in file names cannot
    # break (or inject into) the shell command
    os.system('ffmpeg -i %s -ac 1 %s' % (shlex.quote(filename), shlex.quote(mono)))
    return mono
# ---- script entry: featurize the file passed as argv[1] ----
filename=sys.argv[1]
print(filename)
# pyAudioAnalysis expects mono input
mono=convert_mono(filename)
[Fs, x] = audioBasicIO.readAudioFile(mono)
# short-term features: 50 ms windows with 25 ms hop
features, labels= audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.025*Fs)
new_features=list()
new_labels=list()
# collapse each frame-level feature track into 5 summary statistics
# (mean/std/max/min/median), flattening everything into one vector
for i in range(len(features)):
    tfeatures=stats(features[i])
    new_features=np.append(new_features,tfeatures)
    new_labels.append('mean '+labels[i])
    new_labels.append('std '+labels[i])
    new_labels.append('max '+labels[i])
    new_labels.append('min '+labels[i])
    new_labels.append('median '+labels[i])
os.remove(mono)
# NOTE(review): this also deletes the ORIGINAL input file — presumably
# intentional for the pipeline's cleanup; confirm before reuse.
os.remove(filename)
data={'features': new_features.tolist(),
    'labels': new_labels}
jsonfile=open(filename[0:-4]+'.json','w')
json.dump(data,jsonfile)
jsonfile.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/sonopy_features.py | features/audio_features/helpers/sonopy_features.py | import numpy as np
# demo: extract 13 MFCCs from test.wav with sonopy
from sonopy import power_spec, mel_spec, mfcc_spec, filterbanks
from scipy.io import wavfile
sr, audio = wavfile.read('test.wav')
# powers = power_spec(audio, window_stride=(100, 50), fft_size=512)
# mels = mel_spec(audio, sr, window_stride=(1600, 800), fft_size=1024, num_filt=30)
# window/stride are in samples: 160/80 = 10 ms / 5 ms hop at 16 kHz
mfccs = mfcc_spec(audio, sr, window_stride=(160, 80), fft_size=512, num_filt=20, num_coeffs=13)
print(mfccs)
# filters = filterbanks(16000, 20, 257) # Probably not ever useful
# powers, filters, mels, mfccs = mfcc_spec(audio, sr, return_parts=True)
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/make_mixed_features.py | features/audio_features/helpers/make_mixed_features.py | '''
Create a random set of features.
'''
import librosa_features as lf
import helpers.transcribe as ts
import random, math, os, sys, json
def prev_dir(directory):
    """Return `directory` with its last '/'-separated component removed."""
    parts = directory.split('/')
    return '/'.join(parts[:-1])
# ---- script: engineer random cross-modal (text/audio) ratio features ----
directory=os.getcwd()
prevdir=prev_dir(directory)
sys.path.append(prevdir+'/text_features')
import nltk_features as nf
# get features
librosa_features, librosa_labels=lf.librosa_featurize('test.wav', False)
transcript=ts.transcribe_sphinx('test.wav')
nltk_features, nltk_labels=nf.nltk_featurize(transcript)
# relate some features to each other
# engineer 10 random features by dividing them and making new labels
mixed_features=list()
mixed_labels=list()
mixed_inds=list()
# NOTE(review): mixed_labels is never reset inside this loop, so only the
# first outer iteration actually collects 100 features; iterations 1-4
# skip the while loop and re-dump the same data under new file names.
for i in range(5):
    while len(mixed_labels) < 100:
        # get some random features from both text and audio
        i1=random.randint(0,len(librosa_features)-1)
        label_1=librosa_labels[i1]
        feature_1=librosa_features[i1]
        i2=random.randint(0,len(nltk_features)-1)
        label_2=nltk_labels[i2]
        feature_2=nltk_features[i2]
        # make new feature from labels
        mixed_feature=feature_2/feature_1
        # keep only finite, non-zero ratios
        if mixed_feature != 0.0 and math.isnan(mixed_feature) == False and math.isinf(abs(mixed_feature)) == False:
            # make new label
            mixed_label=label_2+' (nltk) ' + '| / | '+label_1 + ' (librosa)'
            print(mixed_label)
            mixed_labels.append(mixed_label)
            print(mixed_feature)
            mixed_features.append(mixed_feature)
            mixed_inds.append([i2,i1])
    data={'labels': mixed_labels,
        'mixed_inds': mixed_inds,
        'first_ind': 'nltk_features',
        'second_ind':'librosa_features'}
    jsonfile=open('mixed_feature_%s.json'%(str(i)),'w')
    json.dump(data,jsonfile)
    jsonfile.close()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/myprosody_features.py | features/audio_features/helpers/myprosody_features.py | '''
Taken from https://github.com/Shahabks/myprosody/blob/master/EXAMPLES.pdf
'''
import parselmouth
from parselmouth.praat import call, run_file
import numpy as np
import os, sys, shutil
def prev_dir(directory):
    """Strip the last path component, keeping the trailing slash.

    Cuts at the FIRST occurrence of the final component's text (original
    behavior preserved), e.g. '/a/b/c' -> '/a/b/'.
    """
    tail = directory.split('/')[-1]
    return directory[:directory.index(tail)]
def myprosody_featurize(wavfile, cur_dir, help_dir):
    """Extract 14 prosodic features from `wavfile` with the myprosody Praat script.

    wavfile  : wav file name (relative to cur_dir)
    cur_dir  : directory containing the wav file
    help_dir : directory containing the myprosody checkout

    Returns (features, labels) where features are floats (0 on any parse
    failure) for syllable/pause counts, speech rates, durations, balance,
    and f0 statistics.  Also deletes the <name>.TextGrid file Praat emits.
    """
    sound = cur_dir + '/' + wavfile
    path = cur_dir
    sourcerun = help_dir + '/myprosody/myprosody/dataset/essen/myspsolution.praat'
    objects = run_file(sourcerun, -20, 2, 0.3, "yes", sound, path, 80, 400, 0.01, capture_output=True)
    print(objects[0])  # info from the parselmouth.Sound object
    z1 = str(objects[1])  # TextGrid/stat output as whitespace-separated text
    # column vector of the whitespace-separated stat strings
    z5 = np.array(z1.strip().split())[np.newaxis].T

    def _row(i):
        # best-effort float extraction: Praat emits blanks/text on failure,
        # and short outputs make the index itself invalid — both yield 0
        # (replaces 14 copy-pasted try/except blocks in the original)
        try:
            return float(z5[i, :])
        except Exception:
            return 0

    # NOTE: key "number_ of_syllables" (stray space) kept verbatim —
    # downstream consumers may depend on it
    dataset = {"number_ of_syllables": _row(0), "number_of_pauses": _row(1),
               "rate_of_speech": _row(2), "articulation_rate": _row(3),
               "speaking_duration": _row(4), "original_duration": _row(5),
               "balance": _row(6), "f0_mean": _row(7), "f0_std": _row(8),
               "f0_median": _row(9), "f0_min": _row(10), "f0_max": _row(11),
               "f0_quantile25": _row(12), "f0_quant75": _row(13)}
    os.remove(wavfile[0:-4] + '.TextGrid')
    labels = list(dataset)
    features = list(dataset.values())
    return features, labels
# os.chdir('test')
# wavfile='test.wav'
# cur_dir=os.getcwd()
# features, labels = myprosody_featurize(wavfile)
# print(features)
# print(labels) | python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/vggish_inference_demo.py | features/audio_features/helpers/vggish_inference_demo.py | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""A simple demonstration of running VGGish in inference mode.
This is intended as a toy example that demonstrates how the various building
blocks (feature extraction, model definition and loading, postprocessing) work
together in an inference context.
A WAV file (assumed to contain signed 16-bit PCM samples) is read in, converted
into log mel spectrogram examples, fed into VGGish, the raw embedding output is
whitened and quantized, and the postprocessed embeddings are optionally written
in a SequenceExample to a TFRecord file (using the same format as the embedding
features released in AudioSet).
Usage:
# Run a WAV file through the model and print the embeddings. The model
# checkpoint is loaded from vggish_model.ckpt and the PCA parameters are
# loaded from vggish_pca_params.npz in the current directory.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file
# Run a WAV file through the model and also write the embeddings to
# a TFRecord file. The model checkpoint and PCA parameters are explicitly
# passed in as well.
$ python vggish_inference_demo.py --wav_file /path/to/a/wav/file \
--tfrecord_file /path/to/tfrecord/file \
--checkpoint /path/to/model/checkpoint \
--pca_params /path/to/pca/params
# Run a built-in input (a sine wav) through the model and print the
# embeddings. Associated model files are read from the current directory.
$ python vggish_inference_demo.py
"""
from __future__ import print_function
import numpy as np
from scipy.io import wavfile
import six
import tensorflow as tf
import os,json, sys
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
# quick print statement
print('AUDIOSET VGGISH FEATURIZING...')
# TF1-style command-line flags; values are read in main() via FLAGS
flags = tf.app.flags
flags.DEFINE_string(
    'wav_file', None,
    'Path to a wav file. Should contain signed 16-bit PCM samples. '
    'If none is provided, a synthetic sound is used.')
flags.DEFINE_string(
    'checkpoint', 'vggish_model.ckpt',
    'Path to the VGGish checkpoint file.')
flags.DEFINE_string(
    'pca_params', 'vggish_pca_params.npz',
    'Path to the VGGish PCA parameters file.')
flags.DEFINE_string(
    'tfrecord_file', None,
    'Path to a TFRecord file where embeddings will be written.')
FLAGS = flags.FLAGS
def main(_):
  """Featurize FLAGS.wav_file (or a synthetic sine) with VGGish and dump the
  postprocessed embeddings to <wav basename>.json under ./processdir."""
  # In this simple example, we run the examples from a single audio file through
  # the model. If none is provided, we generate a synthetic input.
  if FLAGS.wav_file:
    wav_file = FLAGS.wav_file
  else:
    # Write a WAV of a sine wav into an in-memory file object.
    num_secs = 5
    freq = 1000
    sr = 44100
    t = np.linspace(0, num_secs, int(num_secs * sr))
    x = np.sin(2 * np.pi * freq * t)
    # Convert to signed 16-bit samples.
    samples = np.clip(x * 32768, -32768, 32767).astype(np.int16)
    wav_file = six.BytesIO()
    wavfile.write(wav_file, sr, samples)
    wav_file.seek(0)
  examples_batch = vggish_input.wavfile_to_examples(wav_file)
  #print(examples_batch)
  # Prepare a postprocessor to munge the model embeddings.
  pproc = vggish_postprocess.Postprocessor(FLAGS.pca_params)
  # If needed, prepare a record writer to store the postprocessed embeddings.
  writer = tf.python_io.TFRecordWriter(
      FLAGS.tfrecord_file) if FLAGS.tfrecord_file else None
  with tf.Graph().as_default(), tf.Session() as sess:
    # Define the model in inference mode, load the checkpoint, and
    # locate input and output tensors.
    vggish_slim.define_vggish_slim(training=False)
    vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)
    features_tensor = sess.graph.get_tensor_by_name(
        vggish_params.INPUT_TENSOR_NAME)
    embedding_tensor = sess.graph.get_tensor_by_name(
        vggish_params.OUTPUT_TENSOR_NAME)
    # Run inference and postprocessing.
    [embedding_batch] = sess.run([embedding_tensor],
                                 feed_dict={features_tensor: examples_batch})
    #print(embedding_batch)
    postprocessed_batch = pproc.postprocess(embedding_batch)
    #print(postprocessed_batch)
    # CHANGED CODE (upstream demo): the SequenceExample/TFRecord writing
    # step was removed here — `writer` above is created but never used.
    # MODIFIED CODE: write embeddings to a JSON file inside ./processdir
    # NOTE(review): bare except masks real chdir failures; it is used as a
    # "create if missing" fallback — confirm that's the only failure mode.
    try:
      os.chdir(os.getcwd()+'/processdir')
    except:
      os.mkdir(os.getcwd()+'/processdir')
      os.chdir(os.getcwd()+'/processdir')
    #print(len(postprocessed_batch))
    #print(type(postprocessed_batch))
    # NOTE(review): argv[2] is presumably the original audio path passed by
    # the wrapping featurizer (argv[1] being --wav_file) — confirm callers.
    filepath=sys.argv[2]
    i1=filepath[::-1].find('/')
    # basename of the audio file, with .wav swapped for .json
    jsonfilename=filepath[-1*i1:][0:-4]+'.json'
    print('writing data to '+jsonfilename)
    jsonfile=open(jsonfilename,'w')
    data={
      'features':postprocessed_batch.tolist(),
    }
    json.dump(data,jsonfile)
    jsonfile.close()
if __name__ == '__main__':
tf.app.run()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/speechpy_features.py | features/audio_features/helpers/speechpy_features.py | import scipy.io.wavfile as wav
import numpy as np
import speechpy
import os, sys
# ---- script: extract MFCC and log-energy features with speechpy ----
file_name = sys.argv[1]
fs, signal = wav.read(file_name)
# NOTE(review): keeps channel 0 only — assumes a multi-channel file; a
# mono file (1-D array) would raise IndexError here. Confirm inputs.
signal = signal[:,0]
# Example of pre-emphasizing.
signal_preemphasized = speechpy.processing.preemphasis(signal, cof=0.98)
# Example of stacking frames (20 ms windows, 10 ms hop)
frames = speechpy.processing.stack_frames(signal, sampling_frequency=fs, frame_length=0.020, frame_stride=0.01, filter=lambda x: np.ones((x,)),
          zero_padding=True)
# Example of extracting power spectrum
power_spectrum = speechpy.processing.power_spectrum(frames, fft_points=512)
print('power spectrum shape=', power_spectrum.shape)
############# Extract MFCC features #############
mfcc = speechpy.feature.mfcc(signal, sampling_frequency=fs, frame_length=0.020, frame_stride=0.01,
             num_filters=40, fft_length=512, low_frequency=0, high_frequency=None)
# sliding-window cepstral mean+variance normalization
mfcc_cmvn = speechpy.processing.cmvnw(mfcc,win_size=301,variance_normalization=True)
print('mfcc(mean + variance normalized) feature shape=', mfcc_cmvn.shape)
# base + delta + delta-delta cube
mfcc_feature_cube = speechpy.feature.extract_derivative_feature(mfcc)
print('mfcc feature cube shape=', mfcc_feature_cube.shape)
############# Extract logenergy features #############
logenergy = speechpy.feature.lmfe(signal, sampling_frequency=fs, frame_length=0.020, frame_stride=0.01,
             num_filters=40, fft_length=512, low_frequency=0, high_frequency=None)
logenergy_feature_cube = speechpy.feature.extract_derivative_feature(logenergy)
print('logenergy features=', logenergy.shape)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/opensmile/arff_parse.py | features/audio_features/helpers/opensmile/arff_parse.py | import numpy as np
import json, os, time
def parseArff(arff_file):
    '''
    Parses an .arff file produced by openSMILE feature extraction.

    Returns (features, labels): the numeric values from the data row,
    paired with the attribute names declared in the header.  Cells that
    cannot be parsed as floats (e.g. the leading instance name) are
    skipped, as are cells with no matching attribute label.
    '''
    data = []
    labels = []
    # context manager closes the handle (the original leaked it)
    with open(arff_file, 'r', encoding='utf-8') as f:
        for line in f:
            if '@attribute' in line:
                # header line: "@attribute <name> <type>"
                labels.append(line.split(" ")[1])
            if ',' in line:
                # data row: comma-separated values
                data.extend(line.split(","))
    # first cell is the instance name; rewrite as "<arff basename>.wav"
    base = arff_file.split('/')[-1]
    data[0] = base[:-5] + '.wav'
    newdata = list()
    newlabels = list()
    for i in range(len(data)):
        # narrowed from bare except; value and label are resolved together
        # so the two lists can never desynchronize (the original could
        # append a value and then fail on the missing label)
        try:
            value = float(data[i])
            label = labels[i]
        except (ValueError, IndexError):
            continue
        newdata.append(value)
        newlabels.append(label)
    return newdata, newlabels
# extract all features as different arff files
# ---- script: run every openSMILE config on sample/0.wav, convert each
# resulting .arff to a .json of (features, labels) ----
cmdlist=list()  # NOTE(review): never used below
feature_extractors=['avec2013.conf', 'emobase2010.conf', 'IS13_ComParE_Voc.conf', 'IS10_paraling.conf', 'IS13_ComParE.conf', 'IS10_paraling_compat.conf', 'emobase.conf',
    'emo_large.conf', 'IS11_speaker_state.conf', 'IS12_speaker_trait_compat.conf', 'IS09_emotion.conf', 'avec2011.conf', 'IS12_speaker_trait.conf',
    'prosodyShsViterbiLoudness.conf', 'ComParE_2016.conf', 'GeMAPSv01a.conf']
curdir=os.getcwd()
filedir=os.getcwd()+'/sample'
for i in range(len(feature_extractors)):
    feature_extractor=feature_extractors[i]
    arff_file=feature_extractor[0:-5]+'.arff'
    print(feature_extractor.upper())
    # GeMAPS lives in a config subfolder, unlike the other configs
    if feature_extractor== 'GeMAPSv01a.conf':
        os.system('SMILExtract -C opensmile-2.3.0/config/gemaps/%s -I sample/0.wav -O sample/%s'%(feature_extractor, arff_file))
    else:
        os.system('SMILExtract -C opensmile-2.3.0/config/%s -I sample/0.wav -O sample/%s'%(feature_extractor, arff_file))
    os.chdir(filedir)
    try:
        data, labels = parseArff(arff_file)
        print(len(data))
        print(len(labels))
        jsonfile=open(arff_file[0:-5]+'.json','w')
        # NOTE(review): `data` is rebound here, shadowing the feature list
        data={'features': data,
            'labels': labels}
        json.dump(data,jsonfile)
        jsonfile.close()
    except:
        print('error')
    os.chdir(curdir)
# print(labels)
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/DigiPsych_Prosody/vad_helper.py | features/audio_features/helpers/DigiPsych_Prosody/vad_helper.py | import collections
import contextlib
import sys
import wave
import webrtcvad
'''
vad_helper.py takes the code from https://github.com/wiseman/py-webrtcvad/blob/master/example.py
directly.
The methods created in example.py are leveraged immediately by prosody.py. All credit
for functions in this python program are credited to wiseman's implementation of py-webrtcvad
prosody.py is an API wrapper built on top of example.py (vad_helper) to extract prosodic features.
'''
def read_wave(path):
    """Read a mono, 16-bit .wav file.

    Takes the path, and returns (PCM audio data, sample rate).  Asserts
    the file is mono, 16-bit, at a sample rate webrtcvad accepts.
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        assert wf.getnchannels() == 1
        assert wf.getsampwidth() == 2
        rate = wf.getframerate()
        assert rate in (8000, 16000, 32000, 48000)
        return wf.readframes(wf.getnframes()), rate
def write_wave(path, audio, sample_rate):
    """Write a .wav file.

    Takes path, PCM audio data (bytes), and sample rate; output is always
    mono, 16-bit.
    """
    out = wave.open(path, 'wb')
    try:
        out.setnchannels(1)
        out.setsampwidth(2)
        out.setframerate(sample_rate)
        out.writeframes(audio)
    finally:
        out.close()
class Frame(object):
    """One fixed-duration chunk ("frame") of PCM audio and its position."""

    def __init__(self, bytes, timestamp, duration):
        # `bytes` shadows the builtin; name kept for caller compatibility
        self.bytes, self.timestamp, self.duration = bytes, timestamp, duration
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield Frame objects of `frame_duration_ms` cut from 16-bit PCM `audio`.

    Takes the desired frame duration in milliseconds, the PCM data, and
    the sample rate.  NOTE: a trailing frame that exactly fills the buffer
    is dropped (`<`, not `<=`) — kept to match the original behavior.
    """
    bytes_per_frame = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    step_secs = (float(bytes_per_frame) / sample_rate) / 2.0
    offset, timestamp = 0, 0.0
    while offset + bytes_per_frame < len(audio):
        yield Frame(audio[offset:offset + bytes_per_frame], timestamp, step_secs)
        timestamp += step_secs
        offset += bytes_per_frame
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.
    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.
    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.
    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.
    Arguments:
    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).
    Returns: A generator that yields PCM audio data.
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
    triggered = False
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)
        # NOTE(review): writes a per-frame debug trace ('1'/'0' plus
        # '+'/'-' trigger marks) straight to stdout — redirect or strip
        # these writes when embedding this in a larger program.
        sys.stdout.write('1' if is_speech else '0')
        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                sys.stdout.write('+(%s)' % (ring_buffer[0][0].timestamp,))
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, s in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames])
                ring_buffer.clear()
                voiced_frames = []
    if triggered:
        sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
    sys.stdout.write('\n')
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames])
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/DigiPsych_Prosody/prosody.py | features/audio_features/helpers/DigiPsych_Prosody/prosody.py | import webrtcvad
from vad_helper import read_wave, frame_generator
import os
import sys
import sox
import shutil
from operator import itemgetter
from itertools import groupby
import numpy as np
class Voice_Prosody:
def __init__(self):
'''
Class embeds methods of voice activity detection
to generate prosodic features of voice
'''
self.temp_folder = './Temp_Folder'
os.mkdir(self.temp_folder)
def __del__(self):
'''
Destructor for Program
Removes all created
'''
shutil.rmtree(self.temp_folder,ignore_errors=True)
    def featurize_audio(self,audioFile,frame_ms):
        '''
        Central API method to call to perform audio featurization.

        audioFile : path to a .wav file (the PROCESS EXITS via sys.exit(1)
                    if the path is missing or not a .wav)
        frame_ms  : VAD frame size in milliseconds (20-40 ms recommended)

        Returns a dict of pause/speech-timing features, one set per
        webrtcvad aggressiveness level 1-3 (suffix `_VADInt_<level>`),
        plus the bare file name under 'AudioFile'.
        '''
        if os.path.exists(audioFile) == False or '.wav' not in audioFile:
            sys.stderr.write("Path does not exist or is not a .wav file\n")
            sys.exit(1)
        # {vad_level: {frame_timestamp: 'True'/'False'}} voicing flags
        vad_dict = self.preproc_audio(audioFile,frame_ms)
        feat_dict = dict()
        feat_names = ['Speech_Time','Total_Time','Pause_Time','Pause_Percentage',
            'Pause_Speech_Ratio','Mean_Pause_Length','Pause_Variability']
        for key, value in vad_dict.items():
            speech_time = self.getSpeechTime(value,frame_ms)
            feat_dict[feat_names[0] + '_VADInt_' + str(key)] = speech_time
            relevant_time = self.getRelevantTime(value,frame_ms)
            feat_dict[feat_names[1] + '_VADInt_' + str(key)] = relevant_time
            pause_time = relevant_time - speech_time
            feat_dict[feat_names[2] + '_VADInt_' + str(key)] = pause_time
            if relevant_time == 0:
                pause_percent = 0 #Deal with divide by 0 error
            else:
                pause_percent = pause_time / relevant_time
            feat_dict[feat_names[3] + '_VADInt_' + str(key)] = pause_percent
            if speech_time == 0: #Deal with divide by 0 error
                pause_sp_ratio = 0
            else:
                pause_sp_ratio = pause_time / speech_time
            feat_dict[feat_names[4] + '_VADInt_' + str(key)] = pause_sp_ratio
            mean_pause = self.meanPauseDuration(value,frame_ms)
            feat_dict[feat_names[5] + '_VADInt_' + str(key)] = mean_pause
            # NOTE(review): pauseVariability is not defined in this chunk —
            # presumably implemented further down the class; confirm.
            pause_var = self.pauseVariability(value,frame_ms)
            feat_dict[feat_names[6] + '_VADInt_' + str(key)] = pause_var
        feat_dict['AudioFile'] = audioFile.split('/')[-1]
        return feat_dict
    def preproc_audio(self,audioFile,frame_ms):
        '''
        Preprocessing Audio File into pcm data and gain segments of data
        and map to voice/nonvoice presence.

        Resamples the file to mono/48 kHz into the temp folder via sox,
        then runs webrtcvad at aggressiveness levels 1-3.

        Returns {level: {frame_timestamp: 'True'/'False'}} where the
        string values mark voiced/unvoiced frames.
        '''
        vad_dict = dict()
        #Create Transformer to ensure all files are of proper dimensions
        # 1-channel, sample rate of 48000
        wavName = audioFile.split('/')[-1]
        output_path = os.path.join(self.temp_folder,wavName)
        tfm = sox.Transformer()
        tfm.channels(n_channels=1)
        tfm.rate(samplerate = 48000)
        tfm.build(audioFile,output_path)
        #Perform Segmentation via VAD
        levels = [1,2,3] #VADInt levels
        audio, sample_rate = read_wave(output_path)
        for lv in levels:
            lv_dict = dict()
            vad = webrtcvad.Vad(lv)
            frames = list(frame_generator(frame_ms,audio,sample_rate)) # 20 to 40 ms recommended
            for frame in frames:
                # flags are stored as the STRINGS 'True'/'False', not bools;
                # timestamps rounded to 2 decimals (10 ms granularity)
                lv_dict[round(frame.timestamp,2)] = str(vad.is_speech(frame.bytes,sample_rate))
            vad_dict[lv] = lv_dict
        return vad_dict
def getSpeechTime(self,v_dict,frame_ms):
'''
Returns Total Speech Time
'''
if 'True' not in list(v_dict.values()):
return 0
tot_time = list(v_dict.values()).count('True') * frame_ms / 1000
return tot_time
def getRelevantTime(self,v_dict,frame_ms):
'''
Gets time block from first voicing to last voicing
'''
keys = list(v_dict.keys())
values = list(v_dict.values())
if 'True' not in values:
return 0
f_ind = values.index('True')
l_ind = len(values) - 1 - values[::-1].index('True')
tot_time = keys[l_ind] + float(frame_ms)/1000 - keys[f_ind]
return tot_time
def calculate_pauses(self,v_dict,frame_ms):
'''
Calculates pauses. Returns as an array of pauses
'''
pauses = []
keys = list(v_dict.keys())
values = list(v_dict.values())
indices = [i for i, x in enumerate(values) if x == 'False']
for k, g in groupby(enumerate(indices), lambda ix : ix[0] - ix[1]):
pause =float(len(list(map(itemgetter(1), g)))) * float(frame_ms) / 1000
pauses.append(pause)
return pauses
def meanPauseDuration(self,v_dict,frame_ms):
'''
Calculate Mean Pause Duration:
- Calculate all the pauses in the sound
- Average by number of pauses.
'''
pauses = self.calculate_pauses(v_dict,frame_ms)
if len(pauses) == 0 or len(pauses) == 1: #Account for cases where there are no pauses, or empty file
return 0
mean_pause = np.average(pauses)
return mean_pause
def pauseVariability(self,v_dict,frame_ms):
'''
Calculates the variance of the pauses
- Calculate pauses in sound clip
- np.var(array)
'''
pauses = self.calculate_pauses(v_dict,frame_ms)
if len(pauses) == 0 or len(pauses) == 1: #Account for cases where there are no pauses, or empty file
return 0
pause_var = np.var(pauses)
return pause_var
def main():
    '''Ad-hoc driver: featurize one known recording and print the result.'''
    extractor = Voice_Prosody()
    audio_dir = '/home/lazhang/UW_Projects/MHA_Data/AllAudio'
    wav_path = os.path.join(audio_dir, 'NLX-1527883573725426010-1527883619077.wav')
    print(extractor.featurize_audio(wav_path, 20))
if __name__ == '__main__':
    main()
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/DigiPsych_Prosody/featurize.py | features/audio_features/helpers/DigiPsych_Prosody/featurize.py | import argparse, json, os, sys
from prosody import Voice_Prosody
import pandas as pd
from datetime import datetime
'''
Featurize Wrapper for grabbing prosody features for audio stored in a folder
'''
def featurize_audio(audiofile, fsize):
    '''
    Featurize a single audio file with DigiPsych prosody features.

    audiofile: path to the recording; only processed when it ends in '.wav'.
    fsize:     VAD frame size in milliseconds (coerced to int).

    Returns (features, labels) -- parallel lists of feature values and their
    names -- or None (implicitly) for non-wav input, preserving the original
    contract. The unused `df = pd.DataFrame()` local has been removed.
    '''
    vp = Voice_Prosody()
    if audiofile.endswith('.wav'):
        print('Featurizing:', audiofile)
        feat_dict = vp.featurize_audio(audiofile, int(fsize))
        # The final entry is the 'AudioFile' name -- strip it from both lists.
        features = list(feat_dict.values())[0:-1]
        labels = list(feat_dict)[0:-1]
        print(features)
        print(labels)
        return features, labels
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyAudioLex/number_ratio.py | features/audio_features/helpers/pyAudioLex/number_ratio.py | '''
@package: pyAudioLex
@author: Drew Morris
@module: number_ratio
During conversations, subjects give details about their birth dates,
how many kids they have, and other numerical information. Such use of
numbers in a sentence can be a measure of recall ability. The number ratio
feature is calculated by dividing the total count of numbers by the total
count of words the subject used in the conversation.
'''
from nltk.tokenize import RegexpTokenizer, word_tokenize
def number_ratio(s, tokens=None):
    """Return the fraction of tokens in *s* that are number words or digits.

    s:      the utterance text.
    tokens: optional pre-computed token list; word_tokenize(s) when omitted.
    Returns 0.0 for empty input.
    """
    # BUGFIX: identity comparison -- '== None' can be hijacked by __eq__.
    if tokens is None:
        tokens = word_tokenize(s)
    if len(tokens) == 0:
        return float(0)
    # BUGFIX: raw string -- '\d' in a plain literal is an invalid escape
    # sequence (DeprecationWarning); the pattern itself is unchanged.
    tokenizer = RegexpTokenizer(r'zero|one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|hundred|thousand|million|billion|trillion|dozen|couple|several|few|\d')
    qtokens = tokenizer.tokenize(s.lower())
    return float(len(qtokens)) / float(len(tokens))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyAudioLex/verb_freq.py | features/audio_features/helpers/pyAudioLex/verb_freq.py | '''
@package: pyAudioLex
@author: Drew Morris
@module: verb_freq
Frequency of a POS tag is computed by dividing the total number of words
with that tag by the total number of words spoken by the subject in the
recording.
'''
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
def verb_freq(s, tokens=None):
    """Return the fraction of tokens POS-tagged as verbs.

    Tags with the Penn Treebank tagger, maps to the universal tagset, and
    divides the VERB count by the total token count. Returns 0.0 for empty
    input (checked before tagging, so the tagger is not run needlessly).
    """
    # BUGFIX: identity comparison -- '== None' can be hijacked by __eq__.
    if tokens is None:
        tokens = word_tokenize(s)
    if len(tokens) == 0:
        return float(0)
    verbs = [token for token, tag in pos_tag(tokens)
             if map_tag('en-ptb', 'universal', tag) == "VERB"]
    return float(len(verbs)) / float(len(tokens))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyAudioLex/noun_freq.py | features/audio_features/helpers/pyAudioLex/noun_freq.py | '''
@package: pyAudioLex
@author: Drew Morris
@module: noun_freq
Frequency of a POS tag is computed by dividing the total number of words
with that tag by the total number of words spoken by the subject in the
recording.
'''
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
def noun_freq(s, tokens=None):
    """Return the fraction of tokens POS-tagged as nouns.

    Tags with the Penn Treebank tagger, maps to the universal tagset, and
    divides the NOUN count by the total token count. Returns 0.0 for empty
    input (checked before tagging, so the tagger is not run needlessly).
    """
    # BUGFIX: identity comparison -- '== None' can be hijacked by __eq__.
    if tokens is None:
        tokens = word_tokenize(s)
    if len(tokens) == 0:
        return float(0)
    nouns = [token for token, tag in pos_tag(tokens)
             if map_tag('en-ptb', 'universal', tag) == "NOUN"]
    return float(len(nouns)) / float(len(tokens))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyAudioLex/honores_statistic.py | features/audio_features/helpers/pyAudioLex/honores_statistic.py | '''
@package: pyAudioLex
@author: Drew Morris
@module: honores_statistic
Honore's statistic [21] is based on the notion that the larger the number
of words used by a speaker that occur only once, the richer his overall
lexicon is. Words spoken only once (V1) and the total vocabulary used (V)
have been shown to be linearly associated. Honore's statistic generates a
lexical richness measure according to R = (100 * log(N)) / (1 - (V1 / V)),
where N is the total text length. Higher values correspond to a richer
vocabulary. As with standardized word entropy, stemming is done on words
and only the stems are considered.
'''
import math
from nltk.tokenize import word_tokenize
from nltk import FreqDist
def honores_statistic(s, tokens=None):
    """Return Honore's lexical-richness statistic for *s*.

    R = (100 * log(N)) / (1 - (V1 / V)), where N is the token count, V the
    vocabulary size, and V1 the number of words occurring exactly once.
    Returns 0.0 when N, V, or V1 is zero; returns 100*log(N) when V == V1
    (every word unique), avoiding division by zero.
    """
    # BUGFIX: identity comparison -- '== None' can be hijacked by __eq__.
    if tokens is None:
        tokens = word_tokenize(s)
    # Hapax legomena: tokens whose frequency is exactly 1.
    uniques = [token for token, count in FreqDist(tokens).items() if count == 1]
    N = float(len(tokens))
    V = float(len(set(tokens)))
    V1 = float(len(uniques))
    if N == 0 or V == 0 or V1 == 0:
        return float(0)
    elif V == V1:
        return (100 * math.log(N))
    else:
        return (100 * math.log(N)) / (1 - (V1 / V))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
jim-schwoebel/allie | https://github.com/jim-schwoebel/allie/blob/b89f1403f63033ad406d0606b7c7a45000b43481/features/audio_features/helpers/pyAudioLex/particle_freq.py | features/audio_features/helpers/pyAudioLex/particle_freq.py | '''
@package: pyAudioLex
@author: Drew Morris
@module: particle_freq
Frequency of a POS tag is computed by dividing the total number of words
with that tag by the total number of words spoken by the subject in the
recording.
'''
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
def particle_freq(s, tokens=None):
    """Return the fraction of tokens POS-tagged as particles.

    Tags with the Penn Treebank tagger, maps to the universal tagset, and
    divides the PRT count by the total token count. Returns 0.0 for empty
    input (checked before tagging, so the tagger is not run needlessly).
    """
    # BUGFIX: identity comparison -- '== None' can be hijacked by __eq__.
    if tokens is None:
        tokens = word_tokenize(s)
    if len(tokens) == 0:
        return float(0)
    particles = [token for token, tag in pos_tag(tokens)
                 if map_tag('en-ptb', 'universal', tag) == "PRT"]
    return float(len(particles)) / float(len(tokens))
| python | Apache-2.0 | b89f1403f63033ad406d0606b7c7a45000b43481 | 2026-01-05T07:09:07.495102Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.