id
int64 0
877k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
66
| repo_stars
int64 94
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 11
values | repo_extraction_date
stringclasses 197
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,540,323
|
netutils.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/netutils.h
|
#ifndef NETUTILS_H
#define NETUTILS_H
#include <curl/curl.h>
#include <fstream>
#include <sstream>
#include <string>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <thread>
#include <chrono>
// NOTE(review): a bare `DEBUG` macro is collision-prone across TUs; a prefixed
// name (e.g. AU_DEBUG) would be safer.
#define DEBUG false
// libcurl-based helpers for downloading files and performing simple GETs.
namespace Curl
{
// libcurl write callback (CURLOPT_WRITEFUNCTION signature); `stream` is the
// opaque destination — presumably a file stream pointer; confirm in the .cpp.
size_t write_data_file(char* ptr, size_t size, size_t nmemb, void* stream);
// Refreshes the download progress display (implementation in the .cpp).
void updateProgress();
// libcurl progress callback (CURLOPT_PROGRESSFUNCTION signature);
// totals/current amounts are byte counts reported by libcurl.
int progress_callback(void* clientp, double dltotal, double dlnow, double ultotal, double ulnow);
// Downloads `url` into the file `fileName`; returns the libcurl result code.
CURLcode curlDownload(std::string url, std::string fileName);
// Performs a GET on `url`, storing the response body in `output`;
// returns the libcurl result code.
CURLcode curlGet(std::string url, std::string& output);
}
#endif // NETUTILS_H
| 597
|
C++
|
.h
| 21
| 27
| 98
| 0.772727
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,324
|
archiveutils.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/archiveutils.h
|
#ifndef ARCHIVEUTILS_H
#define ARCHIVEUTILS_H
#include "filesystem.hpp"
#include <archive.h>
#include <archive_entry.h>
#ifdef __linux__
#include <cstring>
#endif
#include "debugutils.h"
// Thin wrappers around libarchive for extracting a named entry from an archive.
namespace Archive
{
// Copies all data blocks from reader `ar` to writer `aw`.
// Returns a libarchive status code — presumably ARCHIVE_OK on success;
// confirm in the .cpp.
int copy_data(struct archive* ar, struct archive* aw);
// Extracts the entry named `filename` from the archive at path `from`
// into destination `to`; returns true on success.
bool extract(const char* from, const char* filename, const char* to);
// fs::path/std::string convenience overload of extract().
bool extract_s(fs::path from, std::string filename, fs::path to);
}
#endif // ARCHIVEUTILS_H
| 434
|
C++
|
.h
| 16
| 25.5625
| 70
| 0.762136
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,325
|
debugutils.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/debugutils.h
|
#ifndef DEBUGUTILS_H
#define DEBUGUTILS_H
// Compile-time verbosity switches consumed by the logging implementation.
// NOTE(review): their exact effect is defined in the .cpp — confirm there.
#define VERBOSE_DBG
#define VERBOSE_ERR
#include <iostream>
// Logs a debug message.
void logd(std::string msg);
// Logs an error message.
void logerr(std::string err);
#endif // DEBUGUTILS_H
| 188
|
C++
|
.h
| 8
| 21.875
| 29
| 0.794286
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,326
|
servicestoputils.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/servicestoputils.h
|
#ifndef SERVICESTOPUTILS_H
#define SERVICESTOPUTILS_H
#include <string>
#include <stdarg.h>
#include <stdio.h>
#ifdef _WIN32
#include "Windows.h"
#else
typedef int SC_HANDLE; // dummy typename for non windows systems
#endif
// Helpers to stop/start Windows services and kill processes that would keep
// the VMware binaries locked while patching.
class ServiceStopper
{
public:
	// Exception carrying a printf-style formatted error message.
	class ServiceStopException : public std::exception
	{
	public:
		ServiceStopException(const char* msg, ...) {
			char formatted[512];
			va_list args;
			va_start(args, msg);
			// Bug fix: the original called sprintf(formatted, msg, args),
			// which passes the raw va_list as a single vararg — producing
			// garbage and risking a buffer overflow. vsnprintf consumes the
			// va_list correctly and bounds the write.
			vsnprintf(formatted, sizeof(formatted), msg, args);
			va_end(args);
			// snprintf bounds the copy into `message` (the old sprintf could
			// not overflow here given the sizes, but be explicit).
			snprintf(message, sizeof(message), "Error: %s", formatted);
		}
		const char* what() const noexcept { return message; }
	private:
		char message[1024];
	};
	// TODO: check if linux needs these functions and if so reimplement them
	// or alternatively wrap them in ifdef if it doesn't need them
	// Stops `serviceName`, throwing ServiceStopException on failure.
	static void StopService_s(std::string serviceName);
	// Stops all services that depend on schService; returns true on success.
	static bool StopDependantServices(SC_HANDLE schService, SC_HANDLE schSCManager);
	// Starts `serviceName`; returns true on success.
	static bool StartService_s(std::string serviceName);
	// Kills the process named `procName`; returns true on success.
	static bool KillProcess(std::string procName);
};
#endif // SERVICESTOPUTILS_H
| 1,022
|
C++
|
.h
| 35
| 26.885714
| 81
| 0.760204
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,327
|
installinfoutils.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/installinfoutils.h
|
#ifndef INSTALLINFOUTILS_H
#define INSTALLINFOUTILS_H
#include <string>
#include "config.h"
// Retrieves VMware installation details (install paths, product version) at
// construction time — presumably from the registry keys declared in config.h
// (HKEY_SUBKEY_VMWARE etc.); confirm in the .cpp.
class VMWareInfoRetriever
{
public:
// Performs the lookup and caches the results in the private members below.
VMWareInfoRetriever();
// Cached install path (32-bit binaries).
std::string getInstallPath();
// Cached install path for 64-bit binaries.
std::string getInstallPath64();
// Cached product version string.
std::string getProductVersion();
private:
std::string installPath, installPath64, prodVersion;
};
#endif // INSTALLINFOUTILS_H
| 350
|
C++
|
.h
| 15
| 21.666667
| 53
| 0.815152
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,328
|
config.h
|
muhac_vmware-macos-unlocker/auto-unlocker/include/config.h
|
#ifndef CONFIG_H
#define CONFIG_H
/*
All the URLS, file names and patterns needed by the app
Grouped here to make it easy to fix something if they change some url or file names
*/
// Program options
#define PROG_VERSION "v1.1"
// Install - Default option
#define INSTALL_OPTION "--install"
// Uninstall
#define UNINSTALL_OPTION "--uninstall"
// Show help message
#define HELP_OPTION "--help"
// Paths
#define TOOLS_DOWNLOAD_FOLDER "tools"
#define BACKUP_FOLDER "backup"
// Base URL to parse for the tools
#define FUSION_BASE_URL "http://softwareupdate.vmware.com/cds/vmw-desktop/fusion/"
// Relative URLs and file names to be appended to version/build numbers
#define FUSION_DEF_TOOLS_LOC "/packages/com.vmware.fusion.tools.darwin.zip.tar"
#define FUSION_DEF_TOOLS_NAME "com.vmware.fusion.tools.darwin.zip.tar"
#define FUSION_DEF_TOOLS_ZIP "com.vmware.fusion.tools.darwin.zip"
#define FUSION_DEF_PRE15_TOOLS_LOC "/packages/com.vmware.fusion.tools.darwinPre15.zip.tar"
#define FUSION_DEF_PRE15_TOOLS_NAME "com.vmware.fusion.tools.darwinPre15.zip.tar"
#define FUSION_DEF_PRE15_TOOLS_ZIP "com.vmware.fusion.tools.darwinPre15.zip"
#define FUSION_DEF_CORE_LOC "/core/com.vmware.fusion.zip.tar"
#define FUSION_DEF_CORE_NAME "com.vmware.fusion.zip.tar"
#define FUSION_DEF_CORE_NAME_ZIP "com.vmware.fusion.zip"
// Tools locations in the dedicated TARs
#define FUSION_TAR_TOOLS_ISO "payload/darwin.iso"
#define FUSION_TAR_PRE15_TOOLS_ISO "payload/darwinPre15.iso"
// Tools locations in the core TAR
#define FUSION_ZIP_TOOLS_ISO "payload/VMware Fusion.app/Contents/Library/isoimages/darwin.iso"
#define FUSION_ZIP_TOOLS_NAME "darwin.iso"
#define FUSION_ZIP_PRE15_TOOLS_ISO "payload/VMware Fusion.app/Contents/Library/isoimages/darwinPre15.iso"
#define FUSION_ZIP_PRE15_TOOLS_NAME "darwinPre15.iso"
// Pattern to parse the versions/builds
// (matches <li><a href="...">VERSION</a></li> entries in the CDS index page)
#define VERSION_REGEX_PATTERN "<li><a href=\"[^\"]+\">([^<]+)<\\/a><\\/li>"
// Windows registry configuration
#define HKEY_VMWARE HKEY_LOCAL_MACHINE
#define HKEY_SUBKEY_VMWARE "SOFTWARE\\Wow6432Node\\VMware, Inc.\\VMware Player"
#define HKEY_QUERY_VALUE_INSTALLPATH "InstallPath"
#define HKEY_QUERY_VALUE_INSTALLPATH64 "InstallPath64"
#define HKEY_QUERY_VALUE_PRODUCTVERSION "ProductVersion"
// Windows services to stop
#define VM_KILL_SERVICES {"vmware-view-usbd", "VMwareHostd", "VMAuthdService", "VMUSBArbService"}
#define VM_KILL_PROCESSES {"vmware-tray.exe"}
// Files to backup (win) { source, destination }
#define VM_WIN_BACKUP_FILES { \
{"x64\\vmware-vmx.exe", "x64"}, \
{"x64\\vmware-vmx-debug.exe", "x64"}, \
{"x64\\vmware-vmx-stats.exe", "x64"}, \
{"vmwarebase.dll", ""} \
}
// Files to backup (linux) { source, destination }
#define VM_LNX_BACKUP_FILES { \
"/usr/lib/vmware/bin/vmware-vmx", \
"/usr/lib/vmware/bin/vmware-vmx-debug", \
"/usr/lib/vmware/bin/vmware-vmx-stats", \
}
// Files to patch (have to keep the same order)
#define VM_WIN_PATCH_FILES { \
"vmware-vmx.exe", \
"vmware-vmx-debug.exe", \
"vmware-vmx-stats.exe", \
"vmwarebase.dll" \
}
// Linux paths and files
#define VM_LNX_PATH "/usr/lib/vmware/bin"
#define VM_LNX_BINS { \
"vmware-vmx", \
"vmware-vmx-debug", \
"vmware-vmx-stats", \
}
#define VM_LNX_LIB_CANDIDATES { \
"/usr/lib/vmware/lib/libvmwarebase.so/libvmwarebase.so", \
"/usr/lib/vmware/lib/libvmwarebase.so.0/libvmwarebase.so.0" \
}
#define VM_LNX_ISO_DESTPATH "/usr/lib/vmware/isoimages"
// Patch data
// Raw byte signatures searched for in the VMware binaries (paired with their
// explicit lengths because the strings contain embedded NULs).
#define SMC_HEADER_V0 "\xF2\x00\x00\x00\xF0\x00\x00\x00"
#define SMC_HEADER_V0_SZ 8
#define SMC_HEADER_V1 "\xB4\x01\x00\x00\xB0\x01\x00\x00"
#define SMC_HEADER_V1_SZ 8
#define KEY_KEY "\x59\x45\x4B\x23\x04\x32\x33\x69\x75"
#define KEY_KEY_SZ 9
#define ADR_KEY "\x72\x64\x41\x24\x04\x32\x33\x69\x75"
#define ADR_KEY_SZ 9
#define DARWIN_REGEX "\\x10\\x00\\x00\\x00[\\x10|\\x20]\\x00\\x00\\x00[\\x01|\\x02]" \
"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00" \
"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"
// The four PERM arrays below enumerate the same signature as DARWIN_REGEX with
// each alternative ([\x10|\x20] x [\x01|\x02]) spelled out as explicit bytes.
#define DARWIN_PATTERN_PERM_1 {'\x10', '\x00', '\x00', '\x00', '\x10', '\x00', '\x00', '\x00', '\x01', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00',\
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
}
#define DARWIN_PATTERN_PERM_2 {'\x10', '\x00', '\x00', '\x00', '\x20', '\x00', '\x00', '\x00', '\x01', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
}
#define DARWIN_PATTERN_PERM_3 {'\x10', '\x00', '\x00', '\x00', '\x10', '\x00', '\x00', '\x00', '\x02', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
}
#define DARWIN_PATTERN_PERM_4 {'\x10', '\x00', '\x00', '\x00', '\x20', '\x00', '\x00', '\x00', '\x02', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
'\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00', \
}
// Replacement SMC table payloads — the strings appear ROT13-obfuscated;
// TODO(review): confirm encoding/meaning against the patcher code.
#define SMC_NEW_DATA "bheuneqjbexolgurfrjbeqfthneqrqcy"
#define SMC_NEW_DATA2 "rnfrqbagfgrny(p)NccyrPbzchgreVap"
#define VMKCTL_FIND_STR "applesmc"
#define VMKCTL_REPLACE_STR "vmkernel"
#endif // CONFIG_H
| 5,406
|
C++
|
.h
| 113
| 45.769912
| 105
| 0.662932
|
muhac/vmware-macos-unlocker
| 35
| 17
| 0
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,330
|
slide_box_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/slide_box_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QHBoxLayout>
#include <cmath>
#include "slide_box_widget.h"
// Builds a labeled horizontal slider row: caption, slider, numeric readout.
// `min`/`max` define the value range mapped onto the slider's 0..1000000 steps.
SlideBoxWidget::SlideBoxWidget(const QString &text, float min, float max, QWidget *parent)
	: QWidget(parent)
{
	minValue = min;
	maxValue = max;

	QHBoxLayout *rowLayout = new QHBoxLayout(this);

	QLabel *captionLabel = new QLabel(text, this);
	captionLabel->setMinimumWidth(200);
	rowLayout->addWidget(captionLabel);

	slider = new QSlider(Qt::Horizontal, this);
	slider->setRange(0, 1000000);
	slider->setSingleStep(1);
	rowLayout->addWidget(slider);

	valueLabel = new QLabel("0", this);
	valueLabel->setMinimumWidth(100);
	rowLayout->addWidget(valueLabel);
	// Show the lower bound as the initial readout.
	valueLabel->setText(QString("%1").arg(minValue, 4, 'f', 2));

	connect(slider, &QSlider::valueChanged, this, &SlideBoxWidget::sliderValueChanged);
}
// Slider moved: map the raw 0..1000000 position back into [minValue, maxValue],
// round to two decimals, refresh the readout, and re-emit as valueChanged().
void SlideBoxWidget::sliderValueChanged(int value)
{
	double fraction = (double)value / 1000000.0;
	float newValue = round(100.0 * (fraction * (maxValue - minValue) + minValue)) / 100.0;
	valueLabel->setText(QString("%1").arg(newValue, 4, 'f', 2));
	emit valueChanged(newValue);
}
// Programmatically position the slider for `value` — the inverse of the
// mapping used in sliderValueChanged().
void SlideBoxWidget::setValue(float value)
{
	float fraction = (value - minValue) / (maxValue - minValue);
	slider->setValue(fraction * 1000000.0);
}
| 2,076
|
C++
|
.cpp
| 55
| 35.236364
| 90
| 0.713858
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,331
|
amp_nonlinear_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/amp_nonlinear_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QFrame>
#include <QVBoxLayout>
#include <QWidget>
#include <QSizePolicy>
#include <QSlider>
#include <cmath>
#include "amp_nonlinear_edit_widget.h"
#include "freq_response_widget.h"
// Builds the power-amp nonlinearity editor: a transfer-curve plot plus one
// slider row per amp parameter, each wired to its *Changed slot.
AmpNonlinearEditWidget::AmpNonlinearEditWidget(QWidget *parent, Processor *prc) :
	BlockEditWidget(parent)
{
	processor = prc;
	setFrameShape(QFrame::Panel);
	QVBoxLayout *vbox = new QVBoxLayout(this);

	QLabel *ampNonlinearLabel = new QLabel(tr("Power Amp Nonlinear Function"), this);
	// Bug fix: this label was passed to vbox->addWidget() twice (before and
	// after the font setup), re-inserting an already-managed widget. Add it
	// exactly once.
	vbox->addWidget(ampNonlinearLabel);
	ampNonlinearLabel->setMaximumHeight(30);
	ampNonlinearLabel->setAlignment(Qt::AlignHCenter);
	QFont ampNonlinearLabelFont = ampNonlinearLabel->font();
	ampNonlinearLabelFont.setPointSize(15);
	ampNonlinearLabel->setFont(ampNonlinearLabelFont);

	// Transfer-curve plot.
	nonlinear = new NonlinearWidget(this);
	vbox->addWidget(nonlinear);

	// One slider row per parameter; ranges match the profile fields they edit.
	biasSlide = new SlideBoxWidget(tr("Bias"), 0.0, 1.0, this);
	vbox->addWidget(biasSlide);
	connect(biasSlide, &SlideBoxWidget::valueChanged, this, &AmpNonlinearEditWidget::biasChanged);

	uporSlide = new SlideBoxWidget(tr("Saturation Level"), 0.0, 10.0, this);
	vbox->addWidget(uporSlide);
	connect(uporSlide, &SlideBoxWidget::valueChanged, this, &AmpNonlinearEditWidget::uporChanged);

	kregSlide = new SlideBoxWidget(tr("Saturation Hard/Soft"), 0.0, 10.0, this);
	vbox->addWidget(kregSlide);
	connect(kregSlide, &SlideBoxWidget::valueChanged, this, &AmpNonlinearEditWidget::kregChanged);

	levelSlide = new SlideBoxWidget(tr("Gain Level"), -20.0, 20.0, this);
	vbox->addWidget(levelSlide);
	connect(levelSlide, &SlideBoxWidget::valueChanged, this, &AmpNonlinearEditWidget::levelChanged);

	sagCoeffSlide = new SlideBoxWidget(tr("Voltage Sag Strength"), 0.0, 10.0, this);
	vbox->addWidget(sagCoeffSlide);
	connect(sagCoeffSlide, &SlideBoxWidget::valueChanged, this,
		&AmpNonlinearEditWidget::sagCoeffChanged);

	sagTimeSlide = new SlideBoxWidget(tr("Voltage Sag Time"), 0.0, 0.5, this);
	vbox->addWidget(sagTimeSlide);
	connect(sagTimeSlide, &SlideBoxWidget::valueChanged, this,
		&AmpNonlinearEditWidget::sagTimeChanged);

	masterOutputLevelSlide = new SlideBoxWidget(tr("MASTER OUTPUT LEVEL"), 0.0, 2.0, this);
	vbox->addWidget(masterOutputLevelSlide);
	connect(masterOutputLevelSlide, &SlideBoxWidget::valueChanged, this,
		&AmpNonlinearEditWidget::masterOutputLevelChanged);

	recalculate();
	resetControls();
}
// Recompute the plotted transfer curve from the current profile by sampling
// the symmetrized tube function, f(x) - f(-x), on 1000 points over [-3, 3].
void AmpNonlinearEditWidget::recalculate()
{
	st_profile profile = processor->getProfile();

	nonlinear->inValues.resize(1000);
	nonlinear->outValues.resize(1000);

	for (int k = 0; k < nonlinear->inValues.size(); k++)
	{
		float x = -3.0 + (float)k / (nonlinear->inValues.size() - 1) * 6.0;
		nonlinear->inValues[k] = x;
		nonlinear->outValues[k] = (processor->tube(x, profile.amp_Kreg,
			profile.amp_Upor, profile.amp_bias, 0.0)
			- processor->tube(-x, profile.amp_Kreg,
			profile.amp_Upor, profile.amp_bias, 0.0));
	}

	// Fixed axis extents for the plot.
	nonlinear->maxIn = 3.0;
	nonlinear->maxOut = 2.0;
	nonlinear->drawBackground();
	nonlinear->update(0, 0, width(), height());
}
void AmpNonlinearEditWidget::biasChanged(float value)
{
st_profile profile = processor->getProfile();
profile.amp_bias = value;
processor->setProfile(profile);
recalculate();
}
void AmpNonlinearEditWidget::uporChanged(float value)
{
st_profile profile = processor->getProfile();
profile.amp_Upor = value;
processor->setProfile(profile);
recalculate();
}
void AmpNonlinearEditWidget::kregChanged(float value)
{
st_profile profile = processor->getProfile();
profile.amp_Kreg = value;
processor->setProfile(profile);
recalculate();
}
void AmpNonlinearEditWidget::levelChanged(float value)
{
st_profile profile = processor->getProfile();
profile.amp_level = pow(10.0, value / 20.0);
processor->setProfile(profile);
recalculate();
}
void AmpNonlinearEditWidget::sagCoeffChanged(float value)
{
st_profile profile = processor->getProfile();
profile.sag_coeff = value;
processor->setProfile(profile);
recalculate();
}
void AmpNonlinearEditWidget::sagTimeChanged(float value)
{
st_profile profile = processor->getProfile();
profile.sag_time = value;
processor->setProfile(profile);
recalculate();
}
// Push the current profile values back into every slider row.
// amp_level is stored linear in the profile but the slider works in dB,
// hence the 20*log10 conversion (inverse of levelChanged()).
void AmpNonlinearEditWidget::resetControls()
{
st_profile profile = processor->getProfile();
biasSlide->setValue(profile.amp_bias);
uporSlide->setValue(profile.amp_Upor);
sagTimeSlide->setValue(profile.sag_time);
sagCoeffSlide->setValue(profile.sag_coeff);
levelSlide->setValue(20.0 * log10(profile.amp_level));
kregSlide->setValue(profile.amp_Kreg);
masterOutputLevelSlide->setValue(profile.output_level);
}
// Refresh the UI from the processor state; currently identical to
// resetControls().
void AmpNonlinearEditWidget::updateControls()
{
resetControls();
}
void AmpNonlinearEditWidget::masterOutputLevelChanged(float value)
{
st_profile profile = processor->getProfile();
profile.output_level = value;
processor->setProfile(profile);
}
| 5,862
|
C++
|
.cpp
| 151
| 35.94702
| 98
| 0.753838
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,332
|
preamp_filter_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/preamp_filter_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QFrame>
#include <QVBoxLayout>
#include <QWidget>
#include <QSizePolicy>
#include <QLabel>
#include <QFont>
#include <QFileDialog>
#include <cmath>
#include "preamp_filter_edit_widget.h"
#include "nonlinear_widget.h"
// Builds the preamp correction (equalizer) editor: a button bar, the EQ
// widget, a background resampling thread, and the initial EQ state synced
// with the processor.
PreampFilterEditWidget::PreampFilterEditWidget(QWidget *parent, Processor *prc) :
BlockEditWidget(parent)
{
processor = prc;
setFrameShape(QFrame::Panel);
QVBoxLayout *vbox = new QVBoxLayout(this);
QLabel *preampFilterEqualizerLabel = new QLabel(tr("Equalizer"), this);
vbox->addWidget(preampFilterEqualizerLabel);
preampFilterEqualizerLabel->setMaximumHeight(30);
preampFilterEqualizerLabel->setAlignment(Qt::AlignHCenter);
QFont preampFilterEqualizerLabelFont = preampFilterEqualizerLabel->font();
preampFilterEqualizerLabelFont.setPointSize(15);
preampFilterEqualizerLabel->setFont(preampFilterEqualizerLabelFont);
// Row of action buttons above the equalizer.
QWidget *equalizerButtonsBar = new QWidget(this);
vbox->addWidget(equalizerButtonsBar);
equalizerButtonsBar->setSizePolicy(QSizePolicy::Fixed, QSizePolicy::Fixed);
QHBoxLayout *equalizerButtonsHBox = new QHBoxLayout(equalizerButtonsBar);
loadButton = new QPushButton(tr("Load from file"), equalizerButtonsBar);
equalizerButtonsHBox->addWidget(loadButton);
connect(loadButton, &QPushButton::clicked, this,
&PreampFilterEditWidget::loadButtonClicked);
saveButton = new QPushButton(tr("Save to file"), equalizerButtonsBar);
equalizerButtonsHBox->addWidget(saveButton);
connect(saveButton, &QPushButton::clicked, this,
&PreampFilterEditWidget::saveButtonClicked);
resetButton = new QPushButton(tr("Reset"), equalizerButtonsBar);
equalizerButtonsHBox->addWidget(resetButton);
connect(resetButton, &QPushButton::clicked, this,
&PreampFilterEditWidget::resetButtonClicked);
applyButton = new QPushButton(tr("Apply"), equalizerButtonsBar);
equalizerButtonsHBox->addWidget(applyButton);
connect(applyButton, &QPushButton::clicked, this,
&PreampFilterEditWidget::applyButtonClicked);
equalizerButtonsHBox->addSpacing(40);
// The enable/disable toggle label reflects the processor's current state.
disableButton = new QPushButton(equalizerButtonsBar);
equalizerButtonsHBox->addWidget(disableButton);
if (processor->isPreampCorrectionEnabled())
{
disableStatus = STAT_ENABLED;
disableButton->setText(tr("Disable"));
}
else
{
disableStatus = STAT_DISABLED;
disableButton->setText(tr("Enable"));
}
connect(disableButton, &QPushButton::clicked, this,
&PreampFilterEditWidget::disableButtonClicked);
equalizer = new EqualizerWidget(this);
vbox->addWidget(equalizer);
connect(equalizer, &EqualizerWidget::responseChanged, this,
&PreampFilterEditWidget::responseChangedSlot);
// Worker thread that resamples loaded impulse files off the UI thread.
fileResamplingThread = new FileResamplingThread();
connect(fileResamplingThread, &QThread::finished, this,
&PreampFilterEditWidget::fileResamplingThreadFinished);
// Sync EQ curves: seed the processor from the widget defaults on first use,
// otherwise restore the widget from the processor's stored curves.
if (processor->preampCorrectionEqualizerFLogValues.size() == 0)
{
processor->preampCorrectionEqualizerFLogValues = equalizer->fLogValuesEq;
}
else
{
equalizer->fLogValuesEq = processor->preampCorrectionEqualizerFLogValues;
}
if (processor->preampCorrectionEqualizerDbValues.size() == 0)
{
processor->preampCorrectionEqualizerDbValues = equalizer->dbValuesEq;
}
else
{
equalizer->dbValuesEq = processor->preampCorrectionEqualizerDbValues;
}
msg = new MessageWidget(this);
recalculate();
}
void PreampFilterEditWidget::recalculate()
{
equalizer->fLogValuesEq = processor->preampCorrectionEqualizerFLogValues;
equalizer->dbValuesEq = processor->preampCorrectionEqualizerDbValues;
QVector<float> freqs(100);
for (int i = 0; i < freqs.size(); i++)
{
float fLog = (log10(20000.0) - log10(10.0))*(float)(i) / 99.0 + log10(10.0);
freqs[i] = pow(10, fLog);
}
QVector<float> frequencyResponse = processor->getPreampFrequencyResponse(freqs);
equalizer->fLogValuesFr.resize(frequencyResponse.size());
equalizer->dbValuesFr.resize(frequencyResponse.size());
for (int i = 0; i < frequencyResponse.size(); i++)
{
equalizer->fLogValuesFr[i] = log10(freqs[i]);
equalizer->dbValuesFr[i] = 20.0 * log10(frequencyResponse[i]);
}
equalizer->drawBackground();
equalizer->update(0,0,width(),height());
}
// EQ curve edited by the user: persist it on the processor, convert the
// (log10 frequency, dB) points into (angular frequency, linear amplitude),
// and rebuild the correction impulse. Editing re-enables the correction.
void PreampFilterEditWidget::responseChangedSlot()
{
	processor->preampCorrectionEqualizerFLogValues = equalizer->fLogValuesEq;
	processor->preampCorrectionEqualizerDbValues = equalizer->dbValuesEq;

	const int pointCount = equalizer->fLogValuesEq.size();
	QVector<double> w(pointCount);
	QVector<double> A(pointCount);
	for (int k = 0; k < w.size(); k++)
	{
		w[k] = 2.0 * M_PI * pow(10.0, equalizer->fLogValuesEq[k]);
		A[k] = pow(10.0, equalizer->dbValuesEq[k] / 20.0);
	}

	disableStatus = STAT_ENABLED;
	disableButton->setText(tr("Disable"));
	processor->setPreampCorrectionImpulseFromFrequencyResponse(w, A);
}
// "Apply" clicked: bake the current correction into the preamp on the
// processor, redraw, then reset the correction EQ back to flat.
void PreampFilterEditWidget::applyButtonClicked()
{
processor->applyPreampCorrection();
recalculate();
resetButtonClicked();
}
// "Reset" clicked: clear the correction on the processor, flatten the EQ
// widget, persist the flattened curves back to the processor, and repaint.
void PreampFilterEditWidget::resetButtonClicked()
{
processor->resetPreampCorrection();
equalizer->resetEq();
processor->preampCorrectionEqualizerFLogValues = equalizer->fLogValuesEq;
processor->preampCorrectionEqualizerDbValues = equalizer->dbValuesEq;
equalizer->drawBackground();
equalizer->update(0,0,width(),height());
}
// BlockEditWidget interface: resetting the controls is the same operation as
// pressing the "Reset" button.
void PreampFilterEditWidget::resetControls()
{
resetButtonClicked();
}
// Save the current preamp impulse response as a 16-bit mono WAV file via a
// save-file dialog. Does nothing if the user cancels.
void PreampFilterEditWidget::saveButtonClicked()
{
	QString saveFileName =
		QFileDialog::getSaveFileName(this,
			tr("Save impulse response file"),
			QString(),
			tr("WAV files (*.wav)"));

	if (!saveFileName.isEmpty())
	{
		QVector<float> preamp_impulse = processor->getPreampImpulse();

		SF_INFO sfinfo;
		sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
		sfinfo.frames = preamp_impulse.size();
		sfinfo.samplerate = processor->getSamplingRate();
		sfinfo.channels = 1;
		sfinfo.sections = 1;
		sfinfo.seekable = 1;

		SNDFILE *impulseFile = sf_open(saveFileName.toUtf8().constData(), SFM_WRITE, &sfinfo);
		if (impulseFile != NULL)
		{
			sf_writef_float(impulseFile, preamp_impulse.data(), preamp_impulse.size());
			// Bug fix: sf_close() was previously called unconditionally,
			// passing NULL when sf_open() failed — it now runs only on a
			// successfully opened handle.
			sf_close(impulseFile);
		}
	}
}
void PreampFilterEditWidget::loadButtonClicked()
{
QString filename = QFileDialog::getOpenFileName(this,
tr("Open impulse response file"),
QString(),
tr("Sound files (*.wav *.ogg *.flac)"));
if (!filename.isEmpty())
{
msg->setTitle(tr("Please wait!"));
msg->setMessage(tr("Resampling impulse response file..."));
msg->open();
fileResamplingThread->filename = filename;
fileResamplingThread->samplingRate = processor->getSamplingRate();
fileResamplingThread->start();
}
}
// Resampling worker finished: normalize the left-channel data and hand it to
// the processor as the new preamp impulse, then redraw.
void PreampFilterEditWidget::fileResamplingThreadFinished()
{
	msg->setProgressValue(100);
	msg->close();

	// Find the peak absolute sample value.
	float max_val = 0.0;
	for (int i = 0; i < fileResamplingThread->dataL.size(); i++)
	{
		float mag = fabs(fileResamplingThread->dataL[i]);
		if (mag > max_val)
		{
			max_val = mag;
		}
	}

	// Bug fix: guard against a zero peak (empty or all-silent file), which
	// previously caused a division by zero (inf/NaN samples) below.
	if (max_val <= 0.0)
	{
		return;
	}

	// Scale so the peak lands at 0.4, compensated for the sampling rate
	// relative to 48 kHz.
	max_val /= 0.4 * (48000.0 / (float)processor->getSamplingRate());

	for (int i = 0; i < fileResamplingThread->dataL.size(); i++)
	{
		fileResamplingThread->dataL[i] /= max_val;
	}

	processor->setPreampImpulse(fileResamplingThread->dataL);
	recalculate();
}
// Toggle the preamp correction on/off, keeping the button label in sync with
// the new state.
void PreampFilterEditWidget::disableButtonClicked()
{
	if (disableStatus == STAT_DISABLED)
	{
		disableStatus = STAT_ENABLED;
		disableButton->setText(tr("Disable"));
		processor->setPreampCorrectionStatus(true);
	}
	else if (disableStatus == STAT_ENABLED)
	{
		disableStatus = STAT_DISABLED;
		disableButton->setText(tr("Enable"));
		processor->setPreampCorrectionStatus(false);
	}
}
| 8,809
|
C++
|
.cpp
| 238
| 32.697479
| 90
| 0.728491
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,333
|
profiler_dialog.cpp
|
olegkapitonov_tubeAmp-Designer/src/profiler_dialog.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QGridLayout>
#include <QVBoxLayout>
#include <QLabel>
#include <QFileDialog>
#include "profiler_dialog.h"
// Builds the profiler dialog: test-signal chooser, response-file picker,
// preset radio group, Analyze/Cancel buttons, and the background profiler
// thread plus progress message.
ProfilerDialog::ProfilerDialog(Processor *prc, Player *plr, PlayerPanel *pnl, QWidget *parent) : QDialog(parent)
{
processor = prc;
player = plr;
playerPanel = pnl;
setMinimumWidth(600);
setWindowTitle(tr("Profiler"));
QGridLayout *lay = new QGridLayout(this);
// Row 0: test signal selection and WAV export.
QLabel *testSignalLabel = new QLabel(tr("Test signal"), this);
lay->addWidget(testSignalLabel, 0, 0, 1, 1);
testSignalComboBox = new QComboBox(this);
testSignalComboBox->addItem(tr("Test Signal v1.0"));
lay->addWidget(testSignalComboBox, 0, 1, 1, 1);
createTestSignalWavButton = new QPushButton(tr("Create *.wav"), this);
lay->addWidget(createTestSignalWavButton, 0, 2, 1, 1);
connect(createTestSignalWavButton, &QPushButton::clicked, this,
&ProfilerDialog::createTestSignalWavButtonClick);
// Row 1: recorded response file selection.
QLabel *responseFileLabel = new QLabel(tr("Response File"), this);
lay->addWidget(responseFileLabel, 1, 0, 1, 1);
responseFileEdit = new QLineEdit(this);
lay->addWidget(responseFileEdit, 1, 1, 1, 1);
responseFileOpenButton = new QPushButton(tr("Open"), this);
lay->addWidget(responseFileOpenButton, 1, 2, 1, 1);
connect(responseFileOpenButton, &QPushButton::clicked, this,
&ProfilerDialog::responseFileOpenButtonClick);
// Row 2: preset radio group; "Classic" is the default.
presetGroupBox = new QGroupBox(tr("Preset"), this);
lay->addWidget(presetGroupBox, 2, 0, 1, 3);
QVBoxLayout *presetLay = new QVBoxLayout(presetGroupBox);
crystalcleanPresetRadioButton = new QRadioButton(tr("Crystal Clean"),
presetGroupBox);
presetLay->addWidget(crystalcleanPresetRadioButton);
classicPresetRadioButton = new QRadioButton(tr("Classic (without master gain)"),
presetGroupBox);
presetLay->addWidget(classicPresetRadioButton);
mastergainPresetRadioButton = new QRadioButton(tr("Master gain"), presetGroupBox);
presetLay->addWidget(mastergainPresetRadioButton);
classicPresetRadioButton->setChecked(true);
// Row 3: Analyze (disabled until a response file is chosen) and Cancel.
analyzeButton = new QPushButton(tr("Analyze"), this);
analyzeButton->setEnabled(false);
lay->addWidget(analyzeButton, 3, 1, 1, 1);
connect(analyzeButton, &QPushButton::clicked, this, &ProfilerDialog::analyzeButtonClick);
cancelButton = new QPushButton(tr("Cancel"), this);
lay->addWidget(cancelButton, 3, 2, 1, 1);
connect(cancelButton, &QPushButton::clicked, this, &ProfilerDialog::cancelButtonClick);
// Worker thread that runs the analysis off the UI thread.
profilerThread = new ProfilerThread();
connect(profilerThread, &QThread::finished, this,
&ProfilerDialog::profilerThreadFinished);
msg = new MessageWidget(this);
}
// "Analyze" clicked: create a Profiler for the selected response file, pick
// the preset from the radio group, and run the analysis on the worker thread
// behind a modal progress message.
// The Profiler allocated here is deleted in profilerThreadFinished().
void ProfilerDialog::analyzeButtonClick()
{
if (!responseFileEdit->text().isEmpty())
{
profiler = new Profiler(processor, player);
connect(profiler, &Profiler::progressChanged, this,
&ProfilerDialog::profilerProgressChanged);
connect(profiler, &Profiler::stopPlaybackNeeded, playerPanel, &PlayerPanel::stopPlayback);
profiler->loadResponseFile(responseFileEdit->text());
profilerThread->profiler = profiler;
// The radio buttons share one group box, so presumably at most one is
// checked and exactly one preset applies — confirm against Qt behavior.
if (crystalcleanPresetRadioButton->isChecked())
{
profilerThread->presetType = CRYSTALCLEAN_PRESET;
}
if (classicPresetRadioButton->isChecked())
{
profilerThread->presetType = CLASSIC_PRESET;
}
if (mastergainPresetRadioButton->isChecked())
{
profilerThread->presetType = MASTERGAIN_PRESET;
}
msg->setMessage(tr("Analyzing..."));
msg->setTitle(tr("Please Wait!"));
msg->setProgressValue(0);
msg->open();
profilerThread->start();
}
}
// "Open" clicked: let the user pick the recorded response file; on success,
// show it in the line edit and enable the Analyze button.
void ProfilerDialog::responseFileOpenButtonClick()
{
	responseFileName = QFileDialog::getOpenFileName(this,
		tr("Open Response File"),
		QString(),
		tr("Sound File (*.wav)"));

	// Cleanup: the same emptiness condition was previously tested in two
	// consecutive if statements; merged into one.
	if (!responseFileName.isEmpty())
	{
		responseFileEdit->setText(responseFileName);
		analyzeButton->setEnabled(true);
	}
}
// "Cancel" clicked: close the dialog with a rejected result.
void ProfilerDialog::cancelButtonClick()
{
reject();
}
void ProfilerDialog::createTestSignalWavButtonClick()
{
QString testFileName = QFileDialog::getSaveFileName(this,
tr("Create Test File"),
QString(),
tr("Sound File (*.wav)"));
if (!(testFileName.isEmpty()))
{
Profiler profiler(processor, player);
profiler.createTestFile(testFileName, testSignalComboBox->currentIndex());
}
}
// Worker thread done: complete the progress UI, free the per-run
// profiler object and close the dialog with "accepted".
void ProfilerDialog::profilerThreadFinished()
{
  msg->setProgressValue(100);
  msg->close();
  delete profiler;
  accept();
}
// Forward the profiler's progress (0-100) to the wait dialog's bar.
void ProfilerDialog::profilerProgressChanged(int progress) { msg->setProgressValue(progress); }
| 5,764
|
C++
|
.cpp
| 143
| 34.426573
| 112
| 0.693453
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,334
|
player_panel.cpp
|
olegkapitonov_tubeAmp-Designer/src/player_panel.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QIcon>
#include <QHBoxLayout>
#include <QSettings>
#include "player_panel.h"
// Transport/control strip for the audio player: load, stop, play/pause,
// monitor, DI/Reference toggle, RMS matching and an input-level slider.
// Widgets are added left-to-right in creation order.
PlayerPanel::PlayerPanel(QWidget *parent, Player *plr, Processor *prc) :
  QFrame(parent)
{
  player = plr;
  connect(player, &Player::dataChanged, this, &PlayerPanel::playerDataChanged);
  connect(player, &Player::equalRMSFinished, this, &PlayerPanel::playerEqualRMSFinished);
  processor = prc;
  setFrameShape(QFrame::Panel);
  // Start on the DI track, paused.
  playReferenceTrack = false;
  playPaused = true;
  QHBoxLayout *hbox = new QHBoxLayout(this);
  buttonLoad = new QPushButton(this);
  buttonLoad->setToolTip(tr("Load DI and Reference files"));
  hbox->addWidget(buttonLoad);
  buttonLoad->setIcon(QIcon(":/icons/load.png"));
  buttonLoad->setIconSize(QSize(24, 24));
  connect(buttonLoad, &QPushButton::clicked, this, &PlayerPanel::loadButtonClicked);
  buttonStop = new QPushButton(this);
  buttonStop->setToolTip(tr("Stop playback"));
  hbox->addWidget(buttonStop);
  buttonStop->setIcon(QIcon(":/icons/stop.png"));
  buttonStop->setIconSize(QSize(24, 24));
  connect(buttonStop, &QPushButton::clicked, this, &PlayerPanel::buttonStopClicked);
  buttonPlay = new QPushButton(this);
  buttonPlay->setToolTip(tr("Start playback"));
  hbox->addWidget(buttonPlay);
  buttonPlay->setIcon(QIcon(":/icons/play.png"));
  buttonPlay->setIconSize(QSize(24, 24));
  connect(buttonPlay, &QPushButton::clicked, this, &PlayerPanel::buttonPlayClicked);
  // Monitor is a checkable toggle, unlike the other transport buttons.
  buttonMonitor = new QPushButton(this);
  buttonMonitor->setToolTip(tr("Enable Monitor Mode"));
  hbox->addWidget(buttonMonitor);
  buttonMonitor->setIcon(QIcon(":/icons/monitor.png"));
  buttonMonitor->setIconSize(QSize(24, 24));
  buttonMonitor->setCheckable(true);
  connect(buttonMonitor, &QPushButton::clicked, this, &PlayerPanel::buttonMonitorClicked);
  // Label shows the currently selected track ("DI" or "Ref.").
  diButton = new QPushButton(tr("DI"), this);
  hbox->addWidget(diButton);
  connect(diButton, &QPushButton::clicked, this, &PlayerPanel::diButtonClicked);
  equalRMSButton = new QPushButton(this);
  equalRMSButton->setToolTip(tr("Adjust Reference file volume "
    "to match amplifier output volume"));
  equalRMSButton->setIcon(QIcon(":/icons/equalRMS.png"));
  equalRMSButton->setIconSize(QSize(24, 24));
  hbox->addWidget(equalRMSButton);
  connect(equalRMSButton, &QPushButton::clicked, this,
    &PlayerPanel::equalRMSButtonClicked);
  // Slider range 0..100 maps to a player input level of -50..50
  // (see sliderValueChanged).
  inputLevelSlider = new QSlider(Qt::Horizontal, this);
  inputLevelSlider->setMinimumWidth(200);
  inputLevelSlider->setRange(0, 100);
  inputLevelSlider->setSingleStep(1);
  inputLevelSlider->setValue(50);
  inputLevelSlider->setToolTip(tr("Input Level"));
  hbox->addWidget(inputLevelSlider);
  connect(inputLevelSlider, &QSlider::valueChanged,
    this, &PlayerPanel::sliderValueChanged);
  // Restore the persisted input level (stored as -50..50; see main()).
  QSettings settings;
  if (settings.contains("playerPanel/inputLevel"))
  {
    int value = settings.value("playerPanel/inputLevel").toInt();
    player->setInputLevel(value);
    inputLevelSlider->setValue(value + 50);
  }
  loadDialog = new LoadDialog(this);
  connect(loadDialog, &QDialog::finished, this, &PlayerPanel::loadDialogFinished);
  // Two worker threads resample the DI and Reference files in parallel.
  diFileResamplingThread = new FileResamplingThread();
  refFileResamplingThread = new FileResamplingThread();
  diFileResamplingThreadWorking = false;
  refFileResamplingThreadWorking = false;
  connect(diFileResamplingThread, &QThread::finished, this,
    &PlayerPanel::diFileResamplingThreadFinished);
  connect(refFileResamplingThread, &QThread::finished, this,
    &PlayerPanel::refFileResamplingThreadFinished);
  // Transport stays disabled until DI data is loaded (playerDataChanged).
  buttonPlay->setEnabled(false);
  buttonStop->setEnabled(false);
  diButton->setEnabled(false);
  msg = new MessageWidget(this);
}
// Toggles the selected track between DI and Reference. The button label
// mirrors the selection; if playback is running on the other track it is
// switched over live.
void PlayerPanel::diButtonClicked()
{
  playReferenceTrack = !playReferenceTrack;
  diButton->setText(playReferenceTrack ? tr("Ref.") : tr("DI"));
  if (playReferenceTrack)
  {
    if (player->status == Player::PlayerStatus::PS_PLAY_DI)
    {
      player->setStatus(Player::PlayerStatus::PS_PLAY_REF);
    }
  }
  else
  {
    if (player->status == Player::PlayerStatus::PS_PLAY_REF)
    {
      player->setStatus(Player::PlayerStatus::PS_PLAY_DI);
    }
  }
}
// Stop playback before opening the DI/Reference file chooser.
void PlayerPanel::loadButtonClicked()
{
  playPaused = true;
  player->setStatus(Player::PlayerStatus::PS_STOP);
  loadDialog->open();
}
// Called when the load dialog closes. On acceptance, kicks off both
// resampling worker threads (DI mono, Reference stereo) and shows a
// modal progress dialog until both report back.
void PlayerPanel::loadDialogFinished(int result)
{
  if (result == QDialog::Accepted)
  {
    QString diFilename = loadDialog->getDiFileName();
    QString refFilename = loadDialog->getRefFileName();
    msg->setTitle(tr("Please wait!"));
    msg->setMessage(tr("Resampling files..."));
    msg->setProgressValue(0);
    msg->open();
    // Both workers resample to the processor's sampling rate.
    diFileResamplingThread->filename = diFilename;
    refFileResamplingThread->filename = refFilename;
    diFileResamplingThread->samplingRate = processor->getSamplingRate();
    refFileResamplingThread->samplingRate = processor->getSamplingRate();
    // Flags are cleared by the respective ...ThreadFinished() slots;
    // the progress dialog closes once both are false.
    diFileResamplingThreadWorking = true;
    refFileResamplingThreadWorking = true;
    refFileResamplingThread->stereoMode = true;
    diFileResamplingThread->start();
    refFileResamplingThread->start();
  }
}
// Play/pause toggle. When playing, a click pauses; when paused, a click
// resumes on whichever track (DI or Reference) is currently selected.
// NOTE(review): the icon assignment looks inverted — the button shows
// the pause icon while PAUSED and the play icon while PLAYING. Usually
// the icon advertises the *next* action. Confirm intent before changing.
void PlayerPanel::buttonPlayClicked()
{
  if (!playPaused)
  {
    buttonPlay->setIcon(QIcon(":/icons/pause.png"));
    player->setStatus(Player::PlayerStatus::PS_PAUSE);
    playPaused = true;
  }
  else
  {
    buttonPlay->setIcon(QIcon(":/icons/play.png"));
    if (playReferenceTrack)
    {
      player->setStatus(Player::PlayerStatus::PS_PLAY_REF);
    }
    else
    {
      player->setStatus(Player::PlayerStatus::PS_PLAY_DI);
    }
    playPaused = false;
  }
}
// Halt playback and restore the "play" icon / paused state.
void PlayerPanel::buttonStopClicked()
{
  playPaused = true;
  buttonPlay->setIcon(QIcon(":/icons/play.png"));
  player->setStatus(Player::PlayerStatus::PS_STOP);
}
// Toggle Monitor Mode. While monitoring, file-playback controls are
// disabled; leaving monitor mode re-enables them only if DI data exists.
void PlayerPanel::buttonMonitorClicked()
{
  const bool enteringMonitor =
      (player->status != Player::PlayerStatus::PS_MONITOR);
  if (enteringMonitor)
  {
    player->setStatus(Player::PlayerStatus::PS_MONITOR);
    buttonPlay->setEnabled(false);
    buttonStop->setEnabled(false);
    diButton->setEnabled(false);
  }
  else
  {
    player->setStatus(Player::PlayerStatus::PS_STOP);
    if (player->diData.size() != 0)
    {
      buttonPlay->setEnabled(true);
      buttonStop->setEnabled(true);
      diButton->setEnabled(true);
    }
  }
}
// DI resampling worker finished: hand the samples to the player and,
// if the Reference worker is also done, dismiss the progress dialog.
void PlayerPanel::diFileResamplingThreadFinished()
{
  player->setDiData(diFileResamplingThread->dataL);
  diFileResamplingThreadWorking = false;
  if (refFileResamplingThreadWorking)
  {
    // Reference file still resampling; just bump the progress bar.
    msg->setProgressValue(10);
  }
  else
  {
    resamplingFinished();
  }
}
// Reference resampling worker finished: hand the stereo samples to the
// player and, if the DI worker is also done, dismiss the progress dialog.
void PlayerPanel::refFileResamplingThreadFinished()
{
  player->setRefData(refFileResamplingThread->dataL, refFileResamplingThread->dataR);
  refFileResamplingThreadWorking = false;
  if (diFileResamplingThreadWorking)
  {
    // DI file still resampling; just bump the progress bar.
    msg->setProgressValue(50);
  }
  else
  {
    resamplingFinished();
  }
}
// Both resampling workers are done: complete and hide the wait dialog.
void PlayerPanel::resamplingFinished()
{
  msg->setProgressValue(100);
  msg->close();
}
// Enable the transport controls once DI audio data becomes available.
void PlayerPanel::playerDataChanged()
{
  if (player->diData.size() == 0)
    return;
  buttonPlay->setEnabled(true);
  buttonStop->setEnabled(true);
  diButton->setEnabled(true);
}
// Kick off RMS matching of the Reference track; if the player actually
// started a background thread, show a wait dialog until it reports back.
void PlayerPanel::equalRMSButtonClicked()
{
  player->equalDataRMS();
  if (!player->isEqualDataRMSThreadRunning)
    return;
  msg->setTitle(tr("Please wait!"));
  msg->setMessage(tr("Adjusting Reference sound volume..."));
  msg->setProgressValue(0);
  msg->open();
}
// Slider runs 0..100; the player expects a signed level in -50..50.
void PlayerPanel::sliderValueChanged(int value)
{
  const int signedLevel = value - 50;
  player->setInputLevel(signedLevel);
}
// Raw slider position (0..100); persisted to QSettings by main() on exit.
int PlayerPanel::getInputLevelSliderValue() { return inputLevelSlider->value(); }
// RMS matching finished in the player — hide the wait dialog.
void PlayerPanel::playerEqualRMSFinished() { msg->close(); }
// External stop request (e.g. from the profiler) — same as pressing Stop.
void PlayerPanel::stopPlayback() { buttonStopClicked(); }
| 8,544
|
C++
|
.cpp
| 263
| 29.121673
| 90
| 0.738147
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,335
|
message_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/message_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QVBoxLayout>
#include "message_widget.h"
// Minimal modal progress dialog: a centered message above a progress bar.
MessageWidget::MessageWidget(QWidget *parent) : QDialog(parent)
{
  setMinimumWidth(400);
  setMinimumHeight(80);
  QVBoxLayout *layout = new QVBoxLayout(this);
  messageLabel = new QLabel(this);
  messageLabel->setAlignment(Qt::AlignCenter);
  layout->addWidget(messageLabel);
  progressBar = new QProgressBar(this);
  progressBar->setValue(0);
  layout->addWidget(progressBar);
}
// The dialog's window title doubles as the "what is happening" header.
void MessageWidget::setTitle(QString title) { setWindowTitle(title); }
// Update the centered status text.
void MessageWidget::setMessage(QString message) { messageLabel->setText(message); }
// Update the progress bar (expected range 0..100).
void MessageWidget::setProgressValue(int value) { progressBar->setValue(value); }
| 1,522
|
C++
|
.cpp
| 44
| 32.477273
| 80
| 0.734694
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,336
|
freq_response_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/freq_response_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QPainter>
#include <QPaintEvent>
#include <cmath>
#include <gsl/gsl_spline.h>
#include "freq_response_widget.h"
// Log-frequency magnitude-response plot (10 Hz - 20 kHz) drawn on a
// cached backbuffer pixmap. Starts with a flat 0 dB curve through three
// control points.
FreqResponseWidget::FreqResponseWidget(QWidget *parent) :
  QWidget(parent)
{
  QPalette Pal(palette());
  Pal.setColor(QPalette::Background, Qt::black);
  setAutoFillBackground(true);
  setPalette(Pal);
  setMaximumHeight(400);
  setMinimumHeight(400);
  // Placeholder; resizeEvent() recreates it at the real widget size.
  backbuffer = new QPixmap(100, 100);
  // Needed so mouseMoveEvent() fires without a button pressed
  // (for the f/dB readout under the cursor).
  setMouseTracking(true);
  // Initial spline control points: log10(f) on x, dB on y.
  fLogValues.resize(3);
  dbValues.resize(3);
  fLogValues[0] = log10(10.0);
  fLogValues[1] = log10(1000.0);
  fLogValues[2] = log10(20000.0);
  dbValues[0] = 0.0;
  dbValues[1] = 0.0;
  dbValues[2] = 0.0;
  maxDb = 0;
  // Plot margins in pixels (axis labels live inside them).
  margin.setLeft(50);
  margin.setRight(20);
  margin.setTop(40);
  margin.setBottom(50);
  // Vertical grid: one minor line per stepHeight px; dbInStep dB per
  // 10 minor steps.
  stepHeight = 10;
  dbInStep = 20;
}
// Repaints the widget: blits the cached grid/axes backbuffer for the
// dirty region, then draws the spline-interpolated response curve and
// the cursor readout text on top.
void FreqResponseWidget::paintEvent(QPaintEvent * event)
{
  QPainter qp(this);
  // Restore only the damaged rectangles from the backbuffer.
  for (QRegion::const_iterator rects = event->region().begin();
    rects!= event->region().end(); rects++)
  {
    QRect r = *rects;
    qp.drawPixmap(r,*backbuffer,r);
  }
  // Cubic-spline interpolation through (log10 f, dB) control points.
  gsl_interp_accel *acc = gsl_interp_accel_alloc ();
  gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, fLogValues.count());
  gsl_spline_init (spline, fLogValues.data(), dbValues.data(), fLogValues.count());
  // One polyline point per horizontal pixel of the plot area.
  QPolygon polyline1(width() - margin.left() - margin.right());
  for (int i=0; i < width() - margin.left() - margin.right(); i++)
  {
    // Invert the x-axis mapping: pixel -> frequency (10^(1 + i/stepWidth)).
    double interpolatedFreq = pow(10, 1 + (double)(i) / (double)stepWidth);
    if (interpolatedFreq > 20000.0) interpolatedFreq = 20000.0;
    double interpolatedDb = gsl_spline_eval(spline, log10(interpolatedFreq), acc);
    // Map dB -> y pixel; maxDb sits at the top gridline.
    int y = margin.top() - interpolatedDb * 10.0 / dbInStep * stepHeight +
      maxDb * stepHeight * 10.0 / dbInStep;
    // Clamp the curve inside the plot rectangle.
    if (y > (height() - margin.bottom()))
    {
      y = height() - margin.bottom();
    }
    if (y < (margin.top()))
    {
      y = margin.top();
    }
    polyline1.setPoint(i, i + margin.left(), y);
  }
  gsl_spline_free(spline);
  gsl_interp_accel_free(acc);
  // Green curve, white cursor readout.
  qp.setPen(QPen(QColor(0,255,0)));
  qp.drawPolyline(polyline1);
  qp.setPen(QPen(QColor(255,255,255)));
  infoFont = QFont("Sans",12);
  qp.setFont(infoFont);
  qp.drawText(infop,text);
}
// Renders the static plot chrome (titles, axis labels, log-frequency
// grid and border) into the backbuffer pixmap. Also derives stepWidth,
// the pixel width of one frequency decade, used by paintEvent() and
// mouseMoveEvent(). The two unused `QString drText;` locals from the
// original were removed.
void FreqResponseWidget::drawBackground()
{
  QPainter qp(backbuffer);
  qp.fillRect(0,0,width(),height(),QBrush(QColor(0,0,0)));
  QPen penThin(QColor(80,80,80));      // minor grid lines
  QPen penBold(QColor(150,150,150),1); // major grid lines
  QPen penText(QColor(255,255,255));   // labels
  QFontMetrics fm(QFont("Sans",10));
  qp.setFont(QFont("Sans",10));
  qp.setPen(penText);
  // Centered plot title.
  QString frequencyResponseString = tr("Frequency Response");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  int pixelsWide = fm.horizontalAdvance(frequencyResponseString);
#else
  int pixelsWide = fm.boundingRect(frequencyResponseString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, 20, frequencyResponseString);
  // X-axis caption.
  QString frequencyString = tr("Frequency, Hz");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(frequencyString);
#else
  pixelsWide = fm.boundingRect(frequencyString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, height() - 8, frequencyString);
  // Y-axis caption, rotated 90° counter-clockwise.
  qp.save();
  qp.rotate(-90);
  QString magnitudeString = tr("Magnitude, dB");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(magnitudeString);
#else
  pixelsWide = fm.boundingRect(magnitudeString).width();
#endif
  qp.drawText(-pixelsWide / 2 - height() / 2, 12, magnitudeString);
  qp.restore();
  // Pixel width of one decade; shrunk so the axis ends near 20 kHz
  // rather than at a full third decade (log10(8) ≈ 0.9 of a decade).
  stepWidth = (width() - margin.left() - margin.right()) / 3;
  stepWidth -= (1 - log10(8)) * stepWidth;
  // Horizontal grid: a bold, labelled line every 5 minor steps.
  int i=0;
  while ((i * stepHeight) < (height() - margin.top() - margin.bottom()))
  {
    if (i % 5 == 0)
    {
      qp.setPen(penText);
      qp.drawText(margin.left() - 25, 4 + i * stepHeight +
        margin.top(), QString("%1").arg(maxDb - dbInStep/2 * (i / 5)));
      qp.setPen(penBold);
    }
    else
    {
      qp.setPen(penThin);
    }
    qp.drawLine(margin.left(), i * stepHeight + margin.top(),
      width() - margin.right(),
      i * stepHeight + margin.top());
    i++;
  }
  // Vertical grid: log-spaced lines within each decade (x = 1..9),
  // bold at decade boundaries, labelled 10/100/1000/... Hz.
  i=0;
  while ((i * stepWidth) < (width() - margin.left() - margin.right()))
  {
    for (int x = 1; x < 10; x++)
    {
      if (stepWidth * log10(x) + i * stepWidth +
          margin.left() <= (width() - margin.right()))
      {
        if (x==1)
        {
          qp.setPen(penBold);
        }
        else
        {
          qp.setPen(penThin);
        }
        qp.drawLine(stepWidth * log10(x) + i * stepWidth + margin.left(),
          margin.top(),
          stepWidth * log10(x) + i * stepWidth + margin.left(),
          height() - margin.bottom());
      }
    }
    qp.setPen(penText);
    qp.drawText(margin.left() + i * stepWidth - 4, height() - margin.bottom() + 20,
      QString("%1").arg((int)(10 * pow(10, i))));
    i++;
  }
  // White border around the plot area.
  qp.setPen(QPen(QColor(255,255,255),1));
  qp.drawLine(margin.left(), margin.top(), width() - margin.right(), margin.top());
  qp.drawLine(margin.left(), margin.top(), margin.left(), height() - margin.bottom());
  qp.drawLine(margin.left(), height() - margin.bottom(), width() - margin.right(),
    height() - margin.bottom());
  qp.drawLine(width() - margin.right(), margin.top(), width() - margin.right(),
    height() - margin.bottom());
}
// On resize, rebuild the backbuffer pixmap at the new size, redraw the
// static chrome into it and schedule a full repaint.
void FreqResponseWidget::resizeEvent(QResizeEvent*)
{
  delete backbuffer;
  backbuffer = new QPixmap(width(), height());
  drawBackground();
  update(0, 0, width(), height());
}
// Tracks the cursor inside the plot area and updates the "f: ... Hz,
// k: ... dB" readout, invalidating only the old and new text rectangles.
void FreqResponseWidget::mouseMoveEvent(QMouseEvent *event)
{
  QPoint point(event->pos());
  // Ignore movement outside the plot rectangle.
  if ((point.x() >= margin.left()) && (point.y() >= margin.top()) &&
      (point.x() <= width() - margin.right()) &&
      (point.y() <= height() - margin.bottom()))
  {
    infop = point;
    QFontMetrics metrics(infoFont,this);
    // Inverse of the y mapping used in paintEvent(): pixel -> dB.
    double Lvalue = maxDb - (double)(infop.y() - margin.top()) /
      (double)stepHeight * (double)dbInStep / 10.0;
    // Inverse of the x mapping: pixel -> frequency = 10^(1 + px/stepWidth).
    text = QString(tr("f: %1 Hz, k: %2 dB")).arg(
      pow(10, 1 + (double)(infop.x() - margin.left()) /
      (double)stepWidth), 4, 'f', 2).arg(Lvalue,4,'g',3);
    QRect infor = metrics.tightBoundingRect(text);
    infor.moveBottomLeft(infop);
    infor.adjust(-5,-5,5,5);
    // Repaint where the text was (to erase it) and where it goes now.
    update(backinfor);
    update(infor);
    backinfor = infor;
  }
}
| 7,244
|
C++
|
.cpp
| 211
| 30.033175
| 86
| 0.643645
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,337
|
main.cpp
|
olegkapitonov_tubeAmp-Designer/src/main.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QApplication>
#include <QTranslator>
#include <QSettings>
#include <QDir>
#include "mainwindow.h"
#include "processor.h"
#include "player.h"
// Application entry point: sets up Qt, translations, the JACK-backed
// player and the DSP processor, then runs the main window. On exit the
// input-level slider position is persisted to QSettings.
int main(int argc, char *argv[])
{
  QApplication a(argc, argv);
  // Organization/application names define the QSettings storage location.
  QCoreApplication::setOrganizationName("Oleg Kapitonov");
  QCoreApplication::setApplicationName("tubeAmp Designer");
  QTranslator translator;
  // Translations live relative to the install prefix:
  // <prefix>/share/tubeAmp Designer/translations/tAD_<locale>.qm
  QDir appdir(a.applicationDirPath());
  appdir.cdUp();
  appdir.cd("share/tubeAmp Designer/translations");
  translator.load("tAD_" + QLocale::system().name(), appdir.absolutePath());
  a.installTranslator(&translator);
  Player *playerInstance = new Player();
  if (playerInstance->connectToJack() == 1)
  {
    // NOTE(review): exit(1) here skips the deletes below; harmless at
    // startup since the process dies, but worth tidying eventually.
    QMessageBox::critical(nullptr, "Error!",
                          "Unable to connect to JACK server!");
    exit(1);
  }
  // Processor must match the JACK sample rate; the bundled default
  // profile is loaded before the player starts processing.
  Processor *processorInstance = new Processor(playerInstance->getSampleRate());
  processorInstance->loadProfile(":/profiles/British Crunch.tapf");
  playerInstance->setProcessor(processorInstance);
  playerInstance->activate();
  MainWindow w(nullptr, processorInstance, playerInstance);
  w.showMaximized();
  int retVal = a.exec();
  delete playerInstance;
  delete processorInstance;
  // Persist the slider as a signed level (-50..50), the same form
  // PlayerPanel's constructor reads back.
  int value = w.centralWidget->playerPanel->getInputLevelSliderValue();
  QSettings settings;
  settings.setValue("playerPanel/inputLevel", value - 50);
  return retVal;
}
| 2,198
|
C++
|
.cpp
| 57
| 35.877193
| 80
| 0.734023
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,338
|
tameter.cpp
|
olegkapitonov_tubeAmp-Designer/src/tameter.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPainter>
#include <cmath>
#include "tameter.h"
// The meter starts at the floor of its range (-60 dB == silent).
TAMeter::TAMeter(QWidget *parent) : QWidget(parent) { value = -60.0; }
// Paints the level meter: a horizontal gradient bar whose length maps
// the current value from -60 dB (empty) to 0 dB (full width).
// Unlike the original, the clamp uses a local copy — a paint handler
// should not mutate widget state (setValue() already clamps anyway, so
// the rendered output is unchanged).
void TAMeter::paintEvent(QPaintEvent *)
{
  QPainter painter(this);
  painter.fillRect(0, 0, width(), height(), QBrush(QColor(0, 0, 0)));
  float level = value;
  if (level < -60.0)
  {
    level = -60.0;
  }
  if (level > 0.0)
  {
    level = 0.0;
  }
  int barWidth = width() * (1.0 - (level / (-60.0)));
  // Green (quiet) -> yellow-green at -25 dB -> orange at -20 dB -> red (0 dB).
  QLinearGradient barGrad(QPointF(0, 0), QPointF(width(), 0));
  barGrad.setColorAt(0, QColor(0, 50, 0));
  barGrad.setColorAt((1.0 - (-25.0 / (-60.0))), QColor(100, 150, 0));
  barGrad.setColorAt((1.0 - (-20.0 / (-60.0))), QColor(200, 100, 0));
  barGrad.setColorAt(1, QColor(255, 0, 0));
  painter.fillRect(0, 0, barWidth, height(), barGrad);
}
// Update the displayed level, clamped to [-60, 0] dB, then repaint
// the whole widget.
void TAMeter::setValue(float v)
{
  value = v;
  if (value > 0.0)
  {
    value = 0.0;
  }
  else if (value < -60.0)
  {
    value = -60.0;
  }
  repaint(0, 0, -1, -1);
}
| 1,761
|
C++
|
.cpp
| 52
| 31.365385
| 80
| 0.653892
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,339
|
centralwidget.cpp
|
olegkapitonov_tubeAmp-Designer/src/centralwidget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QSizePolicy>
#include <QStyle>
#include <QApplication>
#include "centralwidget.h"
// Main editing area: a row of page-selection buttons on top, the tube-amp
// control panel on the left, and a scrollable block-editor page on the
// right. Starts on the PreAmp Filter page.
CentralWidget::CentralWidget(QWidget *parent, Processor *prc, Player *plr) :
  QWidget(parent)
{
  processor = prc;
  player = plr;
  QGridLayout *gbox = new QGridLayout(this);
  QWidget *moduleSelectBar = new QWidget(this);
  gbox->addWidget(moduleSelectBar, 0, 0, 1, 2);
  moduleSelectBar->setSizePolicy(QSizePolicy::Fixed, QSizePolicy::Fixed);
  QHBoxLayout *hbox = new QHBoxLayout(moduleSelectBar);
  // The five checkable page buttons act like a tab bar; exclusivity is
  // enforced manually via uncheckModuleSelectBar().
  preAmpFilterButton = new QPushButton(tr("PreAmp Filter"), moduleSelectBar);
  hbox->addWidget(preAmpFilterButton);
  preAmpFilterButton->setCheckable(true);
  preAmpFilterButton->setChecked(true);
  connect(preAmpFilterButton, &QPushButton::clicked, this,
    &CentralWidget::preAmpFilterButtonClicked);
  preAmpParamsButton = new QPushButton(tr("PreAmp Parameters"), moduleSelectBar);
  hbox->addWidget(preAmpParamsButton);
  preAmpParamsButton->setCheckable(true);
  connect(preAmpParamsButton, &QPushButton::clicked, this,
    &CentralWidget::preAmpParamsButtonClicked);
  toneStackButton = new QPushButton(tr("ToneStack"), moduleSelectBar);
  hbox->addWidget(toneStackButton);
  toneStackButton->setCheckable(true);
  connect(toneStackButton, &QPushButton::clicked, this, &CentralWidget::toneStackButtonClicked);
  powerAmpParamsButton = new QPushButton(tr("PowerAmp Parameters"), moduleSelectBar);
  hbox->addWidget(powerAmpParamsButton);
  powerAmpParamsButton->setCheckable(true);
  connect(powerAmpParamsButton, &QPushButton::clicked, this,
    &CentralWidget::powerAmpParamsButtonClicked);
  cabSymButton = new QPushButton(tr("Cabinet"), moduleSelectBar);
  hbox->addWidget(cabSymButton);
  cabSymButton->setCheckable(true);
  connect(cabSymButton, &QPushButton::clicked, this, &CentralWidget::cabSymButtonClicked);
  // Player transport shares the top bar with the page buttons.
  playerPanel = new PlayerPanel(moduleSelectBar, player, processor);
  hbox->addWidget(playerPanel);
  // Default page: PreAmp Filter editor inside the scroll area.
  centralArea = new QScrollArea(this);
  preampFilterEditWidget = new PreampFilterEditWidget(this, processor);
  activeBlockEdit = preampFilterEditWidget;
  centralArea->setWidget(preampFilterEditWidget);
  gbox->addWidget(centralArea, 1, 1, 1, 1);
  tubeAmpPanel = new TubeAmpPanel(this, processor, player);
  gbox->addWidget(tubeAmpPanel, 1, 0, 1, 1);
  connect(tubeAmpPanel, &TubeAmpPanel::dialValueChanged, this, &CentralWidget::dialValueChanged);
  connect(player, &Player::peakRMSValueCalculated, tubeAmpPanel,
    &TubeAmpPanel::peakRMSValueChanged);
  // Event filter lets us re-stretch the active editor when the scroll
  // area is resized (see eventFilter / adjustWidget).
  centralArea->installEventFilter(this);
}
// Switches the central editor to the PreAmp Filter page.
// NOTE(review): QScrollArea::setWidget() deletes the previously set
// widget, so activeBlockEdit can hold a stale pointer at this comparison.
// The pattern works because a fresh widget is created on every switch,
// but confirm the member pointers are initialized in the class header.
void CentralWidget::preAmpFilterButtonClicked()
{
  uncheckModuleSelectBar();
  preAmpFilterButton->setChecked(true);
  if (activeBlockEdit != preampFilterEditWidget)
  {
    preampFilterEditWidget = new PreampFilterEditWidget(this, processor);
    adjustWidget(preampFilterEditWidget);
    activeBlockEdit = preampFilterEditWidget;
    centralArea->setWidget(preampFilterEditWidget);
  }
}
// Switches the central editor to the PreAmp Parameters page.
// NOTE(review): the comparison against preampNonlinearEditWidget assumes
// that member is initialized before the first click — verify in the
// header; setWidget() also deletes the old page (see note on the
// PreAmp Filter handler).
void CentralWidget::preAmpParamsButtonClicked()
{
  uncheckModuleSelectBar();
  preAmpParamsButton->setChecked(true);
  if (activeBlockEdit != preampNonlinearEditWidget)
  {
    preampNonlinearEditWidget = new PreampNonlinearEditWidget(this, processor);
    adjustWidget(preampNonlinearEditWidget);
    activeBlockEdit = preampNonlinearEditWidget;
    centralArea->setWidget(preampNonlinearEditWidget);
  }
}
// Switches the central editor to the ToneStack page.
// NOTE(review): same stale/uninitialized-pointer caveat as the other
// page handlers — setWidget() deletes the previous page widget.
void CentralWidget::toneStackButtonClicked()
{
  uncheckModuleSelectBar();
  toneStackButton->setChecked(true);
  if (activeBlockEdit != tonestackEditWidget)
  {
    tonestackEditWidget = new TonestackEditWidget(this, processor);
    adjustWidget(tonestackEditWidget);
    activeBlockEdit = tonestackEditWidget;
    centralArea->setWidget(tonestackEditWidget);
  }
}
// Switches the central editor to the PowerAmp Parameters page.
// NOTE(review): same stale/uninitialized-pointer caveat as the other
// page handlers — setWidget() deletes the previous page widget.
void CentralWidget::powerAmpParamsButtonClicked()
{
  uncheckModuleSelectBar();
  powerAmpParamsButton->setChecked(true);
  if (activeBlockEdit != ampNonlinearEditWidget)
  {
    ampNonlinearEditWidget = new AmpNonlinearEditWidget(this, processor);
    adjustWidget(ampNonlinearEditWidget);
    activeBlockEdit = ampNonlinearEditWidget;
    centralArea->setWidget(ampNonlinearEditWidget);
  }
}
// Switches the central editor to the Cabinet page (the only page that
// also needs the player, for impulse auditioning).
// NOTE(review): same stale/uninitialized-pointer caveat as the other
// page handlers — setWidget() deletes the previous page widget.
void CentralWidget::cabSymButtonClicked()
{
  uncheckModuleSelectBar();
  cabSymButton->setChecked(true);
  if (activeBlockEdit != cabinetEditWidget)
  {
    cabinetEditWidget = new CabinetEditWidget(this, processor, player);
    adjustWidget(cabinetEditWidget);
    activeBlockEdit = cabinetEditWidget;
    centralArea->setWidget(cabinetEditWidget);
  }
}
void CentralWidget::uncheckModuleSelectBar()
{
preAmpFilterButton->setChecked(false);
preAmpParamsButton->setChecked(false);
toneStackButton->setChecked(false);
powerAmpParamsButton->setChecked(false);
cabSymButton->setChecked(false);
}
// Watches the scroll area: whenever it is resized, re-stretch the active
// block editor to fit the new viewport width.
bool CentralWidget::eventFilter(QObject *o, QEvent *e)
{
  if(o == centralArea && e->type() == QEvent::Resize)
  {
    adjustWidget(activeBlockEdit);
  }
  // Always return false so Qt continues normal event processing.
  return false;
}
// Widens the given block-editor widget to fill the scroll-area viewport,
// leaving room for the vertical scroll bar (plus a small margin).
// Safe to call with a null pointer.
void CentralWidget::adjustWidget(QWidget *widget)
{
  if (widget != nullptr)  // nullptr instead of the C macro NULL
  {
    widget->setMinimumWidth(centralArea->width()
      - qApp->style()->pixelMetric(QStyle::PM_ScrollBarExtent) - 5);
  }
}
// Amp-panel dials only affect the ToneStack view; recalculate it when
// that page is the one currently shown.
void CentralWidget::dialValueChanged()
{
  if (activeBlockEdit != tonestackEditWidget)
    return;
  activeBlockEdit->recalculate();
}
// Re-sync the UI after a profile (re)load: reset the amp panel, then
// recalculate and reset the active editor's controls.
void CentralWidget::reloadBlocks()
{
  tubeAmpPanel->resetControls();
  activeBlockEdit->recalculate();
  activeBlockEdit->resetControls();
}
// Refresh the UI after processor state changes: like reloadBlocks(), but
// updates the active editor's controls in place instead of resetting.
void CentralWidget::updateBlocks()
{
  tubeAmpPanel->resetControls();
  activeBlockEdit->recalculate();
  activeBlockEdit->updateControls();
}
| 6,389
|
C++
|
.cpp
| 173
| 33.820809
| 97
| 0.777688
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,340
|
convolver_dialog.cpp
|
olegkapitonov_tubeAmp-Designer/src/convolver_dialog.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QGridLayout>
#include <QHBoxLayout>
#include <QLabel>
#include <QFileDialog>
#include <sndfile.h>
#include <cmath>
#include "convolver_dialog.h"
#include "math_functions.h"
// FFT convolution dialog: convolves an input signal (a sound file or the
// current cabinet impulse response) with an impulse-response file, and
// writes the result to a file or back into the cabinet IR.
ConvolverDialog::ConvolverDialog(Processor *prc, QWidget *parent) : QDialog(parent)
{
  processor = prc;
  setMinimumWidth(600);
  setWindowTitle(tr("FFT Convolver"));
  QGridLayout *lay = new QGridLayout(this);
  // --- Input source: cabinet IR or a file ---
  QLabel *inputLabel = new QLabel(tr("Input Signal"), this);
  lay->addWidget(inputLabel, 0, 0, 1, 2);
  inputLabel->setAlignment(Qt::AlignCenter);
  QButtonGroup *inputGroup = new QButtonGroup(this);
  inputCabinetRadioButton = new QRadioButton(
    tr("Cabinet impulse response"), this);
  inputGroup->addButton(inputCabinetRadioButton);
  lay->addWidget(inputCabinetRadioButton, 1, 0, 1, 1);
  inputFileRadioButton = new QRadioButton(tr("File"), this);
  inputFileRadioButton->setChecked(true);
  inputGroup->addButton(inputFileRadioButton);
  lay->addWidget(inputFileRadioButton, 2, 0, 1, 1);
  connect(inputGroup, QOverload<QAbstractButton *>::of(&QButtonGroup::buttonClicked),
    this, &ConvolverDialog::inputGroupClicked);
  inputFilenameEdit = new QLineEdit(this);
  lay->addWidget(inputFilenameEdit, 3, 0, 1, 1);
  inputFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(inputFilenameButton, 3, 1, 1, 1);
  connect(inputFilenameButton, &QPushButton::clicked,
    this, &ConvolverDialog::inputFilenameButtonClicked);
  // --- Impulse response file (always a file) ---
  QLabel *IRLabel = new QLabel(tr("Impulse response file"), this);
  lay->addWidget(IRLabel, 4, 0, 1, 2);
  IRLabel->setAlignment(Qt::AlignCenter);
  IRFilenameEdit = new QLineEdit(this);
  lay->addWidget(IRFilenameEdit, 5, 0, 1, 1);
  QPushButton *IRFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(IRFilenameButton, 5, 1, 1, 1);
  connect(IRFilenameButton, &QPushButton::clicked,
    this, &ConvolverDialog::IRFilenameButtonClicked);
  // --- Output destination: cabinet IR or a file ---
  QLabel *outputLabel = new QLabel(tr("Output Signal"), this);
  lay->addWidget(outputLabel, 6, 0, 1, 2);
  outputLabel->setAlignment(Qt::AlignCenter);
  QButtonGroup *outputGroup = new QButtonGroup(this);
  outputCabinetRadioButton = new QRadioButton(
    tr("Cabinet impulse response"), this);
  outputGroup->addButton(outputCabinetRadioButton);
  lay->addWidget(outputCabinetRadioButton, 7, 0, 1, 1);
  outputFileRadioButton = new QRadioButton(tr("File"), this);
  outputFileRadioButton->setChecked(true);
  outputGroup->addButton(outputFileRadioButton);
  lay->addWidget(outputFileRadioButton, 8, 0, 1, 1);
  connect(outputGroup, QOverload<QAbstractButton *>::of(&QButtonGroup::buttonClicked),
    this, &ConvolverDialog::outputGroupClicked);
  outputFilenameEdit = new QLineEdit(this);
  lay->addWidget(outputFilenameEdit, 9, 0, 1, 1);
  outputFilenameButton = new QPushButton(tr("Save"), this);
  lay->addWidget(outputFilenameButton, 9, 1, 1, 1);
  connect(outputFilenameButton, &QPushButton::clicked,
    this, &ConvolverDialog::outputFilenameButtonClicked);
  // --- Process / Close buttons; Process stays disabled until all
  // required paths are set (see checkSignals in the handlers) ---
  QWidget *buttonsContainer = new QWidget(this);
  lay->addWidget(buttonsContainer, 10, 0, 1, 2);
  QHBoxLayout *containerLay = new QHBoxLayout(buttonsContainer);
  processButton = new QPushButton(tr("Process"), buttonsContainer);
  containerLay->addWidget(processButton);
  processButton->setMaximumWidth(80);
  processButton->setEnabled(false);
  connect(processButton, &QPushButton::clicked,
    this, &ConvolverDialog::processButtonClicked);
  QPushButton *closeButton = new QPushButton(tr("Close"), buttonsContainer);
  containerLay->addWidget(closeButton);
  closeButton->setMaximumWidth(80);
  connect(closeButton, &QPushButton::clicked, this,
    &ConvolverDialog::closeButtonClicked);
}
void ConvolverDialog::inputFilenameButtonClicked()
{
QString inputFileName = QFileDialog::getOpenFileName(this,
tr("Open Input File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));
if (!inputFileName.isEmpty())
{
inputFilenameEdit->setText(inputFileName);
}
checkSignals();
}
void ConvolverDialog::outputFilenameButtonClicked()
{
QString outputFileName = QFileDialog::getSaveFileName(this,
tr("Save Output File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));
if (!outputFileName.isEmpty())
{
outputFilenameEdit->setText(outputFileName);
}
checkSignals();
}
void ConvolverDialog::IRFilenameButtonClicked()
{
QString IRFileName = QFileDialog::getOpenFileName(this,
tr("Open IR File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));
if (!IRFileName.isEmpty())
{
IRFilenameEdit->setText(IRFileName);
}
checkSignals();
}
void ConvolverDialog::processButtonClicked()
{
QString inputFileName = inputFilenameEdit->text();
QString outputFileName = outputFilenameEdit->text();
QString IRFileName = IRFilenameEdit->text();
QVector<float> inputL;
QVector<float> inputR;
int inputSampleRate;
SF_INFO sfinfo;
SNDFILE *sndFile;
if (inputFileRadioButton->isChecked())
{
sfinfo.format = 0;
sndFile = sf_open(inputFileName.toUtf8().constData(), SFM_READ, &sfinfo);
if (sndFile != NULL)
{
inputSampleRate = sfinfo.samplerate;
QVector<float> tempBuffer(sfinfo.frames * sfinfo.channels);
sf_readf_float(sndFile, tempBuffer.data(), sfinfo.frames);
sf_close(sndFile);
inputL.resize(sfinfo.frames);
inputR.resize(sfinfo.frames);
for (int i = 0; i < sfinfo.frames * sfinfo.channels; i += sfinfo.channels)
{
float sumFrame = 0.0;
if (sfinfo.channels > 1)
{
for (int j = 1; j < sfinfo.channels; j++)
{
sumFrame += tempBuffer[i + j];
}
sumFrame /= sfinfo.channels - 1;
inputL[i / sfinfo.channels] = tempBuffer[i];
inputR[i / sfinfo.channels] = sumFrame;
}
else
{
inputL[i] = tempBuffer[i];
inputR[i] = tempBuffer[i];
}
}
}
else
{
return;
}
}
else
{
inputL = processor->getLeftImpulse();
inputR = processor->getRightImpulse();
inputSampleRate = processor->getSamplingRate();
}
sfinfo.format = 0;
QVector<float> IRL;
QVector<float> IRR;
int IRSampleRate;
sndFile = sf_open(IRFileName.toUtf8().constData(), SFM_READ, &sfinfo);
if (sndFile != NULL)
{
IRSampleRate = sfinfo.samplerate;
QVector<float> tempBuffer(sfinfo.frames * sfinfo.channels);
sf_readf_float(sndFile, tempBuffer.data(), sfinfo.frames);
sf_close(sndFile);
IRL.resize(sfinfo.frames);
IRR.resize(sfinfo.frames);
for (int i = 0; i < sfinfo.frames * sfinfo.channels; i += sfinfo.channels)
{
float sumFrame = 0.0;
if (sfinfo.channels > 1)
{
for (int j = 1; j < sfinfo.channels; j++)
{
sumFrame += tempBuffer[i + j];
}
sumFrame /= sfinfo.channels - 1;
IRL[i / sfinfo.channels] = tempBuffer[i];
IRR[i / sfinfo.channels] = sumFrame;
}
else
{
IRL[i] = tempBuffer[i];
IRR[i] = tempBuffer[i];
}
}
}
else
{
return;
}
int outputSampleRate;
if (inputSampleRate >= IRSampleRate)
{
outputSampleRate = inputSampleRate;
}
else
{
outputSampleRate = IRSampleRate;
}
inputL = resample_vector(inputL, inputSampleRate, outputSampleRate);
inputR = resample_vector(inputR, inputSampleRate, outputSampleRate);
IRL = resample_vector(IRL, IRSampleRate, outputSampleRate);
IRR = resample_vector(IRR, IRSampleRate, outputSampleRate);
float loadedCabinetImpulseEnergy = 0.0;
for (int i = 0; i < inputL.size(); i++)
{
loadedCabinetImpulseEnergy += pow(inputL[i], 2);
}
fft_convolver(inputL.data(), inputL.size(),
IRL.data(), IRL.size());
fft_convolver(inputR.data(), inputR.size(),
IRR.data(), IRR.size());
float cabinetImpulseEnergy = 0.0;
for (int i = 0; i < inputL.size(); i++)
{
cabinetImpulseEnergy += pow(inputL[i], 2);
}
float cabinetImpulseEnergyCoeff = sqrt(loadedCabinetImpulseEnergy) /
sqrt(cabinetImpulseEnergy);
for (int i = 0; i < inputL.size(); i++)
{
inputL[i] *= cabinetImpulseEnergyCoeff;
inputR[i] *= cabinetImpulseEnergyCoeff;
}
if (outputFileRadioButton->isChecked())
{
sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
sfinfo.frames = inputL.size();
sfinfo.samplerate = outputSampleRate;
sfinfo.channels = 2;
sfinfo.sections = 1;
sfinfo.seekable = 1;
SNDFILE *outputFile = sf_open(outputFileName.toUtf8().constData(),
SFM_WRITE, &sfinfo);
if (outputFile != NULL)
{
QVector<float> tempBuffer(inputL.size() * 2);
for (int i = 0; i < (inputL.size() * 2 - 1); i += 2)
{
tempBuffer[i] = inputL[i / 2];
tempBuffer[i + 1] = inputR[i / 2];
}
sf_writef_float(outputFile, tempBuffer.data(), inputL.size());
sf_close(outputFile);
}
}
else
{
processor->setCabinetImpulse(inputL, inputR);
}
}
void ConvolverDialog::closeButtonClicked()
{
close();
}
// Slot: input-source radio group. The filename widgets are only
// meaningful when "File" (not "Cabinet impulse response") is selected.
void ConvolverDialog::inputGroupClicked(QAbstractButton *button)
{
  const bool useFile = (button != inputCabinetRadioButton);
  inputFilenameEdit->setEnabled(useFile);
  inputFilenameButton->setEnabled(useFile);
  checkSignals();
}
// Slot: output-destination radio group. The filename widgets are only
// meaningful when "File" (not "Cabinet impulse response") is selected.
void ConvolverDialog::outputGroupClicked(QAbstractButton *button)
{
  const bool useFile = (button != outputCabinetRadioButton);
  outputFilenameEdit->setEnabled(useFile);
  outputFilenameButton->setEnabled(useFile);
  checkSignals();
}
// Enables "Process" only when every required path is present: the IR file
// is always required; input/output paths are required only when their
// "File" radio button is the selected source/destination.
void ConvolverDialog::checkSignals()
{
  const bool inputMissing =
    inputFileRadioButton->isChecked() && inputFilenameEdit->text().isEmpty();
  const bool outputMissing =
    outputFileRadioButton->isChecked() && outputFilenameEdit->text().isEmpty();
  const bool irMissing = IRFilenameEdit->text().isEmpty();

  processButton->setEnabled(!inputMissing && !outputMissing && !irMissing);
}
| 10,870
|
C++
|
.cpp
| 322
| 29.503106
| 86
| 0.700029
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,341
|
load_dialog.cpp
|
olegkapitonov_tubeAmp-Designer/src/load_dialog.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QGridLayout>
#include <QHBoxLayout>
#include <QLabel>
#include <QFileDialog>
#include <QMessageBox>
#include "load_dialog.h"
// Builds the "open sound files" dialog: a DI (direct input) recording
// picker, a reference recording picker, and OK/Cancel. OK starts disabled
// and is enabled by checkFilenames() once both paths are filled in.
LoadDialog::LoadDialog(QWidget *parent) : QDialog(parent)
{
  setMinimumWidth(600);
  setWindowTitle(tr("Open sound files"));
  QGridLayout *lay = new QGridLayout(this);
  // Rows 0-1: DI file label, line edit and "Open" button.
  QLabel *diLabel = new QLabel(tr("Direct Input from guitar"), this);
  lay->addWidget(diLabel, 0, 0, 1, 2);
  diFilenameEdit = new QLineEdit(this);
  lay->addWidget(diFilenameEdit, 1, 0, 1, 1);
  QPushButton *diFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(diFilenameButton, 1, 1, 1, 1);
  connect(diFilenameButton, &QPushButton::clicked, this, &LoadDialog::diFilenameButtonClicked);
  // Manual typing also re-evaluates the OK button.
  connect(diFilenameEdit, &QLineEdit::textEdited, this, &LoadDialog::lineEditEdited);
  // Rows 2-3: reference file label, line edit and "Open" button.
  QLabel *refLabel = new QLabel(tr("Reference sound file"), this);
  lay->addWidget(refLabel, 2, 0, 1, 2);
  refFilenameEdit = new QLineEdit(this);
  lay->addWidget(refFilenameEdit, 3, 0, 1, 1);
  QPushButton *refFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(refFilenameButton, 3, 1, 1, 1);
  connect(refFilenameButton, &QPushButton::clicked, this, &LoadDialog::refFilenameButtonClicked);
  connect(refFilenameEdit, &QLineEdit::textEdited, this, &LoadDialog::lineEditEdited);
  // Row 4: OK/Cancel button row in its own container/HBox.
  QWidget *buttonsContainer = new QWidget(this);
  lay->addWidget(buttonsContainer, 4, 0, 1, 2);
  QHBoxLayout *containerLay = new QHBoxLayout(buttonsContainer);
  okButton = new QPushButton(tr("OK"), buttonsContainer);
  containerLay->addWidget(okButton);
  okButton->setMaximumWidth(80);
  // Disabled until both filenames are provided (see checkFilenames()).
  okButton->setEnabled(false);
  connect(okButton, &QPushButton::clicked, this, &LoadDialog::okButtonClicked);
  QPushButton *cancelButton = new QPushButton(tr("Cancel"), buttonsContainer);
  containerLay->addWidget(cancelButton);
  cancelButton->setMaximumWidth(80);
  connect(cancelButton, &QPushButton::clicked, this, &LoadDialog::cancelButtonClicked);
}
// Slot: "Open" for the DI recording. Note: cancelling the dialog stores
// an empty filename (the member is assigned unconditionally) but leaves
// the line-edit text unchanged — this mirrors the original behavior.
void LoadDialog::diFilenameButtonClicked()
{
  diFileName = QFileDialog::getOpenFileName(this, tr("Open DI File"),
                                            QString(),
                                            tr("Sound files (*.wav *.ogg *.flac)"));
  if (!diFileName.isEmpty())
    diFilenameEdit->setText(diFileName);

  checkFilenames();
}
// Slot: "Open" for the reference recording. Cancelling stores an empty
// filename but leaves the line-edit text unchanged (same as the DI slot).
void LoadDialog::refFilenameButtonClicked()
{
  refFileName = QFileDialog::getOpenFileName(this,
                                             tr("Open Reference File"),
                                             QString(),
                                             tr("Sound files (*.wav *.ogg *.flac)"));
  if (!refFileName.isEmpty())
    refFilenameEdit->setText(refFileName);

  checkFilenames();
}
void LoadDialog::cancelButtonClicked()
{
reject();
}
// Slot: "OK". Accepts the dialog only if both chosen files actually
// exist on disk; otherwise warns and keeps the dialog open.
void LoadDialog::okButtonClicked()
{
  const bool diExists = QFile(diFileName).exists();
  const bool refExists = QFile(refFileName).exists();

  if (!diExists || !refExists)
  {
    QMessageBox::warning(this, tr("Attention!"), tr("DI or Reference file not found!"));
    return;
  }

  accept();
}
// Accessor: path of the chosen DI recording (may be empty).
QString LoadDialog::getDiFileName()
{
  return this->diFileName;
}
// Accessor: path of the chosen reference recording (may be empty).
QString LoadDialog::getRefFileName()
{
  return this->refFileName;
}
// Enables OK exactly when both filename fields are non-empty.
// Fix: previously the button could only ever be enabled — clearing a
// field after both had been filled left OK active. Now the state is set
// from the current field contents in both directions.
void LoadDialog::checkFilenames()
{
  const bool bothFilled =
    !(diFilenameEdit->text().isEmpty() || refFilenameEdit->text().isEmpty());
  okButton->setEnabled(bothFilled);
}
// Slot: any manual edit of either filename field re-evaluates OK.
// The edited text itself is unused; the fields are read directly.
void LoadDialog::lineEditEdited(QString)
{
  this->checkFilenames();
}
| 4,223
|
C++
|
.cpp
| 113
| 32.902655
| 97
| 0.694268
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,342
|
mainwindow.cpp
|
olegkapitonov_tubeAmp-Designer/src/mainwindow.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QFileDialog>
#include <QMessageBox>
#include "mainwindow.h"
// Builds the main window: menu bar (File/Tools/Help), central editor
// widget, status bar, and the three tool dialogs (profiler, convolver,
// deconvolver). Keeps non-owning pointers to the shared Processor and
// Player supplied by the caller.
MainWindow::MainWindow(QWidget *parent, Processor *prc, Player *plr) :
  QMainWindow(parent)
{
  processor = prc;
  player = plr;
  resize(615, 410);
  // File actions; icons come from the platform style where available.
  actionOpen = new QAction(QApplication::style()->standardIcon(
    QStyle::SP_DialogOpenButton), "Open", this);
  connect(actionOpen, &QAction::triggered, this, &MainWindow::actionOpenTriggered);
  actionSave = new QAction(QApplication::style()->standardIcon(
    QStyle::SP_DialogSaveButton), "Save", this);
  connect(actionSave, &QAction::triggered, this, &MainWindow::actionSaveTriggered);
  actionSave_As = new QAction(this);
  connect(actionSave_As, &QAction::triggered, this, &MainWindow::actionSaveAsTriggered);
  actionQuit = new QAction(QApplication::style()->standardIcon(
    QStyle::SP_DialogCloseButton), "Close",this);
  connect(actionQuit, &QAction::triggered, this, &MainWindow::actionQuitTriggered);
  actionAbout = new QAction(QApplication::style()->standardIcon(
    QStyle::SP_DialogHelpButton), "About",this);
  connect(actionAbout, &QAction::triggered, this, &MainWindow::actionAboutTriggered);
  // Tool actions (no icons).
  actionProfiler = new QAction(this);
  connect(actionProfiler, &QAction::triggered, this, &MainWindow::actionProfilerTriggered);
  actionConvolver= new QAction(this);
  connect(actionConvolver, &QAction::triggered, this, &MainWindow::actionConvolverTriggered);
  actionDeconvolver = new QAction(this);
  connect(actionDeconvolver, &QAction::triggered, this, &MainWindow::actionDeconvolverTriggered);
  // Central editor area.
  centralWidget = new CentralWidget(this, processor, player);
  setCentralWidget(centralWidget);
  // Menu bar with three menus; actions are attached below.
  menuBar = new QMenuBar(this);
  menuBar->setGeometry(QRect(0, 0, 615, 30));
  menuFile = new QMenu(menuBar);
  menuTools = new QMenu(menuBar);
  menuHelp = new QMenu(menuBar);
  setMenuBar(menuBar);
  statusBar = new QStatusBar(this);
  setStatusBar(statusBar);
  menuBar->addAction(menuFile->menuAction());
  menuBar->addAction(menuTools->menuAction());
  menuBar->addAction(menuHelp->menuAction());
  menuFile->addAction(actionOpen);
  menuFile->addAction(actionSave);
  menuFile->addAction(actionSave_As);
  menuFile->addSeparator();
  menuFile->addAction(actionQuit);
  menuTools->addAction(actionProfiler);
  menuTools->addAction(actionConvolver);
  menuTools->addAction(actionDeconvolver);
  menuHelp->addAction(actionAbout);
  // Translated (retranslate-style) texts for window, actions and menus.
  setWindowTitle(QApplication::translate("MainWindow", "tubeAmp Designer", nullptr));
  setWindowIcon(QIcon(":/icons/logo.png"));
  actionOpen->setText(QApplication::translate("MainWindow", "Open", nullptr));
  actionSave->setText(QApplication::translate("MainWindow", "Save", nullptr));
  actionSave_As->setText(QApplication::translate("MainWindow", "Save As", nullptr));
  actionQuit->setText(QApplication::translate("MainWindow", "Quit", nullptr));
  actionAbout->setText(QApplication::translate("MainWindow", "About", nullptr));
  actionProfiler->setText(QApplication::translate("MainWindow", "Profiler", nullptr));
  actionConvolver->setText(QApplication::translate("MainWindow", "Convolver", nullptr));
  actionDeconvolver->setText(QApplication::translate("MainWindow",
                                                     "Deconvolver", nullptr));
  menuFile->setTitle(QApplication::translate("MainWindow", "File", nullptr));
  menuTools->setTitle(QApplication::translate("MainWindow", "Tools", nullptr));
  menuHelp->setTitle(QApplication::translate("MainWindow", "Help", nullptr));
  // Tool dialogs are created once up front and shown on demand.
  profilerDialog = new ProfilerDialog(processor, player, centralWidget->playerPanel, this);
  connect(profilerDialog, &ProfilerDialog::accepted,
    this, &MainWindow::profilerDialogAccepted);
  convolverDialog = new ConvolverDialog(processor, this);
  deconvolverDialog = new DeconvolverDialog(processor, this);
}
// "File -> Open": lets the user pick a .tapf profile, starting from the
// installed profiles directory. Playback is stopped while the profile is
// swapped in and the previous play state is restored afterwards.
void MainWindow::actionOpenTriggered()
{
  // Profiles ship in <prefix>/share/tubeAmp Designer/profiles,
  // located relative to the executable's directory.
  QDir profilesDir(QCoreApplication::applicationDirPath());
  profilesDir.cdUp();
  profilesDir.cd("share/tubeAmp Designer/profiles");
  QString newProfileFileName =
    QFileDialog::getOpenFileName(this,
                                 "Open profile file",
                                 profilesDir.absolutePath(),
                                 "tubeAmp profiles (*.tapf)");
  if (!newProfileFileName.isEmpty())
  {
    // Pause audio while the DSP state is replaced.
    Player::PlayerStatus rememberStatus = player->status;
    player->setStatus(Player::PlayerStatus::PS_STOP);
    processor->loadProfile(newProfileFileName);
    setWindowTitle("tubeAmp Designer — " + QFileInfo(newProfileFileName).baseName());
    centralWidget->reloadBlocks();
    player->setStatus(rememberStatus);
  }
}
// "File -> Save": saves to the profile's current filename. If equalizer
// corrections are pending they are irreversibly applied first (after a
// confirmation dialog). On save failure, reports the error and falls back
// to Save As.
void MainWindow::actionSaveTriggered()
{
  if ((processor->isPreampCorrectionEnabled()) || (processor->isCabinetCorrectionEnabled()))
  {
    // Warn: applying EQ corrections bakes them into the profile.
    int ret = QMessageBox::warning(this, tr("Warning!"),
                                   tr("Equalizer settings changed\n"
                                      "and will be applied before saving!\n"
                                      "This can't be undone!"),
                                   QMessageBox::Save | QMessageBox::Cancel,
                                   QMessageBox::Save);
    if (ret == QMessageBox::Cancel)
    {
      return;
    }
    if (processor->isPreampCorrectionEnabled())
    {
      processor->applyPreampCorrection();
      processor->resetPreampCorrection();
    }
    if (processor->isCabinetCorrectionEnabled())
    {
      processor->applyCabinetSumCorrection();
      processor->resetCabinetSumCorrection();
    }
    // Corrections were baked in; refresh the editor widgets.
    centralWidget->reloadBlocks();
  }
  if (!processor->saveProfile(processor->getProfileFileName()))
  {
    // Could not write to the current path — let the user pick another.
    QMessageBox::critical(this, tr("Error!"),
                          tr("Can't save profile!"));
    actionSaveAsTriggered();
  }
}
// "File -> Save As": same pending-correction handling as Save, then asks
// for a target filename and retitles the window after a successful pick.
void MainWindow::actionSaveAsTriggered()
{
  if ((processor->isPreampCorrectionEnabled()) || (processor->isCabinetCorrectionEnabled()))
  {
    // Warn: applying EQ corrections bakes them into the profile.
    int ret = QMessageBox::warning(this, tr("Warning!"),
                                   tr("Equalizer settings changed\n"
                                      "and will be applied before saving!\n"
                                      "This can't be undone!"),
                                   QMessageBox::Save | QMessageBox::Cancel,
                                   QMessageBox::Save);
    if (ret == QMessageBox::Cancel)
    {
      return;
    }
    if (processor->isPreampCorrectionEnabled())
    {
      processor->applyPreampCorrection();
      processor->resetPreampCorrection();
    }
    if (processor->isCabinetCorrectionEnabled())
    {
      processor->applyCabinetSumCorrection();
      processor->resetCabinetSumCorrection();
    }
    // Corrections were baked in; refresh the editor widgets.
    centralWidget->reloadBlocks();
  }
  QString saveProfileFileName =
    QFileDialog::getSaveFileName(this,
                                 "Save profile file",
                                 QString(),
                                 "tubeAmp profiles (*.tapf)");
  if (!saveProfileFileName.isEmpty())
  {
    // NOTE(review): the return value of saveProfile is ignored here,
    // unlike in actionSaveTriggered — a failed Save As is silent.
    processor->saveProfile(saveProfileFileName);
    setWindowTitle("tubeAmp Designer — " + QFileInfo(saveProfileFileName).baseName());
  }
}
void MainWindow::actionProfilerTriggered()
{
profilerDialog->show();
}
// A new profile was captured by the profiler: refresh the editor blocks
// and retitle the window to mark the unsaved state.
void MainWindow::profilerDialogAccepted()
{
  centralWidget->updateBlocks();
  setWindowTitle("tubeAmp Designer — New Profile");
}
void MainWindow::actionQuitTriggered()
{
close();
}
void MainWindow::actionConvolverTriggered()
{
convolverDialog->open();
}
void MainWindow::actionDeconvolverTriggered()
{
deconvolverDialog->open();
}
// "Help -> About": standard Qt about box.
void MainWindow::actionAboutTriggered()
{
  QMessageBox::about(this, tr("About"),
                     tr("tubeAmp Designer - Virtual guitar"
                        " amplifier and profiler.\n2020 Oleg Kapitonov"));
}
| 8,553
|
C++
|
.cpp
| 206
| 35.150485
| 97
| 0.69012
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,343
|
tonestack_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/tonestack_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QVBoxLayout>
#include <cmath>
#include <gsl/gsl_complex.h>
#include <gsl/gsl_complex_math.h>
#include "tonestack_edit_widget.h"
// Converts a level in decibels to a linear amplitude gain
// (0 dB -> 1.0, +20 dB -> 10.0, -20 dB -> 0.1).
float db2gain(float db)
{
  const double exponent = db / 20.0;
  return std::pow(10.0, exponent);
}
// Builds the tonestack editor: a frequency-response plot on top and six
// sliders (center frequency + bandwidth for bass/middle/treble). Slider
// changes write into the processor profile and re-plot via recalculate().
TonestackEditWidget::TonestackEditWidget(QWidget *parent, Processor *prc) :
  BlockEditWidget(parent)
{
  processor = prc;
  setFrameShape(QFrame::Panel);
  QVBoxLayout *vbox = new QVBoxLayout(this);
  QLabel *tonestackLabel = new QLabel(tr("Tonestack Frequency Response"), this);
  vbox->addWidget(tonestackLabel);
  tonestackLabel->setMaximumHeight(30);
  tonestackLabel->setAlignment(Qt::AlignHCenter);
  QFont tonestackLabelFont = tonestackLabel->font();
  tonestackLabelFont.setPointSize(15);
  tonestackLabel->setFont(tonestackLabelFont);
  vbox->addWidget(tonestackLabel);
  // Response plot; +/-20 dB display range.
  freqResponse = new FreqResponseWidget(this);
  vbox->addWidget(freqResponse);
  freqResponse->maxDb = 20.0;
  // One SlideBoxWidget per parameter; ranges are in Hz.
  bassFreqSlide = new SlideBoxWidget(tr("Bass Frequency"), 10.0, 500.0, this);
  vbox->addWidget(bassFreqSlide);
  connect(bassFreqSlide, &SlideBoxWidget::valueChanged, this,
    &TonestackEditWidget::bassFreqChanged);
  bassBandSlide = new SlideBoxWidget(tr("Bass Bandwidth"), 100.0, 1000.0, this);
  vbox->addWidget(bassBandSlide);
  connect(bassBandSlide, &SlideBoxWidget::valueChanged, this,
    &TonestackEditWidget::bassBandChanged);
  middleFreqSlide = new SlideBoxWidget(tr("Middle Frequency"), 100.0, 1000.0, this);
  vbox->addWidget(middleFreqSlide);
  connect(middleFreqSlide, &SlideBoxWidget::valueChanged, this,
    &TonestackEditWidget::middleFreqChanged);
  middleBandSlide = new SlideBoxWidget(tr("Middle Bandwidth"), 200.0, 5000.0, this);
  vbox->addWidget(middleBandSlide);
  connect(middleBandSlide, &SlideBoxWidget::valueChanged, this, &TonestackEditWidget::middleBandChanged);
  trebleFreqSlide = new SlideBoxWidget(tr("Treble Frequency"), 2000.0, 20000.0, this);
  vbox->addWidget(trebleFreqSlide);
  connect(trebleFreqSlide, &SlideBoxWidget::valueChanged, this,
    &TonestackEditWidget::trebleFreqChanged);
  trebleBandSlide = new SlideBoxWidget(tr("Treble Bandwidth"), 3000.0, 40000.0, this);
  vbox->addWidget(trebleBandSlide);
  connect(trebleBandSlide, &SlideBoxWidget::valueChanged, this,
    &TonestackEditWidget::trebleBandChanged);
  // Plot the current profile and sync sliders to it.
  recalculate();
  resetControls();
}
void TonestackEditWidget::recalculate()
{
st_profile profile = processor->getProfile();
stControls controls = processor->getControls();
QVector<float> freqs(100);
for (int i = 0; i < freqs.size(); i++)
{
float fLog = (log10(20000.0) - log10(10.0))*(float)(i) / 99.0 + log10(10.0);
freqs[i] = pow(10, fLog);
}
freqResponse->fLogValues.resize(freqs.size());
freqResponse->dbValues.resize(freqs.size());
for (int i = 0; i < freqs.size(); i++)
{
freqResponse->fLogValues[i] = log10(freqs[i]);
gsl_complex j_w_norm = gsl_complex_rect(0, freqs[i] / profile.tonestack_low_freq);
gsl_complex B_norm = gsl_complex_rect(profile.tonestack_low_band /
profile.tonestack_low_freq, 0);
gsl_complex gain = gsl_complex_rect(db2gain(controls.low), 0);
gsl_complex A_numer = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(gsl_complex_mul(B_norm, gain), j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
gsl_complex A_denom = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(B_norm, j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
gsl_complex A_low = gsl_complex_div(A_numer, A_denom);
j_w_norm = gsl_complex_rect(0, freqs[i] / profile.tonestack_middle_freq);
B_norm = gsl_complex_rect(profile.tonestack_middle_band / profile.tonestack_middle_freq, 0);
gain = gsl_complex_rect(db2gain(controls.middle), 0);
A_numer = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(gsl_complex_mul(B_norm, gain), j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
A_denom = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(B_norm, j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
gsl_complex A_middle = gsl_complex_div(A_numer, A_denom);
j_w_norm = gsl_complex_rect(0, freqs[i] / profile.tonestack_high_freq);
B_norm = gsl_complex_rect(profile.tonestack_high_band / profile.tonestack_high_freq, 0);
gain = gsl_complex_rect(db2gain(controls.high), 0);
A_numer = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(gsl_complex_mul(B_norm, gain), j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
A_denom = gsl_complex_add(
gsl_complex_add(
gsl_complex_mul(j_w_norm, j_w_norm),
gsl_complex_mul(B_norm, j_w_norm)
),
gsl_complex_rect(1.0, 0.0)
);
gsl_complex A_treble = gsl_complex_div(A_numer, A_denom);
float A = gsl_complex_abs(A_low) * gsl_complex_abs(A_middle) * gsl_complex_abs(A_treble);
freqResponse->dbValues[i] = 20.0 * log10(A);
}
freqResponse->update(0,0,width(),height());
}
void TonestackEditWidget::bassFreqChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_low_freq = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::middleFreqChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_middle_freq = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::trebleFreqChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_high_freq = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::bassBandChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_low_band = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::middleBandChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_middle_band = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::trebleBandChanged(float value)
{
st_profile profile = processor->getProfile();
profile.tonestack_high_band = value;
processor->setProfile(profile);
recalculate();
}
void TonestackEditWidget::resetControls()
{
st_profile profile = processor->getProfile();
bassFreqSlide->setValue(profile.tonestack_low_freq);
bassBandSlide->setValue(profile.tonestack_low_band);
middleBandSlide->setValue(profile.tonestack_middle_band);
middleFreqSlide->setValue(profile.tonestack_middle_freq);
trebleBandSlide->setValue(profile.tonestack_high_band);
trebleFreqSlide->setValue(profile.tonestack_high_freq);
}
| 7,709
|
C++
|
.cpp
| 197
| 35.040609
| 105
| 0.714247
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,344
|
nonlinear_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/nonlinear_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPainter>
#include <QPaintEvent>
#include <QPolygon>
#include <gsl/gsl_spline.h>
#include "nonlinear_widget.h"
// Widget that plots a nonlinear transfer curve (input level -> output
// level) over a grid. The static grid is painted once into "backbuffer"
// (see drawBackground()); the curve is drawn on top in paintEvent().
NonlinearWidget::NonlinearWidget(QWidget *parent) :
  QWidget(parent)
{
  // Black background.
  QPalette Pal(palette());
  Pal.setColor(QPalette::Background, Qt::black);
  setAutoFillBackground(true);
  setPalette(Pal);
  setMaximumHeight(400);
  setMinimumHeight(400);
  // Placeholder size; re-allocated to the real size in resizeEvent().
  // NOTE(review): no destructor is visible here that deletes backbuffer —
  // confirm it is released elsewhere or this leaks one QPixmap.
  backbuffer = new QPixmap(100, 100);
  // Needed so mouseMoveEvent fires without a pressed button.
  setMouseTracking(true);
  // Plot margins in pixels: left/right/top/bottom around the grid.
  margin.setLeft(60);
  margin.setRight(20);
  margin.setTop(40);
  margin.setBottom(50);
  // Displayed axis ranges: input in [-3, 3], output in [-2, 2].
  maxIn = 3.0;
  maxOut = 2.0;
  // Default curve: 12 control points on the identity line
  // (values run from -1.0 to 1.2 in steps of 0.2).
  for (int i = 0; i < 12; i++)
  {
    inValues.append(i * 2.0 / 10.0 - 1.0);
    outValues.append(i * 2.0 / 10.0 - 1.0);
  }
}
// Paints the widget: blits the pre-rendered grid from the backbuffer for
// every dirty rect, then draws the transfer curve as a cubic spline
// through the (inValues, outValues) control points, plus the hover
// readout text at "infop".
void NonlinearWidget::paintEvent(QPaintEvent * event)
{
  QPainter qp(this);
  // Restore only the damaged regions from the static backbuffer.
  for (QRegion::const_iterator rects = event->region().begin();
    rects!= event->region().end(); rects++)
  {
    QRect r = *rects;
    qp.drawPixmap(r,*backbuffer,r);
  }
  // Cubic-spline interpolation through the control points (GSL);
  // allocations are freed right after the sampling loop below.
  gsl_interp_accel *acc = gsl_interp_accel_alloc ();
  gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, inValues.count());
  gsl_spline_init (spline, inValues.data(), outValues.data(), inValues.count());
  int graphWidth = width() - margin.left() - margin.right();
  QPolygon polyline1(graphWidth);
  // Sample the spline once per pixel column of the plot area.
  for (int i=0; i < graphWidth; i++)
  {
    // Map pixel column -> input value in [-maxIn, maxIn].
    double interpolatedIn = (i - graphWidth / 2.0) / (graphWidth / 2.0) * maxIn;
    double interpolatedOut = gsl_spline_eval(spline, interpolatedIn, acc);
    // Map back to pixel coordinates (y grows downwards).
    int x = (1.0 + interpolatedIn / maxIn) * stepWidth * 20.0 / 2.0;
    int y = (1.0 - interpolatedOut / maxOut) * stepHeight * 20.0 / 2.0;
    // Clamp the curve to the plot area vertically.
    if (y > (height() - margin.bottom() - margin.top()))
    {
      y = height() - margin.bottom() - margin.top();
    }
    if (y < 0)
    {
      y = 0;
    }
    polyline1.setPoint(i, x + margin.left(), y + margin.top());
  }
  gsl_spline_free(spline);
  gsl_interp_accel_free(acc);
  // Curve in green.
  qp.setPen(QPen(QColor(0,255,0)));
  qp.drawPolyline(polyline1);
  // Hover readout (position/text maintained by mouseMoveEvent) in white.
  qp.setPen(QPen(QColor(255,255,255)));
  infoFont = QFont("Sans",12);
  qp.setFont(infoFont);
  qp.drawText(infop,text);
}
// Renders the static plot background into the backbuffer: black fill,
// axis titles, a 20x20 grid with labels every 5th line, emphasized zero
// axes, and a border. Also updates stepWidth/stepHeight used by
// paintEvent() for coordinate mapping.
// Fix: removed two unused local `QString drText;` declarations.
void NonlinearWidget::drawBackground()
{
  QPainter qp(backbuffer);
  qp.fillRect(0,0,width(),height(),QBrush(QColor(0,0,0)));
  // Pens: thin grid, bold every-5th grid line, thick zero axes, labels.
  QPen penThin(QColor(80,80,80));
  QPen penBold(QColor(150,150,150),1);
  QPen penZero(QColor(150,150,150),3);
  QPen penText(QColor(255,255,255));
  QFontMetrics fm(QFont("Sans",10));
  qp.setFont(QFont("Sans",10));
  qp.setPen(penText);
  // Bottom axis title, horizontally centered.
  QString inputLevelString = tr("Input Level");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  int pixelsWide = fm.horizontalAdvance(inputLevelString);
#else
  int pixelsWide = fm.boundingRect(inputLevelString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, height() - 8, inputLevelString);
  // Top title, horizontally centered.
  QString nonlinearFunctionString = tr("Nonlinear Function");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(nonlinearFunctionString);
#else
  pixelsWide = fm.boundingRect(nonlinearFunctionString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, 20, nonlinearFunctionString);
  // Left axis title, drawn rotated 90° counter-clockwise.
  qp.save();
  qp.rotate(-90);
  QString outputLevelString = tr("Output Level");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(outputLevelString);
#else
  pixelsWide = fm.boundingRect(outputLevelString).width();
#endif
  qp.drawText(-pixelsWide / 2 - height() / 2, 12, outputLevelString);
  qp.restore();
  // 20 grid cells in each direction.
  stepWidth = (width() - margin.left() - margin.right()) / 20;
  stepHeight = (height() - margin.top() - margin.bottom()) / 20;
  // Horizontal grid lines; every 5th gets a bold pen and a y-axis label.
  int i=0;
  while (i*stepHeight < height()-margin.top()-margin.bottom())
  {
    if (i % 5 == 0)
    {
      qp.setPen(penText);
      qp.drawText(margin.left() - 35, 4 + i * stepHeight +
        margin.top(), QString("%1").arg(maxOut -
        (double)i / 20.0 * 2.0 * maxOut, 2, 'f', 2));
      qp.setPen(penBold);
    }
    else
    {
      qp.setPen(penThin);
    }
    qp.drawLine(margin.left(), i * stepHeight + margin.top(), width() - margin.right(),
      i * stepHeight + margin.top());
    i++;
  }
  // Vertical grid lines; every 5th gets a bold pen and an x-axis label.
  i=0;
  while ((i * stepWidth) < (width() - margin.left() - margin.right()))
  {
    if (i % 5 == 0)
    {
      qp.setPen(penText);
      qp.drawText(-4 + i * stepWidth + margin.left(), height() - margin.bottom() + 20,
        QString("%1").arg(-maxIn + (double)i / 20.0 * 2.0 * maxIn, 2, 'f', 2));
      qp.setPen(penBold);
    }
    else
    {
      qp.setPen(penThin);
    }
    qp.drawLine(i*stepWidth+margin.left(),
      margin.top(),
      i * stepWidth + margin.left(),
      height() - margin.bottom());
    i++;
  }
  // Emphasized zero axes through the plot center.
  qp.setPen(penZero);
  qp.drawLine(margin.left() + 2, 10 * stepHeight + margin.top(),
    width() - margin.right() - 2,
    10 * stepHeight + margin.top());
  qp.drawLine(10 * stepWidth + margin.left(),
    margin.top() + 2,
    10 * stepWidth + margin.left(),
    height() - margin.bottom() - 2);
  // White border around the plot area.
  qp.setPen(QPen(QColor(255,255,255),1));
  qp.drawLine(margin.left(), margin.top(), width() - margin.right(), margin.top());
  qp.drawLine(margin.left(), margin.top(), margin.left(), height() - margin.bottom());
  qp.drawLine(margin.left(), height() - margin.bottom(),
    width() - margin.right(), height() - margin.bottom());
  qp.drawLine(width() - margin.right(), margin.top(),
    width() - margin.right(), height() - margin.bottom());
}
// On resize: re-allocate the backbuffer at the new size, re-render the
// static grid into it, then repaint the whole widget.
void NonlinearWidget::resizeEvent(QResizeEvent*)
{
  delete backbuffer;
  backbuffer = new QPixmap(width(),height());
  drawBackground();
  update(0,0,width(),height());
}
// Tracks the cursor inside the plot area and updates the "In/Out"
// readout: converts the pixel position to graph coordinates, then
// repaints only the old and new text rectangles.
// Fix: the vertical (output) conversion subtracted margin.right() where
// the symmetric horizontal code uses margin.left() — the y-axis offset is
// margin.top(), so the displayed Out value was skewed by the difference.
void NonlinearWidget::mouseMoveEvent(QMouseEvent *event)
{
  QPoint point(event->pos());
  // Only react while the cursor is inside the plot area.
  if ((point.x() >= margin.left()) && (point.y() >= margin.top()) &&
    (point.x() <= width() - margin.right()) &&
    (point.y() <= height() - margin.bottom()))
  {
    infop = point;
    QFontMetrics metrics(infoFont,this);
    int graphWidth = width() - margin.left() - margin.right();
    int graphHeight = stepHeight * 20;
    // Pixel -> graph coordinates; y is inverted (screen y grows down).
    double inValue = (point.x() - margin.left() - graphWidth / 2.0) /
      (graphWidth / 2.0) * maxIn;
    double outValue = -(point.y() - margin.top() - graphHeight / 2.0) /
      (graphHeight / 2.0) * maxOut;
    text = QString(tr("In: %1, Out: %2")).arg(inValue, 4, 'f', 2).arg(
      outValue, 4, 'f', 2);
    // Dirty-rect update: erase the previous readout, draw the new one.
    QRect infor = metrics.tightBoundingRect(text);
    infor.moveBottomLeft(infop);
    infor.adjust(-5,-5,5,5);
    update(backinfor);
    update(infor);
    backinfor = infor;
  }
}
| 7,397
|
C++
|
.cpp
| 211
| 30.881517
| 87
| 0.642917
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,345
|
preamp_nonlinear_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/preamp_nonlinear_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QFrame>
#include <QVBoxLayout>
#include <QWidget>
#include <QSizePolicy>
#include <QSlider>
#include <cmath>
#include "preamp_nonlinear_edit_widget.h"
#include "freq_response_widget.h"
// Builds the editor panel for the preamp nonlinear transfer function:
// a graph of the tube curve plus sliders for bias, saturation level,
// saturation hardness and gain level.
//
// prc - audio processor whose profile is edited; not owned here.
PreampNonlinearEditWidget::PreampNonlinearEditWidget(QWidget *parent, Processor *prc) :
  BlockEditWidget(parent)
{
  processor = prc;

  setFrameShape(QFrame::Panel);
  QVBoxLayout *vbox = new QVBoxLayout(this);

  QLabel *preampNonlinearLabel = new QLabel(tr("Preamp Nonlinear Function"), this);
  preampNonlinearLabel->setMaximumHeight(30);
  preampNonlinearLabel->setAlignment(Qt::AlignHCenter);

  QFont preampNonlinearLabelFont = preampNonlinearLabel->font();
  preampNonlinearLabelFont.setPointSize(15);
  preampNonlinearLabel->setFont(preampNonlinearLabelFont);

  // BUGFIX: the label used to be passed to vbox->addWidget() twice,
  // which leaves a duplicate layout item for the same widget; it is
  // now added exactly once (after its font/alignment are configured).
  vbox->addWidget(preampNonlinearLabel);

  nonlinear = new NonlinearWidget(this);
  vbox->addWidget(nonlinear);

  // One slider per tube-model parameter; each slot writes its value
  // into the profile and triggers a redraw of the curve.
  biasSlide = new SlideBoxWidget(tr("Bias"), -1.0, 1.0, this);
  vbox->addWidget(biasSlide);
  connect(biasSlide, &SlideBoxWidget::valueChanged, this,
          &PreampNonlinearEditWidget::biasChanged);

  uporSlide = new SlideBoxWidget(tr("Saturation Level"), 0.1, 2.0, this);
  vbox->addWidget(uporSlide);
  connect(uporSlide, &SlideBoxWidget::valueChanged, this,
          &PreampNonlinearEditWidget::uporChanged);

  kregSlide = new SlideBoxWidget(tr("Saturation Hard/Soft"), 0.0, 10.0, this);
  vbox->addWidget(kregSlide);
  connect(kregSlide, &SlideBoxWidget::valueChanged, this,
          &PreampNonlinearEditWidget::kregChanged);

  levelSlide = new SlideBoxWidget(tr("Gain Level"), -46.0, 10.0, this);
  vbox->addWidget(levelSlide);
  connect(levelSlide, &SlideBoxWidget::valueChanged, this,
          &PreampNonlinearEditWidget::levelChanged);

  resetControls();
  recalculate();
}
void PreampNonlinearEditWidget::recalculate()
{
st_profile profile = processor->getProfile();
nonlinear->inValues.resize(1000);
nonlinear->outValues.resize(1000);
for (int i = 0; i < nonlinear->inValues.size(); i++)
{
nonlinear->inValues[i] = -3.0 + (float)i / (nonlinear->inValues.size() - 1) * 6.0;
nonlinear->outValues[i] = processor->tube(nonlinear->inValues[i],
profile.preamp_Kreg,
profile.preamp_Upor,
profile.preamp_bias,
-profile.preamp_Upor);
}
nonlinear->maxIn = 3.0;
nonlinear->maxOut = 2.0;
nonlinear->drawBackground();
nonlinear->update(0,0,width(),height());
}
void PreampNonlinearEditWidget::biasChanged(float value)
{
st_profile profile = processor->getProfile();
profile.preamp_bias = value;
processor->setProfile(profile);
recalculate();
}
void PreampNonlinearEditWidget::uporChanged(float value)
{
st_profile profile = processor->getProfile();
profile.preamp_Upor = value;
processor->setProfile(profile);
recalculate();
}
void PreampNonlinearEditWidget::kregChanged(float value)
{
st_profile profile = processor->getProfile();
profile.preamp_Kreg = value;
processor->setProfile(profile);
recalculate();
}
void PreampNonlinearEditWidget::levelChanged(float value)
{
st_profile profile = processor->getProfile();
profile.preamp_level = pow(10.0, value / 20.0);
processor->setProfile(profile);
recalculate();
}
void PreampNonlinearEditWidget::resetControls()
{
st_profile profile = processor->getProfile();
biasSlide->setValue(profile.preamp_bias);
levelSlide->setValue(20.0 * log10(profile.preamp_level));
kregSlide->setValue(profile.preamp_Kreg);
uporSlide->setValue(profile.preamp_Upor);
}
// Synchronizes the UI with the current profile; simply delegates to
// resetControls().
void PreampNonlinearEditWidget::updateControls()
{
  resetControls();
}
| 4,638
|
C++
|
.cpp
| 121
| 34.024793
| 87
| 0.719349
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,346
|
tadial.cpp
|
olegkapitonov_tubeAmp-Designer/src/tadial.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPainter>
#include <math.h>
#include "tadial.h"
// Rotary dial control. Value range is 0..100, mapped onto
// +/- dialMaxAngle degrees of pointer rotation.
TADial::TADial(QWidget *parent) : QWidget(parent)
{
  value = 0;
  startValue = 0;

  dialSize = 80;
  dialMaxAngle = 140;

  // Initial placement; resizeEvent() recenters the dial vertically
  // once the widget gets its real geometry.
  dialLeft = width() / 2 - dialSize / 2;
  dialTop = 0;

  setMinimumWidth(85);
}
// Paints the dial: a light outer ring, a black face, and a rotated
// white pointer whose tick marks change color with the value
// (blue at low values, green in the middle, red at high values).
void TADial::paintEvent(QPaintEvent *)
{
  QPainter painter(this);
  painter.setRenderHint(QPainter::Antialiasing);
  // Light outer ring.
  painter.setBrush(QBrush(QColor(200,200,200)));
  painter.drawEllipse(dialLeft + 10, dialTop + 10, dialSize - 20, dialSize - 20);
  // Black dial face.
  painter.setBrush(QBrush(QColor(0,0,0)));
  painter.drawEllipse(dialLeft + 20, dialTop + 20, dialSize - 40, dialSize - 40);
  // Map value 0..100 onto -dialMaxAngle..+dialMaxAngle degrees.
  int pointerAngle = value/100.0*(dialMaxAngle * 2.0) - dialMaxAngle;
  // Rotate the coordinate system around the widget center so the
  // pointer can be drawn in a fixed local position.
  painter.translate(width() / 2, height() / 2);
  painter.rotate(pointerAngle);
  // White pointer body.
  painter.setPen(QPen(QColor(0,0,0)));
  painter.setBrush(QBrush(QColor(255,255,255)));
  painter.drawRect(-5,-dialSize / 2,10,20);
  // Value-dependent tick color: R grows with value, G peaks at 0,
  // B peaks at the midpoint.
  painter.setPen(QPen(QColor(2.54*value,127*(1.0-value/100.0),
    (1.0-2.0*fabs(0.5-value/100.0))*254)));
  painter.drawLine(-5,-dialSize / 2 + 7,5,-dialSize / 2 + 7);
  painter.drawLine(-5,-dialSize / 2 + 10,5,-dialSize / 2 + 10);
  painter.drawLine(-5,-dialSize / 2 + 13,5,-dialSize / 2 + 13);
}
// Drag handler: vertical mouse movement since the press point adjusts
// the value (moving up increases it), clamped to [0, 100]. Repaints
// and emits valueChanged().
void TADial::mouseMoveEvent(QMouseEvent *event)
{
  int delta = startScroll.y() - event->pos().y();
  value = startValue + delta;

  if (value > 100)
  {
    value = 100;
  }
  else if (value < 0)
  {
    value = 0;
  }

  repaint(0,0,-1,-1);
  emit valueChanged(value);
}
// Remembers where the drag started and the value at that moment,
// so mouseMoveEvent() can compute a relative adjustment.
void TADial::mousePressEvent(QMouseEvent *event)
{
  startValue = value;
  startScroll = event->pos();
}
// Keeps the dial centered within the widget after a resize.
void TADial::resizeEvent(QResizeEvent *)
{
  dialTop = height() / 2 - dialSize / 2;
  dialLeft = width() / 2 - dialSize / 2;
}
// Programmatically sets the dial value, clamped to [0, 100], and
// repaints. Does not emit valueChanged().
void TADial::setValue(int v)
{
  if (v > 100)
  {
    v = 100;
  }
  else if (v < 0)
  {
    v = 0;
  }

  value = v;
  repaint(0,0,-1,-1);
}
| 2,731
|
C++
|
.cpp
| 83
| 30.012048
| 81
| 0.67364
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,347
|
math_functions.cpp
|
olegkapitonov_tubeAmp-Designer/src/math_functions.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QScopedPointer>
#include <gsl/gsl_complex_math.h>
#include <gsl/gsl_multifit.h>
#include <gsl/gsl_spline.h>
#include <fftw3.h>
#include <cstring>
#include "math_functions.h"
#include <zita-resampler/resampler.h>
// Function converts amplitude-frequency response
// to time domain impulse response.
// Converts an amplitude-frequency response into a minimum-phase
// time-domain impulse response.
//
// w_in       - angular frequencies (rad/s) of the input points, ascending
// A_in       - linear amplitude at each frequency point
// n_count    - number of input points
// IR         - output buffer; filled with IR_n_count samples
// IR_n_count - length of the output impulse response in samples
// rate       - sample rate (Hz) the impulse response is generated for
//
// Method: mirror the response to negative frequencies, spline-interpolate
// it on a regular grid, obtain the minimum phase as the Hilbert transform
// of log(A(w)), assemble a complex spectrum and inverse-FFT it.
void frequency_response_to_impulse_response(double w_in[],
                                            double A_in[],
                                            int n_count,
                                            float IR[],
                                            int IR_n_count,
                                            int rate)
{
  // Additional high frequency point.
  // Needed for better interpolation
  // at high frequency edge of the input response
  double far_point = A_in[n_count - 1]/100.0;
  double far_point_w = w_in[n_count - 1]*10.0;
  n_count++;
  // Convert frequency response to symmetrical form
  // (with negative frequencies)
  // [0, w_max] => [-w_max, w_max]
  QVector<double> A_sym(n_count * 2);
  QVector<double> w_sym(n_count * 2);
  // Mirror positive frequency values
  // to negative frequency values
  A_sym[n_count*2 - 1] = log(far_point);
  A_sym[0] = log(far_point);
  w_sym[n_count*2 - 1] = log10(far_point_w);
  w_sym[0] = -log10(far_point_w);
  for (int i = 1; i < n_count; i++)
  {
    A_sym[i + n_count - 1] = log(A_in[i - 1]);
    A_sym[-i - 1 + n_count + 1] = log(A_in[i - 1]);
    w_sym[i + n_count - 1] = log10(w_in[i - 1]);
    w_sym[-i - 1 + n_count + 1] = -log10(w_in[i - 1]);
  }
  // NOTE(review): integer division — both operands are int, so the
  // frequency step is truncated to a whole number; confirm a
  // fractional step was not intended here.
  double specrum_freq_step = rate / IR_n_count;
  // To perform this conversion, we must calculate
  // minimum phase response by Hilbert transform
  // of log(A(w))
  // hilbert_count is count of frequency points in interpolated spectrum
  // which will be used as input for Hilbert transform
  int hilbert_count = 2 * floor(far_point_w / 2.0 / M_PI / specrum_freq_step);
  // Use GSL spline interpolator
  gsl_interp_accel *acc = gsl_interp_accel_alloc ();
  gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, n_count*2);
  gsl_spline_init (spline, w_sym.data(), A_sym.data(), n_count*2);
  // Interpolated log(A(w)) response
  QVector<double> Alog(hilbert_count);
  QVector<double> ww(hilbert_count);
  // Perform interpolation on a regular grid of angular frequencies.
  // The stored x-axis is log10(|w|), hence the sign handling below.
  for (int i = 0; i < hilbert_count; i++)
  {
    ww[i] = 2.0*M_PI*(-specrum_freq_step * hilbert_count / 2.0 +
      specrum_freq_step * hilbert_count / 2.0 *2.0 / (double)(hilbert_count) * i);
    if (ww[i] < 0)
    {
      Alog[i] = gsl_spline_eval(spline, -log10(-ww[i]), acc);
    }
    else if (ww[i] != 0)
    {
      Alog[i] = gsl_spline_eval(spline, log10(ww[i]), acc);
    }
    if (ww[i] == 0.0)
    {
      Alog[i] = gsl_spline_eval(spline, 0.0, acc);
    }
  }
  gsl_spline_free (spline);
  gsl_interp_accel_free (acc);
  // Frequency response
  QVector<double> F(hilbert_count);
  // Perform Hilbert transform in frequency domain.
  // For this:
  // 1. Perform FFT of input interpolated log(A(w)) response
  QVector<s_fftw_complex> out(hilbert_count/2+1);
  fftw_plan p,p1;
  p = fftw_plan_dft_r2c_1d(hilbert_count, Alog.data(),
                           (double (*)[2])out.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  // 2. Perform Hilbert transform (swap real and imaginary parts)
  out[0].real = 0.0;
  out[0].imagine = 0.0;
  out[hilbert_count/2].real = 0.0;
  out[hilbert_count/2].imagine = 0.0;
  for (int i=1;i<hilbert_count/2;i++)
  {
    double a,b;
    a = out[i].real;
    b = out[i].imagine;
    out[i].imagine = a;
    out[i].real = -b;
  }
  // 3. Perform inverse FFT
  p1 = fftw_plan_dft_c2r_1d(hilbert_count, (double (*)[2])out.data(),
                            F.data(), FFTW_ESTIMATE);
  fftw_execute(p1);
  fftw_destroy_plan(p);
  fftw_destroy_plan(p1);
  // 4. Normalize result, get minimum phase response (F[])
  QVector<double> F_interp(hilbert_count/2);
  for (int i=hilbert_count/2;i<hilbert_count;i++)
  {
    F_interp[i-hilbert_count/2] = F[i] / hilbert_count; // 1/N inverse-FFT scaling (author-flagged line; verify)
  }
  // Construct spectrum in complex form
  // from amplitude and phase responses
  int spectrum_count = IR_n_count / 2 + 1;
  QVector<s_fftw_complex> spectrum(spectrum_count);
  for (int i=1; i<spectrum_count;i++)
  {
    gsl_complex A = gsl_complex_polar(exp(Alog[i+hilbert_count/2]), F_interp[i]);
    spectrum[i].real = GSL_REAL(A);
    spectrum[i].imagine = GSL_IMAG(A);
  }
  // Kill constant component
  spectrum[0].real = 0.0;
  spectrum[0].imagine = 0.0;
  // Get output impulse response from spectrum by inverse FFT
  QVector<double> IR_internal(IR_n_count);
  p1 = fftw_plan_dft_c2r_1d(IR_n_count, (double (*)[2])spectrum.data(),
                            IR_internal.data(), FFTW_ESTIMATE);
  fftw_execute(p1);
  fftw_destroy_plan(p1);
  // Calculated frequency response is not accurate.
  // This may lead to problems in impulse response -
  // it will start at time t < 0 instead of t = 0
  // We have no negative time in IR_internal buffer,
  // so n first samples of the responce will be placed
  // at the end of the response (inverse FFT has that effect).
  // We must return this samples back to the beginning
  // by circular rotating of the buffer.
  // For this:
  // 1. Find maximum sample value
  double max_sample = 0.0;
  for (int i = 0; i < IR_n_count; i++)
  {
    if (fabs(IR_internal[i]) > max_sample)
    {
      max_sample = fabs(IR_internal[i]);
    }
  }
  // 2. Find number of sample at which impulse response starts.
  // At this sample RMS amplitude will jump sharply
  // to the value near max_sample
  int impulse_start_sample = 0;
  for (int i = (IR_n_count - 1); i >= 4; i--)
  {
    double rms = sqrt((pow(
      IR_internal[i], 2) +
      pow(IR_internal[i - 1], 2) +
      pow(IR_internal[i - 2], 2) +
      pow(IR_internal[i - 3], 2) +
      pow(IR_internal[i - 4], 2)
      ) / 5.0);
    if (rms < max_sample / 100.0)
    {
      impulse_start_sample = i;
      break;
    }
  }
  // 3. Perform circular rotation and normalization
  for (int i = impulse_start_sample; i < IR_n_count; i++)
  {
    IR[i - impulse_start_sample] = IR_internal[i] / IR_n_count;
  }
  for (int i = 0; i < impulse_start_sample; i++)
  {
    IR[i + IR_n_count - impulse_start_sample] = IR_internal[i] / IR_n_count;
  }
}
// Calculates convolution of signal and impulse response
// in frequency domain
// Convolves signal[] (length signal_n_count) with impulse_response[]
// (length ir_n_count) by multiplication in the frequency domain.
// The result overwrites signal[] in place.
//
// NOTE(review): this is circular convolution at length
// max(signal_n_count, ir_n_count) — the shorter buffer is zero-padded,
// so wrap-around can occur near the buffer end; confirm callers
// account for that.
void fft_convolver(float signal[], int signal_n_count, float impulse_response[],
                   int ir_n_count)
{
  int n_count;
  // Signal and impulse responce must have the same length,
  // so we choose maximum length from both and extend
  // shorter impulse response (or signal)
  if (signal_n_count >= ir_n_count)
  {
    n_count = signal_n_count;
  }
  else
  {
    n_count = ir_n_count;
  }
  // Widen the signal to double (FFTW double-precision API) and zero-pad.
  QVector<double> signal_double(n_count);
  for (int i = 0; i < signal_n_count; i++)
  {
    signal_double[i] = signal[i];
  }
  for (int i = signal_n_count; i < n_count; i++)
  {
    signal_double[i] = 0.0;
  }
  // Get spectrum of the signal
  QVector<s_fftw_complex> signal_spectrum(n_count / 2 + 1);
  fftw_plan p;
  p = fftw_plan_dft_r2c_1d(n_count, signal_double.data(),
                           (double (*)[2])signal_spectrum.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  // Extend impulse response to signal length if needed
  QVector<double> impulse_response_double(n_count);
  for (int i = 0; i < ir_n_count; i++)
  {
    impulse_response_double[i] = impulse_response[i];
  }
  for (int i = ir_n_count; i < n_count; i++)
  {
    impulse_response_double[i] = 0.0;
  }
  // Get spectrum of the frequency response
  QVector<s_fftw_complex> impulse_response_spectrum(n_count / 2 + 1);
  p = fftw_plan_dft_r2c_1d(n_count, impulse_response_double.data(),
                           (double (*)[2])impulse_response_spectrum.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  // Perform convolution in frequency domain
  // result = signal * impulse_response
  for (int i = 0; i < n_count / 2 + 1; i++)
  {
    gsl_complex signal_A = gsl_complex_rect(signal_spectrum[i].real,
                                            signal_spectrum[i].imagine);
    gsl_complex impulse_response_A = gsl_complex_rect(impulse_response_spectrum[i].real,
                                                      impulse_response_spectrum[i].imagine);
    gsl_complex result_A = gsl_complex_mul(signal_A, impulse_response_A);
    signal_spectrum[i].real = GSL_REAL(result_A);
    signal_spectrum[i].imagine = GSL_IMAG(result_A);
  }
  // Perform inverse FFT, get output signal
  p = fftw_plan_dft_c2r_1d(n_count, (double (*)[2])signal_spectrum.data(),
                           signal_double.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  // Normalize output signal (FFTW transforms are unnormalized; the
  // r2c + c2r round trip scales by n_count).
  for (int i = 0; i < signal_n_count; i++)
  {
    signal[i] = signal_double[i] / n_count;
  }
}
// Recreates impulse response
// from test signal (signal_a)
// and response signal (signal_c)
// in frequency domain.
// Filters result by lowpass and hipass
// Recreates an impulse response from a test signal (signal_a) and the
// recorded response to it (signal_c) by spectral division, then
// band-limits and noise-gates the result.
//
// signal_a / signal_a_n_count - test (excitation) signal
// signal_c / signal_c_n_count - recorded response; also sets FFT length
// impulse_response / ir_n_count - output buffer and its length
// lowcut_relative_frequency  - hipass corner, relative to Nyquist
// highcut_relative_frequency - lowpass corner, relative to Nyquist
// noisegate_threshold_db     - gate threshold relative to the IR peak
void fft_deconvolver(float signal_a[],
                     int signal_a_n_count,
                     float signal_c[],
                     int signal_c_n_count,
                     float impulse_response[],
                     int ir_n_count,
                     float lowcut_relative_frequency,
                     float highcut_relative_frequency,
                     float noisegate_threshold_db
                    )
{
  int n_count = signal_c_n_count;
  QVector<double> signal_c_double(n_count);
  for (int i = 0; i < n_count; i++)
  {
    signal_c_double[i] = signal_c[i];
  }
  // Calculate response signal spectrum
  QVector<s_fftw_complex> signal_c_spectrum(n_count / 2 + 1);
  fftw_plan p;
  p = fftw_plan_dft_r2c_1d(n_count, signal_c_double.data(),
                           (double (*)[2])signal_c_spectrum.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  // Zero-pad the test signal up to the response length.
  QVector<double> signal_a_double(n_count);
  for (int i = 0; i < signal_a_n_count; i++)
  {
    signal_a_double[i] = signal_a[i];
  }
  for (int i = signal_a_n_count; i < n_count; i++)
  {
    signal_a_double[i] = 0.0;
  }
  // Calculate test signal spectrum
  QVector<s_fftw_complex> signal_a_spectrum(n_count / 2 + 1);
  p = fftw_plan_dft_r2c_1d(n_count, signal_a_double.data(),
                           (double (*)[2])signal_a_spectrum.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  QVector<double> impulse_response_double(n_count);
  QVector<s_fftw_complex> impulse_response_spectrum(n_count / 2 + 1);
  // Perform deconvolution in frequency domain
  // impulse_response = signal_c / signal_a
  for (int i = 0; i < n_count / 2 + 1; i++)
  {
    gsl_complex signal_a_A = gsl_complex_rect(signal_a_spectrum[i].real,
                                              signal_a_spectrum[i].imagine);
    gsl_complex signal_c_A = gsl_complex_rect(signal_c_spectrum[i].real,
                                              signal_c_spectrum[i].imagine);
    gsl_complex impulse_response_A = gsl_complex_div(signal_c_A, signal_a_A);
    impulse_response_spectrum[i].real = GSL_REAL(impulse_response_A);
    impulse_response_spectrum[i].imagine = GSL_IMAG(impulse_response_A);
  }
  // Kill constant component
  impulse_response_spectrum[0].real = 0.0;
  impulse_response_spectrum[0].imagine = 0.0;
  // Perform lowpass and hipass filtering: apply 7 cascaded first-order
  // RC sections of each filter directly to the spectrum.
  for (int i = 1; i < impulse_response_spectrum.size(); i++)
  {
    double lowcut_T = 1.0 / (2.0 * M_PI * lowcut_relative_frequency);
    double highcut_T = 1.0 / (2.0 * M_PI * highcut_relative_frequency);
    gsl_complex jw = gsl_complex_rect(0.0, 2 * M_PI * 0.5 * (double)i /
      impulse_response_spectrum.size());
    // First-order lowpass: 1 / (jw*T + 1)
    gsl_complex highcut_A = gsl_complex_div(gsl_complex_rect(1.0, 0.0),
      gsl_complex_add(gsl_complex_mul(jw, gsl_complex_rect(highcut_T, 0.0)),
                      gsl_complex_rect(1.0,0.0))
    );
    // First-order hipass: jw*T / (jw*T + 1)
    gsl_complex lowcut_A = gsl_complex_div(gsl_complex_mul(gsl_complex_rect(lowcut_T, 0.0),
      jw), gsl_complex_add(gsl_complex_mul(jw, gsl_complex_rect(lowcut_T, 0.0)),
                           gsl_complex_rect(1.0,0.0))
    );
    gsl_complex freq = gsl_complex_rect(impulse_response_spectrum[i].real,
                                        impulse_response_spectrum[i].imagine);
    for (int j = 0; j < 7; j++)
    {
      freq = gsl_complex_mul(freq, lowcut_A);
      freq = gsl_complex_mul(freq, highcut_A);
    }
    impulse_response_spectrum[i].real = GSL_REAL(freq);
    impulse_response_spectrum[i].imagine = GSL_IMAG(freq);
  }
  // Perform inverse FFT, get impulse response
  p = fftw_plan_dft_c2r_1d(n_count, (double (*)[2])impulse_response_spectrum.data(),
                           impulse_response_double.data(), FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);
  QVector<float> IR_internal(ir_n_count);
  // Normalize impulse response and find its peak for the noise gate.
  float irMax = 0.0;
  for (int i = 0; i < ir_n_count; i++)
  {
    IR_internal[i] = impulse_response_double[i] / n_count;
    if (fabs(IR_internal[i]) > irMax)
    {
      irMax = fabs(IR_internal[i]);
    }
  }
  // Apply noisegate with hysteresis (separate on/off thresholds around
  // the nominal threshold to avoid chattering).
  float noisegate_threshold = pow(10.0, noisegate_threshold_db / 20.0);
  float noiseGateThresholdOff = 0.9 * irMax * noisegate_threshold;
  float noiseGateThresholdOn = 1.1 * irMax * noisegate_threshold;
  bool noiseGateOn = false;
  for (int i = 0; i < ir_n_count; i++)
  {
    if (fabs(IR_internal[i]) <= noiseGateThresholdOff)
    {
      noiseGateOn = false;
    }
    if (fabs(IR_internal[i]) >= noiseGateThresholdOn)
    {
      noiseGateOn = true;
    }
    if (!noiseGateOn)
    {
      IR_internal[i] = 0.0;
    }
  }
  // Calculated frequency response is not accurate.
  // This may lead to problems in impulse response -
  // it will start at time t < 0 instead of t = 0
  // We have no negative time in IR_internal buffer,
  // so n first samples of the responce will be placed
  // at the end of the response (inverse FFT has that effect).
  // We must return this samples back to the beginning
  // by circular rotating of the buffer.
  // For this:
  // 1. Find number of sample at which impulse response starts.
  // At this sample RMS amplitude will jump sharply
  // to the value near max_sample
  int impulse_start_sample = 0;
  for (int i = (ir_n_count - 1); i >= 4; i--)
  {
    double rms = sqrt((pow(
      IR_internal[i], 2) +
      pow(IR_internal[i - 1], 2) +
      pow(IR_internal[i - 2], 2) +
      pow(IR_internal[i - 3], 2) +
      pow(IR_internal[i - 4], 2)
      ) / 5.0);
    if (rms < irMax / 100.0)
    {
      impulse_start_sample = i;
      break;
    }
  }
  // 2. Perform circular rotation
  for (int i = impulse_start_sample; i < ir_n_count; i++)
  {
    impulse_response[i - impulse_start_sample] = IR_internal[i];
  }
  for (int i = 0; i < impulse_start_sample; i++)
  {
    impulse_response[i + ir_n_count - impulse_start_sample] = IR_internal[i];
  }
  // 3. Suppress possible remaining artifacts at the end of the buffer
  for (int i = 0.99 * ir_n_count; i < ir_n_count; i++)
  {
    impulse_response[i] = 0.0;
  }
}
// Calculates average amplitude value of each
// frequency component of the signal in buffer
// Spectrum calculated on each n_spectrum samples
// type:
// FFT_AVERAGE_MAX - to get maximum value
// FFT_AVERAGE_MEAN - to get mean value
// Calculates an averaged amplitude spectrum of the signal in buffer.
// The buffer is processed in consecutive windows of n_spectrum*2
// samples (no overlap, no windowing function); each window yields an
// n_spectrum-bin amplitude spectrum.
//
// average_spectrum - output, n_spectrum doubles
// buffer / n_samples - input signal
// type: FFT_AVERAGE_MAX  - keep the per-bin maximum over all windows
//       FFT_AVERAGE_MEAN - accumulate a per-bin mean-like value
void fft_average(double *average_spectrum,
                 double *buffer,
                 int n_spectrum,
                 int n_samples,
                 FFT_AVERAGE_TYPE type)
{
  QVector<s_fftw_complex> out(n_spectrum + 1);
  // NOTE(review): memset's second argument is an int; 0.0 converts to
  // 0, which does zero IEEE doubles correctly, but a literal 0 would
  // state the intent more clearly.
  memset(average_spectrum, 0.0, n_spectrum * sizeof(double));
  int p_buffer = 0;
  // Calculate spectrum on n_spectrum slice,
  // calculate average value of each frequency in spectrum
  while ((p_buffer + n_spectrum * 2) < n_samples)
  {
    // Perform FFT
    fftw_plan p;
    p = fftw_plan_dft_r2c_1d(n_spectrum * 2, buffer + p_buffer,
                             (double (*)[2])out.data(), FFTW_ESTIMATE);
    p_buffer += n_spectrum * 2;
    fftw_execute(p);
    fftw_destroy_plan(p);
    for (int i = 1; i < n_spectrum + 1; i++)
    {
      // Bin amplitude, normalized by the FFT length (n_spectrum * 2).
      double A = sqrt(pow(out[i].real, 2) + pow(out[i].imagine, 2)) / n_spectrum / 2.0;
      switch (type)
      {
        case FFT_AVERAGE_MAX:
          if (A > average_spectrum[i - 1])
          {
            average_spectrum[i - 1] = A;
          }
          break;
        case FFT_AVERAGE_MEAN:
          // NOTE(review): divides by n_spectrum (bin count), not by the
          // number of averaged windows — verify this scaling is intended.
          average_spectrum[i - 1] += A / n_spectrum;
          break;
      }
    }
  }
}
// Calculates correction frequency response
// for auto-equalizer
void calulate_autoeq_amplitude_response(int n_spectrum,
int sample_rate,
double *current_signal,
int n_current_samples,
double *ref_signal,
int n_ref_samples,
double *f_log_values,
double *db_values,
int n_autoeq_points
)
{
QVector<double> current_spectrum(n_spectrum);
QVector<double> ref_spectrum(n_spectrum);
// Calculate average spectrums of current signal
// and reference signal
fft_average(current_spectrum.data(), current_signal, n_spectrum,
n_current_samples, FFT_AVERAGE_MEAN);
fft_average(ref_spectrum.data(), ref_signal, n_spectrum,
n_ref_samples, FFT_AVERAGE_MEAN);
// Calculate diff between spectrums
QVector<double> diff_spectrum(n_spectrum + 1);
QVector<double> diff_spectrum_log_freqs(n_spectrum + 1);
for (int i = 1; i < n_spectrum + 1; i++)
{
diff_spectrum[i] = 20.0 * log10(ref_spectrum[i - 1] / current_spectrum[i - 1]);
diff_spectrum_log_freqs[i] = log10(
(double)(i + 1)
/ (double)(n_spectrum)
* sample_rate / 2
);
}
diff_spectrum_log_freqs[0] = 0.0;
diff_spectrum[0] = diff_spectrum[1];
gsl_interp_accel *acc = gsl_interp_accel_alloc ();
gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline,
n_spectrum + 1);
gsl_spline_init (spline,
diff_spectrum_log_freqs.data(),
diff_spectrum.data(),
n_spectrum + 1);
// Interpolate diff spectrum to given frequency values
QVector<double> interpolated_diff_spectrum_log_freqs(n_autoeq_points * 100);
QVector<double> interpolated_diff_spectrum(n_autoeq_points * 100);
for (int i = 0; i < n_autoeq_points * 100; i++)
{
interpolated_diff_spectrum_log_freqs[i] = (log10(20000.0) - log10(10.0))
* (double)(i) / (n_autoeq_points * 100 - 1) + log10(10.0);
interpolated_diff_spectrum[i] =
gsl_spline_eval(spline,
interpolated_diff_spectrum_log_freqs[i],
acc);
}
int begin_f_pos = 0;
for (int i = 0; i < n_autoeq_points - 1; i++)
{
double amplitude_sum = 0.0;
int prev_f_pos = begin_f_pos;
while (interpolated_diff_spectrum_log_freqs[begin_f_pos] < f_log_values[i])
{
amplitude_sum += interpolated_diff_spectrum[begin_f_pos];
begin_f_pos++;
}
int end_f_pos = begin_f_pos;
while (interpolated_diff_spectrum_log_freqs[end_f_pos] < f_log_values[i + 1])
{
amplitude_sum += interpolated_diff_spectrum[end_f_pos];
end_f_pos++;
}
amplitude_sum /= end_f_pos - prev_f_pos;
db_values[i] = amplitude_sum;
}
db_values[n_autoeq_points - 1] =
db_values[n_autoeq_points - 2];
// Normalize amplitude response
double max_amplitude = -DBL_MAX;
for (int i = 0; i < n_autoeq_points; i++)
{
if (max_amplitude < db_values[i])
{
max_amplitude = db_values[i];
}
}
for (int i = 0; i < n_autoeq_points; i++)
{
db_values[i] -= max_amplitude;
}
}
// Generates logarithmic sweep signal
// It can be used as a test signal to get inpulse response
// Fills data[] with an exponential (logarithmic) sine sweep running
// from f_start to f_end Hz over length_sec seconds at sample_rate.
// Uses the standard ESS phase formula:
//   phase(t) = 2*pi*f_start*T * (k^(t/T) - 1) / ln(k),  k = f_end/f_start
// Writes sample_rate * length_sec samples; data[] must be large enough.
void generate_logarithmic_sweep(double length_sec,
                                int sample_rate,
                                double f_start,
                                double f_end,
                                double sweep_amplitude,
                                float data[])
{
  const double ratio = f_end / f_start;
  const double logRatio = log(ratio);

  for (int n = 0; n < sample_rate * length_sec; n++)
  {
    // Normalized time t/T in [0, 1).
    double progress = (double)n / sample_rate / length_sec;
    double phase = 2 * M_PI * length_sec * f_start * (pow(ratio, progress) - 1.0) / logRatio;
    data[n] = sweep_amplitude * sin(phase);
  }
}
// Resamples signal in buffer
// Resamples sourceBuffer from sourceSamplerate to targetSamplerate
// using zita-resampler and returns the converted buffer. If the rates
// are equal the input is returned unchanged.
QVector<float> resample_vector(QVector<float> sourceBuffer,
                               float sourceSamplerate,
                               float targetSamplerate)
{
  QVector<float> targetBuffer;
  if (sourceSamplerate == targetSamplerate)
  {
    targetBuffer = sourceBuffer;
  }
  else
  {
    float ratio = targetSamplerate/(float)sourceSamplerate;
    targetBuffer.resize(sourceBuffer.size() * ratio);
    QScopedPointer<Resampler> resampl(new Resampler());
    // 1 channel, 48-tap filter quality.
    resampl->setup(sourceSamplerate, targetSamplerate, 1, 48);
    // k = filter length; the signal is embedded with k/2-1 leading and
    // k-1 trailing zero samples so the FIR filter has full context.
    int k = resampl->inpsize();
    QVector<float> signalIn(sourceBuffer.size() + k/2 - 1 + k - 1);
    QVector<float> signalOut((int)((sourceBuffer.size() + k/2 - 1 + k - 1) * ratio));
    // Create paddig before and after signal, needed for zita-resampler
    for (int i = 0; i < sourceBuffer.size() + k/2 - 1 + k - 1; i++)
    {
      signalIn[i] = 0.0;
    }
    for (int i = k/2 - 1; i < sourceBuffer.size() + k/2 - 1; i++)
    {
      signalIn[i] = sourceBuffer[i - k/2 + 1];
    }
    resampl->inp_count = sourceBuffer.size() + k/2 - 1 + k - 1;
    resampl->out_count = (sourceBuffer.size() + k/2 - 1 + k - 1) * ratio;
    resampl->inp_data = signalIn.data();
    resampl->out_data = signalOut.data();
    resampl->process();
    // Copy the useful part of the output; the 1/ratio factor
    // compensates the resampler's amplitude scaling.
    for (int i = 0; i < targetBuffer.size(); i++)
    {
      targetBuffer[i] = signalOut[i] / ratio;
    }
  }
  return targetBuffer;
}
| 22,595
|
C++
|
.cpp
| 619
| 30.44588
| 171
| 0.6148
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,348
|
cabinet_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/cabinet_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QFrame>
#include <QVBoxLayout>
#include <QWidget>
#include <QSizePolicy>
#include <QLabel>
#include <QFont>
#include <QFileDialog>
#include <cmath>
#include "math_functions.h"
#include "cabinet_edit_widget.h"
#include "nonlinear_widget.h"
// Builds the cabinet correction editor: an equalizer curve widget with
// load/save/reset/apply buttons, an auto-equalizer (runs in a worker
// thread against a reference recording) and an enable/disable toggle.
//
// prc - audio processor being edited; plr - playback engine providing
// the reference signal. Neither is owned by this widget.
CabinetEditWidget::CabinetEditWidget(QWidget *parent, Processor *prc, Player *plr) :
  BlockEditWidget(parent)
{
  processor = prc;
  player = plr;
  setFrameShape(QFrame::Panel);
  QVBoxLayout *vbox = new QVBoxLayout(this);
  QLabel *cabinetEqualizerLabel = new QLabel(tr("Equalizer"), this);
  vbox->addWidget(cabinetEqualizerLabel);
  cabinetEqualizerLabel->setMaximumHeight(30);
  cabinetEqualizerLabel->setAlignment(Qt::AlignHCenter);
  QFont cabinetEqualizerLabelFont = cabinetEqualizerLabel->font();
  cabinetEqualizerLabelFont.setPointSize(15);
  cabinetEqualizerLabel->setFont(cabinetEqualizerLabelFont);
  // Row of action buttons above the equalizer curve.
  QWidget *equalizerButtonsBar = new QWidget(this);
  vbox->addWidget(equalizerButtonsBar);
  equalizerButtonsBar->setSizePolicy(QSizePolicy::Fixed, QSizePolicy::Fixed);
  QHBoxLayout *equalizerButtonsHBox = new QHBoxLayout(equalizerButtonsBar);
  loadButton = new QPushButton(tr("Load from file"), equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(loadButton);
  connect(loadButton, &QPushButton::clicked, this, &CabinetEditWidget::loadButtonClicked);
  saveButton = new QPushButton(tr("Save to file"), equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(saveButton);
  connect(saveButton, &QPushButton::clicked, this, &CabinetEditWidget::saveButtonClicked);
  resetButton = new QPushButton(tr("Reset"), equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(resetButton);
  connect(resetButton, &QPushButton::clicked, this, &CabinetEditWidget::resetButtonClicked);
  applyButton = new QPushButton(tr("Apply"), equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(applyButton);
  connect(applyButton, &QPushButton::clicked, this, &CabinetEditWidget::applyButtonClicked);
  equalizerButtonsHBox->addSpacing(20);
  autoEqButton = new QPushButton(tr("Auto Equalizer"), equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(autoEqButton);
  connect(autoEqButton, &QPushButton::clicked, this, &CabinetEditWidget::autoEqButtonClicked);
  equalizerButtonsHBox->addSpacing(40);
  // Toggle button text mirrors whether the correction is active.
  disableButton = new QPushButton(equalizerButtonsBar);
  equalizerButtonsHBox->addWidget(disableButton);
  if (processor->isCabinetCorrectionEnabled())
  {
    disableStatus = STAT_ENABLED;
    disableButton->setText(tr("Disable"));
  }
  else
  {
    disableStatus = STAT_DISABLED;
    disableButton->setText(tr("Enable"));
  }
  connect(disableButton, &QPushButton::clicked, this, &CabinetEditWidget::disableButtonClicked);
  equalizer = new EqualizerWidget(this);
  vbox->addWidget(equalizer);
  connect(equalizer, &EqualizerWidget::responseChanged, this, &CabinetEditWidget::responseChanged);
  // NOTE(review): the worker threads below are created without a
  // QObject parent and are not deleted here — presumably freed in the
  // destructor; confirm, otherwise they leak.
  autoEqThread = new AutoEqThread();
  autoEqThread->player = player;
  autoEqThread->processor = processor;
  autoEqThread->equalizer = equalizer;
  connect(autoEqThread, &QThread::finished, this, &CabinetEditWidget::autoEqThreadFinished);
  connect(autoEqThread, &AutoEqThread::progressChanged,
          this, &CabinetEditWidget::autoEqThreadProgressChanged);
  fileResamplingThread = new FileResamplingThread();
  connect(fileResamplingThread, &QThread::finished, this,
          &CabinetEditWidget::fileResamplingThreadFinished);
  msg = new MessageWidget(this);
  // Seed the processor's stored curve from the widget defaults on the
  // first run, otherwise restore the widget from the processor.
  if (processor->correctionEqualizerFLogValues.size() == 0)
  {
    processor->correctionEqualizerFLogValues = equalizer->fLogValuesEq;
  }
  else
  {
    equalizer->fLogValuesEq = processor->correctionEqualizerFLogValues;
  }
  if (processor->correctionEqualizerDbValues.size() == 0)
  {
    processor->correctionEqualizerDbValues = equalizer->dbValuesEq;
  }
  else
  {
    equalizer->dbValuesEq = processor->correctionEqualizerDbValues;
  }
  recalculate();
}
// Refreshes the equalizer display: restores the stored correction
// curve and re-samples the cabinet frequency response on a log grid
// from 10 Hz to 20 kHz.
void CabinetEditWidget::recalculate()
{
  equalizer->fLogValuesEq = processor->correctionEqualizerFLogValues;
  equalizer->dbValuesEq = processor->correctionEqualizerDbValues;

  const int nPoints = 1000;
  QVector<float> freqs(nPoints);

  // Logarithmically spaced probe frequencies.
  for (int idx = 0; idx < nPoints; idx++)
  {
    float fLog = (log10(20000.0) - log10(10.0))*(float)(idx) / (nPoints - 1) + log10(10.0);
    freqs[idx] = pow(10, fLog);
  }

  QVector<float> frequencyResponse = processor->getCabinetSumFrequencyResponse(freqs);

  const int nResp = frequencyResponse.size();
  equalizer->fLogValuesFr.resize(nResp);
  equalizer->dbValuesFr.resize(nResp);

  // Store the response as (log10 f, dB) pairs for plotting.
  for (int idx = 0; idx < nResp; idx++)
  {
    equalizer->fLogValuesFr[idx] = log10(freqs[idx]);
    equalizer->dbValuesFr[idx] = 20.0 * log10(frequencyResponse[idx]);
  }

  equalizer->drawBackground();
  equalizer->update(0,0,width(),height());
}
void CabinetEditWidget::responseChanged()
{
processor->correctionEqualizerFLogValues = equalizer->fLogValuesEq;
processor->correctionEqualizerDbValues = equalizer->dbValuesEq;
QVector<double> w(equalizer->fLogValuesEq.size());
QVector<double> A(equalizer->fLogValuesEq.size());
for (int i = 0; i < w.size(); i++)
{
w[i] = 2.0 * M_PI * pow(10.0, equalizer->fLogValuesEq[i]);
A[i] = pow(10.0, equalizer->dbValuesEq[i] / 20.0);
}
disableStatus = STAT_ENABLED;
disableButton->setText(tr("Disable"));
processor->setCabinetSumCorrectionImpulseFromFrequencyResponse(w, A);
}
// Bakes the pending correction into the cabinet impulse itself, refreshes
// the plotted response, then clears the (now redundant) correction curve.
void CabinetEditWidget::applyButtonClicked()
{
  processor->applyCabinetSumCorrection();
  recalculate();
  resetButtonClicked();
}
// Discards any pending correction: resets the processor's correction state,
// restores the default flat EQ curve, and repaints the plot.
void CabinetEditWidget::resetButtonClicked()
{
  processor->resetCabinetSumCorrection();
  equalizer->resetEq();

  // Keep the processor's stored curve in sync with the now-flat widget curve.
  processor->correctionEqualizerFLogValues = equalizer->fLogValuesEq;
  processor->correctionEqualizerDbValues = equalizer->dbValuesEq;

  equalizer->drawBackground();
  equalizer->update(0, 0, width(), height());
}
// Starts the automatic equalization analysis, provided a reference track
// has been loaded; otherwise informs the user and does nothing.
void CabinetEditWidget::autoEqButtonClicked()
{
  msg->setProgressValue(0);

  if (player->refDataL.size() == 0)
  {
    QMessageBox::information(this, tr("AutoEqualizer"), tr("Reference file is not loaded!"));
    return;
  }

  msg->setMessage(tr("Analyzing..."));
  msg->setTitle(tr("Please Wait!"));
  msg->open();

  autoEqThread->start();
}
// Slot run when the auto-EQ worker thread completes: closes the progress
// dialog, applies the computed curve and repaints the plot.
void CabinetEditWidget::autoEqThreadFinished()
{
  msg->setProgressValue(100);
  msg->close();
  // Commit the curve the worker wrote into the equalizer widget.
  responseChanged();
  disableStatus = STAT_ENABLED;
  disableButton->setText(tr("Disable"));
  equalizer->drawBackground();
  equalizer->update(0, 0, width(), height());
}
// Public entry point to reset this editor's controls; delegates to the
// same logic as the Reset button.
void CabinetEditWidget::resetControls()
{
  resetButtonClicked();
}
// Asks the user for a target path and writes the current stereo cabinet
// impulse response as a 16-bit PCM WAV file at the processor's sampling rate.
void CabinetEditWidget::saveButtonClicked()
{
  QString saveFileName =
    QFileDialog::getSaveFileName(this,
                                 tr("Save impulse response file"),
                                 QString(),
                                 tr("WAV files (*.wav)"));
  if (!saveFileName.isEmpty())
  {
    QVector<float> left_impulse = processor->getLeftImpulse();
    QVector<float> right_impulse = processor->getRightImpulse();

    SF_INFO sfinfo;
    sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
    sfinfo.frames = left_impulse.size();
    sfinfo.samplerate = processor->getSamplingRate();
    sfinfo.channels = 2;
    sfinfo.sections = 1;
    sfinfo.seekable = 1;

    SNDFILE *impulseFile = sf_open(saveFileName.toUtf8().constData(), SFM_WRITE, &sfinfo);

    // BUGFIX: sf_close() was previously called unconditionally, i.e. also
    // when sf_open() failed and returned NULL. libsndfile requires a valid
    // handle, so close only inside the success branch.
    if (impulseFile != NULL)
    {
      // Interleave left/right samples into one stereo frame buffer.
      QVector<float> tempBuffer(left_impulse.size() * 2);
      for (int i = 0; i < left_impulse.size(); i++)
      {
        tempBuffer[2 * i] = left_impulse[i];
        tempBuffer[2 * i + 1] = right_impulse[i];
      }
      sf_writef_float(impulseFile, tempBuffer.data(), left_impulse.size());
      sf_close(impulseFile);
    }
  }
}
// Lets the user pick an impulse response sound file and starts background
// resampling of it to the processor's sampling rate.
void CabinetEditWidget::loadButtonClicked()
{
  QString filename = QFileDialog::getOpenFileName(this,
                                                  tr("Open impulse response file"),
                                                  QString(),
                                                  tr("Sound files (*.wav *.ogg *.flac)"));
  if (filename.isEmpty())
  {
    return;
  }

  msg->setMessage(tr("Resampling..."));
  msg->setTitle(tr("Please Wait!"));
  msg->open();

  fileResamplingThread->stereoMode = true;
  fileResamplingThread->filename = filename;
  fileResamplingThread->samplingRate = processor->getSamplingRate();
  fileResamplingThread->start();
}
// Slot run when background resampling completes: normalizes the loaded
// impulse to a reference energy and installs it as the cabinet impulse.
void CabinetEditWidget::fileResamplingThreadFinished()
{
  msg->setProgressValue(100);
  msg->close();

  float cabinetImpulseEnergy = 0.0;
  for (int i = 0; i < fileResamplingThread->dataL.size(); i++)
  {
    cabinetImpulseEnergy += pow(fileResamplingThread->dataL[i], 2);
  }

  // BUGFIX: if loading failed (empty buffers) or the file is all-zero,
  // the energy is 0 and the division below produced inf/NaN gains. Bail
  // out instead of installing a corrupted impulse.
  if (cabinetImpulseEnergy <= 0.0)
  {
    return;
  }

  // Scale so total energy matches the reference level (0.45 at 48 kHz,
  // adjusted for the current sampling rate).
  float cabinetImpulseEnergyCoeff = sqrt(0.45 * 48000.0 /
                                         (float)processor->getSamplingRate()) /
                                    sqrt(cabinetImpulseEnergy);

  for (int i = 0; i < fileResamplingThread->dataL.size(); i++)
  {
    fileResamplingThread->dataL[i] *= cabinetImpulseEnergyCoeff;
    fileResamplingThread->dataR[i] *= cabinetImpulseEnergyCoeff;
  }

  processor->setCabinetImpulse(fileResamplingThread->dataL, fileResamplingThread->dataR);
  recalculate();
}
// Toggles the cabinet correction on/off and updates the button label to
// show the action the next click will perform.
void CabinetEditWidget::disableButtonClicked()
{
  if (disableStatus == STAT_DISABLED)
  {
    disableStatus = STAT_ENABLED;
    disableButton->setText(tr("Disable"));
    processor->setCabinetCorrectionStatus(true);
  }
  else if (disableStatus == STAT_ENABLED)
  {
    disableStatus = STAT_DISABLED;
    disableButton->setText(tr("Enable"));
    processor->setCabinetCorrectionStatus(false);
  }
}
// Forwards worker-thread progress (0..100) to the progress dialog.
void CabinetEditWidget::autoEqThreadProgressChanged(int progress)
{
  msg->setProgressValue(progress);
}
// Worker thread for automatic equalization: renders the DI track through a
// private copy of the processor, then derives an EQ curve that matches the
// processed signal's spectrum to the reference track's spectrum.
void AutoEqThread::run()
{
  // Truncate the DI track to a multiple of the processing fragment size.
  int size_divisible_by_fragm = floor((double)player->diData.size() / (double)fragm) * fragm;
  QVector<double> diData(size_divisible_by_fragm);
  for (int i = 0; i < diData.size(); i++)
  {
    diData[i] = player->diData[i];
  }
  // Mix the stereo reference down to mono.
  QVector<double> refData(player->refDataL.size());
  for (int i = 0; i < refData.size(); i++)
  {
    refData[i] = (player->refDataL[i] + player->refDataR[i]) / 2.0;
  }
  QVector<float> floatProcessedDataL(diData.size());
  QVector<float> floatProcessedDataR(diData.size());
  // Build a private Processor clone so the real-time one is not disturbed.
  QSharedPointer<Processor> backProcessor
    = QSharedPointer<Processor>(new Processor(processor->getSamplingRate()));
  backProcessor->loadProfile(processor->getProfileFileName());
  backProcessor->setControls(processor->getControls());
  backProcessor->setProfile(processor->getProfile());
  backProcessor->setPreampImpulse(processor->getPreampImpulse());
  backProcessor->setCabinetImpulse(processor->getLeftImpulse(), processor->getRightImpulse());
  // Re-apply the preamp correction curve (log10(f) -> rad/s, dB -> linear).
  QVector<double> w(processor->preampCorrectionEqualizerFLogValues.size());
  QVector<double> A(processor->preampCorrectionEqualizerFLogValues.size());
  for (int i = 0; i < w.size(); i++)
  {
    w[i] = 2.0 * M_PI * pow(10.0, processor->preampCorrectionEqualizerFLogValues[i]);
    A[i] = pow(10.0, processor->preampCorrectionEqualizerDbValues[i] / 20.0);
  }
  backProcessor->setPreampCorrectionImpulseFromFrequencyResponse(w, A);
  // Render the DI track offline through the cloned processor.
  backProcessor->process(floatProcessedDataL.data(),
                         floatProcessedDataR.data(),
                         player->diData.data(),
                         floatProcessedDataL.size());
  emit progressChanged(30);
  // Mono mixdown of the processed output for spectral comparison.
  QVector<double> processedData(floatProcessedDataL.size());
  for (int i = 0; i < floatProcessedDataL.size(); i++)
  {
    processedData[i] = (floatProcessedDataL[i] + floatProcessedDataR[i]) / 2.0;
  }
  int averageSpectrumSize = 4096;
  int autoEqualazierPointsNum = 40;
  // Lay out 40 EQ points logarithmically between 10 Hz and 20 kHz.
  equalizer->fLogValuesEq.resize(autoEqualazierPointsNum);
  equalizer->dbValuesEq.resize(autoEqualazierPointsNum);
  equalizer->fLogValuesEq[0] = log10(10.0);
  for (int i = 0; i < autoEqualazierPointsNum - 1; i++)
  {
    equalizer->fLogValuesEq[i + 1] = (log10(20000.0) - log10(10.0))
      * (double)(i + 1) / (equalizer->fLogValuesEq.size() - 1) + log10(10.0);
  }
  emit progressChanged(50);
  // Fill dbValuesEq with the gain needed to match the reference spectrum.
  calulate_autoeq_amplitude_response(averageSpectrumSize,
                                     player->getSampleRate(),
                                     processedData.data(),
                                     processedData.size(),
                                     refData.data(),
                                     refData.size(),
                                     equalizer->fLogValuesEq.data(),
                                     equalizer->dbValuesEq.data(),
                                     autoEqualazierPointsNum
                                    );
  // Clamp the result to the widget's displayable range [-30 dB, +20 dB].
  for (int i = 0; i < equalizer->fLogValuesEq.size(); i++)
  {
    if (equalizer->dbValuesEq[i] > 20.0)
    {
      equalizer->dbValuesEq[i] = 20.0;
    }
    if (equalizer->dbValuesEq[i] < (-30.0))
    {
      equalizer->dbValuesEq[i] = -30.0;
    }
  }
}
| 13,620
|
C++
|
.cpp
| 355
| 33.270423
| 99
| 0.704582
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,349
|
block_edit_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/block_edit_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include "block_edit_widget.h"
// Base class for the amp-block editor panels; no setup of its own — all
// state lives in the derived editors.
BlockEditWidget::BlockEditWidget(QWidget *parent) :
  QFrame(parent)
{
}
| 954
|
C++
|
.cpp
| 23
| 39.521739
| 80
| 0.702586
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,350
|
file_resampling_thread.cpp
|
olegkapitonov_tubeAmp-Designer/src/file_resampling_thread.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include "file_resampling_thread.h"
#include "math_functions.h"
// Default to mono output; callers set stereoMode before start() when the
// left/right channels must be kept separate.
FileResamplingThread::FileResamplingThread()
{
  stereoMode = false;
}
// Thread entry point: loads and resamples `filename` when one was supplied;
// otherwise finishes immediately.
void FileResamplingThread::run()
{
  if (filename.isEmpty())
  {
    return;
  }
  loadFile(filename.toUtf8().constData());
}
// Reads a sound file via libsndfile, downmixes/splits its channels and
// resamples the result into dataL (and dataR in stereo mode) at
// `samplingRate`. On open failure the buffers are left untouched.
void FileResamplingThread::loadFile(const char *filename)
{
  SF_INFO sfinfo;
  sfinfo.format = 0;
  SNDFILE *sndFile = sf_open(filename, SFM_READ, &sfinfo);
  if (sndFile != NULL)
  {
    // Interleaved frames straight from the file.
    QVector<float> tempBuffer(sfinfo.frames * sfinfo.channels);
    sf_readf_float(sndFile, tempBuffer.data(), sfinfo.frames);
    sf_close(sndFile);
    QVector<float> notResampledBufferL(sfinfo.frames);
    QVector<float> notResampledBufferR(sfinfo.frames);
    for (int i = 0; i < sfinfo.frames * sfinfo.channels; i += sfinfo.channels)
    {
      float sumFrame = 0.0;
      if (stereoMode && (sfinfo.channels > 1))
      {
        // Stereo mode, multi-channel file: channel 0 becomes the left
        // buffer, the average of the remaining channels becomes the right.
        for (int j = 1; j < sfinfo.channels; j++)
        {
          sumFrame += tempBuffer[i + j];
        }
        sumFrame /= sfinfo.channels - 1;
        notResampledBufferL[i / sfinfo.channels] = tempBuffer[i];
        notResampledBufferR[i / sfinfo.channels] = sumFrame;
      }
      else
      {
        // Mono mode (or mono file): average all channels into one buffer.
        for (int j = 0; j < sfinfo.channels; j++)
        {
          sumFrame += tempBuffer[i + j];
        }
        sumFrame /= sfinfo.channels;
        notResampledBufferL[i / sfinfo.channels] = sumFrame;
      }
    }
    dataL = resample_vector(notResampledBufferL, sfinfo.samplerate, samplingRate);
    // Resampling Right Channel
    if (stereoMode && (sfinfo.channels > 1))
    {
      dataR = resample_vector(notResampledBufferR, sfinfo.samplerate, samplingRate);
    }
    // A mono file in stereo mode: duplicate the single channel.
    if (stereoMode && (sfinfo.channels == 1))
    {
      dataR = dataL;
    }
  }
}
| 2,594
|
C++
|
.cpp
| 78
| 28.692308
| 84
| 0.658683
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,351
|
equalizer_widget.cpp
|
olegkapitonov_tubeAmp-Designer/src/equalizer_widget.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QPalette>
#include <QPainter>
#include <QPaintEvent>
#include <cmath>
#include <gsl/gsl_spline.h>
#include "equalizer_widget.h"
// Interactive frequency-response editor: draws a grid plus two curves
// (the editable EQ and the measured response) onto an offscreen backbuffer.
EqualizerWidget::EqualizerWidget(QWidget *parent) : QWidget(parent)
{
  QPalette Pal(palette());
  Pal.setColor(QPalette::Background, Qt::black);
  setAutoFillBackground(true);
  setPalette(Pal);
  // Placeholder size; resizeEvent() reallocates it at the real widget size.
  backbuffer = new QPixmap(100, 100);
  maxDb = 20;
  activePoint = 0;
  setMaximumHeight(450);
  setMinimumHeight(450);
  // Needed so mouseMoveEvent fires without a button held (hover readout).
  setMouseTracking(true);
  resetEq();
  // Plot margins, in pixels, around the grid area.
  margin.setLeft(50);
  margin.setRight(20);
  margin.setTop(40);
  margin.setBottom(50);
  // Grid geometry: pixels per minor step, dB covered by 10 minor steps.
  stepHeight = 10;
  dbInStep = 20;
}
// Paints the widget: blits the pre-rendered grid from the backbuffer, then
// draws the interpolated EQ curve (green), the combined frequency response
// (cyan), the hover readout text, and the draggable EQ control points.
void EqualizerWidget::paintEvent(QPaintEvent * event)
{
  QPainter qp(this);
  qp.setRenderHint(QPainter::Antialiasing);
  // Blit only the dirty rectangles from the background pixmap.
  for (QRegion::const_iterator rects = event->region().begin();
       rects!= event->region().end(); rects++)
  {
    QRect r = *rects;
    qp.drawPixmap(r,*backbuffer,r);
  }
  // Build a symmetric (mirrored around f=0 in log space) copy of the EQ
  // points, with far anchor points added at both ends, so the cubic spline
  // has natural behavior at the edges of the visible range.
  double farPointDb = dbValuesEq[dbValuesEq.size() - 1] - 40.0;
  double farPointFLog = fLogValuesEq[fLogValuesEq.size() - 1] + 1.0;
  QVector<double> dbValuesEqSym((dbValuesEq.size() + 1) * 2);
  QVector<double> fLogValuesEqSym((fLogValuesEq.size() + 1) * 2);
  dbValuesEqSym[0] = farPointDb;
  fLogValuesEqSym[0] = -farPointFLog;
  dbValuesEqSym[dbValuesEqSym.size() - 1] = farPointDb;
  fLogValuesEqSym[fLogValuesEqSym.size() - 1] = farPointFLog;
  for (int i = 1; i < dbValuesEqSym.size() / 2; i++)
  {
    dbValuesEqSym[i + dbValuesEqSym.size() / 2 - 1] = dbValuesEq[i - 1];
    dbValuesEqSym[-i + dbValuesEqSym.size() / 2] = dbValuesEq[i - 1];
    fLogValuesEqSym[i + dbValuesEqSym.size() / 2 - 1] = fLogValuesEq[i - 1];
    fLogValuesEqSym[-i + dbValuesEqSym.size() / 2] = -fLogValuesEq[i - 1];;
  }
  // Cubic splines (GSL) through the EQ points and the measured response.
  gsl_interp_accel *acc_eq = gsl_interp_accel_alloc ();
  gsl_spline *spline_eq = gsl_spline_alloc (gsl_interp_cspline, fLogValuesEqSym.count());
  gsl_spline_init (spline_eq, fLogValuesEqSym.data(), dbValuesEqSym.data(),
                   fLogValuesEqSym.count());
  gsl_interp_accel *acc_fr = gsl_interp_accel_alloc();
  gsl_spline *spline_fr = gsl_spline_alloc(gsl_interp_cspline, fLogValuesFr.count());
  gsl_spline_init(spline_fr, fLogValuesFr.data(), dbValuesFr.data(),
                  fLogValuesFr.count());
  // Evaluate one curve point per horizontal pixel of the grid area.
  QPolygon polyline1(width() - margin.left() - margin.right());
  QPolygon polyline2(width() - margin.left() - margin.right());
  int maxFr = 10000;
  for (int i=0; i < width() - margin.left() - margin.right(); i++)
  {
    double interpolatedFreq = pow(10, 1 + (double)(i) / (double)stepWidth);
    if (interpolatedFreq > 20000.0) interpolatedFreq = 20000.0;
    double interpolatedDbEq = gsl_spline_eval(spline_eq,
                                              log10(interpolatedFreq),
                                              acc_eq);
    // Response curve = measured response + EQ correction.
    double interpolatedDbFr = gsl_spline_eval(spline_fr,
                                              log10(interpolatedFreq),
                                              acc_fr) + interpolatedDbEq;
    int yEq = margin.top() - interpolatedDbEq * 10.0 / dbInStep * stepHeight
      + maxDb * stepHeight * 10.0 / dbInStep;
    int yFr = margin.top() - interpolatedDbFr * 10.0 / dbInStep * stepHeight
      + maxDb * stepHeight * 10.0 / dbInStep;
    // Clamp the EQ curve to the grid area.
    if (yEq > (height() - margin.bottom()))
    {
      yEq = height() - margin.bottom();
    }
    if (yEq < (margin.top()))
    {
      yEq = margin.top();
    }
    polyline1.setPoint(i, i + margin.left(), yEq);
    polyline2.setPoint(i, i + margin.left(), yFr);
    // Track the topmost (minimum y) point of the response curve.
    if (maxFr > yFr)
    {
      maxFr = yFr;
    }
  }
  // Shift the response curve so its peak aligns with the top of the grid,
  // clamping the result into the grid area.
  for (int i=0; i < width() - margin.left() - margin.right(); i++)
  {
    int x, y;
    polyline2.point(i, &x, &y);
    y += -maxFr + margin.top() + maxDb * stepHeight * 10.0 / dbInStep;
    if (y > (height() - margin.bottom()))
    {
      y = height() - margin.bottom();
    }
    if (y < (margin.top()))
    {
      y = margin.top();
    }
    polyline2.setPoint(i, x, y);
  }
  gsl_spline_free(spline_eq);
  gsl_interp_accel_free(acc_eq);
  gsl_spline_free(spline_fr);
  gsl_interp_accel_free(acc_fr);
  qp.setPen(QPen(QColor(0,255,0)));
  qp.drawPolyline(polyline1);
  qp.setPen(QPen(QColor(150,255,255)));
  qp.drawPolyline(polyline2);
  // Hover readout ("f: ..., k: ...") prepared by mouseMoveEvent.
  qp.setPen(QPen(QColor(255,255,255)));
  infoFont = QFont("Sans",12);
  qp.setFont(infoFont);
  qp.drawText(infop,text);
  // Control points: the active (nearest-to-cursor) one is highlighted.
  for (int i = 0; i < fLogValuesEq.count() - 1; i++)
  {
    int x = (fLogValuesEq[i] - 1.0) * stepWidth + margin.left();
    int y = (-dbValuesEq[i] + maxDb) * 10 / dbInStep * stepHeight + margin.top();
    if (i != activePoint)
    {
      qp.setBrush(QBrush(QColor(0,200,0)));
      qp.setPen(QPen(QColor(0,200,0)));
    }
    else
    {
      qp.setPen(QPen(QColor(200,255,200)));
      qp.setBrush(QBrush(QColor(200,255,200)));
    }
    qp.drawEllipse(x - 10, y - 10, 20, 20);
  }
}
// Handles hover (updates the f/dB readout and highlights the nearest control
// point) and left-button drags (moves the active control point).
void EqualizerWidget::mouseMoveEvent(QMouseEvent *event)
{
  QPoint point(event->pos());
  // Inside the grid area: refresh the "f: ..., k: ..." readout at the cursor.
  if ((point.x() >= margin.left()) && (point.y() >= margin.top()) &&
      (point.x() <= width() - margin.right()) &&
      (point.y() <= height() - margin.bottom()))
  {
    infop = point;
    QFontMetrics metrics(infoFont,this);
    double Lvalue = maxDb - (double)(infop.y() - margin.top()) /
      (double)stepHeight * (double)dbInStep / 10.0;
    text = QString(tr("f: %1 Hz, k: %2 dB")).arg(
      pow(10, 1 + (double)(infop.x() - margin.left()) /
          (double)stepWidth), 4, 'f', 2).arg(Lvalue,4,'g',3);
    QRect infor = metrics.tightBoundingRect(text);
    infor.moveBottomLeft(infop);
    infor.adjust(-5, -5, 5, 5);
    // Repaint both the old and the new readout rectangles.
    update(backinfor);
    update(infor);
    backinfor = infor;
  }
  if (event->buttons() == Qt::LeftButton)
  {
    // Dragging: move the active point relative to where the press started.
    int dx = point.x() - mousePressPoint.x();
    int dy = point.y() - mousePressPoint.y();
    int newY = (- activeDb + (double)dy / 10.0 * dbInStep / stepHeight) *
      10 / dbInStep * stepHeight +
      maxDb * 10 / dbInStep * stepHeight + margin.top();
    if ((newY <= height() - margin.bottom()) && (newY >= margin.top()))
    {
      // Endpoints keep their frequency; interior points may move in f but
      // must stay between their neighbors.
      if ((activePoint != 0) && (activePoint != fLogValuesEq.count() - 2))
      {
        double newF = activeFLog + (double)dx / (double)stepWidth;
        if ((newF < fLogValuesEq[activePoint + 1]) &&
            (newF > fLogValuesEq[activePoint - 1]))
        {
          fLogValuesEq[activePoint] = newF;
        }
      }
      double dyDb = (double)dy / 10.0 * dbInStep / stepHeight;
      dbValuesEq[activePoint] = activeDb - dyDb;
      // Moving the last visible point also drags the hidden anchor point.
      if (activePoint == (fLogValuesEq.count() - 2))
      {
        dbValuesEq[dbValuesEq.size() - 1] = activeDb - dyDb;
      }
      update(0, 0, width(), height());
      isResponseChanged = true;
    }
  }
  else if (event->buttons() == Qt::NoButton)
  {
    // Hovering: make the control point nearest the cursor the active one.
    int minDistance = INT_MAX;
    int nearestPoint = 0;
    for (int i = 0; i < fLogValuesEq.count() - 1; i++)
    {
      int x = (fLogValuesEq[i] - 1.0) * stepWidth + margin.left();
      int y = (-dbValuesEq[i] + maxDb) * stepHeight / dbInStep * 10 + margin.top();
      int distanceToPoint = sqrt(pow(x - point.x(), 2) + pow(y - point.y(), 2));
      if (distanceToPoint < minDistance)
      {
        minDistance = distanceToPoint;
        nearestPoint = i;
      }
    }
    activePoint = nearestPoint;
    update(QRect(0, 0, width(), height()));
  }
}
// Left press: record the drag origin and the active point's current values.
// Right press: delete the active control point (keeping at least 3 points).
void EqualizerWidget::mousePressEvent(QMouseEvent *event)
{
  if (event->button() == Qt::LeftButton)
  {
    mousePressPoint = event->pos();
    activeFLog = fLogValuesEq[activePoint];
    activeDb = dbValuesEq[activePoint];
  }
  else if (event->button() == Qt::RightButton)
  {
    // NOTE(review): the guard uses count()-1, but activePoint can only reach
    // count()-2 (the last drawn point) — presumably count()-2 was intended
    // to protect the last visible point; confirm before changing.
    if ((activePoint != 0) && (activePoint != (fLogValuesEq.count() - 1)
        && fLogValuesEq.count() > 3))
    {
      fLogValuesEq.remove(activePoint, 1);
      dbValuesEq.remove(activePoint, 1);
      update(0, 0, width(), height());
      isResponseChanged = true;
    }
  }
}
// Left double-click: insert a new control point at the cursor position,
// keeping the point list sorted by frequency.
void EqualizerWidget::mouseDoubleClickEvent(QMouseEvent *event)
{
  if (event->button() == Qt::LeftButton)
  {
    QPoint point(event->pos());
    // Convert pixel coordinates (via the last hover position) to dB / log10(f).
    double db = maxDb - (double)(infop.y()-margin.top()) /
      (double)stepHeight * (double)dbInStep / 10.0;
    double fLog = 1 + (double)(infop.x()-margin.left())/(double)stepWidth;
    // Find the first existing point at or beyond the clicked frequency.
    int insertPos = 0;
    for (int i = 0; i < fLogValuesEq.count(); i++)
    {
      if (fLogValuesEq[i] >= fLog)
      {
        insertPos = i;
        break;
      }
    }
    // Skip exact duplicates; otherwise insert and make the new point active.
    if (fLogValuesEq[insertPos] != fLog)
    {
      fLogValuesEq.insert(insertPos, fLog);
      dbValuesEq.insert(insertPos, db);
      activePoint = insertPos;
      update(0, 0, width(), height());
      isResponseChanged = true;
    }
  }
}
// Renders the static plot background (titles, axis labels, log-frequency
// grid, dB grid, and frame) into the offscreen backbuffer.
void EqualizerWidget::drawBackground()
{
  QPainter qp(backbuffer);
  qp.fillRect(0,0,width(),height(),QBrush(QColor(0,0,0)));
  QPen penThin(QColor(80,80,80));
  QPen penBold(QColor(150,150,150),1);
  QPen penText(QColor(255,255,255));
  QFontMetrics fm(QFont("Sans",10));
  qp.setFont(QFont("Sans",10));
  qp.setPen(penText);
  // Centered plot title.
  QString frequencyResponseString = tr("Frequency Response");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  int pixelsWide = fm.horizontalAdvance(frequencyResponseString);
#else
  int pixelsWide = fm.boundingRect(frequencyResponseString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, 20, frequencyResponseString);
  // X-axis label along the bottom.
  QString frequencyString = tr("Frequency, Hz");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(frequencyString);
#else
  pixelsWide = fm.boundingRect(frequencyString).width();
#endif
  qp.drawText(width() / 2 -pixelsWide / 2, height() - 8, frequencyString);
  // Y-axis label, drawn rotated 90° counter-clockwise.
  qp.save();
  qp.rotate(-90);
  QString magnitudeString = tr("Magnitude, dB");
#if QT_VERSION >= QT_VERSION_CHECK(5, 11, 0)
  pixelsWide = fm.horizontalAdvance(magnitudeString);
#else
  pixelsWide = fm.boundingRect(magnitudeString).width();
#endif
  qp.drawText(-pixelsWide / 2 - height() / 2, 12, magnitudeString);
  qp.restore();
  // Horizontal pixels per frequency decade (three decades: 10 Hz..20 kHz,
  // with the last decade only reaching 2x10^4 => log10(8) correction).
  stepWidth = (width() - margin.left() - margin.right()) / 3;
  stepWidth -= (1 - log10(8)) * stepWidth;
  // Horizontal grid lines; every 5th is bold and labeled in dB.
  int i=0;
  while (i*stepHeight < height()-margin.top()-margin.bottom())
  {
    if (i % 5 == 0)
    {
      qp.setPen(penText);
      QString drText;
      qp.drawText(margin.left() - 25, 4 + i * stepHeight +
                  margin.top(), QString("%1").arg(maxDb - dbInStep/2*(i / 5)));
      qp.setPen(penBold);
    }
    else
    {
      qp.setPen(penThin);
    }
    qp.drawLine(margin.left(), i * stepHeight + margin.top(), width() - margin.right(),
                i * stepHeight+margin.top());
    i++;
  }
  // Vertical grid: log-spaced lines within each decade; decade boundaries
  // are bold and labeled (10, 100, 1000, ...).
  i=0;
  while ((i * stepWidth) < (width() - margin.left() - margin.right()))
  {
    for (int x = 1; x < 10; x++)
    {
      if ((stepWidth * log10(x) + i * stepWidth + margin.left()) <=
          (width() - margin.right()))
      {
        if (x == 1)
        {
          qp.setPen(penBold);
        }
        else
        {
          qp.setPen(penThin);
        }
        qp.drawLine(stepWidth * log10(x) + i * stepWidth + margin.left(),
                    margin.top(),
                    stepWidth * log10(x) + i * stepWidth + margin.left(),
                    height() - margin.bottom());
      }
    }
    qp.setPen(penText);
    QString drText;
    qp.drawText(margin.left() + i * stepWidth - 4, height() - margin.bottom() + 20,
                QString("%1").arg((int)(10 * pow(10, i))));
    i++;
  }
  // White frame around the grid area.
  qp.setPen(QPen(QColor(255, 255, 255), 1));
  qp.drawLine(margin.left(), margin.top(), width() - margin.right(), margin.top());
  qp.drawLine(margin.left(), margin.top(), margin.left(), height() - margin.bottom());
  qp.drawLine(margin.left(), height() - margin.bottom(), width() -
              margin.right(), height() - margin.bottom());
  qp.drawLine(width() - margin.right(), margin.top(), width() - margin.right(),
              height() - margin.bottom());
}
// Recreates the offscreen backbuffer at the new widget size, re-renders the
// grid into it, and schedules a full repaint.
void EqualizerWidget::resizeEvent(QResizeEvent*)
{
  delete backbuffer;
  backbuffer = new QPixmap(width(), height());
  drawBackground();
  update(0,0,width(),height());
}
// Emits responseChanged() once per completed edit gesture (drag, insert,
// or delete), then clears the pending-change flag.
void EqualizerWidget::mouseReleaseEvent(QMouseEvent *)
{
  if (!isResponseChanged)
  {
    return;
  }
  emit responseChanged();
  isResponseChanged = false;
}
// Restores the default flat EQ curve: four 0 dB control points at
// 10 Hz, 1 kHz, 20 kHz and 22 kHz (stored as log10 of the frequency).
void EqualizerWidget::resetEq()
{
  static const double defaultFreqs[4] = { 10.0, 1000.0, 20000.0, 22000.0 };

  fLogValuesEq.resize(4);
  dbValuesEq.resize(4);

  for (int i = 0; i < 4; i++)
  {
    fLogValuesEq[i] = log10(defaultFreqs[i]);
    dbValuesEq[i] = 0.0;
  }
}
| 13,261
|
C++
|
.cpp
| 387
| 29.129199
| 89
| 0.623073
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,352
|
player.cpp
|
olegkapitonov_tubeAmp-Designer/src/player.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <sndfile.h>
#include <cmath>
#include <QSharedPointer>
#include "player.h"
#define RMS_COUNT_MAX 4800
int peakRMScount = 0;
double peakInputRMSsum = 0.0;
double peakOutputRMSsum = 0.0;
// JACK real-time process callback. Depending on the player state it either
// outputs silence (STOP/PAUSE), plays the DI track through the processor
// (PLAY_DI), plays the reference track directly (PLAY_REF), or processes the
// live input (MONITOR). Both track positions are kept advancing in lockstep
// so switching modes stays in sync; playback wraps around at the track end.
// Also accumulates squared samples into the global RMS meters.
static int process (jack_nframes_t nframes, void *arg)
{
  jack_default_audio_sample_t *in, *outL, *outR;
  Player *inst = (Player *)arg;
  in = (jack_default_audio_sample_t *)jack_port_get_buffer(inst->input_port,
                                                           nframes);
  outL = (jack_default_audio_sample_t *)jack_port_get_buffer (inst->output_port_left,
                                                              nframes);
  outR = (jack_default_audio_sample_t *)jack_port_get_buffer (inst->output_port_right,
                                                              nframes);
  switch (inst->status)
  {
    case Player::PlayerStatus::PS_STOP:
    {
      // Stop: rewind both tracks and output silence.
      inst->diPos = 0;
      inst->refPos = 0;
      memset(outL, 0, sizeof (jack_default_audio_sample_t) * nframes);
      memset(outR, 0, sizeof (jack_default_audio_sample_t) * nframes);
    }
    break;
    case Player::PlayerStatus::PS_PAUSE:
    {
      // Pause: output silence but keep the positions where they are.
      memset(outL, 0, sizeof (jack_default_audio_sample_t) * nframes);
      memset(outR, 0, sizeof (jack_default_audio_sample_t) * nframes);
    }
    break;
    case Player::PlayerStatus::PS_PLAY_DI:
    {
      if (inst->diData.size() != 0)
      {
        if ((inst->diPos + nframes) > (unsigned int)inst->diData.size())
        {
          // The block straddles the end of the DI track: assemble a
          // temporary buffer from the tail plus the wrapped-around head.
          QVector<float> tempBuffer(nframes);
          for (unsigned int i = inst->diPos;
               i < (unsigned int)inst->diData.size(); i++)
          {
            tempBuffer[i - inst->diPos] = inst->diData[i];
            peakInputRMSsum += pow(inst->diData[i], 2);
          }
          for (unsigned int i = 0;
               i < (nframes - inst->diData.size() + inst->diPos); i++)
          {
            tempBuffer[i + inst->diData.size() - inst->diPos] = inst->diData[i];
            peakInputRMSsum += pow(inst->diData[i], 2);
            inst->incRMScounter();
          }
          inst->processor->process(outL,
                                   outR,
                                   tempBuffer.data(),
                                   nframes);
          for (unsigned int i = 0; i < nframes; i++)
          {
            peakOutputRMSsum += pow(outL[i], 2);
          }
          inst->diPos = inst->diPos + nframes - inst->diData.size();
        }
        else
        {
          // Fast path: process straight from the DI buffer.
          for (unsigned int i = 0; i < nframes; i++)
          {
            peakInputRMSsum += pow(inst->diData[i + inst->diPos], 2);
            inst->incRMScounter();
          }
          inst->processor->process(outL,
                                   outR,
                                   inst->diData.data() + inst->diPos,
                                   nframes);
          for (unsigned int i = 0; i < nframes; i++)
          {
            peakOutputRMSsum += pow(outL[i], 2);
          }
          inst->diPos += nframes;
        }
        // Advance the reference position too, so the tracks stay aligned.
        if ((inst->refPos + nframes) > (unsigned int)inst->refDataL.size())
        {
          inst->refPos = inst->refPos + nframes - inst->refDataL.size();
        }
        else
        {
          inst->refPos += nframes;
        }
      }
      else
      {
        memset(outL, 0, sizeof (jack_default_audio_sample_t) * nframes);
        memset(outR, 0, sizeof (jack_default_audio_sample_t) * nframes);
      }
    }
    break;
    case Player::PlayerStatus::PS_PLAY_REF:
    {
      if (inst->refDataL.size() != 0)
      {
        if ((inst->refPos + nframes) > (unsigned int)inst->refDataL.size())
        {
          // Block straddles the end of the reference track: copy the tail,
          // then wrap to the beginning, scaling by the playback level.
          for (int i = inst->refPos; i < inst->refDataL.size(); i++)
          {
            outL[i - inst->refPos] = inst->refDataL[i] * inst->getLevel();
            peakInputRMSsum += 0.0;
            peakOutputRMSsum += pow(outL[i - inst->refPos], 2);
            inst->incRMScounter();
          }
          for (unsigned int i = 0;
               i < (nframes - inst->refDataL.size() + inst->refPos); i++)
          {
            outL[i + inst->refDataL.size() - inst->refPos] = inst->refDataL[i] *
              inst->getLevel();
            peakInputRMSsum += 0.0;
            peakOutputRMSsum += pow(outL[i + inst->refDataL.size() - inst->refPos], 2);
            inst->incRMScounter();
          }
          for (int i = inst->refPos; i < inst->refDataR.size(); i++)
          {
            outR[i - inst->refPos] = inst->refDataR[i] * inst->getLevel();
          }
          for (unsigned int i = 0;
               i < (nframes - inst->refDataR.size() + inst->refPos); i++)
          {
            outR[i + inst->refDataR.size() - inst->refPos] = inst->refDataR[i] *
              inst->getLevel();
          }
          inst->refPos = inst->refPos + nframes - inst->refDataL.size();
        }
        else
        {
          // Fast path: straight copy of both channels with level applied.
          for (unsigned int i = inst->refPos; i < nframes + inst->refPos; i++)
          {
            outL[i - inst->refPos] = inst->refDataL[i] * inst->getLevel();
            peakInputRMSsum += 0.0;
            peakOutputRMSsum += pow(outL[i - inst->refPos], 2);
            inst->incRMScounter();
          }
          for (unsigned int i = inst->refPos; i < nframes + inst->refPos; i++)
          {
            outR[i - inst->refPos] = inst->refDataR[i] * inst->getLevel();
          }
          inst->refPos += nframes;
        }
        // Advance the DI position too, so the tracks stay aligned.
        if ((inst->diPos + nframes) > (unsigned int)inst->diData.size())
        {
          inst->diPos = inst->diPos + nframes - inst->diData.size();
        }
        else
        {
          inst->diPos += nframes;
        }
      }
      else
      {
        memset(outL, 0, sizeof (jack_default_audio_sample_t) * nframes);
        memset(outR, 0, sizeof (jack_default_audio_sample_t) * nframes);
      }
    }
    break;
    case Player::PlayerStatus::PS_MONITOR:
    {
      // Live monitoring: scale the input in place, then process it.
      for (unsigned int i = 0; i < nframes; i++)
      {
        in[i] *= inst->inputLevel;
        peakInputRMSsum += pow(in[i], 2);
        inst->incRMScounter();
      }
      inst->processor->process(outL, outR, in, nframes);
      for (unsigned int i = 0; i < nframes; i++)
      {
        peakOutputRMSsum += pow(outL[i], 2);
      }
    }
    break;
  }
  return 0;
}
// JACK session callback: replies with the command line needed to restart
// this client, and requests application quit on SaveAndQuit events.
static void session_callback(jack_session_event_t *event, void *arg)
{
  Player *inst = (Player *)arg;
  char retval[100];
  printf ("session notification\n");
  printf ("path %s, uuid %s, type: %s\n", event->session_dir,
          event->client_uuid, event->type == JackSessionSave ? "save" : "quit");
  snprintf (retval, 100, "jack_simple_session_client %s", event->client_uuid);
  // The session manager takes ownership of command_line; it is released
  // together with the event by jack_session_event_free() below.
  event->command_line = strdup (retval);
  jack_session_reply( inst->client, event );
  if (event->type == JackSessionSaveAndQuit) {
    inst->simple_quit = 1;
  }
  jack_session_event_free(event);
}
// Called by JACK if the server shuts down or drops this client; there is
// nothing useful to continue with, so terminate the process.
static void jack_shutdown(void*)
{
  exit (1);
}
// Initializes playback state (stopped, positions at zero) and wires the
// background RMS-equalization worker's completion signal.
Player::Player()
{
  simple_quit = 0;
  diPos = 0;
  refPos = 0;
  status = PS_STOP;
  equalDataRMSThread = new EqualDataRMSThread();
  connect(equalDataRMSThread, &QThread::finished, this,
          &Player::equalDataRMSThreadFinished);
}
// Disconnects from the JACK server on destruction.
Player::~Player()
{
  jack_client_close(client);
}
void Player::incRMScounter()
{
if (peakRMScount < RMS_COUNT_MAX)
{
peakRMScount++;
}
else
{
float peakInputRMSvalue = sqrt(peakInputRMSsum / peakRMScount);
float peakOutputRMSvalue = sqrt(peakOutputRMSsum / peakRMScount);
peakRMScount = 0;
peakInputRMSsum = 0.0;
peakOutputRMSsum = 0.0;
emit peakRMSValueCalculated(peakInputRMSvalue, peakOutputRMSvalue);
}
}
// Opens a JACK client connection, installs the process/shutdown/session
// callbacks, caches the engine sample rate, and registers one input and two
// output ports. Returns 0 on success, 1 on any failure.
int Player::connectToJack()
{
  jack_status_t status;
  const char *client_name = "tubeAmp Designer";
  /* open a client connection to the JACK server */
  client = jack_client_open (client_name, JackNoStartServer, &status );
  if (client == NULL)
  {
    fprintf (stderr, "jack_client_open() failed, status = 0x%2.0x\n", status);
    if (status & JackServerFailed)
    {
      fprintf (stderr, "Unable to connect to JACK server\n");
    }
    return 1;
  }
  if (status & JackServerStarted)
  {
    fprintf (stderr, "JACK server started\n");
  }
  if (status & JackNameNotUnique)
  {
    // The server assigned a unique variant of our name; adopt it.
    client_name = jack_get_client_name(client);
  }
  /* tell the JACK server to call `process()' whenever
     there is work to be done.
  */
  jack_set_process_callback(client, process, this);
  /* tell the JACK server to call `jack_shutdown()' if
     it ever shuts down, either entirely, or if it
     just decides to stop calling us.
  */
  jack_on_shutdown(client, jack_shutdown, this);
  /* tell the JACK server to call `session_callback()' if
     the session is saved.
  */
  jack_set_session_callback(client, session_callback, this);
  /* display the current sample rate.
  */
  printf ("engine sample rate: %" PRIu32 "\n", jack_get_sample_rate (client));
  sampleRate = jack_get_sample_rate (client);
  /* create two ports */
  input_port = jack_port_register (client, "input",
                                   JACK_DEFAULT_AUDIO_TYPE,
                                   JackPortIsInput, 0);
  output_port_left = jack_port_register (client, "outputL",
                                         JACK_DEFAULT_AUDIO_TYPE,
                                         JackPortIsOutput, 0);
  output_port_right = jack_port_register (client, "outputR",
                                          JACK_DEFAULT_AUDIO_TYPE,
                                          JackPortIsOutput, 0);
  if ((input_port == NULL) || (output_port_left == NULL) || (output_port_right == NULL))
  {
    fprintf(stderr, "no more JACK ports available\n");
    return 1;
  }
  return 0;
}
// Activates the JACK client so the process callback starts being invoked.
// Returns 0 on success, 1 on failure.
int Player::activate()
{
  if (jack_activate(client) == 0)
  {
    return 0;
  }
  fprintf (stderr, "cannot activate client");
  return 1;
}
// Replaces the DI (dry input) track and notifies listeners.
void Player::setDiData(QVector<float> data)
{
  diData = data;
  emit dataChanged();
}
// Replaces the stereo reference track and notifies listeners.
void Player::setRefData(QVector<float> dataL, QVector<float> dataR)
{
  refDataL = dataL;
  refDataR = dataR;
  emit dataChanged();
}
// Switches the playback mode read by the real-time process() callback.
void Player::setStatus(PlayerStatus newStatus)
{
  status = newStatus;
}
// Sets the audio processor used by the process() callback (not owned).
void Player::setProcessor(Processor *prc)
{
  processor = prc;
}
// Returns the JACK engine sample rate cached by connectToJack().
int Player::getSampleRate()
{
  return sampleRate;
}
// Starts background RMS level matching between the processed DI track and
// the reference track. No-op while a previous run is still active; finishes
// immediately (emitting equalRMSFinished) when there is no reference data.
void Player::equalDataRMS()
{
  if (isEqualDataRMSThreadRunning)
  {
    return;
  }

  if (refDataL.size() == 0)
  {
    emit equalRMSFinished();
    return;
  }

  isEqualDataRMSThreadRunning = true;
  equalDataRMSThread->processor = processor;
  equalDataRMSThread->player = this;
  equalDataRMSThread->start();
}
void Player::equalDataRMSThreadFinished()
{
isEqualDataRMSThreadRunning = false;
emit equalRMSFinished();
}
// Output level multiplier (linear gain).
void Player::setLevel(float lev)
{
  level = lev;
}

float Player::getLevel()
{
  return level;
}

// Input gain: given in dB, stored as a linear factor.
void Player::setInputLevel(float dbInputLevel)
{
  inputLevel = pow(10.0, dbInputLevel / 20.0);
}
// Worker: renders the DI track through a private copy of the current
// effect chain, then rescales the reference track so both end up with
// the same RMS loudness.
void EqualDataRMSThread::run()
{
  QVector<float> processedDataL(player->diData.size());
  QVector<float> processedDataR(player->diData.size());

  // Use a private Processor clone so the realtime audio thread's
  // convolvers are not touched from this worker.
  QSharedPointer<Processor> backProcessor
    = QSharedPointer<Processor>(new Processor(processor->getSamplingRate()));

  backProcessor->loadProfile(processor->getProfileFileName());
  backProcessor->setControls(processor->getControls());
  backProcessor->setProfile(processor->getProfile());
  backProcessor->setPreampImpulse(processor->getPreampImpulse());
  backProcessor->setCabinetImpulse(processor->getLeftImpulse(),
                                   processor->getRightImpulse());

  // Re-apply the correction equalizer curves (the spline-based
  // conversion needs at least 3 points).
  QVector<double> w(processor->correctionEqualizerFLogValues.size());
  QVector<double> A(processor->correctionEqualizerFLogValues.size());

  if (processor->correctionEqualizerFLogValues.size() >= 3)
  {
    for (int i = 0; i < w.size(); i++)
    {
      // log10(Hz) -> angular frequency; dB -> linear amplitude.
      w[i] = 2.0 * M_PI * pow(10.0, processor->correctionEqualizerFLogValues[i]);
      A[i] = pow(10.0, processor->correctionEqualizerDbValues[i] / 20.0);
    }
    backProcessor->setCabinetSumCorrectionImpulseFromFrequencyResponse(w, A);
  }

  if (processor->preampCorrectionEqualizerFLogValues.size() >= 3)
  {
    w.resize(processor->preampCorrectionEqualizerFLogValues.size());
    A.resize(processor->preampCorrectionEqualizerFLogValues.size());
    for (int i = 0; i < w.size(); i++)
    {
      w[i] = 2.0 * M_PI * pow(10.0, processor->preampCorrectionEqualizerFLogValues[i]);
      A[i] = pow(10.0, processor->preampCorrectionEqualizerDbValues[i] / 20.0);
    }
    backProcessor->setPreampCorrectionImpulseFromFrequencyResponse(w, A);
  }

  // The convolvers consume whole `fragm` blocks, so truncate the DI
  // track to a multiple of fragm before processing.
  int sizeToFragm = floor(player->diData.size() / (double)fragm) * fragm;

  processedDataL.resize(sizeToFragm);
  processedDataR.resize(sizeToFragm);

  backProcessor->process(processedDataL.data(),
                         processedDataR.data(),
                         player->diData.data(),
                         sizeToFragm);

  // RMS of the mono sum of the processed DI track.
  double rmsProcessedData = 0.0;
  for (int i = 0; i < processedDataL.size(); i++)
  {
    rmsProcessedData += pow((processedDataL[i] +
      processedDataR[i]) / 2.0, 2);
  }
  rmsProcessedData = sqrt(rmsProcessedData / processedDataL.size());

  // RMS of the mono sum of the reference track.
  double rmsRefData = 0.0;
  for (int i = 0; i < player->refDataL.size(); i++)
  {
    rmsRefData += pow((player->refDataL[i] + player->refDataR[i]) / 2.0, 2);
  }
  rmsRefData = sqrt(rmsRefData / player->refDataL.size());

  // Scale the reference so its RMS matches the processed DI's RMS
  // (dividing by ref/processed multiplies by processed/ref).
  // NOTE(review): a silent processed track makes rmsProcessedData 0
  // and rmsRatio infinite -- confirm inputs are never all-zero.
  double rmsRatio = rmsRefData / rmsProcessedData;

  for (int i = 0; i < player->refDataL.size(); i++)
  {
    player->refDataL[i] /= rmsRatio;
    player->refDataR[i] /= rmsRatio;
  }
}
| 14,110
|
C++
|
.cpp
| 430
| 26.234884
| 88
| 0.606917
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,353
|
deconvolver_dialog.cpp
|
olegkapitonov_tubeAmp-Designer/src/deconvolver_dialog.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QGridLayout>
#include <QHBoxLayout>
#include <QLabel>
#include <QFileDialog>
#include <sndfile.h>
#include <cmath>
#include "deconvolver_dialog.h"
#include "math_functions.h"
// Dialog that derives an impulse response from a known test sweep and
// the recorded response of a device, via FFT deconvolution.
DeconvolverDialog::DeconvolverDialog(Processor *prc, QWidget *parent) : QDialog(parent)
{
  processor = prc;

  setMinimumWidth(600);
  setWindowTitle(tr("FFT Deconvolver"));

  QGridLayout *lay = new QGridLayout(this);

  // Row 0: generate/export a test sweep to play through the device.
  QPushButton *saveTestSignalButton = new QPushButton(tr("Create test signal .wav"), this);
  lay->addWidget(saveTestSignalButton, 0, 0, 1, 2);
  connect(saveTestSignalButton, &QPushButton::clicked, this,
          &DeconvolverDialog::saveTestSignalButtonClicked);

  // Rows 1-2: the dry test signal file.
  QLabel *testLabel = new QLabel(tr("Test Signal"), this);
  lay->addWidget(testLabel, 1, 0, 1, 2);
  testLabel->setAlignment(Qt::AlignCenter);

  testFilenameEdit = new QLineEdit(this);
  lay->addWidget(testFilenameEdit, 2, 0, 1, 1);

  testFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(testFilenameButton, 2, 1, 1, 1);
  connect(testFilenameButton, &QPushButton::clicked,
          this, &DeconvolverDialog::testFilenameButtonClicked);

  // Rows 3-4: the recorded response file.
  QLabel *responseLabel = new QLabel(tr("Response Signal"), this);
  lay->addWidget(responseLabel, 3, 0, 1, 2);
  responseLabel->setAlignment(Qt::AlignCenter);

  responseFilenameEdit = new QLineEdit(this);
  lay->addWidget(responseFilenameEdit, 4, 0, 1, 1);

  responseFilenameButton = new QPushButton(tr("Open"), this);
  lay->addWidget(responseFilenameButton, 4, 1, 1, 1);
  connect(responseFilenameButton, &QPushButton::clicked,
          this, &DeconvolverDialog::responseFilenameButtonClicked);

  // Rows 5-8: destination of the computed IR -- either loaded directly
  // as the cabinet IR, or written to a file chosen below.
  QLabel *IRLabel = new QLabel(tr("Impulse response file"), this);
  lay->addWidget(IRLabel, 5, 0, 1, 2);
  IRLabel->setAlignment(Qt::AlignCenter);

  QButtonGroup *IRGroup = new QButtonGroup(this);

  IRCabinetRadioButton = new QRadioButton(
    tr("Cabinet impulse response"), this);
  IRGroup->addButton(IRCabinetRadioButton);
  lay->addWidget(IRCabinetRadioButton, 6, 0, 1, 1);

  IRFileRadioButton = new QRadioButton(tr("File"), this);
  IRFileRadioButton->setChecked(true);
  IRGroup->addButton(IRFileRadioButton);
  lay->addWidget(IRFileRadioButton, 7, 0, 1, 1);

  connect(IRGroup, QOverload<QAbstractButton *>::of(&QButtonGroup::buttonClicked),
          this, &DeconvolverDialog::IRGroupClicked);

  IRFilenameEdit = new QLineEdit(this);
  lay->addWidget(IRFilenameEdit, 8, 0, 1, 1);

  IRFilenameButton = new QPushButton(tr("Save"), this);
  lay->addWidget(IRFilenameButton, 8, 1, 1, 1);
  connect(IRFilenameButton, &QPushButton::clicked,
          this, &DeconvolverDialog::IRFilenameButtonClicked);

  // Row 9: Process (disabled until all needed files are chosen) + Close.
  QWidget *buttonsContainer = new QWidget(this);
  lay->addWidget(buttonsContainer, 9, 0, 1, 2);

  QHBoxLayout *containerLay = new QHBoxLayout(buttonsContainer);

  processButton = new QPushButton(tr("Process"), buttonsContainer);
  containerLay->addWidget(processButton);
  processButton->setMaximumWidth(80);
  processButton->setEnabled(false);
  connect(processButton, &QPushButton::clicked,
          this, &DeconvolverDialog::processButtonClicked);

  QPushButton *closeButton = new QPushButton(tr("Close"), buttonsContainer);
  containerLay->addWidget(closeButton);
  closeButton->setMaximumWidth(80);
  connect(closeButton, &QPushButton::clicked, this,
          &DeconvolverDialog::closeButtonClicked);
}
// Pick the dry test-signal recording; re-evaluate the Process button.
void DeconvolverDialog::testFilenameButtonClicked()
{
  QString testFileName = QFileDialog::getOpenFileName(this,
    tr("Open Test Signal File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));

  if (!testFileName.isEmpty())
  {
    testFilenameEdit->setText(testFileName);
  }

  checkSignals();
}

// Pick the recorded response of the system under test.
void DeconvolverDialog::responseFilenameButtonClicked()
{
  QString responseFileName = QFileDialog::getOpenFileName(this,
    tr("Open Response File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));

  if (!responseFileName.isEmpty())
  {
    responseFilenameEdit->setText(responseFileName);
  }

  checkSignals();
}
// Choose where the computed impulse response will be written.
// The IR file is an OUTPUT (opened with SFM_WRITE in
// processButtonClicked, and the button is labelled "Save"), so use the
// save-file dialog: it lets the user type a new file name and warns
// before overwriting, whereas getOpenFileName expects an existing file.
void DeconvolverDialog::IRFilenameButtonClicked()
{
  QString IRFileName = QFileDialog::getSaveFileName(this,
    tr("Save IR File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));

  if (!IRFileName.isEmpty())
  {
    IRFilenameEdit->setText(IRFileName);
  }

  checkSignals();
}
// Run the deconvolution: read test and response files, resample both
// to the higher of the two sample rates, deconvolve per channel,
// normalize the IR energy, then either write the IR to a stereo WAV or
// load it directly as the processor's cabinet impulse.
void DeconvolverDialog::processButtonClicked()
{
  QString testFileName = testFilenameEdit->text();
  QString responseFileName = responseFilenameEdit->text();
  QString IRFileName = IRFilenameEdit->text();

  QVector<float> testL;
  QVector<float> testR;
  int testSampleRate;

  SF_INFO sfinfo;
  SNDFILE *sndFile;
  sfinfo.format = 0;
  sndFile = sf_open(testFileName.toUtf8().constData(), SFM_READ, &sfinfo);
  if (sndFile != NULL)
  {
    testSampleRate = sfinfo.samplerate;
    QVector<float> tempBuffer(sfinfo.frames * sfinfo.channels);
    sf_readf_float(sndFile, tempBuffer.data(), sfinfo.frames);
    sf_close(sndFile);
    testL.resize(sfinfo.frames);
    testR.resize(sfinfo.frames);
    // De-interleave: channel 0 becomes L, the average of the remaining
    // channels becomes R; mono files are duplicated to both.
    for (int i = 0; i < sfinfo.frames * sfinfo.channels; i += sfinfo.channels)
    {
      float sumFrame = 0.0;
      if (sfinfo.channels > 1)
      {
        for (int j = 1; j < sfinfo.channels; j++)
        {
          sumFrame += tempBuffer[i + j];
        }
        sumFrame /= sfinfo.channels - 1;
        testL[i / sfinfo.channels] = tempBuffer[i];
        testR[i / sfinfo.channels] = sumFrame;
      }
      else
      {
        testL[i] = tempBuffer[i];
        testR[i] = tempBuffer[i];
      }
    }
  }
  else
  {
    // Unreadable test file: silently abort.
    return;
  }
  sfinfo.format = 0;

  QVector<float> responseL;
  QVector<float> responseR;
  int responseSampleRate;

  // Same reading/de-interleaving scheme for the response file.
  sndFile = sf_open(responseFileName.toUtf8().constData(), SFM_READ, &sfinfo);
  if (sndFile != NULL)
  {
    responseSampleRate = sfinfo.samplerate;
    QVector<float> tempBuffer(sfinfo.frames * sfinfo.channels);
    sf_readf_float(sndFile, tempBuffer.data(), sfinfo.frames);
    sf_close(sndFile);
    responseL.resize(sfinfo.frames);
    responseR.resize(sfinfo.frames);
    for (int i = 0; i < sfinfo.frames * sfinfo.channels; i += sfinfo.channels)
    {
      float sumFrame = 0.0;
      if (sfinfo.channels > 1)
      {
        for (int j = 1; j < sfinfo.channels; j++)
        {
          sumFrame += tempBuffer[i + j];
        }
        sumFrame /= sfinfo.channels - 1;
        responseL[i / sfinfo.channels] = tempBuffer[i];
        responseR[i / sfinfo.channels] = sumFrame;
      }
      else
      {
        responseL[i] = tempBuffer[i];
        responseR[i] = tempBuffer[i];
      }
    }
  }
  else
  {
    return;
  }

  // Work at the higher of the two file rates.
  int IRSampleRate;

  if (testSampleRate >= responseSampleRate)
  {
    IRSampleRate = testSampleRate;
  }
  else
  {
    IRSampleRate = responseSampleRate;
  }

  testL = resample_vector(testL, testSampleRate, IRSampleRate);
  testR = resample_vector(testR, testSampleRate, IRSampleRate);

  responseL = resample_vector(responseL, responseSampleRate, IRSampleRate);
  responseR = resample_vector(responseR, responseSampleRate, IRSampleRate);

  QVector<float> IRL(responseL.size());
  QVector<float> IRR(responseR.size());

  // Band-limit 20 Hz .. 20 kHz with a -60 dB floor.
  // NOTE(review): the normalized band edges divide by the PROCESSOR's
  // sampling rate although the signals are at IRSampleRate -- confirm
  // this is intentional.
  fft_deconvolver(testL.data(),
                  testL.size(),
                  responseL.data(),
                  responseL.size(),
                  IRL.data(),
                  IRL.size(),
                  20.0 / processor->getSamplingRate(),
                  20000.0 / processor->getSamplingRate(),
                  -60.0
                 );

  fft_deconvolver(testR.data(),
                  testR.size(),
                  responseR.data(),
                  responseR.size(),
                  IRR.data(),
                  IRR.size(),
                  20.0 / processor->getSamplingRate(),
                  20000.0 / processor->getSamplingRate(),
                  -60.0
                 );

  // Normalize the IR energy.  NOTE(review): the energy is measured on
  // the left channel only but the coefficient is applied to both.
  float cabinetImpulseEnergy = 0.0;
  for (int i = 0; i < IRL.size(); i++)
  {
    cabinetImpulseEnergy += pow(IRL[i], 2);
  }

  float cabinetImpulseEnergyCoeff = sqrt(0.45 * 48000.0 /
    (float)processor->getSamplingRate()) /
    sqrt(cabinetImpulseEnergy);

  for (int i = 0; i < IRL.size(); i++)
  {
    IRL[i] *= cabinetImpulseEnergyCoeff;
    IRR[i] *= cabinetImpulseEnergyCoeff;
  }

  if (IRFileRadioButton->isChecked())
  {
    // Write the IR as an interleaved 16-bit stereo WAV.
    sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
    sfinfo.frames = IRL.size();
    sfinfo.samplerate = IRSampleRate;
    sfinfo.channels = 2;
    sfinfo.sections = 1;
    sfinfo.seekable = 1;

    SNDFILE *outputFile = sf_open(IRFileName.toUtf8().constData(),
                                  SFM_WRITE, &sfinfo);
    if (outputFile != NULL)
    {
      printf("saving\n");
      QVector<float> tempBuffer(IRL.size() * 2);
      for (int i = 0; i < (IRL.size() * 2 - 1); i += 2)
      {
        tempBuffer[i] = IRL[i / 2];
        tempBuffer[i + 1] = IRR[i / 2];
      }

      sf_writef_float(outputFile, tempBuffer.data(), IRL.size());
      sf_close(outputFile);
    }
  }
  else
  {
    // Load the IR straight into the processor's cabinet stage.
    processor->setCabinetImpulse(IRL, IRR);
  }
}
// Dismiss the dialog.
void DeconvolverDialog::closeButtonClicked()
{
  close();
}
// Toggle the IR file widgets: they only make sense when the result is
// written to a file, so gray them out when the IR goes straight into
// the cabinet stage instead.
void DeconvolverDialog::IRGroupClicked(QAbstractButton *button)
{
  const bool writeToFile = (button != IRCabinetRadioButton);

  IRFilenameEdit->setEnabled(writeToFile);
  IRFilenameButton->setEnabled(writeToFile);

  checkSignals();
}
// Enable "Process" once both input files are chosen and, when writing
// to a file, the output name is given too.
void DeconvolverDialog::checkSignals()
{
  const bool missingInput = testFilenameEdit->text().isEmpty() ||
                            responseFilenameEdit->text().isEmpty();
  const bool missingIRFile = IRFilenameEdit->text().isEmpty() &&
                             IRFileRadioButton->isChecked();

  processButton->setEnabled(!(missingInput || missingIRFile));
}
// Generate a 10-second logarithmic sweep from 20 Hz up to Nyquist and
// save it as a mono 16-bit WAV -- the signal to play through the
// device whose IR is being measured.
void DeconvolverDialog::saveTestSignalButtonClicked()
{
  QString testFileName = QFileDialog::getSaveFileName(this,
    tr("Test Signal File"), QString(), tr("Sound files (*.wav *.ogg *.flac)"));

  if (!testFileName.isEmpty())
  {
    QVector<float> testSignal(10.0 * processor->getSamplingRate());

    generate_logarithmic_sweep(10.0, processor->getSamplingRate(), 20.0,
                               (float)processor->getSamplingRate() / 2.0,
                               0.5, testSignal.data());

    SF_INFO sfinfo;
    sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
    sfinfo.frames = testSignal.size();
    sfinfo.samplerate = processor->getSamplingRate();
    sfinfo.channels = 1;
    sfinfo.sections = 1;
    sfinfo.seekable = 1;

    SNDFILE *outputFile = sf_open(testFileName.toUtf8().constData(),
                                  SFM_WRITE, &sfinfo);
    if (outputFile != NULL)
    {
      sf_writef_float(outputFile, testSignal.data(), testSignal.size());
      sf_close(outputFile);
    }
  }
}
| 11,439
|
C++
|
.cpp
| 333
| 29.231231
| 91
| 0.676249
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,354
|
processor.cpp
|
olegkapitonov_tubeAmp-Designer/src/processor.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QSharedPointer>
#include <QFile>
#include <gsl/gsl_spline.h>
#include <sndfile.h>
#include "processor.h"
#include "kpp_tubeamp_dsp.h"
#include "float.h"
#include "math_functions.h"
// Build a processor for sampling rate SR.  A profile must be loaded
// via loadProfile() before process() can run.
Processor::Processor(int SR)
{
  samplingRate = SR;

  preampCorrectionEnabled = false;
  cabinetCorrectionEnabled = false;

  // The "new_*" pointers act as a hand-off mailbox: non-realtime code
  // fills them and process() swaps them in on the audio thread.
  new_preamp_convproc = nullptr;
  new_preamp_correction_convproc = nullptr;
  new_convproc = nullptr;
  new_correction_convproc = nullptr;

  preamp_convproc = nullptr;
  preamp_correction_convproc = nullptr;
  convproc = nullptr;
  correction_convproc = nullptr;

  dsp = new mydsp();
  dsp->profile = nullptr;

  // NOTE(review): convolverDeleteThread is never deleted in
  // ~Processor() -- looks like a small one-off leak; confirm.
  convolverDeleteThread = new ConvolverDeleteThread();
}
Processor::~Processor()
{
  cleanProfile();
}

// Free the DSP state and all convolvers (active and pending).
// Called from the destructor and at the start of loadProfile(), which
// re-creates `dsp` and the convolvers afterwards.
void Processor::cleanProfile()
{
  delete preamp_convproc;
  delete preamp_correction_convproc;
  delete convproc;
  delete correction_convproc;

  delete new_preamp_convproc;
  delete new_preamp_correction_convproc;
  delete new_convproc;
  delete new_correction_convproc;

  delete dsp->profile;
  delete dsp;

  // Only the pending-convolver pointers are reset; `dsp` and the
  // active convolver pointers are left dangling until loadProfile()
  // rebuilds them.  NOTE(review): no other method may run in between
  // -- confirm callers always reload a profile next.
  new_preamp_convproc = nullptr;
  new_preamp_correction_convproc = nullptr;
  new_convproc = nullptr;
  new_correction_convproc = nullptr;
}
// Check *.tapf file signature.
// Returns 1 only when `path` can be opened and its header carries the
// "TaPf" magic; 0 otherwise.
// Fixes two defects of the previous version: a file whose header READ
// FAILED (e.g. truncated below sizeof(st_profile)) was reported valid
// by the `else status = 1;` branch, and the open() result was ignored
// while exists() was checked instead.
int Processor::checkProfileFile(const char *path)
{
  int status = 0;
  QFile profile_file(path);

  if (profile_file.open(QIODevice::ReadOnly))
  {
    st_profile check_profile;
    // A file shorter than the header cannot be a valid profile.
    if (profile_file.read((char*)&check_profile, sizeof(st_profile)) == sizeof(st_profile))
    {
      if (!strncmp(check_profile.signature, "TaPf", 4))
      {
        status = 1;
      }
    }
    profile_file.close();
  }

  return status;
}
// Load a *.tapf profile: the FAUST profile header, the preamp IR and
// the stereo cabinet IR (all stored at 48 kHz and resampled to the
// current rate).  Rebuilds the DSP object and all four convolvers.
// Returns false on any read failure.
// NOTE(review): after cleanProfile() a failed read leaves the object
// in a partially-initialized state (no convolvers) -- confirm callers
// treat a false return as fatal for this Processor.
bool Processor::loadProfile(QString filename)
{
  if (!checkProfileFile(filename.toUtf8().constData()))
  {
    return false;
  }

  cleanProfile();

  dsp = new mydsp();
  dsp->init(samplingRate);

  // Default control values for a freshly loaded profile.
  dsp->controls.volume = 1.0;
  dsp->controls.drive = 50.0;
  dsp->controls.low = 0.0;
  dsp->controls.middle = 0.0;
  dsp->controls.high = 0.0;
  dsp->controls.mastergain = 100.0;

  profileFileName = filename;

  dsp->profile = new st_profile;

  QFile profile_file(filename);
  profile_file.open(QIODevice::ReadOnly);

  if (profile_file.exists())
  {
    currentProfileFile.clear();
    currentProfileFile.append(filename);

    if (profile_file.read((char*)dsp->profile, sizeof(st_profile)) == sizeof(st_profile))
    {
      QVector<float> preamp_temp_buffer;
      QVector<float> left_temp_buffer;
      QVector<float> right_temp_buffer;

      // IRs in *.tapf are 48000 Hz,
      // calculate ratio for resampling
      float ratio = (float)samplingRate/48000.0;

      st_impulse preamp_impheader, impheader;

      // Load preamp IR data to temp buffer
      if (profile_file.read((char*)&preamp_impheader, sizeof(st_impulse)) != sizeof(st_impulse))
      {
        return false;
      }
      preamp_temp_buffer.resize(preamp_impheader.sample_count);
      if ((quint64)profile_file.read((char*)preamp_temp_buffer.data(),
          sizeof(float) * preamp_impheader.sample_count) !=
          (sizeof(float) * preamp_impheader.sample_count))
      {
        return false;
      }

      // Load cabsym IR data to temp buffers; the two records carry a
      // channel tag (0 = left, 1 = right) in their headers.
      for (int i=0;i<2;i++)
      {
        if (profile_file.read((char*)&impheader,
            sizeof(st_impulse)) != sizeof(st_impulse))
        {
          return false;
        }
        if (impheader.channel==0)
        {
          left_temp_buffer.resize(impheader.sample_count);
          if ((quint64)profile_file.read((char*)left_temp_buffer.data(),
              sizeof(float) * impheader.sample_count) !=
              sizeof(float) * impheader.sample_count)
          {
            return false;
          }
        }
        if (impheader.channel==1)
        {
          right_temp_buffer.resize(impheader.sample_count);
          if ((quint64)profile_file.read((char*)right_temp_buffer.data(),
              sizeof(float) * impheader.sample_count) !=
              sizeof(float) * impheader.sample_count)
          {
            return false;
          }
        }
      }

      preamp_impulse = resample_vector(preamp_temp_buffer, 48000, samplingRate);
      left_impulse = resample_vector(left_temp_buffer, 48000, samplingRate);
      right_impulse = resample_vector(right_temp_buffer, 48000, samplingRate);

      // Correction IRs start out as identity impulses (delta).
      // NOTE(review): sizes are computed as sample_count * ratio
      // (float, truncated) and may differ by a sample from the
      // resample_vector output sizes -- confirm this is harmless.
      preamp_correction_impulse.resize(preamp_impheader.sample_count * ratio);
      left_correction_impulse.resize(impheader.sample_count * ratio);
      right_correction_impulse.resize(impheader.sample_count * ratio);

      for (int i = 0; i < preamp_impheader.sample_count*ratio; i++)
      {
        preamp_correction_impulse[i] = 0.0f;
      }
      preamp_correction_impulse[0] = 1.0f;

      for (int i = 0; i < impheader.sample_count * ratio; i++)
      {
        left_correction_impulse[i] = 0.0f;
        right_correction_impulse[i] = 0.0f;
      }
      left_correction_impulse[0] = 1.0f;
      right_correction_impulse[0] = 1.0f;

      // Create preamp convolver
      preamp_convproc = createMonoConvolver(preamp_impulse);
      // Create cabsym convolver
      convproc = createStereoConvolver(left_impulse, right_impulse);
      // Create preamp correction convolver
      preamp_correction_convproc = createMonoConvolver(preamp_correction_impulse);
      // Create cabsym correction convolver
      correction_convproc = createStereoConvolver(left_correction_impulse,
                                                  right_correction_impulse);
      profile_file.close();
    }
  }
  else
  {
    return false;
  }
  return true;
}
// Write the current profile to a *.tapf file: profile header followed
// by preamp, left and right cabinet IR records, each resampled back to
// the canonical 48 kHz storage rate.  Returns false if the file cannot
// be opened.  NOTE(review): fwrite results are not checked, so a full
// disk still reports success -- confirm acceptable.
bool Processor::saveProfile(QString filename)
{
  FILE * profile_file= fopen(filename.toUtf8().constData(), "wb");

  if (profile_file != NULL)
  {
    QVector<float> savePreampImpulse;
    QVector<float> saveLeftImpulse;
    QVector<float> saveRightImpulse;

    savePreampImpulse = resample_vector(preamp_impulse, samplingRate, 48000);
    saveLeftImpulse = resample_vector(left_impulse, samplingRate, 48000);
    saveRightImpulse = resample_vector(right_impulse, samplingRate, 48000);

    st_impulse impulse_preamp_header, impulse_left_header, impulse_right_header;

    impulse_preamp_header.sample_rate = 48000;
    impulse_preamp_header.channel = 0;
    impulse_preamp_header.sample_count = savePreampImpulse.size();

    impulse_left_header.sample_rate = 48000;
    impulse_left_header.channel = 0;
    impulse_left_header.sample_count = saveLeftImpulse.size();

    impulse_right_header.sample_rate = 48000;
    impulse_right_header.channel = 1;
    impulse_right_header.sample_count = saveRightImpulse.size();

    fwrite(dsp->profile, sizeof(st_profile), 1, profile_file);

    fwrite(&impulse_preamp_header, sizeof(st_impulse), 1, profile_file);
    fwrite(savePreampImpulse.data(), sizeof(float), savePreampImpulse.size(),
           profile_file);

    fwrite(&impulse_left_header, sizeof(st_impulse), 1, profile_file);
    fwrite(saveLeftImpulse.data(), sizeof(float), saveLeftImpulse.size(), profile_file);

    fwrite(&impulse_right_header, sizeof(st_impulse), 1, profile_file);
    fwrite(saveRightImpulse.data(), sizeof(float), saveRightImpulse.size(),
           profile_file);

    fclose(profile_file);
    return true;
  }
  return false;
}
// Magnitude frequency response of `impulse`, evaluated at the given
// `freqs` (Hz) via real FFT + cubic-spline interpolation, normalized
// so the peak of the returned vector is 1.0.
QVector<float> Processor::getFrequencyResponse(QVector<float> freqs,
                                               QVector<float> impulse)
{
  QVector<float> frequencyResponse(freqs.size());

  QVector<double> double_impulse(impulse.size());

  for (int i = 0; i < impulse.size(); i++)
  {
    double_impulse[i] = impulse[i];
  }

  // The previous version put the FFT output in a variable-length
  // array (`fftw_complex out[impulse.size() / 2 + 1]`), which is a GCC
  // extension, not standard C++, and can overflow the stack for long
  // impulses.  Allocate it on the heap through FFTW instead.
  const int spectrumSize = impulse.size() / 2 + 1;
  fftw_complex *out =
    (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * spectrumSize);

  fftw_plan p;

  p = fftw_plan_dft_r2c_1d(impulse.size(), double_impulse.data(), out, FFTW_ESTIMATE);
  fftw_execute(p);
  fftw_destroy_plan(p);

  // Magnitude spectrum, skipping the DC bin out[0]; bin i+1 maps to
  // frequency ((i+1)/N) * Nyquist.
  QVector<double> rawFrequencyResponse(impulse.size() / 2);
  QVector<double> rawFreqs(impulse.size() / 2);

  for (int i = 0; i < rawFrequencyResponse.size(); i++)
  {
    rawFrequencyResponse[i] = sqrt(pow(out[i + 1][0], 2) + pow(out[i + 1][1], 2));
    rawFreqs[i] = ((double)(i + 1) / rawFrequencyResponse.size()) * (samplingRate / 2);
  }

  fftw_free(out);

  // Interpolate the raw spectrum onto the requested grid.
  // NOTE(review): freqs outside [rawFreqs first..last] would trigger a
  // GSL domain error -- callers are expected to stay in the audio band.
  gsl_interp_accel *acc = gsl_interp_accel_alloc ();
  gsl_spline *spline = gsl_spline_alloc (gsl_interp_cspline, rawFreqs.size());

  gsl_spline_init (spline, rawFreqs.data(), rawFrequencyResponse.data(),
                   rawFreqs.size());

  float maxAmplitude = FLT_MIN;

  for (int i = 0; i < freqs.size(); i++)
  {
    frequencyResponse[i] = gsl_spline_eval(spline, freqs[i], acc);
    if (frequencyResponse[i] > maxAmplitude)
    {
      maxAmplitude = frequencyResponse[i];
    }
  }

  // Normalize to a 1.0 peak.
  for (int i = 0; i < freqs.size(); i++)
  {
    frequencyResponse[i] /= maxAmplitude;
  }

  gsl_spline_free(spline);
  gsl_interp_accel_free(acc);

  return frequencyResponse;
}
// Frequency response of the preamp IR on the given frequency grid.
QVector<float> Processor::getPreampFrequencyResponse(QVector<float> freqs)
{
  return getFrequencyResponse(freqs, preamp_impulse);
}

// Average (L+R)/2 frequency response of the cabinet IR pair.
QVector<float> Processor::getCabinetSumFrequencyResponse(QVector<float> freqs)
{
  QVector<float> leftResponse = getFrequencyResponse(freqs, left_impulse);
  QVector<float> rightResponse = getFrequencyResponse(freqs, right_impulse);

  QVector<float> sumResponse(leftResponse.size());

  for (int i = 0; i < sumResponse.size(); i++)
  {
    sumResponse[i] = (leftResponse[i] + rightResponse[i]) / 2.0;
  }

  return sumResponse;
}

// Current FAUST control values (volume, drive, tonestack, mastergain).
stControls Processor::getControls()
{
  return dsp->controls;
}

void Processor::setControls(stControls newControls)
{
  dsp->controls = newControls;
}

// Loaded *.tapf profile header, returned by value.
st_profile Processor::getProfile()
{
  return *(dsp->profile);
}

void Processor::setProfile(st_profile newProfile)
{
  *(dsp->profile) = newProfile;
}
// Clamp `input` from below so it never falls under `cut`.
float hardClipBottom(float input, float cut)
{
  return (input < cut) ? cut : input;
}
// Compression factor of the tube model: 1 while `input` stays below
// the Upor threshold, rolling off as (input - Upor) * Kreg grows.
float Ks(float input, float Upor, float Kreg)
{
  const float overdrive = hardClipBottom((input - Upor) * Kreg, 0);
  return 1.0 / (overdrive + 1);
}
// Offset term of the tube transfer curve: ramps linearly from Upor
// (at input == 0) down to 0 (at input == 1).
float Ksplus(float input, float Upor)
{
  const float scaled = input * Upor;
  return Upor - scaled;
}
float Processor::tube(float Uin, float Kreg, float Upor, float bias, float cut)
{
// Model of tube nonlinear distortion
return hardClipBottom(Uin * Ks(Uin, Upor, Kreg)
+ Ksplus(Ks(Uin, Upor, Kreg), Upor) + bias, cut);
}
// Build the preamp correction IR from a target frequency response
// (angular frequencies `w`, linear amplitudes `A`) and queue a new
// correction convolver for the audio thread unless one is pending.
void Processor::setPreampCorrectionImpulseFromFrequencyResponse(QVector<double> w,
                                                                QVector<double> A)
{
  preamp_correction_impulse.resize(preamp_impulse.size());
  frequency_response_to_impulse_response(w.data(),
                                         A.data(),
                                         w.size(),
                                         preamp_correction_impulse.data(),
                                         preamp_correction_impulse.size(),
                                         samplingRate);

  if (new_preamp_correction_convproc == nullptr)
  {
    new_preamp_correction_convproc = createMonoConvolver(preamp_correction_impulse);
  }

  preampCorrectionEnabled = true;
}

// Same for the cabinet stage: one correction IR is computed from the
// target response and applied identically to both channels.
void Processor::setCabinetSumCorrectionImpulseFromFrequencyResponse(QVector<double> w,
                                                                    QVector<double> A)
{
  left_correction_impulse.resize(left_impulse.size());
  frequency_response_to_impulse_response(w.data(),
                                         A.data(),
                                         w.size(),
                                         left_correction_impulse.data(),
                                         left_correction_impulse.size(),
                                         samplingRate);

  right_correction_impulse.resize(right_impulse.size());
  frequency_response_to_impulse_response(w.data(),
                                         A.data(),
                                         w.size(),
                                         right_correction_impulse.data(),
                                         right_correction_impulse.size(),
                                         samplingRate);

  if (new_correction_convproc == nullptr)
  {
    new_correction_convproc = createStereoConvolver(left_correction_impulse,
                                                    right_correction_impulse);
  }

  cabinetCorrectionEnabled = true;
}
// Bake the current correction IR into the preamp IR via FFT
// convolution (fft_convolver appears to write into its first buffer),
// then queue a convolver rebuilt from the result.
void Processor::applyPreampCorrection()
{
  fft_convolver(preamp_impulse.data(),
                preamp_impulse.size(),
                preamp_correction_impulse.data(),
                preamp_correction_impulse.size());

  if (new_preamp_convproc == nullptr)
  {
    new_preamp_convproc = createMonoConvolver(preamp_impulse);
  }
}

// Same for both cabinet channels.
void Processor::applyCabinetSumCorrection()
{
  fft_convolver(left_impulse.data(), left_impulse.size(),
                left_correction_impulse.data(), left_correction_impulse.size());
  fft_convolver(right_impulse.data(), right_impulse.size(),
                right_correction_impulse.data(), right_correction_impulse.size());

  if (new_convproc == nullptr)
  {
    new_convproc = createStereoConvolver(left_impulse, right_impulse);
  }
}
// Restore the preamp correction IR to an identity impulse (delta) and
// queue a rebuilt correction convolver unless one is already pending.
// Guards the [0] write: before loadProfile() the vector is empty and
// the unconditional write was out-of-bounds.
void Processor::resetPreampCorrection()
{
  preamp_correction_impulse.fill(0.0f);
  if (!preamp_correction_impulse.isEmpty())
  {
    preamp_correction_impulse[0] = 1.0f;
  }

  if (new_preamp_correction_convproc == nullptr)
  {
    new_preamp_correction_convproc = createMonoConvolver(preamp_correction_impulse);
  }

  preampCorrectionEnabled = false;
}
// Restore identity (delta) correction IRs for both cabinet channels
// and queue a rebuilt correction convolver unless one is pending.
// Guards the [0] writes against empty vectors (see
// resetPreampCorrection for the rationale).
void Processor::resetCabinetSumCorrection()
{
  left_correction_impulse.fill(0.0f);
  if (!left_correction_impulse.isEmpty())
  {
    left_correction_impulse[0] = 1.0f;
  }

  right_correction_impulse.fill(0.0f);
  if (!right_correction_impulse.isEmpty())
  {
    right_correction_impulse[0] = 1.0f;
  }

  if (new_correction_convproc == nullptr)
  {
    new_correction_convproc = createStereoConvolver(left_correction_impulse,
                                                    right_correction_impulse);
  }

  cabinetCorrectionEnabled = false;
}
// Sampling rate this processor was constructed for.
int Processor::getSamplingRate()
{
  return samplingRate;
}
// Render `nSamples` of mono input through the chain
//   preamp IR -> (optional preamp correction) -> FAUST tube model
//   -> cabinet IR -> (optional cabinet correction)
// writing stereo output to outL/outR.  Runs on the audio thread.
// NOTE(review): the memcpy loops advance in whole `fragm` blocks, so
// nSamples is assumed to be a multiple of fragm -- confirm callers
// guarantee that, otherwise the final block over-runs the buffers.
void Processor::process(float *outL, float *outR, float *in, int nSamples)
{
  // Change convolvers if new available
  // (swap on the audio thread; the old one is freed asynchronously)
  if (new_preamp_convproc != nullptr)
  {
    freeConvolver(preamp_convproc);
    preamp_convproc = new_preamp_convproc;
    new_preamp_convproc = nullptr;
  }

  if (new_preamp_correction_convproc != nullptr)
  {
    freeConvolver(preamp_correction_convproc);
    preamp_correction_convproc = new_preamp_correction_convproc;
    new_preamp_correction_convproc = nullptr;
    //printf("Exchanged preamp correction convproc\n");
  }

  if (new_convproc != nullptr)
  {
    freeConvolver(convproc);
    convproc = new_convproc;
    new_convproc = nullptr;
  }

  if (new_correction_convproc != nullptr)
  {
    freeConvolver(correction_convproc);
    correction_convproc = new_correction_convproc;
    new_correction_convproc = nullptr;
  }

  // Preamp convolver
  // Zita-convolver accepts 'fragm' number of samples,
  // real buffer size may be greater,
  // so perform convolution in multiple steps
  int bufp = 0;

  QVector<float> intermediateBuffer(nSamples);

  while (bufp < nSamples)
  {
    memcpy (preamp_convproc->inpdata(0), in + bufp, fragm * sizeof(float));

    preamp_convproc->process (true);

    memcpy (intermediateBuffer.data() + bufp, preamp_convproc->outdata(0),
            fragm * sizeof(float));

    bufp += fragm;
  }

  // Preamp correction convolver
  if (preampCorrectionEnabled)
  {
    bufp = 0;
    while (bufp < nSamples)
    {
      memcpy (preamp_correction_convproc->inpdata(0), intermediateBuffer.data()
        + bufp, fragm * sizeof(float));

      preamp_correction_convproc->process (true);

      memcpy (intermediateBuffer.data() + bufp,
              preamp_correction_convproc->outdata(0), fragm * sizeof(float));

      bufp += fragm;
    }
  }

  // Apply main tubeAmp model from FAUST code
  float *inputs[1] = {intermediateBuffer.data()};
  float *outputs[1] = {outL};

  dsp->compute(nSamples, inputs, outputs);

  // The model is mono; duplicate before the stereo cabinet stage.
  memcpy(outR, outL, sizeof(float) * nSamples);

  // Cabinet simulation convolver
  bufp = 0;
  while (bufp < nSamples)
  {
    memcpy (convproc->inpdata(0), outL + bufp, fragm * sizeof(float));
    memcpy (convproc->inpdata(1), outR + bufp, fragm * sizeof(float));

    convproc->process (true);

    memcpy (outL + bufp, convproc->outdata(0), fragm * sizeof(float));
    memcpy (outR + bufp, convproc->outdata(1), fragm * sizeof(float));

    bufp += fragm;
  }

  // Cabinet correction convolver
  if (cabinetCorrectionEnabled)
  {
    bufp = 0;
    while (bufp < nSamples)
    {
      memcpy (correction_convproc->inpdata(0), outL + bufp, fragm * sizeof(float));
      memcpy (correction_convproc->inpdata(1), outR + bufp, fragm * sizeof(float));

      correction_convproc->process (true);

      memcpy (outL + bufp, correction_convproc->outdata(0), fragm * sizeof(float));
      memcpy (outR + bufp, correction_convproc->outdata(1), fragm * sizeof(float));

      bufp += fragm;
    }
  }
}
// Hand `convolver` to a helper thread for deletion so the (realtime)
// caller never blocks on the destructor.
// NOTE(review): the single ConvolverDeleteThread is reused; if it is
// still busy, start() is a no-op and the previous pointer gets
// overwritten -- confirm frees cannot happen in quick succession.
void Processor::freeConvolver(Convproc *convolver)
{
  convolverDeleteThread->convolver = convolver;
  convolverDeleteThread->start();
}
// Build and start a 1-in/1-out zita Convproc for the given impulse.
// Caller owns the returned pointer (freed via freeConvolver/delete).
Convproc* Processor::createMonoConvolver(QVector<float> impulse)
{
  Convproc *newConv = new Convproc;

  newConv->configure (1, 1, impulse.size(),
                      fragm, fragm, Convproc::MAXPART, 0.0);

  newConv->impdata_create (0, 0, 1, impulse.data(),
                           0, impulse.size());

  newConv->start_process(CONVPROC_SCHEDULER_PRIORITY,
                         CONVPROC_SCHEDULER_CLASS);

  return newConv;
}

// Build and start a 2-in/2-out convolver: channel 0 gets l_impulse,
// channel 1 gets r_impulse (both sized like l_impulse).
Convproc* Processor::createStereoConvolver(QVector<float> l_impulse,
                                           QVector<float> r_impulse)
{
  Convproc *newConv = new Convproc;

  newConv->configure(2, 2, l_impulse.size(), fragm, fragm, Convproc::MAXPART, 0.0);

  newConv->impdata_create(0, 0, 1, l_impulse.data(), 0, l_impulse.size());
  newConv->impdata_create(1, 1, 1, r_impulse.data(), 0, r_impulse.size());

  newConv->start_process(CONVPROC_SCHEDULER_PRIORITY, CONVPROC_SCHEDULER_CLASS);

  return newConv;
}
// Path of the currently loaded profile file.
QString Processor::getProfileFileName()
{
  return profileFileName;
}

bool Processor::isPreampCorrectionEnabled()
{
  return preampCorrectionEnabled;
}

bool Processor::isCabinetCorrectionEnabled()
{
  return cabinetCorrectionEnabled;
}

// Replace the preamp IR; queues a new convolver for the audio thread
// unless one is already pending.
void Processor::setPreampImpulse(QVector<float> data)
{
  preamp_impulse = data;

  if (new_preamp_convproc == nullptr)
  {
    new_preamp_convproc = createMonoConvolver(preamp_impulse);
  }
}

// Replace the stereo cabinet IR; queues a new convolver likewise.
void Processor::setCabinetImpulse(QVector<float> dataL, QVector<float> dataR)
{
  left_impulse = dataL;
  right_impulse = dataR;

  if (new_convproc == nullptr)
  {
    new_convproc = createStereoConvolver(left_impulse, right_impulse);
  }
}

QVector<float> Processor::getPreampImpulse()
{
  return preamp_impulse;
}

QVector<float> Processor::getLeftImpulse()
{
  return left_impulse;
}

QVector<float> Processor::getRightImpulse()
{
  return right_impulse;
}

// Toggle the correction stages without touching the IRs themselves.
void Processor::setPreampCorrectionStatus(bool status)
{
  preampCorrectionEnabled = status;
}

void Processor::setCabinetCorrectionStatus(bool status)
{
  cabinetCorrectionEnabled = status;
}

void Processor::setProfileFileName(QString name)
{
  profileFileName = name;
}
// Deletes the handed-over convolver off the audio thread
// (see Processor::freeConvolver).
void ConvolverDeleteThread::run()
{
  delete convolver;
}
| 20,117
|
C++
|
.cpp
| 589
| 28.382003
| 96
| 0.668508
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,355
|
tubeamp_panel.cpp
|
olegkapitonov_tubeAmp-Designer/src/tubeamp_panel.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QLabel>
#include <QGridLayout>
#include <cmath>
#include "tubeamp_panel.h"
// Builds the fixed-width tubeAmp control panel: a scrollable grid with a
// title, six captioned dials (drive, tone stack, volume, level) and two
// level meters. Dial changes are wired to the per-control slots below.
TubeAmpPanel::TubeAmpPanel(QWidget *parent, Processor *prc, Player *plr) :
  QFrame(parent)
{
  processor = prc;
  player = plr;

  setFrameShape(QFrame::Panel);
  setMinimumWidth(200);
  setMaximumWidth(200);

  scrollArea = new QScrollArea(this);
  QHBoxLayout *mainLayout = new QHBoxLayout(this);
  scrollWidget = new QWidget(this);
  mainLayout->addWidget(scrollWidget);
  scrollWidget->setMinimumHeight(550);
  scrollWidget->setMinimumWidth(200);
  scrollWidget->setMaximumWidth(200);
  scrollArea->setWidget(scrollWidget);

  QGridLayout *grid = new QGridLayout(scrollWidget);
  grid->setAlignment(Qt::AlignHCenter);

  // Panel caption across both grid columns.
  QLabel *titleLabel = new QLabel(tr("tubeAmp Controls"), scrollWidget);
  grid->addWidget(titleLabel, 0, 0, 1, 2);
  titleLabel->setMaximumHeight(30);
  titleLabel->setAlignment(Qt::AlignHCenter);
  QFont titleLabelFont = titleLabel->font();
  titleLabelFont.setPointSize(15);
  titleLabel->setFont(titleLabelFont);

  // Creates one captioned dial: label at (row, column), dial directly
  // below it, and connects the dial's valueChanged signal to `slot`.
  auto makeDial = [this, grid](const QString &caption, int row, int column,
                               void (TubeAmpPanel::*slot)(int)) -> TADial *
  {
    QLabel *label = new QLabel(caption, scrollWidget);
    label->setAlignment(Qt::AlignHCenter);
    label->setMaximumHeight(30);
    grid->addWidget(label, row, column, 1, 1);

    TADial *dial = new TADial(scrollWidget);
    grid->addWidget(dial, row + 1, column, 1, 1);
    connect(dial, &TADial::valueChanged, this, slot);
    return dial;
  };

  driveDial  = makeDial(tr("Drive"),  1, 0, &TubeAmpPanel::driveDialValueChanged);
  bassDial   = makeDial(tr("Bass"),   1, 1, &TubeAmpPanel::bassDialValueChanged);
  middleDial = makeDial(tr("Middle"), 3, 0, &TubeAmpPanel::middleDialValueChanged);
  trebleDial = makeDial(tr("Treble"), 3, 1, &TubeAmpPanel::trebleDialValueChanged);
  volumeDial = makeDial(tr("Volume"), 5, 0, &TubeAmpPanel::volumeDialValueChanged);
  levelDial  = makeDial(tr("Level"),  5, 1, &TubeAmpPanel::levelDialValueChanged);

  // Same idea for the two meters, each caption + meter spanning both columns.
  auto makeMeter = [this, grid](const QString &caption, int row) -> TAMeter *
  {
    QLabel *label = new QLabel(caption, scrollWidget);
    label->setMaximumHeight(24);
    grid->addWidget(label, row, 0, 1, 2);

    TAMeter *meter = new TAMeter(scrollWidget);
    meter->setMaximumHeight(24);
    grid->addWidget(meter, row + 1, 0, 1, 2);
    return meter;
  };

  inputMeter = makeMeter(tr("Input Level"), 7);
  outputMeter = makeMeter(tr("Output Level"), 9);

  // Initialize all dials from the processor's current control values.
  resetControls();
}
// Keeps the scroll area sized to fill the whole panel on every resize.
void TubeAmpPanel::resizeEvent(QResizeEvent *)
{
  const int panelWidth = width();
  const int panelHeight = height();
  scrollArea->setMinimumWidth(panelWidth);
  scrollArea->setMinimumHeight(panelHeight);
}
void TubeAmpPanel::bassDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.low = (newValue - 50) / 5.0;
processor->setControls(controls);
emit dialValueChanged();
}
void TubeAmpPanel::middleDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.middle = (newValue - 50) / 5.0;
processor->setControls(controls);
emit dialValueChanged();
}
void TubeAmpPanel::trebleDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.high = (newValue - 50) / 5.0;
processor->setControls(controls);
emit dialValueChanged();
}
void TubeAmpPanel::driveDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.drive = newValue;
processor->setControls(controls);
emit dialValueChanged();
}
void TubeAmpPanel::volumeDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.mastergain = newValue;
processor->setControls(controls);
emit dialValueChanged();
}
void TubeAmpPanel::levelDialValueChanged(int newValue)
{
stControls controls = processor->getControls();
controls.volume = (float)newValue / 100.0;
processor->setControls(controls);
player->setLevel((float)newValue / 100.0);
emit dialValueChanged();
}
void TubeAmpPanel::resetControls()
{
stControls ctrls = processor->getControls();
driveDial->setValue(ctrls.drive);
bassDial->setValue((ctrls.low + 10.0) * 5.0);
middleDial->setValue((ctrls.middle + 10.0) * 5.0);
trebleDial->setValue((ctrls.high + 10.0) * 5.0);
volumeDial->setValue(ctrls.mastergain);
levelDial->setValue(ctrls.volume * 100.0);
}
// Converts linear peak/RMS levels to decibels (20*log10) and updates the
// input and output meters.
void TubeAmpPanel::peakRMSValueChanged(float inputValue, float outputValue)
{
  const float inputDb = 20.0 * log10(inputValue);
  const float outputDb = 20.0 * log10(outputValue);
  inputMeter->setValue(inputDb);
  outputMeter->setValue(outputDb);
}
| 6,744
|
C++
|
.cpp
| 168
| 37.392857
| 90
| 0.751034
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,356
|
profiler.cpp
|
olegkapitonov_tubeAmp-Designer/src/profiler.cpp
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#include <QSharedPointer>
#include <sndfile.h>
#include <cmath>
#include <cfloat>
#include <QMessageBox>
#include <QDir>
#include <QApplication>
#include "profiler.h"
#include "math_functions.h"
#define EXPER_POINTS_NUM 15
#define TEST_SIGNAL_LENGTH_SEC 3.0
#define TEST_SIGNAL_PAUSE_LENGTH_SEC 0.1
#define RESPONSE_OVERSAMPLING_COEFF 16
// Frequencies used to generate test signal
// to get preamp amplitude response, in rad/s.
// 15 entries — must stay in sync with EXPER_POINTS_NUM and freq_weights.
int calibration_freqs[] = {200,600,1500,2000,2500,3000,3500,
                           4000,4500,5000,12000,25000,35000,50000,80000};
// Amplitude values for each frequency in test signal.
// Different amplitude values are used
// to extend dynamic range.
double freq_weights[] = {1.0,0.8,0.75,0.6,0.55,0.5,0.5,
                         0.4,0.35,0.3,0.25,0.2,0.35,0.75,1.0};
// Stores the processor/player collaborators and routes warning requests
// through a signal so they can be raised from worker code and shown by
// the slot (see warningMessageNeededSlot).
Profiler::Profiler(Processor *prc, Player *plr)
{
  player = plr;
  processor = prc;
  connect(this, &Profiler::warningMessageNeeded,
          this, &Profiler::warningMessageNeededSlot);
}
// Loads the amplifier response recording into responseData and remembers
// its sample rate and channel count. On open failure responseData is left
// empty; analyze() rejects too-short data later.
void Profiler::loadResponseFile(QString fileName)
{
  responseData.resize(0);

  SF_INFO info;
  info.format = 0;

  SNDFILE *sndFile = sf_open(fileName.toUtf8(), SFM_READ, &info);
  if (sndFile == NULL)
  {
    return;
  }

  responseDataSamplerate = info.samplerate;
  responseDataChannels = info.channels;
  responseData.resize(info.frames * info.channels);
  sf_readf_float(sndFile, responseData.data(), info.frames);
  sf_close(sndFile);
}
// Real test file contains DI from guitar.
// Appends the contents of share/tubeAmp Designer/real_test.wav to
// `testSignal` (resampling to `sampleRate` if the file's rate differs)
// and returns the extended vector. If the file cannot be opened the
// input is returned unchanged.
QVector<float> Profiler::loadRealTestFile(QVector<float> testSignal, float sampleRate)
{
  SF_INFO sfinfo;
  sfinfo.format = 0;
  int realTestDataSamplerate;
  // real_test.wav is shipped next to the binary: <prefix>/share/tubeAmp Designer.
  QDir profilesDir(QCoreApplication::applicationDirPath());
  profilesDir.cdUp();
  profilesDir.cd("share/tubeAmp Designer");
  SNDFILE *realTestSndFile = sf_open(QString(profilesDir.absolutePath() +
                                     "/real_test.wav").toUtf8(),
                                     SFM_READ, &sfinfo);
  if (realTestSndFile != NULL)
  {
    realTestDataSamplerate = sfinfo.samplerate;
    if (realTestDataSamplerate == sampleRate)
    {
      // Same rate: read straight into the grown tail of testSignal.
      int startRealTest = testSignal.length();
      testSignal.resize(testSignal.length() + sfinfo.frames * sfinfo.channels);
      sf_readf_float(realTestSndFile, &(testSignal.data()[startRealTest]), sfinfo.frames);
    }
    else
    {
      // Rate mismatch: read into a temporary, resample, then copy over.
      QVector<float> tempTestSignal(sfinfo.frames * sfinfo.channels);
      sf_readf_float(realTestSndFile, tempTestSignal.data(), sfinfo.frames);
      QVector<float> tempTestSignalResampled = resample_vector(tempTestSignal,
                                                               realTestDataSamplerate,
                                                               sampleRate);
      int startRealTest = testSignal.size();
      testSignal.resize(testSignal.size() + tempTestSignalResampled.size());
      for (int i = 0; i < tempTestSignalResampled.size(); i++)
      {
        testSignal[i + startRealTest] = tempTestSignalResampled[i];
      }
    }
    sf_close(realTestSndFile);
  }
  return testSignal;
}
// Calculates amplitude of the test signal
// at which profiled amplifier starts to clip.
//
// `freqIndex` selects the calibration frequency, `data` is the recorded
// (oversampled, mono) response to that tone, `samplerate` the rate of
// `data`. Returns a stCrunchPoint with the maximal deviation from the
// expected ramp (`max`), the sample position of that deviation
// (`maxtime`), the level there (`rmsAtMax`) and `isBad` set when no
// clear clipping onset was detected.
//
// Fixes over the previous version: all stCrunchPoint fields are
// initialized before use (rmsAtMax could be read uninitialized on
// degenerate input), the no-op pow(x, 1) calls are gone, and an empty
// pikeArray no longer leads to out-of-bounds indexing.
stCrunchPoint Profiler::findCrunchPoint(int freqIndex,
                                        QVector<float> data,
                                        int samplerate)
{
  // Length of one period of the tone in samples (frequencies are in rad/s).
  int periodInSamples = samplerate /
    ((double)calibration_freqs[freqIndex] /
    2.0 / M_PI);
  int periodsNum = (double)samplerate *
    (double)TEST_SIGNAL_LENGTH_SEC /
    periodInSamples;
  // Calculate a level value for each period of the test signal.
  // Note: the extra sqrt() compresses the dynamic range — this is the
  // square root of the RMS, not the plain RMS.
  QVector<sPike> pikeArray;
  for (int j = 0; j < periodsNum - 2; j++)
  {
    double rmsSum = 0.0;
    for (int i = 0; i < periodInSamples; i++)
    {
      rmsSum += pow(data[i + j * periodInSamples], 2);
    }
    double rms = sqrt(sqrt(rmsSum / periodInSamples));
    sPike pikeElement;
    pikeElement.value = rms;
    pikeElement.time = j * periodInSamples + periodInSamples / 2;
    pikeArray.append(pikeElement);
  }

  // Initialize every field: rmsAtMax was previously left uninitialized
  // and read below when the loop found no maximum (all-zero input).
  stCrunchPoint crunchPoint;
  crunchPoint.max = 0.0;
  crunchPoint.maxtime = 0;
  crunchPoint.rmsAtMax = 0.0;
  crunchPoint.isBad = false;

  // Guard against degenerate input that produced no periods at all
  // (previously indexed pikeArray[-1]).
  if (pikeArray.isEmpty())
  {
    crunchPoint.isBad = true;
    return crunchPoint;
  }

  // Compare initial form of the test signal
  // and signal form after the profiled amplifier (response form).
  // At point in time when profiled amplifier starts to clip
  // the difference between test and response form
  // will be maximal. Find this point in time here.
  double stepCoeff = (double)pikeArray[pikeArray.size() - 1].value /
    (double)pikeArray[pikeArray.size() - 1].time;
  double noiseFloor = pikeArray[0].value * 1.2;
  for (int i = 0; i < pikeArray.size() - 1; i++)
  {
    double denoisedValue = pikeArray[i].value - noiseFloor;
    if (denoisedValue < 0)
    {
      denoisedValue = 0.0;
    }
    double errorValue = fabs(denoisedValue - pikeArray[i].time * stepCoeff);
    if (errorValue > crunchPoint.max)
    {
      crunchPoint.max = errorValue;
      crunchPoint.maxtime = pikeArray[i].time;
      crunchPoint.rmsAtMax = pikeArray[i].value;
    }
  }
  // Mark point as Bad if the clipping was not reached.
  // Exclude last two high frequency points from this (hack!)
  if ((fabs(crunchPoint.rmsAtMax - pikeArray[pikeArray.size() - 1].value) /
      pikeArray[pikeArray.size() - 1].value > 0.2) && (freqIndex != 0) &&
      (freqIndex != 13) && (freqIndex != 14))
  {
    crunchPoint.isBad = true;
  }
  else
  {
    crunchPoint.isBad = false;
  }
  return crunchPoint;
}
// Calculates all tubeAmp profile components
// by analyzing response signal from profiled amplifier.
// The response (loaded by loadResponseFile()) must be the amplifier's
// output for the test signal written by createTestFile(): 1 s silence,
// 15 crunch tones, silence, a 10 s log sweep and the "real" guitar DI
// part. Emits progressChanged() along the way and warningMessageNeeded()
// when the recorded gain looks out of range.
void Profiler::analyze(ProfilerPresetType preset)
{
  emit stopPlaybackNeeded();
  // Minimum length check; the constant is the required sample count at
  // 44100 Hz, rescaled to the response file's actual sample rate.
  if (responseData.size() < (int)((3285485.0 * (double)responseDataSamplerate) / 44100.0 + 1))
  {
    QMessageBox::critical(nullptr, QObject::tr("Error!"),
                          tr("Response file is too short!"),
                          QMessageBox::Ok,
                          QMessageBox::Ok);
    return;
  }
  // Remove first 1 second from response signal.
  responseData.remove(0, 1 * responseDataSamplerate * responseDataChannels);
  QVector<float> preamp_impulse(0.1 * processor->getSamplingRate());
  double desiredGain;
  if (preset == CRYSTALCLEAN_PRESET)
  {
    // Clean preset: skip the crunch analysis and use this fixed preamp
    // frequency response (frequencies w in rad/s, amplitudes A) instead
    // of a measured one.
    QVector<double> A({
      0.1,
      0.5,
      0.75,
      1.0,
      1.0,
      1.0,
      1.0,
      0.8,
      0.5,
      0.1
    });
    QVector<double> w({
      125,
      314,
      628,
      1256,
      3141,
      6283,
      12566,
      25132,
      31415,
      62831
    });
    // Calculate impulse response (for convolver in tubeAmp)
    // of the preamp
    frequency_response_to_impulse_response(w.data(),
                                           A.data(),
                                           w.size(),
                                           preamp_impulse.data(),
                                           preamp_impulse.size(),
                                           processor->getSamplingRate());
    desiredGain = 0.0005;
    emit progressChanged(50);
  }
  else
  {
    int responseDataSamplerateOversampled = responseDataSamplerate *
      RESPONSE_OVERSAMPLING_COEFF;
    //int responseDataSamplerateOversampled = 44100.0 * RESPONSE_OVERSAMPLING_COEFF;
    int crunchTestSize = ((TEST_SIGNAL_LENGTH_SEC + TEST_SIGNAL_PAUSE_LENGTH_SEC) *
      EXPER_POINTS_NUM + 1) * responseDataSamplerate * responseDataChannels;
    // Keep only the first channel of the crunch part of the response.
    QVector<float> tempResponseData;
    for (int i = 0; i < crunchTestSize; i += responseDataChannels)
    {
      tempResponseData.append(responseData[i]);
    }
    // 1. Process "crunch" test part of the response signal.
    // This part contains 15 test signals with different frequencies.
    // For each frequency we will calculate amplitudes of input signal
    // at which profiled amplifier starts to clip.
    // As a result we will get amplitude response of the preamp.
    // Oversample crunch test part for better accuracy
    QVector<float> responseDataOversampled = resample_vector(tempResponseData,
                                                             responseDataSamplerate,
                                                             responseDataSamplerateOversampled);
    emit progressChanged(20);
    stCrunchPoint crunchPoint;
    // Calculate amplitude response of the preamp
    QVector<double> Aexper(EXPER_POINTS_NUM);
    QVector<double> wexper(EXPER_POINTS_NUM);
    // Holds maximum amplitude value of the response
    // for normalization
    double Amax = 0.0;
    int badPointsCounter = 0;
    for (int j = 0; j < EXPER_POINTS_NUM; j++)
    {
      // Extract the j-th tone from the oversampled response.
      QVector<float> A(responseDataOversampled.size());
      for (int i = 0; i < responseDataSamplerateOversampled *
           TEST_SIGNAL_LENGTH_SEC - 1; i++)
      {
        A[i] = responseDataOversampled[i +
               j * (responseDataSamplerateOversampled *
               TEST_SIGNAL_LENGTH_SEC +
               responseDataSamplerateOversampled *
               TEST_SIGNAL_PAUSE_LENGTH_SEC)];
      }
      crunchPoint = findCrunchPoint(j, A, responseDataSamplerateOversampled);
      if (crunchPoint.isBad)
      {
        badPointsCounter++;
      }
      wexper[j] = calibration_freqs[j];
      // The later the amp clips (larger maxtime), the lower its gain at
      // this frequency; square the ratio and undo the per-frequency test
      // amplitude weighting.
      Aexper[j] = (double)responseDataSamplerateOversampled *
        (double)TEST_SIGNAL_LENGTH_SEC / crunchPoint.maxtime;
      Aexper[j] = Aexper[j] * Aexper[j] / freq_weights[j];
      if (Aexper[j]>Amax)
      {
        Amax = Aexper[j];
      }
    }
    // Calculate Gain level of profiled amplifier
    desiredGain = Amax / responseDataSamplerate * 7.0;
    // Check if the gain is within a reasonable range
    if (desiredGain < 0.0001)
    {
      QString message = QString(QObject::tr("Response file was created with too"
                                " low gain,\n"
                                "for adequate results please increase the gain\n"
                                "by at least %1 db!")).arg(20.0 * log10(0.001 / desiredGain),
                                                           4, 'f', 2);
      emit warningMessageNeeded(message);
    }
    else if (badPointsCounter != 0)
    {
      QString message = QString(QObject::tr("Response file was created with too"
                                " low gain,\n"
                                "for adequate results please increase the gain\n"
                                "by 10 db!"));
      emit warningMessageNeeded(message);
    }
    if (desiredGain > 0.2)
    {
      QString message = QString(QObject::tr("Response file was created with too"
                                " high gain,\n"
                                "for adequate results please decrease the gain\n"
                                "by at least %1 db!")).arg(20.0*log10(desiredGain / 0.2),4,'f',2);
      emit warningMessageNeeded(message);
    }
    // Limit the gain to absolute maximum and minimum level
    if (desiredGain > 3.16 * 10.0)
    {
      desiredGain = 3.16 * 10.0;
    }
    if (desiredGain < 0.0005)
    {
      desiredGain = 0.0005;
    }
    // Perform normalization of the preamp amplitude response
    for (int i = 0; i < EXPER_POINTS_NUM; i++)
    {
      Aexper[i] /= Amax;
      if (Aexper[i] < 0.01)
      {
        Aexper[i] = 0.01;
      }
    }
    // Calculate impulse response (for convolver in tubeAmp)
    // of the preamp
    frequency_response_to_impulse_response(wexper.data(),
                                           Aexper.data(),
                                           wexper.size(),
                                           preamp_impulse.data(),
                                           preamp_impulse.size(),
                                           processor->getSamplingRate());
    emit progressChanged(50);
  }
  // 2. Calculate frequency response of the part after clipping
  // (mainly cabinet) by deconvolution
  // Get test sweep signal
  QVector<float> sweepSignal(processor->getSamplingRate() * 10);
  generate_logarithmic_sweep(10.0, processor->getSamplingRate(),
                             20.0, 44100.0 / 2.0,
                             0.01,
                             sweepSignal.data());
  // Get test signal after preamp - convolve test sweep
  // with preamp impulse response
  fft_convolver(sweepSignal.data(), sweepSignal.size(), preamp_impulse.data(),
                preamp_impulse.size());
  // Get sweep response from profiled amplifier.
  // Left = first channel; right = average of the remaining channels
  // (or a copy of the first for mono recordings).
  QVector<float> sweepResponseL;
  QVector<float> sweepResponseR;
  int sweepStart = ((TEST_SIGNAL_LENGTH_SEC + TEST_SIGNAL_PAUSE_LENGTH_SEC) *
    EXPER_POINTS_NUM + 1) * responseDataSamplerate * responseDataChannels;
  for (int i = 0;
       i < (responseDataSamplerate * 11 * responseDataChannels);
       i += responseDataChannels)
  {
    sweepResponseL.append(responseData[sweepStart + i]);
    if (responseDataChannels > 1)
    {
      float sum = 0.0;
      for (int j = 1; j < responseDataChannels; j++)
      {
        sum += responseData[sweepStart + i + j];
      }
      sum /= (responseDataChannels - 1);
      sweepResponseR.append(sum);
    }
    else
    {
      sweepResponseR.append(responseData[sweepStart + i]);
    }
  }
  // Resample to processor sampling rate
  QVector<float> sweepResponseResampledL = resample_vector(
    sweepResponseL,
    responseDataSamplerate,
    processor->getSamplingRate()
  );
  QVector<float> sweepResponseResampledR = resample_vector(
    sweepResponseR,
    responseDataSamplerate,
    processor->getSamplingRate()
  );
  // Calculate cabinet impulse response by deconvolution
  // with test signal after preamp
  QVector<float> cabinet_impulseL(processor->getSamplingRate());
  QVector<float> cabinet_impulseR(processor->getSamplingRate());
  fft_deconvolver(sweepSignal.data(),
                  sweepSignal.size(),
                  sweepResponseResampledL.data(),
                  sweepResponseResampledL.size(),
                  cabinet_impulseL.data(),
                  cabinet_impulseL.size(),
                  30.0 / processor->getSamplingRate(),
                  10000.0 / processor->getSamplingRate(),
                  -32.0
  );
  fft_deconvolver(sweepSignal.data(),
                  sweepSignal.size(),
                  sweepResponseResampledR.data(),
                  sweepResponseResampledR.size(),
                  cabinet_impulseR.data(),
                  cabinet_impulseR.size(),
                  30.0 / processor->getSamplingRate(),
                  10000.0 / processor->getSamplingRate(),
                  -32.0
  );
  emit progressChanged(75);
  // 4. Correct cabinet impulse response
  // by auto-equalization based on the "real" test.
  // Real test signal is recorded DI from guitar.
  // Get response on real test signal from profiled amplifier
  QVector<float> realTestResponseL;
  QVector<float> realTestResponseR;
  int realTestStart = ((TEST_SIGNAL_LENGTH_SEC + TEST_SIGNAL_PAUSE_LENGTH_SEC) *
    EXPER_POINTS_NUM + 1) * responseDataSamplerate * responseDataChannels +
    responseDataSamplerate * responseDataChannels * 11;
  for (int i = 0;
       i < (responseData.size() - realTestStart);
       i += responseDataChannels)
  {
    realTestResponseL.append(responseData[realTestStart + i]);
    float sum = 0.0;
    if (responseDataChannels > 1)
    {
      for (int j = 1; j < responseDataChannels; j++)
      {
        sum += responseData[realTestStart + i + j];
      }
      sum /= (responseDataChannels - 1.0);
      realTestResponseR.append(sum);
    }
    else
    {
      realTestResponseR.append(responseData[realTestStart + i]);
    }
  }
  QVector<float> realTestResponseResampledL = resample_vector(realTestResponseL,
    responseDataSamplerate, processor->getSamplingRate());
  QVector<float> realTestResponseResampledR = resample_vector(realTestResponseR,
    responseDataSamplerate, processor->getSamplingRate());
  QVector<float> realTestSignal;
  realTestSignal = loadRealTestFile(realTestSignal, processor->getSamplingRate());
  // NOTE(review): realTestSignalRMS is computed but never used below —
  // candidate for removal; kept for now.
  float realTestSignalRMS = 0.0;
  for (int i = 0; i<realTestSignal.size(); i++)
  {
    realTestSignalRMS += pow(realTestSignal[i], 2);
  }
  realTestSignalRMS = sqrt(realTestSignalRMS / realTestSignal.size());
  // Normalize to -20 dB standard input level
  QVector<float> preampImpulseNormalizeTempBuffer(realTestSignal);
  fft_convolver(preampImpulseNormalizeTempBuffer.data(),
                preampImpulseNormalizeTempBuffer.size(),
                preamp_impulse.data(),
                preamp_impulse.size()
  );
  double preampImpulseNormalizeTempBufferRMS = 0.0;
  for (int i = 0; i < preampImpulseNormalizeTempBuffer.size(); i++)
  {
    preampImpulseNormalizeTempBufferRMS += pow(preampImpulseNormalizeTempBuffer[i], 2);
  }
  preampImpulseNormalizeTempBufferRMS = sqrt(preampImpulseNormalizeTempBufferRMS /
                                             preampImpulseNormalizeTempBuffer.size());
  for (int i = 0; i < preamp_impulse.size(); i++)
  {
    preamp_impulse[i] *= 0.04 / preampImpulseNormalizeTempBufferRMS;
  }
  QVector<float> processedDataL(realTestSignal.size());
  QVector<float> processedDataR(realTestSignal.size());
  // Create background Processor with previously adjusted profile
  QSharedPointer<Processor> backProcessor
    = QSharedPointer<Processor>(new Processor(processor->getSamplingRate()));
  backProcessor->loadProfile(processor->getProfileFileName());
  stControls ctrls = processor->getControls();
  ctrls.drive = 100.0;
  ctrls.mastergain = 100.0;
  backProcessor->setControls(ctrls);
  processor->setControls(ctrls);
  // Fill in the fixed, preset-dependent profile parameters; the measured
  // desiredGain is split between preamp_level and amp_level.
  st_profile profile = processor->getProfile();
  if (preset == CRYSTALCLEAN_PRESET)
  {
    profile.preamp_level = 0.005;
    profile.amp_level = 0.1;
    profile.signature[0] = 'T';
    profile.signature[1] = 'a';
    profile.signature[2] = 'P';
    profile.signature[3] = 'f';
    profile.version = 1;
    profile.preamp_bias = 0.0;
    profile.preamp_Kreg = 0.8;
    profile.preamp_Upor = 0.8;
    profile.tonestack_low_freq = 20.0;
    profile.tonestack_low_band = 400.0;
    profile.tonestack_middle_freq = 500.0;
    profile.tonestack_middle_band = 400.0;
    profile.tonestack_high_freq = 10000.0;
    profile.tonestack_high_band = 18000.0;
    profile.amp_bias = 0.0;
    profile.amp_Kreg = 0.39;
    profile.amp_Upor = 0.91;
    profile.sag_time = 0.3;
    profile.sag_coeff = 0.0;
    profile.output_level = 1.0/7.5;
  }
  if (preset == CLASSIC_PRESET)
  {
    if (desiredGain <= 0.05)
    {
      profile.preamp_level = 0.005;
      profile.amp_level = desiredGain / 0.005;
    }
    else
    {
      profile.amp_level = 10.0;
      profile.preamp_level = desiredGain / 10.0;
    }
    profile.signature[0] = 'T';
    profile.signature[1] = 'a';
    profile.signature[2] = 'P';
    profile.signature[3] = 'f';
    profile.version = 1;
    profile.preamp_bias = 0.0;
    profile.preamp_Kreg = 0.8;
    profile.preamp_Upor = 0.8;
    profile.tonestack_low_freq = 20.0;
    profile.tonestack_low_band = 400.0;
    profile.tonestack_middle_freq = 500.0;
    profile.tonestack_middle_band = 400.0;
    profile.tonestack_high_freq = 10000.0;
    profile.tonestack_high_band = 18000.0;
    profile.amp_bias = 0.2;
    profile.amp_Kreg = 0.7;
    profile.amp_Upor = 0.2;
    profile.sag_time = 0.3;
    profile.sag_coeff = 0.5;
    profile.output_level = 1.0/7.5;
  }
  if (preset == MASTERGAIN_PRESET)
  {
    if (desiredGain > 3.16)
    {
      desiredGain = 3.16;
    }
    profile.preamp_level = desiredGain / 0.1;
    profile.amp_level = 0.1;
    profile.signature[0] = 'T';
    profile.signature[1] = 'a';
    profile.signature[2] = 'P';
    profile.signature[3] = 'f';
    profile.version = 1;
    profile.preamp_bias = 0.0; // TODO: bias value flagged "!!!" by the author — needs review
    profile.preamp_Kreg = 2.0;
    profile.preamp_Upor = 0.2;
    profile.tonestack_low_freq = 20.0;
    profile.tonestack_low_band = 400.0;
    profile.tonestack_middle_freq = 500.0;
    profile.tonestack_middle_band = 400.0;
    profile.tonestack_high_freq = 10000.0;
    profile.tonestack_high_band = 18000.0;
    profile.amp_bias = 0.2;
    profile.amp_Kreg = 1.0;
    profile.amp_Upor = 0.5;
    profile.sag_time = 0.1;
    profile.sag_coeff = 0.0;
    profile.output_level = 1/5.0;
  }
  processor->setProfile(profile);
  processor->setPreampImpulse(preamp_impulse);
  processor->setCabinetImpulse(cabinet_impulseL, cabinet_impulseR);
  backProcessor->setProfile(profile);
  // DIRTY HACK!!!
  // Run Auto-Equalizer 4 times to improve accuracy.
  // TODO: correct AutoEqualizer code so that one run is enough
  for (int i = 0; i < 4; i++)
  {
    backProcessor->setPreampImpulse(processor->getPreampImpulse());
    backProcessor->setCabinetImpulse(processor->getLeftImpulse(), processor->getRightImpulse());
    // Cut signals up to multiple of FRAGM (64 samples)
    int sizeToFragm = floor(realTestSignal.size() / (double)fragm) * fragm;
    realTestSignal.resize(sizeToFragm);
    processedDataL.resize(sizeToFragm);
    processedDataR.resize(sizeToFragm);
    realTestResponseResampledL.resize(sizeToFragm);
    realTestResponseResampledR.resize(sizeToFragm);
    // Get real test response from previously adjusted Processor
    backProcessor->process(processedDataL.data(),
                           processedDataR.data(),
                           realTestSignal.data(),
                           realTestSignal.size());
    emit progressChanged(85);
    // Mix both channels down to mono (double precision) for the auto-EQ.
    QVector<double> processedDataDouble(processedDataL.size());
    for (int i = 0; i < processedDataL.size(); i++)
    {
      processedDataDouble[i] = (processedDataL[i] + processedDataR[i]) / 2.0;
    }
    QVector<double> realTestResponseResampledDouble(realTestResponseResampledL.size());
    for (int i = 0; i < realTestResponseResampledL.size(); i++)
    {
      realTestResponseResampledDouble[i] = (realTestResponseResampledL[i] +
                                            realTestResponseResampledR[i]) / 2.0;
    }
    // Calculate auto-equalizer correction
    // between real test responses from Processor
    // and from profiled amplifier.
    // 40 log-spaced correction points between 10 Hz and 20 kHz.
    int averageSpectrumSize = 4096;
    int autoEqualazierPointsNum = 40;
    processor->correctionEqualizerFLogValues.resize(autoEqualazierPointsNum);
    processor->correctionEqualizerDbValues.resize(autoEqualazierPointsNum);
    processor->correctionEqualizerFLogValues[0] = log10(10.0);
    for (int i = 0; i < autoEqualazierPointsNum - 1; i++)
    {
      processor->correctionEqualizerFLogValues[i + 1] = (log10(20000.0) - log10(10.0)) *
        (double)(i + 1) / (processor->correctionEqualizerFLogValues.size() - 1) + log10(10.0);
    }
    calulate_autoeq_amplitude_response(averageSpectrumSize,
                                       backProcessor->getSamplingRate(),
                                       processedDataDouble.data(),
                                       processedDataDouble.size(),
                                       realTestResponseResampledDouble.data(),
                                       realTestResponseResampledDouble.size(),
                                       processor->correctionEqualizerFLogValues.data(),
                                       processor->correctionEqualizerDbValues.data(),
                                       autoEqualazierPointsNum
    );
    /*for (int i = 0; i < processor->correctionEqualizerFLogValues.size(); i++)
    { *
      if (processor->correctionEqualizerDbValues[i] > 20.0)
      {
        processor->correctionEqualizerDbValues[i] = 20.0;
      }
      if (processor->correctionEqualizerDbValues[i] < (-30.0))
      {
        processor->correctionEqualizerDbValues[i] = -30.0;
      }
    }*/
    //processor->setPreampImpulse(preamp_impulse);
    //processor->setCabinetImpulse(cabinet_impulseL, cabinet_impulseR);
    // Convert the correction points from (log10 f, dB) to (rad/s, linear).
    QVector<double> w(processor->correctionEqualizerFLogValues.size());
    QVector<double> A(processor->correctionEqualizerFLogValues.size());
    for (int i = 0; i < w.size(); i++)
    {
      w[i] = 2.0 * M_PI * pow(10.0, processor->correctionEqualizerFLogValues[i]);
      A[i] = pow(10.0, processor->correctionEqualizerDbValues[i] / 20.0);
    }
    // Set correction frequency response to the main Processor
    processor->setCabinetSumCorrectionImpulseFromFrequencyResponse(w, A);
    // Process dummy data to apply changes
    QVector<float> dummyData(fragm);
    processor->process(dummyData.data(),
                       dummyData.data(),
                       dummyData.data(),
                       fragm);
    // Apply cabinet frequency response correction
    // to cabinet impulse response
    processor->applyCabinetSumCorrection();
    processor->resetCabinetSumCorrection();
    // Reset the user-visible correction EQ to a flat 4-point curve.
    processor->correctionEqualizerFLogValues.resize(4);
    processor->correctionEqualizerDbValues.resize(4);
    processor->correctionEqualizerFLogValues[0] = log10(10.0);
    processor->correctionEqualizerFLogValues[1] = log10(1000.0);
    processor->correctionEqualizerFLogValues[2] = log10(20000.0);
    processor->correctionEqualizerFLogValues[3] = log10(22000.0);
    processor->correctionEqualizerDbValues[0] = 0.0;
    processor->correctionEqualizerDbValues[1] = 0.0;
    processor->correctionEqualizerDbValues[2] = 0.0;
    processor->correctionEqualizerDbValues[3] = 0.0;
    // Process dummy data to apply changes
    processor->process(dummyData.data(), dummyData.data(), dummyData.data(), fragm);
  }
  processor->setProfileFileName(":/profiles/British Crunch.tapf");
  // Send real test DI sound and real test response
  // from profiled amplifier to the Player
  player->setDiData(realTestSignal);
  player->setRefData(realTestResponseResampledL, realTestResponseResampledR);
  // Equalize RMS of the real test response from profiled amplifier
  // with sound from the Processor
  // Normalize cabinet impulse to -20 dB level
  {
    QVector<float> processedDataL(player->diData.size());
    QVector<float> processedDataR(player->diData.size());
    QSharedPointer<Processor> backProcessor
      = QSharedPointer<Processor>(new Processor(processor->getSamplingRate()));
    backProcessor->loadProfile(processor->getProfileFileName());
    backProcessor->setControls(processor->getControls());
    backProcessor->setProfile(processor->getProfile());
    backProcessor->setPreampImpulse(processor->getPreampImpulse());
    backProcessor->setCabinetImpulse(processor->getLeftImpulse(),
                                     processor->getRightImpulse());
    QVector<double> w(processor->correctionEqualizerFLogValues.size());
    QVector<double> A(processor->correctionEqualizerFLogValues.size());
    if (processor->correctionEqualizerFLogValues.size() >= 3)
    {
      for (int i = 0; i < w.size(); i++)
      {
        w[i] = 2.0 * M_PI * pow(10.0, processor->correctionEqualizerFLogValues[i]);
        A[i] = pow(10.0, processor->correctionEqualizerDbValues[i] / 20.0);
      }
      backProcessor->setCabinetSumCorrectionImpulseFromFrequencyResponse(w, A);
    }
    if (processor->preampCorrectionEqualizerFLogValues.size() >= 3)
    {
      w.resize(processor->preampCorrectionEqualizerFLogValues.size());
      A.resize(processor->preampCorrectionEqualizerFLogValues.size());
      for (int i = 0; i < w.size(); i++)
      {
        w[i] = 2.0 * M_PI * pow(10.0, processor->preampCorrectionEqualizerFLogValues[i]);
        A[i] = pow(10.0, processor->preampCorrectionEqualizerDbValues[i] / 20.0);
      }
      backProcessor->setPreampCorrectionImpulseFromFrequencyResponse(w, A);
    }
    int sizeToFragm = floor(player->diData.size() / (double)fragm) * fragm;
    processedDataL.resize(sizeToFragm);
    processedDataR.resize(sizeToFragm);
    backProcessor->process(processedDataL.data(),
                           processedDataR.data(),
                           player->diData.data(),
                           sizeToFragm);
    // Scale the cabinet impulse so the processed DI lands at 0.1 RMS (-20 dB).
    double rmsProcessedData = 0.0;
    for (int i = 0; i < processedDataL.size(); i++)
    {
      rmsProcessedData += pow((processedDataL[i] +
                               processedDataR[i]) / 2.0, 2);
    }
    rmsProcessedData = sqrt(rmsProcessedData / processedDataL.size());
    double cabinetImpulseCorrectionCoeff = 0.1 / rmsProcessedData;
    QVector<float> cabinetImpulseCorrectonBufferL = processor->getLeftImpulse();
    QVector<float> cabinetImpulseCorrectonBufferR = processor->getRightImpulse();
    for (int i = 0; i < cabinet_impulseL.size(); i++)
    {
      cabinetImpulseCorrectonBufferL[i] *= cabinetImpulseCorrectionCoeff;
      cabinetImpulseCorrectonBufferR[i] *= cabinetImpulseCorrectionCoeff;
    }
    processor->setCabinetImpulse(cabinetImpulseCorrectonBufferL,
                                 cabinetImpulseCorrectonBufferR
    );
    // Scale the reference recording to the same 0.1 RMS target.
    double rmsRefData = 0.0;
    for (int i = 0; i < player->refDataL.size(); i++)
    {
      rmsRefData += pow((player->refDataL[i] + player->refDataR[i]) / 2.0, 2);
    }
    rmsRefData = sqrt(rmsRefData / player->refDataL.size());
    double rmsRatio = rmsRefData / 0.1;
    for (int i = 0; i < player->refDataL.size(); i++)
    {
      player->refDataL[i] /= rmsRatio;
      player->refDataR[i] /= rmsRatio;
    }
  }
}
// Dispatches to the test-file writer matching the requested format
// version. Only version 0 (the v1 layout) exists; unknown versions are
// silently ignored, matching the original behavior.
void Profiler::createTestFile(QString fileName, int version)
{
  if (version == 0)
  {
    createTestFile_v1(fileName);
  }
}
// Writes the v1 profiling test signal to a mono 16-bit 44.1 kHz WAV:
// 1 s silence, 15 "crunch" tones (ramped amplitude, one per calibration
// frequency, each followed by a short pause), 1 s silence, a 10 s
// logarithmic sweep, 1 s silence, then the "real" guitar DI part from
// real_test.wav.
//
// Fixes over the previous version: sf_close() is no longer called on a
// NULL handle when sf_open() fails, and the zero-crossing back-walk
// stops at index 1 instead of reading testSignal[-1].
void Profiler::createTestFile_v1(QString fileName)
{
  // Define some variables for the sound
  float sampleRate = 44100.0; // hertz
  int nSamples_signal = (int)(TEST_SIGNAL_LENGTH_SEC * sampleRate);
  int nSamples_pause = (int)(TEST_SIGNAL_PAUSE_LENGTH_SEC * sampleRate);
  QVector<float> testSignal;
  // Create 1 sec of silence
  for (int i = 0; i < sampleRate * 1; i++)
  {
    testSignal.append(0.0);
  }
  // Create "crunch" test signal
  for (int j = 0; j < EXPER_POINTS_NUM; j++)
  {
    int i;
    float frameData;
    // Tone with quadratically ramped amplitude (per-frequency weight).
    for(i = 0; i < nSamples_signal; i++ )
    {
      frameData = freq_weights[j] * pow((double)i / ((double)nSamples_signal -
        1.0), 2) * sin(calibration_freqs[j] * (double)i / sampleRate);
      testSignal.append(frameData);
    }
    // Zero the trailing samples back to the last zero crossing so the
    // tone ends cleanly. k >= 1 keeps the k-1 access in bounds (the old
    // k >= 0 bound could read testSignal[-1] if no sign change occurred).
    for (int k = testSignal.size() - 1; k >= 1; k--)
    {
      if ((testSignal[k] * testSignal[k - 1]) < 0.0)
      {
        break;
      }
      else
      {
        testSignal[k] = 0.0;
      }
    }
    // Inter-tone pause.
    for(i=0; i < nSamples_pause; i++ )
    {
      frameData = 0;
      testSignal.append(frameData);
    }
  }
  // 1 sec of silence
  for (int i = 0; i < sampleRate * 1; i++)
  {
    testSignal.append(0.0);
  }
  // Create sweep signal
  int startSweep = testSignal.length();
  testSignal.resize(testSignal.length() + sampleRate * 10);
  generate_logarithmic_sweep(10.0, sampleRate, 20.0, sampleRate / 2.0,
                             0.01, &(testSignal.data()[startSweep]));
  int startBlank = testSignal.size();
  testSignal.resize(testSignal.size() + sampleRate * 1);
  for (int i = 0; i < sampleRate * 1; i++)
  {
    testSignal[i + startBlank] = 0.0;
  }
  // Add "real" test signal from wav file
  testSignal = loadRealTestFile(testSignal, sampleRate);
  SF_INFO sfinfo;
  sfinfo.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
  sfinfo.frames = testSignal.size();
  sfinfo.samplerate = (int)sampleRate;
  sfinfo.channels = 1;
  sfinfo.sections = 1;
  sfinfo.seekable = 1;
  SNDFILE *testFile = sf_open(fileName.toUtf8().constData(), SFM_WRITE, &sfinfo);
  if (testFile != NULL)
  {
    sf_writef_float(testFile, testSignal.data(), testSignal.size());
    // Close inside the success branch: sf_close(NULL) is invalid, and the
    // previous version called it unconditionally.
    sf_close(testFile);
  }
}
// Thread entry point: delegates to Profiler::analyze() so the (likely
// long-running) analysis happens off the thread that called start().
// Both `profiler` and `presetType` must be set before start() is called.
void ProfilerThread::run()
{
  profiler->analyze(presetType);
}
// Slot: shows a modal warning box with the given text (parentless, with
// a translated "Warning!" title and a single OK button). Being a slot,
// it presumably lets worker code request GUI warnings via a queued
// connection — confirm against the connect() call sites.
void Profiler::warningMessageNeededSlot(QString message)
{
  QMessageBox::warning(nullptr, QObject::tr("Warning!"),
    message,
    QMessageBox::Ok,
    QMessageBox::Ok);
}
| 33,087
|
C++
|
.cpp
| 842
| 31.31829
| 96
| 0.635235
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,357
|
nonlinear_widget.h
|
olegkapitonov_tubeAmp-Designer/src/nonlinear_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef NONLINEARWIDGET_H
#define NONLINEARWIDGET_H
#include <QtWidgets/QWidget>
#include <QPixmap>
#include <QPolygon>
#include <QString>
#include <QFont>
// Widget plotting a nonlinear transfer curve: outValues[i] as the
// response to inValues[i], drawn over a cached background grid.
class NonlinearWidget : public QWidget
{
  Q_OBJECT

public:
  NonlinearWidget(QWidget *parent = nullptr);

  // Curve sample points (parallel arrays).
  QVector<double> inValues;
  QVector<double> outValues;

  // Axis maxima — presumably used to scale the curve to widget
  // coordinates; confirm in the paintEvent implementation.
  double maxIn;
  double maxOut;

  // Re-renders the background (grid etc.) into the offscreen buffer.
  void drawBackground();

protected:
  QRect backr;
  QFont infoFont;
  QString text;
  QPixmap *backbuffer;    // offscreen pixmap filled by drawBackground()
  QPoint infop;
  QRect backinfor;
  int stepWidth;          // grid cell size in pixels (presumed)
  int stepHeight;
  QRect margin;

  void paintEvent(QPaintEvent *);
  void resizeEvent(QResizeEvent *);
  void mouseMoveEvent(QMouseEvent *);
};
#endif //NONLINEARWIDGET_H
| 1,546
|
C++
|
.h
| 50
| 28.52
| 80
| 0.731938
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,358
|
centralwidget.h
|
olegkapitonov_tubeAmp-Designer/src/centralwidget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef CENTRALWIDGET_H
#define CENTRALWIDGET_H
#include <QVBoxLayout>
#include <QHBoxLayout>
#include <QPushButton>
#include <QScrollArea>
#include <QEvent>
#include <QtWidgets/QWidget>
#include "block_edit_widget.h"
#include "preamp_filter_edit_widget.h"
#include "preamp_nonlinear_edit_widget.h"
#include "tonestack_edit_widget.h"
#include "amp_nonlinear_edit_widget.h"
#include "cabinet_edit_widget.h"
#include "player_panel.h"
#include "tubeamp_panel.h"
#include "processor.h"
#include "player.h"
// Central area of the main window: a row of selector buttons switching
// between the amp-model block editors shown in a scrollable area, plus
// the player transport panel and the tubeAmp front-plate panel.
class CentralWidget : public QWidget
{
  Q_OBJECT

public:
  CentralWidget(QWidget *parent, Processor *prc, Player *plr);

  PlayerPanel *playerPanel;

  // Refresh the block editors from the current model state; the exact
  // difference between reload and update is defined in the .cpp —
  // confirm there before relying on one over the other.
  void reloadBlocks();
  void updateBlocks();

private:
  // One selector button per editable model block.
  QPushButton *preAmpFilterButton;
  QPushButton *preAmpParamsButton;
  QPushButton *toneStackButton;
  QPushButton *powerAmpParamsButton;
  QPushButton *cabSymButton;

  QScrollArea *centralArea;        // hosts the active editor widget

  BlockEditWidget *activeBlockEdit;  // currently displayed editor

  // The editor page instances, one per model block.
  PreampFilterEditWidget *preampFilterEditWidget;
  PreampNonlinearEditWidget *preampNonlinearEditWidget;
  TonestackEditWidget *tonestackEditWidget;
  AmpNonlinearEditWidget *ampNonlinearEditWidget;
  CabinetEditWidget *cabinetEditWidget;

  TubeAmpPanel *tubeAmpPanel;

  Processor *processor;
  Player *player;

  // Clears the checked state of the block selector buttons.
  void uncheckModuleSelectBar();

  bool eventFilter(QObject *o, QEvent *e);
  void adjustWidget(QWidget *widget);

public slots:
  // Selector-button handlers: switch the active editor page.
  void preAmpFilterButtonClicked();
  void preAmpParamsButtonClicked();
  void toneStackButtonClicked();
  void powerAmpParamsButtonClicked();
  void cabSymButtonClicked();

  void dialValueChanged();
};
#endif // CENTRALWIDGET_H
| 2,470
|
C++
|
.h
| 72
| 32.041667
| 80
| 0.77204
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,359
|
load_dialog.h
|
olegkapitonov_tubeAmp-Designer/src/load_dialog.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef LOADDIALOG_H
#define LOADDIALOG_H
#include <QDialog>
#include <QPushButton>
#include <QLineEdit>
// Modal dialog asking for the two input files of a profiling session:
// a DI (direct input) track and the matching reference track.
class LoadDialog : public QDialog
{
  Q_OBJECT

public:
  LoadDialog(QWidget *parent = nullptr);

  // Accessors for the paths chosen in the dialog.
  QString getDiFileName();
  QString getRefFileName();

private:
  QString diFileName;
  QString refFileName;

  QPushButton *okButton;
  QLineEdit *diFilenameEdit;
  QLineEdit *refFilenameEdit;

  // Validates the two filename fields — presumably gating okButton;
  // confirm in the .cpp.
  void checkFilenames();

public slots:
  // Browse-button handlers for each file field.
  void diFilenameButtonClicked();
  void refFilenameButtonClicked();

  void cancelButtonClicked();
  void okButtonClicked();

  // Invoked on manual edits of either line edit.
  void lineEditEdited(QString);
};
#endif // LOADDIALOG_H
| 1,474
|
C++
|
.h
| 45
| 30.466667
| 80
| 0.735543
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,360
|
preamp_filter_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/preamp_filter_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PREAMPFILTEREDITWIDGET_H
#define PREAMPFILTEREDITWIDGET_H
#include <QtWidgets/QWidget>
#include <QPushButton>
#include "block_edit_widget.h"
#include "processor.h"
#include "equalizer_widget.h"
#include "freq_response_widget.h"
#include "file_resampling_thread.h"
#include "message_widget.h"
// Editor page for the preamp filter block: an interactive equalizer with
// load/save/reset/apply/disable actions applied to the Processor.
class PreampFilterEditWidget : public BlockEditWidget
{
  Q_OBJECT

public:
  PreampFilterEditWidget(QWidget *parent = nullptr, Processor *prc = nullptr);

private:
  // Whether the filter block is currently bypassed.
  enum DisableStatus {STAT_DISABLED, STAT_ENABLED};

  QPushButton *loadButton;
  QPushButton *saveButton;
  QPushButton *resetButton;
  QPushButton *applyButton;
  QPushButton *disableButton;

  Processor *processor;
  EqualizerWidget *equalizer;             // EQ curve the user shapes

  // Background loader used when importing a file (see loadButtonClicked).
  FileResamplingThread *fileResamplingThread;

  MessageWidget *msg;                     // progress/message popup

  DisableStatus disableStatus;

  // BlockEditWidget interface.
  virtual void recalculate();
  virtual void resetControls();

public slots:
  void responseChangedSlot();
  void applyButtonClicked();
  void resetButtonClicked();
  void saveButtonClicked();
  void loadButtonClicked();
  void disableButtonClicked();
  void fileResamplingThreadFinished();
};
#endif // PREAMPFILTEREDITWIDGET_H
| 1,999
|
C++
|
.h
| 57
| 32.719298
| 80
| 0.760125
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,361
|
profile.h
|
olegkapitonov_tubeAmp-Designer/src/profile.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PROFILE_H
#define PROFILE_H
// Structure with tubeAmp model
// parameters.
// NOTE(review): given the fixed-size signature/version header this is
// presumably written verbatim into *.tapf files — if so, field order
// and sizes must not change; confirm against the save/load code.
typedef struct
{
  char signature[4];   // file magic bytes
  uint32_t version;    // profile format version

  // Preamp stage parameters.
  float preamp_level;
  float preamp_bias;
  float preamp_Kreg;
  float preamp_Upor;

  // Tonestack band frequencies and bandwidths (units presumed Hz —
  // confirm against the DSP code).
  float tonestack_low_freq;
  float tonestack_low_band;
  float tonestack_middle_freq;
  float tonestack_middle_band;
  float tonestack_high_freq;
  float tonestack_high_band;

  // Power-amp stage parameters.
  float amp_level;
  float amp_bias;
  float amp_Kreg;
  float amp_Upor;

  // Power-supply sag simulation parameters.
  float sag_time;
  float sag_coeff;

  float output_level;  // final output gain
}st_profile;
// Header structure of
// impulse response data
// in *.tapf profile file
typedef struct {
  int sample_rate;    // sampling rate of the IR data that follows
  int channel;        // channel this IR block belongs to
  int sample_count;   // number of samples in the IR block
}st_impulse;
#endif
| 1,616
|
C++
|
.h
| 53
| 27.264151
| 80
| 0.702581
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,362
|
message_widget.h
|
olegkapitonov_tubeAmp-Designer/src/message_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef MESSAGE_WIDGET_H
#define MESSAGE_WIDGET_H
#include <QDialog>
#include <QProgressBar>
#include <QLabel>
// Simple message/progress dialog: a text label above a progress bar,
// used by long-running operations (resampling, analysis).
class MessageWidget : public QDialog
{
  Q_OBJECT

public:
  MessageWidget(QWidget *parent = nullptr);

  void setTitle(QString title);       // dialog title text
  void setMessage(QString message);   // text shown in messageLabel
  void setProgressValue(int value);   // progress bar position

private:
  QProgressBar *progressBar;
  QLabel *messageLabel;
};
#endif //MESSAGE_WIDGET_H
| 1,262
|
C++
|
.h
| 36
| 33
| 80
| 0.725185
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,363
|
amp_nonlinear_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/amp_nonlinear_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef AMPNONLINEAREDITWIDGET_H
#define AMPNONLINEAREDITWIDGET_H
#include <QtWidgets/QWidget>
#include <QPushButton>
#include "block_edit_widget.h"
#include "nonlinear_widget.h"
#include "processor.h"
#include "slide_box_widget.h"
// Editor page for the power-amp nonlinear block: a transfer-curve view
// plus one slider per tube-model parameter, pushed into the Processor.
class AmpNonlinearEditWidget : public BlockEditWidget
{
  Q_OBJECT

public:
  AmpNonlinearEditWidget(QWidget *parent = nullptr, Processor *prc = nullptr);

  // BlockEditWidget interface.
  virtual void recalculate();
  virtual void resetControls();
  virtual void updateControls();

private:
  Processor *processor;

  // Displays the resulting input/output transfer curve.
  NonlinearWidget *nonlinear;

  // One slider per model parameter (names mirror the st_profile fields).
  SlideBoxWidget *biasSlide;
  SlideBoxWidget *uporSlide;
  SlideBoxWidget *kregSlide;
  SlideBoxWidget *levelSlide;
  SlideBoxWidget *sagCoeffSlide;
  SlideBoxWidget *sagTimeSlide;
  SlideBoxWidget *masterOutputLevelSlide;

public slots:
  // Slider handlers — each forwards the new value to the model.
  void biasChanged(float value);
  void uporChanged(float value);
  void kregChanged(float value);
  void levelChanged(float value);
  void sagCoeffChanged(float value);
  void sagTimeChanged(float value);
  void masterOutputLevelChanged(float value);
};
#endif // AMPNONLINEAREDITWIDGET_H
| 1,930
|
C++
|
.h
| 54
| 33.444444
| 80
| 0.759786
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,364
|
tubeamp_panel.h
|
olegkapitonov_tubeAmp-Designer/src/tubeamp_panel.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef TUBEAMPPANEL_H
#define TUBEAMPPANEL_H
#include <QFrame>
#include <QScrollArea>
#include "tadial.h"
#include "tameter.h"
#include "processor.h"
#include "player.h"
// Front-plate style panel: drive/tone/volume/level dials plus input and
// output meters, wired to the Processor (and to the Player for level
// updates via peakRMSValueChanged).
class TubeAmpPanel : public QFrame
{
  Q_OBJECT

public:
  TubeAmpPanel(QWidget *parent = nullptr, Processor *prc = nullptr,
    Player *plr = nullptr);

  // Restores all dials from the current model state (presumed — confirm
  // in the .cpp).
  void resetControls();

private:
  // Amp control dials.
  TADial *driveDial;
  TADial *bassDial;
  TADial *middleDial;
  TADial *trebleDial;
  TADial *volumeDial;
  TADial *levelDial;

  // Signal level meters.
  TAMeter *inputMeter;
  TAMeter *outputMeter;

  Processor *processor;
  Player *player;

  QWidget *scrollWidget;
  QScrollArea *scrollArea;

  void resizeEvent(QResizeEvent *);

public slots:
  // Dial handlers — each forwards the new position to the model.
  void bassDialValueChanged(int newValue);
  void middleDialValueChanged(int newValue);
  void trebleDialValueChanged(int newValue);
  void driveDialValueChanged(int newValue);
  void volumeDialValueChanged(int newValue);
  void levelDialValueChanged(int newValue);

  // Receives measured levels (from the Player) for the two meters.
  void peakRMSValueChanged(float inputValue, float outputValue);

signals:
  // Emitted whenever any dial moves, so other views can refresh.
  void dialValueChanged();
};
#endif // TUBEAMPPANEL_H
| 1,934
|
C++
|
.h
| 59
| 30.322034
| 80
| 0.74704
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,365
|
equalizer_widget.h
|
olegkapitonov_tubeAmp-Designer/src/equalizer_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef EQUALIZERWIDGET_H
#define EQUALIZERWIDGET_H
#include <QtWidgets/QWidget>
#include <QPixmap>
#include <QPolygon>
#include <QString>
#include <QFont>
#include <QVector>
#include "freq_response_widget.h"
// Interactive equalizer curve editor. The user drags control points of
// the EQ curve (fLogValuesEq/dbValuesEq); a second, read-only response
// (fLogValuesFr/dbValuesFr) is drawn alongside for reference. Axis names
// suggest log-frequency on X and dB on Y — confirm against the paint code.
class EqualizerWidget : public QWidget
{
  Q_OBJECT

public:
  EqualizerWidget(QWidget *parent = nullptr);

  // Editable EQ control points (parallel arrays).
  QVector<double> fLogValuesEq;
  QVector<double> dbValuesEq;

  // Reference response overlay (parallel arrays).
  QVector<double> fLogValuesFr;
  QVector<double> dbValuesFr;

  int maxDb;             // vertical scale limit

  void drawBackground(); // re-renders the cached background grid
  void resetEq();        // resets the EQ curve to neutral (presumed)

protected:
  // Drag state for the control point being edited.
  QPoint mousePressPoint;
  double activeFLog;
  double activeDb;
  int activePoint;

  QRect backr;
  QFont infoFont;
  QString text;
  QPixmap *backbuffer;   // offscreen pixmap filled by drawBackground()
  QPoint infop;
  QRect backinfor;
  int stepWidth;
  int stepHeight;
  int dbInStep;          // dB per grid step (presumed)
  QRect margin;

  bool isResponseChanged = false;

  void paintEvent(QPaintEvent *);
  void mouseMoveEvent(QMouseEvent *);
  void mousePressEvent(QMouseEvent *);
  void mouseDoubleClickEvent(QMouseEvent *);
  void mouseReleaseEvent(QMouseEvent *);
  void resizeEvent(QResizeEvent *);

public slots:

signals:
  // Emitted when the user edits the EQ curve.
  void responseChanged();
};
#endif // EQUALIZERWIDGET_H
| 1,979
|
C++
|
.h
| 66
| 27.484848
| 80
| 0.744849
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,366
|
tameter.h
|
olegkapitonov_tubeAmp-Designer/src/tameter.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef TAMETER_H
#define TAMETER_H
#include <QWidget>
#include <QPoint>
#include <QPaintEvent>
#include <QMouseEvent>
// Minimal level-meter widget: repaints itself to display the value set
// via setValue().
class TAMeter : public QWidget
{
  Q_OBJECT

public:
  TAMeter(QWidget *parent);

  // Sets the displayed level. Expected range is not declared here —
  // confirm against the callers (TubeAmpPanel feeds it peak/RMS floats).
  void setValue(float v);

private:
  float value;   // last value passed to setValue()

  void paintEvent(QPaintEvent *);
};
#endif //TAMETER_H
| 1,167
|
C++
|
.h
| 35
| 31.142857
| 80
| 0.7063
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,367
|
deconvolver_dialog.h
|
olegkapitonov_tubeAmp-Designer/src/deconvolver_dialog.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef DECONVOLVERDIALOG_H
#define DECONVOLVERDIALOG_H
#include <QDialog>
#include <QPushButton>
#include <QLineEdit>
#include <QRadioButton>
#include <QButtonGroup>
#include "processor.h"
// Dialog for impulse-response deconvolution: given a test-signal file
// and the recorded response file, derives an IR and stores it either
// into the current cabinet block or into a standalone IR file (selected
// via the two radio buttons).
class DeconvolverDialog : public QDialog
{
  Q_OBJECT

public:
  DeconvolverDialog(Processor *prc, QWidget *parent = nullptr);

private:
  Processor *processor;

  QPushButton *processButton;   // starts the deconvolution

  // Input/output file path fields with their browse buttons.
  QLineEdit *testFilenameEdit;
  QLineEdit *responseFilenameEdit;
  QLineEdit *IRFilenameEdit;
  QPushButton *testFilenameButton;
  QPushButton *responseFilenameButton;
  QPushButton *IRFilenameButton;

  // Destination choice: write the IR into the cabinet block or to a file.
  QRadioButton *IRCabinetRadioButton;
  QRadioButton *IRFileRadioButton;

  // Validates the current inputs — presumably gates processButton;
  // confirm in the .cpp.
  void checkSignals();

public slots:
  void testFilenameButtonClicked();
  void responseFilenameButtonClicked();
  void IRFilenameButtonClicked();
  void closeButtonClicked();
  void processButtonClicked();
  void saveTestSignalButtonClicked();

  // Handles switching between the two IR destination radio buttons.
  void IRGroupClicked(QAbstractButton *button);
};
#endif // DECONVOLVERDIALOG_H
| 1,840
|
C++
|
.h
| 53
| 32.396226
| 80
| 0.762683
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,368
|
player.h
|
olegkapitonov_tubeAmp-Designer/src/player.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PLAYER_H
#define PLAYER_H
#include <QVector>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <jack/jack.h>
#include <jack/types.h>
#include <jack/session.h>
#include "processor.h"
class Player;
// Worker thread that performs the RMS-equalization work off the GUI
// thread (run() is defined in the .cpp; presumably it drives
// Player::equalDataRMS-related processing — confirm there).
class EqualDataRMSThread : public QThread
{
  Q_OBJECT

  void run() override;

public:
  // Both pointers must be set by the owner before start().
  Player *player;
  Processor *processor;
};
// JACK audio client: plays the loaded DI/reference tracks (or monitors
// live input) and reports levels back to the GUI via signals.
class Player : public QObject
{
  Q_OBJECT

public:
  Player();
  ~Player();

  // JACK ports and client handle.
  jack_port_t *input_port;
  jack_port_t *output_port_left;
  jack_port_t *output_port_right;
  jack_client_t *client;

  int simple_quit;   // shutdown flag (semantics in the .cpp — confirm)

  void setProcessor(Processor *prc);

  int connectToJack();   // registers the client/ports with JACK
  int activate();        // starts JACK processing

  void setLevel(float lev);
  float getLevel();

  // Replace the DI (mono) and reference (stereo) sample buffers.
  void setDiData(QVector<float> data);
  void setRefData(QVector<float> dataL, QVector<float> dataR);

  // Launches RMS matching of the loaded tracks — runs asynchronously in
  // EqualDataRMSThread (see isEqualDataRMSThreadRunning / equalRMSFinished).
  void equalDataRMS();

  // Loaded audio data.
  QVector<float> diData;
  QVector<float> refDataL;
  QVector<float> refDataR;

  // Current playback positions (sample index) into the buffers above.
  unsigned int diPos;
  unsigned int refPos;

  // Transport state.
  enum PlayerStatus{
    PS_STOP,
    PS_PAUSE,
    PS_PLAY_DI,     // play the DI track (through the processor)
    PS_PLAY_REF,    // play the reference track
    PS_MONITOR      // pass live input through
  };

  PlayerStatus status;

  Processor *processor;

  void setStatus(PlayerStatus newStatus);

  int getSampleRate();

  void incRMScounter();

  // Sets the input gain from a dB value (stored linearized in inputLevel).
  void setInputLevel(float dbInputLevel);

  float inputLevel = 1.0;   // linear input gain applied to incoming audio

  bool isEqualDataRMSThreadRunning = false;

private:
  int sampleRate;
  EqualDataRMSThread *equalDataRMSThread;

  float level = 1.0;   // linear output level (see setLevel/getLevel)

private slots:
  void equalDataRMSThreadFinished();

signals:
  void dataChanged();
  // Measured levels for the GUI meters.
  void peakRMSValueCalculated(float inputValue, float outputValue);
  void equalRMSFinished();
};
#endif //PLAYER_H
| 2,519
|
C++
|
.h
| 91
| 25.010989
| 80
| 0.731748
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,369
|
slide_box_widget.h
|
olegkapitonov_tubeAmp-Designer/src/slide_box_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef SLIDEBOXWIDGET_H
#define SLIDEBOXWIDGET_H
#include <QtWidgets/QWidget>
#include <QSlider>
#include <QLabel>
// Labelled slider that maps the integer QSlider position onto a float
// range [minValue, maxValue] and re-emits changes as valueChanged(float).
class SlideBoxWidget : public QWidget
{
  Q_OBJECT

public:
  SlideBoxWidget(const QString &text, float min, float max, QWidget *parent = nullptr);

  // Bounds of the float range represented by the slider.
  float minValue;
  float maxValue;

  // Moves the slider to represent the given float value.
  void setValue(float value);

private:
  QLabel *valueLabel;   // displays the current numeric value
  QSlider *slider;

protected slots:
  // Converts the raw integer slider position to the float range.
  void sliderValueChanged(int value);

signals:
  void valueChanged(float value);
};
#endif //SLIDEBOXWIDGET_H
| 1,364
|
C++
|
.h
| 40
| 31.95
| 87
| 0.729627
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,370
|
freq_response_widget.h
|
olegkapitonov_tubeAmp-Designer/src/freq_response_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef FREQRESPONSEWIDGET_H
#define FREQRESPONSEWIDGET_H
#include <QtWidgets/QWidget>
#include <QPixmap>
#include <QPolygon>
#include <QString>
#include <QFont>
// Read-only plot of a frequency response drawn over a cached background
// grid. Member names suggest log-frequency on X and dB on Y — confirm
// against the paint code.
class FreqResponseWidget : public QWidget
{
  Q_OBJECT

public:
  FreqResponseWidget(QWidget *parent = nullptr);

  // Response samples: dbValues[i] at (log) frequency fLogValues[i].
  QVector<double> fLogValues;
  QVector<double> dbValues;

  int maxDb;             // vertical scale limit

  void drawBackground(); // re-renders the cached background grid

protected:
  QRect backr;
  QFont infoFont;
  QString text;
  QPixmap *backbuffer;   // offscreen pixmap filled by drawBackground()
  QPoint infop;
  QRect backinfor;
  int stepWidth;
  int stepHeight;
  int dbInStep;          // dB per grid step (presumed)
  QRect margin;

  void paintEvent(QPaintEvent *);
  void resizeEvent(QResizeEvent *);
  void mouseMoveEvent(QMouseEvent *);

public slots:
};
#endif // FREQRESPONSEWIDGET_H
| 1,575
|
C++
|
.h
| 51
| 28.470588
| 80
| 0.734572
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,371
|
player_panel.h
|
olegkapitonov_tubeAmp-Designer/src/player_panel.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PLAYERPANEL_H
#define PLAYERPANEL_H
#include <QFrame>
#include <QPushButton>
#include <QMessageBox>
#include <QSlider>
#include "load_dialog.h"
#include "player.h"
#include "file_resampling_thread.h"
#include "processor.h"
#include "message_widget.h"
// Transport panel for the Player: loading DI/reference files (resampled
// in background threads), play/stop/monitor controls, an input level
// slider, and RMS equalization of the two tracks.
class PlayerPanel : public QFrame
{
  Q_OBJECT

public:
  PlayerPanel(QWidget *parent = nullptr, Player *plr = nullptr, Processor *prc = nullptr);

  int getInputLevelSliderValue();

private:
  // True while the corresponding resampling thread is still running.
  bool diFileResamplingThreadWorking;
  bool refFileResamplingThreadWorking;

  // Transport / action buttons.
  QPushButton *buttonStop;
  QPushButton *buttonPlay;
  QPushButton *buttonMonitor;
  QPushButton *buttonLoad;
  QPushButton *diButton;
  QPushButton *equalRMSButton;

  QSlider *inputLevelSlider;

  LoadDialog *loadDialog;   // file chooser for the DI/reference pair

  bool playReferenceTrack;  // selects which track buttonPlay starts
  bool playPaused;

  Player *player;
  Processor *processor;

  MessageWidget *msg;       // progress popup during long operations

  // Background loaders for the two input files.
  FileResamplingThread *diFileResamplingThread;
  FileResamplingThread *refFileResamplingThread;

  // Common completion handling once a resampling thread ends.
  void resamplingFinished();

public slots:
  void diButtonClicked();
  void loadButtonClicked();
  void loadDialogFinished(int result);
  void buttonPlayClicked();
  void buttonStopClicked();
  void buttonMonitorClicked();
  void equalRMSButtonClicked();
  void diFileResamplingThreadFinished();
  void refFileResamplingThreadFinished();
  void playerDataChanged();
  void sliderValueChanged(int value);
  void playerEqualRMSFinished();
  void stopPlayback();
};
#endif // PLAYERPANEL_H
| 2,307
|
C++
|
.h
| 70
| 30.471429
| 90
| 0.763751
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,372
|
file_resampling_thread.h
|
olegkapitonov_tubeAmp-Designer/src/file_resampling_thread.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef FILE_RESAMPLING_THREAD_H
#define FILE_RESAMPLING_THREAD_H
#include <QThread>
#include <sndfile.h>
#include <cmath>
#include "player.h"
// Background loader: reads an audio file (via libsndfile, see the sndfile.h
// include above) and resamples it off the GUI thread. Results are published
// in dataL/dataR; the caller connects to QThread::finished to pick them up.
class FileResamplingThread : public QThread
{
Q_OBJECT
// Thread body: loads `filename` and fills dataL/dataR at `samplingRate`.
void run() override;
public:
FileResamplingThread();
QString filename; // path of the file to load; set before calling start()
int samplingRate; // target sampling rate for the resampled data
bool stereoMode; // presumably selects stereo vs. mono loading — confirm in .cpp
QVector<float> dataL; // resampled left (or mono) channel samples
QVector<float> dataR; // resampled right channel samples
private:
// Helper used by run(): opens and decodes the file at `filename`.
void loadFile(const char *filename);
};
#endif //FILE_RESAMPLING_THREAD_H
| 1,308
|
C++
|
.h
| 39
| 31.333333
| 80
| 0.72315
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,373
|
tonestack_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/tonestack_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef TONESTACKEDITWIDGET_H
#define TONESTACKEDITWIDGET_H
#include <QtWidgets/QWidget>
#include <QPushButton>
#include "block_edit_widget.h"
#include "freq_response_widget.h"
#include "processor.h"
#include "slide_box_widget.h"
// Editor panel for the tonestack block: six slide boxes for the bass/middle/
// treble center frequencies and bandwidths, plus a frequency-response plot.
class TonestackEditWidget : public BlockEditWidget
{
Q_OBJECT
public:
TonestackEditWidget(QWidget *parent = nullptr, Processor *prc = nullptr);
virtual void recalculate(); // BlockEditWidget hook: refresh the response plot
virtual void resetControls(); // BlockEditWidget hook: re-read values from the processor
private:
Processor *processor; // non-owning; DSP engine whose profile is edited
FreqResponseWidget * freqResponse; // plot of the resulting tonestack response
SlideBoxWidget *bassFreqSlide;
SlideBoxWidget *bassBandSlide;
SlideBoxWidget *middleFreqSlide;
SlideBoxWidget *middleBandSlide;
SlideBoxWidget *trebleFreqSlide;
SlideBoxWidget *trebleBandSlide;
public slots:
// One slot per slide box; presumably each forwards the new value into the
// processor profile and updates the plot — confirm in the .cpp.
void bassFreqChanged(float value);
void bassBandChanged(float value);
void middleFreqChanged(float value);
void middleBandChanged(float value);
void trebleFreqChanged(float value);
void trebleBandChanged(float value);
};
#endif // TONESTACKEDITWIDGET_H
| 1,855
|
C++
|
.h
| 51
| 34.078431
| 80
| 0.759352
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,374
|
processor.h
|
olegkapitonov_tubeAmp-Designer/src/processor.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PROCESSOR_H
#define PROCESSOR_H
#include <stdint.h>
#include <QVector>
#include <QString>
#include <QThread>
#include "profile.h"
#include <zita-convolver.h>
// Defines for compatability with
// FAUST generated code
#define VOLUME_CTRL controls.volume
#define DRIVE_CTRL controls.drive
#define MASTERGAIN_CTRL controls.mastergain
#define AMP_BIAS_CTRL profile->amp_bias
#define AMP_KREG_CTRL profile->amp_Kreg
#define AMP_UPOR_CTRL profile->amp_Upor
#define PREAMP_BIAS_CTRL profile->preamp_bias
#define PREAMP_KREG_CTRL profile->preamp_Kreg
#define PREAMP_UPOR_CTRL profile->preamp_Upor
#define LOW_CTRL controls.low
#define MIDDLE_CTRL controls.middle
#define HIGH_CTRL controls.high
#define LOW_FREQ_CTRL profile->tonestack_low_freq
#define MIDDLE_FREQ_CTRL profile->tonestack_middle_freq
#define HIGH_FREQ_CTRL profile->tonestack_high_freq
#define LOW_BAND_CTRL profile->tonestack_low_band
#define MIDDLE_BAND_CTRL profile->tonestack_middle_band
#define HIGH_BAND_CTRL profile->tonestack_high_band
#define PREAMP_LEVEL profile->preamp_level
#define AMP_LEVEL profile->amp_level
#define SAG_TIME profile->sag_time
#define SAG_COEFF profile->sag_coeff
#define OUTPUT_LEVEL profile->output_level
// Zita-convolver parameters
#define CONVPROC_SCHEDULER_PRIORITY 0
#define CONVPROC_SCHEDULER_CLASS SCHED_FIFO
#define THREAD_SYNC_MODE true
#define fragm 64
// Live user-control values consumed by the FAUST-generated DSP code through
// the *_CTRL macros defined earlier in this header.
struct stControls
{
float volume;
float drive;
float low; // tonestack bass control (LOW_CTRL)
float middle; // tonestack middle control (MIDDLE_CTRL)
float high; // tonestack treble control (HIGH_CTRL)
float mastergain;
};
using namespace std;
#include "faust-support.h"
class mydsp;
// Worker thread tied to a single zita Convproc instance. Name suggests run()
// frees `convolver` off the audio/GUI threads — run() body not visible here,
// confirm in the .cpp.
class ConvolverDeleteThread : public QThread
{
Q_OBJECT
void run() override;
public:
Convproc *convolver; // instance handed over for disposal; set before start()
};
// Core DSP engine: wraps the FAUST-generated amp model (mydsp) plus four
// zita-convolver instances (preamp IR, preamp correction, cabinet IR,
// cabinet correction) and the loaded *.tapf profile.
class Processor
{
public:
Processor(int SR); // SR: sampling rate in Hz, fixed for the lifetime of the object
~Processor();
// Profile (*.tapf) persistence; return false on failure (confirm in .cpp).
bool loadProfile(QString filename);
bool saveProfile(QString filename);
// Magnitude response sampled at the given frequencies.
QVector<float> getPreampFrequencyResponse(QVector<float> freqs);
QVector<float> getCabinetSumFrequencyResponse(QVector<float> freqs);
// Equalizer curve points (log-frequency / dB) edited by the GUI panels.
QVector<double> preampCorrectionEqualizerFLogValues;
QVector<double> preampCorrectionEqualizerDbValues;
QVector<double> correctionEqualizerFLogValues;
QVector<double> correctionEqualizerDbValues;
stControls getControls();
void setControls(stControls newControls);
st_profile getProfile();
void setProfile(st_profile newProfile);
// Static tube transfer function used by the amp model and the editors.
float tube(float Uin, float Kreg, float Upor, float bias, float cut);
// Build correction impulses from a target frequency response (w: frequencies,
// A: amplitudes — units not stated here, see math_functions).
void setPreampCorrectionImpulseFromFrequencyResponse(QVector<double> w, QVector<double> A);
void setCabinetSumCorrectionImpulseFromFrequencyResponse(QVector<double> w, QVector<double> A);
void applyPreampCorrection();
void applyCabinetSumCorrection();
void resetPreampCorrection();
void resetCabinetSumCorrection();
int getSamplingRate();
// Real-time render: mono input `in` -> stereo outL/outR, nSamples frames.
void process(float *outL, float *outR, float *in, int nSamples);
QString getProfileFileName();
void setProfileFileName(QString name);
bool isPreampCorrectionEnabled();
bool isCabinetCorrectionEnabled();
void cleanProfile();
// Replace the impulse responses (preamp is mono, cabinet is stereo).
void setPreampImpulse(QVector<float> data);
void setCabinetImpulse(QVector<float> dataL, QVector<float> dataR);
void setPreampCorrectionStatus(bool status);
void setCabinetCorrectionStatus(bool status);
QVector<float> getPreampImpulse();
QVector<float> getLeftImpulse();
QVector<float> getRightImpulse();
private:
// Active convolvers; the `new_*` pointers presumably stage replacements
// before an atomic swap on the audio thread — confirm in the .cpp.
Convproc *preamp_convproc;
Convproc *preamp_correction_convproc;
Convproc *convproc;
Convproc *correction_convproc;
Convproc *new_preamp_convproc;
Convproc *new_preamp_correction_convproc;
Convproc *new_convproc;
Convproc *new_correction_convproc;
QVector<float> preamp_impulse;
QVector<float> left_impulse;
QVector<float> right_impulse;
QVector<float> preamp_correction_impulse;
QVector<float> left_correction_impulse;
QVector<float> right_correction_impulse;
bool preampCorrectionEnabled;
bool cabinetCorrectionEnabled;
ConvolverDeleteThread *convolverDeleteThread; // frees retired convolvers off-thread
QString currentProfileFile;
int samplingRate;
mydsp *dsp; // FAUST-generated amp model instance
QString profileFileName;
void freeConvolver(Convproc *convolver);
int checkProfileFile(const char *path); // validates a *.tapf file; meaning of the int result — see .cpp
QVector<float> getFrequencyResponse(QVector<float> freqs, QVector<float> impulse);
Convproc* createMonoConvolver(QVector<float> impulse);
Convproc* createStereoConvolver(QVector<float> left_impulse, QVector<float> right_impulse);
};
#endif //PROCESSOR_H
| 5,201
|
C++
|
.h
| 142
| 34.176056
| 97
| 0.792415
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,375
|
mainwindow.h
|
olegkapitonov_tubeAmp-Designer/src/mainwindow.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QtCore/QVariant>
#include <QtWidgets/QAction>
#include <QtWidgets/QApplication>
#include <QtWidgets/QMainWindow>
#include <QtWidgets/QMenu>
#include <QtWidgets/QMenuBar>
#include <QtWidgets/QStatusBar>
#include <QtWidgets/QToolBar>
#include <QtWidgets/QWidget>
#include "processor.h"
#include "player.h"
#include "centralwidget.h"
#include "profiler_dialog.h"
#include "convolver_dialog.h"
#include "deconvolver_dialog.h"
// Top-level application window: builds the menu bar (File/Tools/Help), hosts
// the CentralWidget and owns the Profiler/Convolver/Deconvolver dialogs.
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
MainWindow(QWidget *parent, Processor *prc, Player *plr);
CentralWidget *centralWidget; // main editing area, exposed for other components
private:
// Menu actions, each wired to the matching *Triggered() slot below.
QAction *actionOpen;
QAction *actionSave;
QAction *actionSave_As;
QAction *actionQuit;
QAction *actionAbout;
QAction *actionProfiler;
QAction *actionConvolver;
QAction *actionDeconvolver;
QMenuBar *menuBar;
QMenu *menuFile;
QMenu *menuTools;
QMenu *menuHelp;
QStatusBar *statusBar;
ProfilerDialog *profilerDialog;
ConvolverDialog *convolverDialog;
DeconvolverDialog *deconvolverDialog;
Processor *processor; // non-owning; shared DSP engine
Player *player; // non-owning; shared audio player
private slots:
void actionOpenTriggered();
void actionSaveTriggered();
void actionSaveAsTriggered();
void actionProfilerTriggered();
void actionConvolverTriggered();
void actionDeconvolverTriggered();
void actionQuitTriggered();
void actionAboutTriggered();
// Called when the profiler dialog is accepted; presumably refreshes the UI
// with the newly profiled parameters — confirm in the .cpp.
void profilerDialogAccepted();
};
#endif // MAINWINDOW_H
| 2,287
|
C++
|
.h
| 72
| 29.513889
| 80
| 0.76158
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,376
|
profiler_dialog.h
|
olegkapitonov_tubeAmp-Designer/src/profiler_dialog.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PROFILER_DIALOG_H
#define PROFILER_DIALOG_H
#include <QDialog>
#include <QComboBox>
#include <QPushButton>
#include <QLineEdit>
#include <QGroupBox>
#include <QRadioButton>
#include <QMessageBox>
#include "processor.h"
#include "player.h"
#include "profiler.h"
#include "message_widget.h"
#include "player_panel.h"
// Dialog driving the amp-profiling workflow: export a test signal, load the
// recorded amp response, pick a preset, then run analysis on a worker thread.
class ProfilerDialog : public QDialog
{
Q_OBJECT
public:
ProfilerDialog(Processor *prc, Player *plr, PlayerPanel *pnl, QWidget *parent = nullptr);
private:
Processor *processor; // non-owning
Player *player; // non-owning
PlayerPanel *playerPanel; // non-owning
QComboBox *testSignalComboBox; // which test-signal version to generate
QPushButton *createTestSignalWavButton;
QLineEdit *responseFileEdit; // shows the chosen recorded-response file
QPushButton *responseFileOpenButton;
QGroupBox *presetGroupBox;
QRadioButton *classicPresetRadioButton;
QRadioButton *mastergainPresetRadioButton;
QRadioButton *crystalcleanPresetRadioButton;
QPushButton *analyzeButton;
QPushButton *cancelButton;
QString responseFileName;
ProfilerThread *profilerThread; // runs Profiler::analyze off the GUI thread
Profiler *profiler;
MessageWidget *msg; // progress/feedback display
private slots:
void analyzeButtonClick();
void responseFileOpenButtonClick();
void cancelButtonClick();
void createTestSignalWavButtonClick();
void profilerThreadFinished();
void profilerProgressChanged(int progress);
};
#endif //PROFILER_DIALOG_H
| 2,147
|
C++
|
.h
| 64
| 31.28125
| 91
| 0.767037
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,377
|
profiler.h
|
olegkapitonov_tubeAmp-Designer/src/profiler.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PROFILER_H
#define PROFILER_H
#include <QString>
#include <QVector>
#include <QThread>
#include "processor.h"
#include "player.h"
// Voicing preset targeted by the profiler analysis.
enum ProfilerPresetType {CRYSTALCLEAN_PRESET, CLASSIC_PRESET, MASTERGAIN_PRESET};
// Result of crunch-point detection for one analysed frequency
// (returned by Profiler::findCrunchPoint).
struct stCrunchPoint
{
double max; // peak value found
double rmsAtMax; // RMS level at the peak — units/window not stated here, see .cpp
int maxtime; // position of the peak (presumably a sample index — confirm)
bool isBad; // true when the measurement is considered unusable
};
// A single detected peak ("pike"): its value and position.
struct sPike
{
double value;
int time; // position of the peak (presumably a sample index — confirm)
};
// Analyses a recorded amp response against the generated test signal and
// derives profile parameters for the Processor. Emits signals so the GUI
// can show progress and warnings; runs inside ProfilerThread.
class Profiler : public QObject
{
Q_OBJECT
public:
Profiler(Processor *prc, Player *plr);
// Load the recorded response WAV into responseData.
void loadResponseFile(QString fileName);
// Write the test signal to a WAV file; `version` selects the signal format.
void createTestFile(QString fileName, int version);
// Run the full analysis for the chosen preset (long-running).
void analyze(ProfilerPresetType preset);
private:
Processor *processor; // non-owning
Player *player; // non-owning
int responseDataSamplerate;
int responseDataChannels;
QVector<float> responseData; // samples of the loaded response file
stCrunchPoint findCrunchPoint(int freqIndex, QVector<float> data, int samplerate);
QVector<float> loadRealTestFile(QVector<float> testSignal, float sampleRate);
void createTestFile_v1(QString fileName); // version-1 test-signal writer
private slots:
void warningMessageNeededSlot(QString message);
signals:
void warningMessageNeeded(QString message); // surfaced to the GUI as a warning
void progressChanged(int progress); // analysis progress (presumably 0-100)
void stopPlaybackNeeded(); // asks the GUI to stop the player
};
// Thin QThread wrapper that runs Profiler::analyze(presetType) off the GUI
// thread; set `profiler` and `presetType` before calling start().
class ProfilerThread : public QThread
{
Q_OBJECT
void run() override;
public:
Profiler *profiler; // non-owning
ProfilerPresetType presetType;
};
#endif //PROFILER_H
| 2,152
|
C++
|
.h
| 71
| 28.056338
| 84
| 0.758119
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,378
|
preamp_nonlinear_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/preamp_nonlinear_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef PREAMPNONLINEAREDITWIDGET_H
#define PREAMPNONLINEAREDITWIDGET_H
#include <QtWidgets/QWidget>
#include <QPushButton>
#include "block_edit_widget.h"
#include "nonlinear_widget.h"
#include "processor.h"
#include "slide_box_widget.h"
// Editor panel for the preamp nonlinearity: bias/Upor/Kreg/level sliders
// plus a transfer-curve display (NonlinearWidget).
class PreampNonlinearEditWidget : public BlockEditWidget
{
Q_OBJECT
public:
PreampNonlinearEditWidget(QWidget *parent = nullptr, Processor *prc = nullptr);
virtual void recalculate(); // BlockEditWidget hook: refresh the curve display
virtual void resetControls(); // BlockEditWidget hook: reset sliders
virtual void updateControls(); // BlockEditWidget hook: re-read profile values
private:
Processor *processor; // non-owning; DSP engine whose preamp params are edited
NonlinearWidget *nonlinear; // plot of the tube transfer curve
SlideBoxWidget *biasSlide;
SlideBoxWidget *uporSlide;
SlideBoxWidget *kregSlide;
SlideBoxWidget *levelSlide;
public slots:
// Slider callbacks; presumably each writes into the processor profile —
// confirm in the .cpp.
void biasChanged(float value);
void uporChanged(float value);
void kregChanged(float value);
void levelChanged(float value);
};
#endif // PREAMPNONLINEAREDITWIDGET_H
| 1,719
|
C++
|
.h
| 48
| 33.604167
| 81
| 0.751807
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,379
|
math_functions.h
|
olegkapitonov_tubeAmp-Designer/src/math_functions.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef MATHFUNCTIONS_H
#define MATHFUNCTIONS_H
#include <QVector>
// Plain complex number (real/imaginary parts) used with FFTW-style buffers.
struct s_fftw_complex
{
double real;
double imagine; // imaginary part (name kept as-is; used elsewhere)
};
// Build an impulse response IR[0..IR_n_count) from a sampled frequency
// response (w_in: frequencies, A_in: amplitudes, n_count points) at the
// given sampling rate.
void frequency_response_to_impulse_response(double w_in[],
double A_in[],
int n_count,
float IR[],
int IR_n_count,
int rate);
// FFT-based convolution of `signal` with `impulse_response`, in place.
void fft_convolver(float signal[], int signal_n_count, float impulse_response[], int ir_n_count);
// FFT-based deconvolution: recover impulse_response from input signal_a and
// recorded output signal_c, band-limited by the relative cut frequencies and
// gated below noisegate_threshold_db.
void fft_deconvolver(float signal_a[],
int signal_a_n_count,
float signal_c[],
int signal_c_n_count,
float impulse_response[],
int ir_n_count,
float lowcut_relative_frequency,
float highcut_relative_frequency,
float noisegate_threshold_db
);
// How successive FFT frames are combined into one spectrum.
enum FFT_AVERAGE_TYPE {FFT_AVERAGE_MEAN, FFT_AVERAGE_MAX};
// Average the spectra of `buffer` (n_samples long) into avrage_spectrum
// (n_spectrum bins) using the selected averaging type.
void fft_average(double *buffer,
double *avrage_spectrum,
int n_spectrum,
int n_samples,
FFT_AVERAGE_TYPE type);
// Compute the auto-EQ correction curve (f_log_values/db_values,
// n_autoeq_points entries) that maps current_signal's spectrum onto
// ref_signal's spectrum.
void calulate_autoeq_amplitude_response(int n_spectrum,
int sample_rate,
double *current_signal,
int n_current_samples,
double *ref_signal,
int n_ref_samples,
double *f_log_values,
double *db_values,
int n_autoeq_points
);
// Fill `data` with a logarithmic sine sweep from f_start to f_end Hz.
void generate_logarithmic_sweep(double length_sec,
int sample_rate,
double f_start,
double f_end,
double sweep_amplitude,
float data[]);
// Resample a buffer from sourceSamplerate to targetSamplerate.
QVector<float> resample_vector(QVector<float> sourceBuffer,
float sourceSamplerate,
float targetSamplerate);
#endif //MATHFUNCTIONS_H
| 3,174
|
C++
|
.h
| 69
| 29.217391
| 97
| 0.498061
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,380
|
convolver_dialog.h
|
olegkapitonov_tubeAmp-Designer/src/convolver_dialog.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef CONVOLVERDIALOG_H
#define CONVOLVERDIALOG_H
#include <QDialog>
#include <QPushButton>
#include <QLineEdit>
#include <QRadioButton>
#include <QButtonGroup>
#include "processor.h"
// Tool dialog that convolves an input (a WAV file or the current cabinet
// impulse) with an IR file and writes the result to a file or back into the
// cabinet block.
class ConvolverDialog : public QDialog
{
Q_OBJECT
public:
ConvolverDialog(Processor *prc, QWidget *parent = nullptr);
private:
Processor *processor; // non-owning
QPushButton *processButton;
QLineEdit *inputFilenameEdit;
QLineEdit *outputFilenameEdit;
QLineEdit *IRFilenameEdit;
QPushButton *outputFilenameButton;
QPushButton *inputFilenameButton;
// Source/destination selectors: cabinet impulse vs. external file.
QRadioButton *inputCabinetRadioButton;
QRadioButton *inputFileRadioButton;
QRadioButton *outputCabinetRadioButton;
QRadioButton *outputFileRadioButton;
// Presumably revalidates widget enable/disable state after a change —
// confirm in the .cpp.
void checkSignals();
public slots:
void inputFilenameButtonClicked();
void outputFilenameButtonClicked();
void IRFilenameButtonClicked();
void closeButtonClicked();
void processButtonClicked(); // runs the convolution
void outputGroupClicked(QAbstractButton *button);
void inputGroupClicked(QAbstractButton *button);
};
#endif // CONVOLVERDIALOG_H
| 1,899
|
C++
|
.h
| 54
| 32.814815
| 80
| 0.766248
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,381
|
tadial.h
|
olegkapitonov_tubeAmp-Designer/src/tadial.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef TADIAL_H
#define TADIAL_H
#include <QWidget>
#include <QPoint>
#include <QPaintEvent>
#include <QMouseEvent>
// Custom rotary-knob widget: value is changed by pressing and dragging
// (mousePressEvent records the start, mouseMoveEvent updates the value),
// and valueChanged(int) is emitted on changes.
class TADial : public QWidget
{
Q_OBJECT
public:
TADial(QWidget *parent);
void setValue(int v); // programmatic value update
private:
QPoint startScroll; // mouse position at drag start
int value; // current dial value
int startValue; // value at drag start
// Geometry cached by resizeEvent for painting/hit testing.
int dialSize;
int dialTop;
int dialLeft;
int dialMaxAngle; // maximum rotation angle of the knob graphic
void paintEvent(QPaintEvent *);
void mouseMoveEvent(QMouseEvent *event);
void mousePressEvent(QMouseEvent *event);
void resizeEvent(QResizeEvent *);
signals:
void valueChanged(int newValue);
};
#endif //TADIAL_H
| 1,456
|
C++
|
.h
| 46
| 28.782609
| 80
| 0.709493
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,382
|
block_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/block_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef BLOCKEDITWIDGET_H
#define BLOCKEDITWIDGET_H
#include <QFrame>
// Abstract base for the per-block editor panels (tonestack, preamp,
// cabinet, ...). Subclasses override the hooks below; the defaults are
// intentional no-ops so panels only implement what they need.
class BlockEditWidget : public QFrame
{
Q_OBJECT
public:
BlockEditWidget(QWidget *parent = nullptr);
virtual void recalculate() {}; // recompute/redraw derived data after a change
virtual void resetControls() {}; // reset the panel's controls
virtual void updateControls() {}; // refresh controls from the current state
private:
public slots:
};
#endif // BLOCKEDITWIDGET_H
| 1,189
|
C++
|
.h
| 33
| 33.939394
| 80
| 0.71578
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,383
|
cabinet_edit_widget.h
|
olegkapitonov_tubeAmp-Designer/src/cabinet_edit_widget.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef CABINETEDITWIDGET_H
#define CABINETEDITWIDGET_H
#include <QtWidgets/QWidget>
#include <QPushButton>
#include <QMessageBox>
#include "block_edit_widget.h"
#include "processor.h"
#include "player.h"
#include "equalizer_widget.h"
#include "freq_response_widget.h"
#include "file_resampling_thread.h"
#include "message_widget.h"
// Worker thread for the cabinet auto-EQ computation; reports progress via
// progressChanged. Set player/processor/equalizer before start().
class AutoEqThread : public QThread
{
Q_OBJECT
void run() override;
public:
Player *player; // non-owning
Processor *processor; // non-owning
EqualizerWidget *equalizer; // non-owning; target EQ curve to fit
signals:
void progressChanged(int progress); // presumably 0-100 — confirm in .cpp
};
// Editor panel for the cabinet block: load/save impulse responses, edit the
// correction EQ, run auto-EQ (on AutoEqThread), and toggle the correction.
class CabinetEditWidget : public BlockEditWidget
{
Q_OBJECT
public:
CabinetEditWidget(QWidget *parent, Processor *prc, Player *plr);
private:
// Whether the cabinet correction is currently disabled/enabled
// (toggled by disableButton).
enum DisableStatus {STAT_DISABLED, STAT_ENABLED};
QPushButton *loadButton; // load an impulse response from file
QPushButton *saveButton; // save the impulse response to file
QPushButton *resetButton;
QPushButton *applyButton;
QPushButton *autoEqButton; // start the auto-EQ worker
QPushButton *disableButton;
Processor *processor; // non-owning
Player *player; // non-owning
EqualizerWidget *equalizer; // correction-EQ curve editor
AutoEqThread *autoEqThread;
MessageWidget *msg; // progress/feedback display
DisableStatus disableStatus;
FileResamplingThread *fileResamplingThread; // loads IR files off-thread
virtual void recalculate(); // BlockEditWidget hook
virtual void resetControls(); // BlockEditWidget hook
public slots:
void responseChanged();
void applyButtonClicked();
void resetButtonClicked();
void autoEqButtonClicked();
void saveButtonClicked();
void loadButtonClicked();
void disableButtonClicked();
void autoEqThreadFinished();
void fileResamplingThreadFinished();
void autoEqThreadProgressChanged(int progress);
};
#endif // CABINETEDITWIDGET_H
| 2,401
|
C++
|
.h
| 76
| 29.197368
| 80
| 0.763455
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,384
|
faust-support.h
|
olegkapitonov_tubeAmp-Designer/src/faust-support.h
|
/*
* Copyright (C) 2018-2020 Oleg Kapitonov
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* --------------------------------------------------------------------------
*/
#ifndef FAUST_SUPPORT_H
#define FAUST_SUPPORT_H
#include <map>
// Needed for compatability with FAUST generated code
// Metadata store required by FAUST-generated code (mydsp::metadata calls
// declare()). Note: the map is keyed by `const char*`, so lookups compare
// pointer identity, not string contents — this matches the original and is
// what the generated code relies on (it passes the same literal pointers).
struct Meta : std::map<const char*, const char*>
{
    /// Record a key/value metadata pair.
    void declare(const char *key, const char *value)
    {
        (*this)[key] = value;
    }

    /// Return the value stored under `key`, or `def` when absent.
    /// Improved: a single find() instead of find() + operator[] (which did a
    /// second lookup); const-qualified since it never mutates the map.
    const char* get(const char *key, const char *def) const
    {
        const auto it = this->find(key);
        return it != this->end() ? it->second : def;
    }
};
// Minimal stub of the FAUST UI-builder interface. The generated
// buildUserInterface() expects these entry points, but tubeAmp-Designer
// builds no widget tree from them, so every call is a deliberate no-op.
class UI {
public:
    UI() = default;

    // Box-nesting callbacks from the generated code: accepted and ignored.
    void openVerticalBox(const char * /*label*/) {}
    void closeBox() {}
};
// Abstract base class for FAUST-generated DSP instances (mydsp derives from
// this). Extended for tubeAmp-Designer with direct access to the live
// controls and the loaded profile.
class dsp {
public:
stControls controls; // live knob values read by the generated compute()
st_profile *profile; // non-owning; loaded *.tapf profile parameters
dsp() {}
virtual ~dsp() {}
/* Return instance number of audio inputs */
virtual int getNumInputs() = 0;
/* Return instance number of audio outputs */
virtual int getNumOutputs() = 0;
/**
* Trigger the ui_interface parameter with instance specific calls
* to 'addButton', 'addVerticalSlider'... in order to build the UI.
*
* @param ui_interface - the user interface builder
*/
virtual void buildUserInterface(UI* ui_interface) = 0;
/* Returns the sample rate currently used by the instance */
virtual int getSampleRate() = 0;
/**
* Global init, calls the following methods:
* - static class 'classInit': static tables initialization
* - 'instanceInit': constants and instance state initialization
*
* @param samplingRate - the sampling rate in Hertz
*/
virtual void init(int samplingRate) = 0;
/**
* Init instance state
*
* @param samplingRate - the sampling rate in Hertz
*/
virtual void instanceInit(int samplingRate) = 0;
/**
* Init instance constant state
*
* @param samplingRate - the sampling rate in Hertz
*/
virtual void instanceConstants(int samplingRate) = 0;
/* Init default control parameters values */
virtual void instanceResetUserInterface() = 0;
/* Init instance state (delay lines...) */
virtual void instanceClear() = 0;
/**
* Return a clone of the instance.
*
* @return a copy of the instance on success, otherwise a null pointer.
*/
virtual dsp* clone() = 0;
/**
* Trigger the Meta* parameter with instance specific calls to 'declare' (key, value) metadata.
*
* @param m - the Meta* meta user
*/
virtual void metadata(Meta* m) = 0;
/**
* DSP instance computation, to be called with successive in/out audio buffers.
*
* @param count - the number of frames to compute
* @param inputs - the input audio buffers as an array of non-interleaved FAUSTFLOAT samples (either float, double or quad)
* @param outputs - the output audio buffers as an array of non-interleaved FAUSTFLOAT samples (either float, double or quad)
*
*/
virtual void compute(int count, float** inputs, float** outputs) = 0;
/**
* DSP instance computation: alternative method to be used by subclasses.
* Default implementation ignores the timestamp and forwards to compute().
*
* @param date_usec - the timestamp in microsec given by audio driver.
* @param count - the number of frames to compute
* @param inputs - the input audio buffers as an array of non-interleaved FAUSTFLOAT samples (either float, double or quad)
* @param outputs - the output audio buffers as an array of non-interleaved FAUSTFLOAT samples (either float, double or quad)
*
*/
virtual void compute(double, int count, float** inputs, float** outputs) { compute(count, inputs, outputs); }
};
#endif
| 4,641
|
C++
|
.h
| 118
| 32.29661
| 132
| 0.62739
|
olegkapitonov/tubeAmp-Designer
| 33
| 8
| 6
|
GPL-3.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,385
|
person_re_id.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/person_re_id.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "person_re_id.h"
#include <chrono>
#include <fstream>
#include <utility>
#include <vector>
#include <opencv2/core/mat.hpp>
#include <inference_engine.hpp>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/utils.h>
#include <nx/sdk/helpers/uuid_helper.h>
#include <nx/sdk/uuid.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/helpers/log_utils.h>
#include "config.h"
#include "exceptions.h"
#include "openvino_object_detection_analytics_plugin_ini.h"
#include "network.h"
#include "utils.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::chrono;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Loads the person re-identification network from modelDir and prepares it
 * for inference (via the base class and init()).
 *
 * @param modelDir Directory holding the model files; taken by value and moved
 *     into the base class (now consistent with the ObjectDetector ctor).
 * @param logUtils Logging helpers; moved into the base class.
 * @param config Plugin configuration, applied through init().
 */
PersonReId::PersonReId(
    std::filesystem::path modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config> config):
    Network(std::move(modelDir), std::move(logUtils), config)
{
    init(config);
}
/**
 * Computes a re-identification descriptor for a cropped person image.
 *
 * @param person Image of the person (presumably a BGR crop of the frame —
 *     confirm with the caller).
 * @return 1 x kChannelCount float descriptor (a deep copy, safe to keep).
 * @throws InferenceError if the inference request does not complete.
 */
cv::Mat PersonReId::run(const cv::Mat& person)
{
    using namespace std::chrono;
    using namespace InferenceEngine;
    const auto startTime = high_resolution_clock::now();
    // A fresh request per call; the image is copied into the U8 input blob.
    InferRequest inferenceRequest = m_network.CreateInferRequest();
    auto frameBlob = inferenceRequest.GetBlob(m_inputName);
    matU8ToBlob<uint8_t>(person, frameBlob);
    inferenceRequest.StartAsync();
    cv::Mat result;
    if (inferenceRequest.Wait(IInferRequest::WaitMode::RESULT_READY) == OK)
    {
        const InferenceEngine::Blob::Ptr attributesBlob = inferenceRequest.GetBlob(m_outputName);
        const auto outputValues = attributesBlob->buffer().as<float*>();
        // Deep-copy: the blob buffer is owned by the (local) inference request.
        cv::Mat1f(1, kChannelCount, outputValues).copyTo(result);
    }
    else
    {
        throw InferenceError("Person re-identification failed.");
    }
    const auto finishTime = high_resolution_clock::now();
    const auto duration = duration_cast<milliseconds>(finishTime - startTime);
    NX_OUTPUT << "Person re-identification duration: " << duration.count() << " ms.";
    return result;
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Validates the model's output: a [1 x kChannelCount] descriptor tensor.
 *
 * @throws ModelLoadingError if the output shape does not match.
 */
void PersonReId::prepareOutputBlobs(InferenceEngine::CNNNetwork network)
{
    Network::prepareOutputBlobs(network);

    static const char* const kFailureMessage = "Failed to prepare output of network.";

    const auto dims = m_output->getTensorDesc().getDims();
    static constexpr int kExpectedDimsCount = 2;
    if (!checkLayerParameter(
        "as output dimension size", (int) dims.size(), kExpectedDimsCount))
    {
        throw ModelLoadingError(kFailureMessage);
    }

    if (!checkLayerParameter("as channel count", (int) dims.at(1), kChannelCount))
        throw ModelLoadingError(kFailureMessage);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 3,019
|
C++
|
.cpp
| 73
| 37.739726
| 99
| 0.720875
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,386
|
network.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/network.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "network.h"
#include <chrono>
#include <fstream>
#include <utility>
#include <vector>
//#include <ext_list.hpp>
#include <inference_engine.hpp>
//#include <ie_extension.h>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/utils.h>
#include <nx/sdk/helpers/log_utils.h>
#include "config.h"
#include "exceptions.h"
#include "openvino_object_detection_analytics_plugin_ini.h"
#include "openvino_log_utils.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::string_literals;
using namespace nx::sdk;
/**
 * Stores the model location and logging helpers; the network itself is
 * loaded later via init() (virtual loading hooks are not usable here).
 *
 * @param modelDir Directory holding the model files; moved into the member.
 * @param logUtils Logging helpers; moved into the member (was copied).
 */
Network::Network(
    std::filesystem::path modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config>& /*config*/)
    :
    logUtils(std::move(logUtils)),
    m_modelDir(std::move(modelDir))
{
}
/**
 * Second-phase initialization: applies the config, which loads and prepares
 * the network. Presumably called from derived-class constructors because the
 * virtual loading hooks cannot dispatch from the base constructor — confirm.
 */
void Network::init(const std::shared_ptr<const Config>& config)
{
    setConfig(config);
}
//std::string Network::getCpuExtensionsLibraryName()
//{
// using namespace std::string_literals;
//
// std::string name;
// std::string baseName;
// std::vector<cpu_feature_t> features = cpuFeatures();
// for (const cpu_feature_t& cpuFeature: features)
// {
// if (cpuFeature == CPU_FEATURE_SSE4_2)
// baseName = "cpu_extension_sse4";
// else if (cpuFeature == CPU_FEATURE_AVX2)
// baseName = "cpu_extension_avx2";
// else if (cpuFeature == CPU_FEATURE_AVX512F)
// baseName = "cpu_extension_avx512";
// #if defined(WIN32)
// name = baseName + ".dll"s;
// #else
// name = "lib"s + baseName + ".so"s;
// #endif
// const std::filesystem::path path = m_modelDir / name;
// if (std::filesystem::exists(path))
// return path.string();
// }
// throw CpuIsIncompatibleError("CPU is not supported.");
//}
/**
 * Applies a new configuration and (re)builds the inference pipeline:
 * model load -> network load into the engine -> inference request.
 * The commented-out code is a disabled legacy CPU-extension mechanism.
 */
void Network::setConfig(const std::shared_ptr<const Config>& config)
{
//    using namespace InferenceEngine;
//    #if defined(WIN32)
//        SetDllDirectoryW(m_modelDir.wstring().c_str());
//    #endif
//    auto extensionPtr = make_so_pointer<IExtension>(getCpuExtensionsLibraryName());
//    m_inferenceEngine.AddExtension(std::static_pointer_cast<IExtension>(extensionPtr), "CPU");
//    #if defined(WIN32)
//        SetDllDirectoryW(nullptr);
//    #endif
    m_config = config;
    // Order matters: each step consumes the previous step's result.
    prepareNetwork();
    loadNetworkIntoInferenceEngine();
    createInferenceRequest();
}
//-------------------------------------------------------------------------------------------------
// private
/**
* Loads person and vehicle detection model.
*
* Ability to select other models will be added in future versions of OpenVINO object detection
* plugin.
*
*/
InferenceEngine::CNNNetwork Network::loadModel()
{
    // The topology (.xml) and weights (.bin) share the same base path.
    const auto xmlPath = m_modelDir / modelBasePath().replace_extension("xml");
    NX_OUTPUT << " Read network from: " << xmlPath;
    const auto binPath = m_modelDir / modelBasePath().replace_extension("bin");
    NX_OUTPUT << " Read weights from: " << binPath;

    InferenceEngine::Core inferenceCore;
    auto network = inferenceCore.ReadNetwork(xmlPath.string(), binPath.string());

    // Frames are processed one at a time.
    NX_OUTPUT << " Batch size is forced to 1.";
    network.setBatchSize(1);
    return network;
}
/**
 * Compares an actual network parameter against the expected value and logs a
 * diagnostic on mismatch.
 *
 * @param parameterDescription Human-readable description used in the message.
 * @param actualValue Value found in the loaded model.
 * @param expectedValue Value this plugin requires.
 * @return true if the values match, false otherwise (after logging).
 */
bool Network::checkLayerParameter(
    const std::string& parameterDescription,
    int actualValue,
    int expectedValue) const
{
    if (actualValue != expectedValue)
    {
        // Fixed: a space was missing before "(actual value:"; ints are now
        // streamed directly instead of going through std::to_string().
        NX_OUTPUT << "This plugin accepts " << purpose() << " networks that have "
            << expectedValue << " " << parameterDescription
            << " (actual value: " << actualValue << ").";
        return false;
    }
    return true;
}
// Creates the reusable inference request from the loaded executable network.
void Network::createInferenceRequest()
{
    m_inferenceRequest = m_network.CreateInferRequest();
}
/**
 * Validates the input count and remembers the first input; applies the
 * precision/layout requested by the subclass, if any.
 *
 * @throws ModelLoadingError if the input count does not match.
 */
void Network::prepareInputBlobs(InferenceEngine::CNNNetwork network)
{
    NX_OUTPUT << " Prepare input blobs.";

    const InferenceEngine::InputsDataMap inputs(network.getInputsInfo());
    if (!checkLayerParameter("input(s)", (int) inputs.size(), expectedInputSize()))
        throw ModelLoadingError("Failed to prepare input of network.");

    const auto firstInput = inputs.begin();
    m_inputName = firstInput->first;
    m_input = firstInput->second;

    if (const auto precision = inputPrecision())
        m_input->setPrecision(*precision);
    if (const auto layout = inputLayout())
        m_input->setLayout(*layout);
}
/**
 * Validates the output count and remembers the first output; applies the
 * precision/layout requested by the subclass, if any.
 *
 * @throws ModelLoadingError if the output count does not match.
 */
void Network::prepareOutputBlobs(InferenceEngine::CNNNetwork network)
{
    NX_OUTPUT << " Prepare output blobs.";

    const InferenceEngine::OutputsDataMap outputs(network.getOutputsInfo());
    if (!checkLayerParameter("output(s)", (int) outputs.size(), expectedOutputSize()))
        throw ModelLoadingError("Failed to prepare output of network.");

    const auto firstOutput = outputs.begin();
    m_outputName = firstOutput->first;
    m_output = firstOutput->second;

    if (const auto precision = outputPrecision())
        m_output->setPrecision(*precision);
    if (const auto layout = outputLayout())
        m_output->setLayout(*layout);
}
// Loads the model (subclasses may override loadModel) and wires up its
// input/output blobs.
void Network::prepareNetwork()
{
    NX_OUTPUT << "Loading " << purpose() << " model.";
    m_cnnNetwork = loadModel();
    prepareInputBlobs(m_cnnNetwork);
    prepareOutputBlobs(m_cnnNetwork);
}
void Network::loadNetworkIntoInferenceEngine()
{
std::map<std::string, std::string> networkConfig;
const auto kKeyCpuThreadCount = InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM;
networkConfig[kKeyCpuThreadCount] = std::to_string(m_config->threadCount);
m_network = m_inferenceEngine.LoadNetwork(m_cnnNetwork, "CPU", networkConfig);
}
// Base path (no extension) of the model files, derived from the
// subclass-provided model name; combined with m_modelDir in loadModel().
std::filesystem::path Network::modelBasePath() const
{
    return std::filesystem::path(modelName());
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 5,881
|
C++
|
.cpp
| 160
| 33.675
| 99
| 0.679838
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,387
|
track.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/track.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "track.h"
#include "detection.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
// Creates an empty track. maxDetectionCount bounds the detection history
// kept by addDetection().
Track::Track(const nx::sdk::Uuid& id, int maxDetectionCount):
    m_id(id),
    m_maxDetectionCount(maxDetectionCount)
{
}
// Unique id of this track.
nx::sdk::Uuid Track::id() const
{
    return m_id;
}
// Best shot selected by finish(); null until the track is finished.
std::shared_ptr<BestShot> Track::bestShot() const
{
    return m_bestShot;
}
// Current lifecycle status (inactive -> started -> active -> finished).
Status Track::status() const
{
    return m_status;
}
/**
 * Appends a detection to the bounded history and advances the track's
 * lifecycle one step per detection when the track is considered started.
 */
void Track::addDetection(
    int64_t timestampUs,
    const DetectionPtr& detection,
    bool isTrackStarted)
{
    // Bounded history: drop the oldest entry once the cap is reached.
    const bool atCapacity =
        !detections.empty() && detections.size() == (size_t) m_maxDetectionCount;
    if (atCapacity)
        detections.erase(detections.begin());

    if (isTrackStarted)
    {
        switch (m_status)
        {
            case Status::inactive:
                m_status = Status::started;
                break;
            case Status::started:
                m_status = Status::active;
                break;
            default:
                break;
        }
    }

    detections.push_back(detection);
    // NOTE(review): timestampUs is unused here; detections carry their own
    // timestamps — confirm the parameter is intentional.
}
// Forces the track into the active state (skipping the "started" phase).
void Track::activate()
{
    m_status = Status::active;
}
void Track::finish()
{
m_status = Status::finished;
if (detections.empty())
return;
DetectionPtr& bestShotDetection = detections[0];
for (DetectionPtr& detection: detections)
if (detection->confidence > bestShotDetection->confidence)
bestShotDetection = detection;
m_bestShot = std::make_shared<BestShot>(BestShot({
bestShotDetection->boundingBox,
bestShotDetection->confidence,
bestShotDetection->timestampUs,
/*trackId*/ m_id,
}));
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 1,669
|
C++
|
.cpp
| 58
| 24.275862
| 94
| 0.685464
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,388
|
object_detection_processor.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/object_detection_processor.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_detection_processor.h"
#include <vector>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/helpers/log_utils.h>
#include "config.h"
#include "exceptions.h"
#include "roi_processor.h"
#include "track.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::string_literals;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Builds the full processing pipeline: object detector, person tracker and
 * ROI processor, all sharing the same model directory and configuration.
 */
ObjectDetectionProcessor::ObjectDetectionProcessor(
    const std::filesystem::path& modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config>& config)
    :
    m_config(config),
    m_modelDir(modelDir),
    logUtils(logUtils),
    m_objectDetector(std::make_unique<ObjectDetector>(modelDir, logUtils, config)),
    m_personTracker(std::make_unique<PersonTracker>(modelDir, logUtils, config)),
    m_roiProcessor(std::make_unique<RoiProcessor>(logUtils, config))
{
}
/**
 * Runs one frame through the pipeline: (optional) detection -> tracking ->
 * ROI analysis. Any stage failure permanently terminates the processor
 * (subsequent calls return an empty Result) and is rethrown as
 * FrameProcessingError.
 *
 * @param frame Frame to process.
 * @param needToDetectObjects When false, detection is skipped and the ROI
 *     stage only checks for disappearance events.
 */
ObjectDetectionProcessor::Result ObjectDetectionProcessor::run(
    const Frame& frame,
    bool needToDetectObjects)
{
    if (m_terminated)
        return {};
    // Fixed: removed the unused local `Result result;`.
    reinitializeObjectTrackerOnFrameSizeChanges(frame);
    DetectionList detections;
    if (needToDetectObjects)
    {
        try
        {
            detections = m_objectDetector->run(frame);
        }
        catch (const ObjectDetectionError& e)
        {
            m_terminated = true;
            throw FrameProcessingError("Object detection error: "s + e.what());
        }
    }
    PersonTracker::Result trackerResult;
    try
    {
        trackerResult = m_personTracker->run(frame, detections);
    }
    catch (const ObjectTrackingError& e)
    {
        m_terminated = true;
        throw FrameProcessingError("Object tracking error: "s + e.what());
    }
    RoiProcessor::Result roiEvents;
    try
    {
        roiEvents = m_roiProcessor->run(
            trackerResult.tracks,
            trackerResult.events,
            !needToDetectObjects);
    }
    catch (const RoiError &e)
    {
        m_terminated = true;
        throw FrameProcessingError("Regions of interests error: "s + e.what());
    }
    return {
        trackerResult.bestShots,
        trackerResult.detections,
        trackerResult.events,
        roiEvents,
    };
}
// Stores the new settings and propagates them to every pipeline stage.
void ObjectDetectionProcessor::setConfig(const std::shared_ptr<const Config> config)
{
    m_config = config;

    m_objectDetector->setConfig(config);
    m_personTracker->setConfig(config);
    m_roiProcessor->setConfig(config);
}
/**
 * Recreates the person tracker whenever the frame resolution changes: the
 * tracker's state is tied to frame geometry. The first frame only records
 * the size.
 */
void ObjectDetectionProcessor::reinitializeObjectTrackerOnFrameSizeChanges(const Frame& frame)
{
    const bool sizeKnown =
        m_previousFrameWidth != 0 || m_previousFrameHeight != 0;
    const bool sizeChanged = frame.width != m_previousFrameWidth
        || frame.height != m_previousFrameHeight;

    if (sizeKnown && sizeChanged)
        m_personTracker = std::make_unique<PersonTracker>(m_modelDir, logUtils, m_config);

    // Remember the current size (a no-op when nothing changed).
    m_previousFrameWidth = frame.width;
    m_previousFrameHeight = frame.height;
}
// Publishes a copy of the config with an updated frame rate; only the
// tracker consumes the fps value.
void ObjectDetectionProcessor::setFps(float fps)
{
    auto updatedConfig = std::make_shared<Config>(*m_config);
    updatedConfig->fps = fps;
    m_config = updatedConfig;

    m_personTracker->setConfig(m_config, /*updateFpsOnly*/ true);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 3,621
|
C++
|
.cpp
| 111
| 27.225225
| 94
| 0.7
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,389
|
roi_processor.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/roi_processor.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "roi_processor.h"
#include <memory>
#include <vector>
#include <unordered_set>
#include <boost/geometry/strategies/strategies.hpp>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/utils.h>
#include <nx/sdk/uuid.h>
#include <nx/sdk/helpers/log_utils.h>
#include <nx/sdk/helpers/uuid_helper.h>
#include "config.h"
#include "exceptions.h"
#include "openvino_object_detection_analytics_plugin_ini.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::chrono;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
// Stores the configuration (ROI lines/areas) and logging helpers; per-track
// state is created lazily in run().
RoiProcessor::RoiProcessor(
    LogUtils logUtils,
    const std::shared_ptr<const Config> config):
    m_config(config),
    logUtils(logUtils)
{
}
// Applies new settings and drops all per-track ROI state, since the set of
// lines/areas may have changed.
void RoiProcessor::setConfig(const std::shared_ptr<const Config> config)
{
    m_config = config;
    m_tracksContexts.clear();
}
/**
 * Analyzes the current tracks/events against the configured ROI lines and
 * areas, producing ROI events (crossings, entrances, loitering, ...).
 *
 * @param analyzeOnlyForDisappearanceInAreaDetectedEvents When true, only
 *     disappearance/loitering-finish events are generated (detection was
 *     skipped this frame).
 * @throws RoiError wrapping any underlying failure.
 */
RoiProcessor::Result RoiProcessor::run(
    const TrackList& tracks,
    const EventList& events,
    bool analyzeOnlyForDisappearanceInAreaDetectedEvents)
{
    try
    {
        using namespace std::chrono;
        const auto startTime = high_resolution_clock::now();
        const Result result = runImpl(
            tracks,
            events,
            analyzeOnlyForDisappearanceInAreaDetectedEvents);
        const auto finishTime = high_resolution_clock::now();
        const auto duration = duration_cast<milliseconds>(finishTime - startTime);
        NX_OUTPUT << "ROI processing duration: " << duration.count() << " ms.";
        return result;
    }
    catch (const std::exception& e)
    {
        // NOTE(review): the "..."s literal needs std::string_literals, which is
        // not visibly imported in this file — presumably via a header; verify.
        throw RoiError("ROI processing error: "s + e.what());
    }
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Synchronizes m_tracksContexts with the current track set: removes contexts
 * of vanished tracks and creates a fresh context (with one AreaContext per
 * configured ROI area) for each new track.
 */
void RoiProcessor::ensureTracksContexts(const TrackList& tracks)
{
    std::unordered_set<nx::sdk::Uuid> currentTracksIds;
    for (const auto& track: tracks)
        currentTracksIds.insert(track->id());
    // Drop contexts whose track no longer exists.
    for (auto it = m_tracksContexts.begin(); it != m_tracksContexts.end();)
    {
        if (currentTracksIds.find(it->first) == currentTracksIds.end())
            it = m_tracksContexts.erase(it);
        else
            ++it;
    }
    // Create contexts for tracks seen for the first time.
    for (auto& trackId: currentTracksIds)
    {
        if (m_tracksContexts.find(trackId) == m_tracksContexts.end())
        {
            m_tracksContexts.insert(std::make_pair(trackId, std::make_unique<TrackContext>()));
            for (auto& roiArea: m_config->areas)
            {
                m_tracksContexts[trackId]->areasContexts.insert(
                    std::make_pair(roiArea, std::make_unique<TrackContext::AreaContext>()));
            }
        }
    }
}
/**
 * Detects crossings of the configured ROI lines by each track's bounding-box
 * center movement between consecutive detections. A line already crossed by
 * a track is re-armed only once the track's bounding box no longer
 * intersects it.
 */
EventList RoiProcessor::lineCrossedDetection(const TrackList& tracks)
{
    EventList result;
    for (const auto& track: tracks)
    {
        auto& trackContext = *m_tracksContexts.at(track->id());
        auto& linesCrossed = trackContext.linesCrossed;
        // Only detections not yet analyzed (from trackContext.detectionIndex).
        // NOTE(review): int vs size_t comparison below — harmless for
        // realistic sizes, but consider std::ssize.
        for (int detectionIndex = trackContext.detectionIndex;
            detectionIndex < track->detections.size();
            ++detectionIndex)
        {
            // A movement segment needs two detections.
            if (detectionIndex == 0)
                continue;
            const Line trackLine = Line{
                bg::return_centroid<Point>(
                    track->detections[detectionIndex - 1]->boundingBox),
                bg::return_centroid<Point>(
                    track->detections[detectionIndex]->boundingBox),
            };
            for (const auto& roiLine: m_config->lines)
            {
                const bool trackDidNotCrossLine = linesCrossed.find(roiLine) == linesCrossed.end();
                if (trackDidNotCrossLine)
                {
                    // A crossing counts if the movement intersects the line in
                    // the configured direction (or any direction if unset).
                    const auto direction = intersectionDirection(roiLine->value, trackLine);
                    if (direction != Direction::none &&
                        (direction == roiLine->direction ||
                            roiLine->direction == Direction::absent))
                    {
                        linesCrossed.insert(roiLine);
                        result.push_back(std::make_shared<LineCrossed>(LineCrossed(
                            track->detections[detectionIndex]->timestampUs,
                            track->id(),
                            direction,
                            roiLine
                        )));
                    }
                }
                else
                {
                    // Re-arm the line once the object has moved clear of it.
                    const Rect lastDetectionBoundingBox =
                        track->detections[detectionIndex]->boundingBox;
                    if (!bg::intersects(lastDetectionBoundingBox, roiLine->value))
                        linesCrossed.erase(roiLine);
                }
            }
        }
    }
    return result;
}
// Returns true if the object's center movement passes through the area:
// the perpendicular to the movement through the area centroid must intersect
// the movement segment at a point lying inside the area polygon.
bool RoiProcessor::objectIntersectedArea(const Line& centerMovement, const RoiAreaPtr& area)
{
    const Line areaCenterLine = linePerpendicular(centerMovement, area->getCentroid());
    const boost::optional<Point> intersection =
        lineSegmentIntersectionPoint(areaCenterLine, centerMovement);
    return intersection && bg::within(intersection.value(), area->value);
}
/**
 * One step of the per-area state machine: maps the previous state and the
 * current inside/outside observation to the next instantaneous state.
 * An object counts as inside when its bounding-box fraction inside the area
 * reaches (1 - detectionSensitivity).
 */
RoiProcessor::AreaState RoiProcessor::newInstantState(
    RoiProcessor::AreaState state,
    const Rect& boundingBox,
    const RoiAreaPtr& area)
{
    const float insideFraction = rectFractionInsidePolygon(boundingBox, area->value);
    const bool isInside = insideFraction >= (1 - area->detectionSensitivity);

    switch (state)
    {
        case AreaState::none:
            return isInside ? AreaState::appeared : AreaState::outside;
        case AreaState::outside:
        case AreaState::exited:
        case AreaState::disappeared:
            return isInside ? AreaState::entered : AreaState::outside;
        case AreaState::inside:
        case AreaState::entered:
        case AreaState::appeared:
            return isInside ? AreaState::inside : AreaState::exited;
        default:
            break;
    }
    return AreaState::none;
}
/**
 * Emits an AreaCrossed event when the object's center movement — from the
 * position recorded before entering the area to the latest detection —
 * passes through the area and crossing detection is enabled.
 * On the first call for a track/area pair it only records the reference
 * center and returns an empty pointer.
 */
EventPtr RoiProcessor::generateAreaCrossedEvent(
    TrackContext::AreaContext& areaContext,
    const RoiAreaPtr& area,
    const TrackPtr& track,
    const int detectionIndex)
{
    if (track->detections.empty())
        return {};
    auto lastDetectionCenter = bg::return_centroid<Point>(
        track->detections[detectionIndex]->boundingBox);
    if (!areaContext.detectionBoundingBoxCenterBeforeAreaEnter)
    {
        // No reference point yet: remember it for the next evaluation.
        areaContext.detectionBoundingBoxCenterBeforeAreaEnter = lastDetectionCenter;
        return {};
    }
    const Line centerMovement{
        areaContext.detectionBoundingBoxCenterBeforeAreaEnter.value(),
        lastDetectionCenter,
    };
    if (objectIntersectedArea(centerMovement, area) && area->crossingEnabled)
        return std::make_shared<AreaCrossed>(AreaCrossed(
            areaContext.newFilteredStateTimestampUs, track->id(), area));
    return {};
}
/**
 * Converts a freshly confirmed (filtered) area state into the corresponding
 * one-shot event — entrance, exit or appearance — if that event type is
 * enabled for the area.
 */
EventList RoiProcessor::generateImpulseAreaEvents(
    RoiProcessor::TrackContext::AreaContext& areaContext,
    const RoiAreaPtr& area,
    const TrackPtr& track)
{
    EventList result;
    if (areaContext.filteredState == AreaState::entered && area->entranceDetectionEnabled)
    {
        result.push_back(std::make_shared<AreaEntranceDetected>(AreaEntranceDetected(
            areaContext.newFilteredStateTimestampUs,
            track->id(),
            area)));
    }
    else if (areaContext.filteredState == AreaState::exited && area->exitDetectionEnabled)
    {
        result.push_back(std::make_shared<AreaExitDetected>(AreaExitDetected(
            areaContext.newFilteredStateTimestampUs,
            track->id(),
            area)));
    }
    else if (areaContext.filteredState == AreaState::appeared && area->appearanceDetectionEnabled)
    {
        result.push_back(std::make_shared<AppearanceInAreaDetected>(AppearanceInAreaDetected(
            areaContext.newFilteredStateTimestampUs,
            track->id(),
            area)));
    }
    return result;
}
// Indexes PersonLost events by track id; events of other types are skipped.
PersonLostEventMap convertEventListToPersonLostEventMap(const EventList& events)
{
    PersonLostEventMap personLostByTrackId;
    for (const auto& event: events)
    {
        if (const auto personLost = std::dynamic_pointer_cast<PersonLost>(event))
            personLostByTrackId.insert(std::make_pair(personLost->trackId, personLost));
    }
    return personLostByTrackId;
}
/**
 * Core per-area analysis. For every (active) track and every configured area
 * it advances the instantaneous state machine per detection, debounces state
 * changes into a "filtered" state (a transition must hold for
 * kFilteredStateDelayUs before it is confirmed), generates entrance/exit/
 * appearance impulse events and area-crossed events, and accumulates
 * time-inside-area to raise a Loitering event once the configured duration
 * is exceeded.
 */
EventList RoiProcessor::monitorArea(const TrackList& tracks)
{
    if (tracks.empty())
        return {};
    EventList result;
    for (const auto& track: tracks)
    {
        if (track->status() == Status::inactive)
            continue;
        auto& trackContext = *m_tracksContexts.at(track->id());
        auto& areasContexts = trackContext.areasContexts;
        for (auto& pair: areasContexts)
        {
            const auto& area = pair.first;
            auto& areaContext = *pair.second;
            // Process only detections added since the last run.
            for (int detectionIndex = trackContext.detectionIndex;
                detectionIndex < track->detections.size();
                ++detectionIndex)
            {
                areaContext.instantState = newInstantState(
                    areaContext.instantState,
                    track->detections[detectionIndex]->boundingBox,
                    area);
                static const int64_t kFilteredStateDelayUs = 1000000;
                const int64_t timestampUs = track->detections[detectionIndex]->timestampUs;
                // Decay transient filtered states to their steady equivalents.
                if (areaContext.filteredState == AreaState::entered ||
                    areaContext.filteredState == AreaState::appeared)
                {
                    areaContext.filteredState = AreaState::inside;
                }
                else if (areaContext.filteredState == AreaState::exited)
                {
                    areaContext.filteredState = AreaState::outside;
                }
                else if (areaContext.filteredState == AreaState::disappeared)
                {
                    areaContext.filteredState = AreaState::none;
                }
                if (area->loiteringDetectionEnabled)
                {
                    // Accumulate time spent inside; pause when the object exits.
                    if (areaContext.filteredState == AreaState::appeared ||
                        areaContext.filteredState == AreaState::entered ||
                        areaContext.filteredState == AreaState::inside)
                    {
                        areaContext.intervalCalculator.registerTimestamp(
                            track->detections[detectionIndex]->timestampUs);
                    }
                    else if (areaContext.filteredState == AreaState::exited)
                    {
                        areaContext.intervalCalculator.pause();
                    }
                    microseconds timeIntervalInsideArea =
                        areaContext.intervalCalculator.duration();
                    NX_OUTPUT << "Track: " << nx::sdk::UuidHelper::toStdString(track->id()) <<
                        ", " << "inside: " <<
                        duration_cast<milliseconds>(timeIntervalInsideArea).count() << " ms.";
                    if (timeIntervalInsideArea >= area->loiteringDetectionDuration
                        && !m_loiteringStartTimestampUs)
                    {
                        m_loiteringStartTimestampUs =
                            areaContext.intervalCalculator.firstTimestamp();
                        result.push_back(std::make_shared<Loitering>(*m_loiteringStartTimestampUs));
                    }
                }
                // Debounce: a transient instant state starts (or restarts) the
                // confirmation timer; a stable one may confirm the pending state.
                if (areaContext.instantState == AreaState::entered ||
                    areaContext.instantState == AreaState::appeared ||
                    areaContext.instantState == AreaState::exited ||
                    areaContext.instantState == AreaState::disappeared)
                {
                    areaContext.newFilteredStateTimestampUs = timestampUs;
                    areaContext.newFilteredState = areaContext.instantState;
                }
                else
                {
                    if (areaContext.newFilteredStateTimestampUs !=
                            std::numeric_limits<int64_t>::max() &&
                        timestampUs >=
                            areaContext.newFilteredStateTimestampUs + kFilteredStateDelayUs)
                    {
                        // Accept only transitions consistent with the current
                        // filtered state (outside->in or inside->out).
                        if (((areaContext.newFilteredState == AreaState::appeared ||
                            areaContext.newFilteredState == AreaState::entered) &&
                            (areaContext.filteredState == AreaState::outside ||
                                areaContext.filteredState == AreaState::none)) ||
                            ((areaContext.newFilteredState == AreaState::disappeared ||
                                areaContext.newFilteredState == AreaState::exited) &&
                                areaContext.filteredState == AreaState::inside))
                        {
                            areaContext.filteredState = areaContext.newFilteredState;
                            EventList areaImpulseEvents = generateImpulseAreaEvents(
                                areaContext,
                                area,
                                track);
                            result.insert(
                                result.end(),
                                areaImpulseEvents.begin(),
                                areaImpulseEvents.end());
                            if (detectionIndex > 0 &&
                                areaContext.filteredState == AreaState::entered)
                            {
                                // Remember where the object was just before it
                                // entered — needed for crossing detection.
                                areaContext.detectionBoundingBoxCenterBeforeAreaEnter =
                                    bg::return_centroid<Point>(
                                        track->detections[detectionIndex - 1]->boundingBox);
                            }
                            else if (areaContext.filteredState == AreaState::exited ||
                                areaContext.filteredState == AreaState::disappeared ||
                                areaContext.filteredState == AreaState::outside)
                            {
                                EventPtr areaCrossedEvent = generateAreaCrossedEvent(
                                    areaContext,
                                    area,
                                    track,
                                    detectionIndex);
                                if (areaCrossedEvent)
                                    result.push_back(areaCrossedEvent);
                            }
                        }
                        areaContext.newFilteredStateTimestampUs =
                            std::numeric_limits<int64_t>::max();
                    }
                }
            }
        }
    }
    return result;
}
/**
 * Handles PersonLost events: emits DisappearanceInAreaDetected for tracks
 * lost while inside an area (if enabled for that area), moves those area
 * contexts to the "disappeared" state, and closes an active Loitering event
 * once no remaining track exceeds the loitering threshold.
 */
EventList RoiProcessor::generateDisappearanceInAreaAndLoiteringFinishEvents(
    const EventList& events)
{
    if (events.empty())
        return {};
    EventList result;
    PersonLostEventMap trackIdToPersonLostEventMap = convertEventListToPersonLostEventMap(events);
    // Timestamp of the first incoming event is used for the loitering-finish.
    int64_t timestampUs = events[0]->timestampUs;
    bool loiteringActive = false;
    for (const auto& trackIdAndTrackContext: m_tracksContexts)
    {
        const auto& trackId = trackIdAndTrackContext.first;
        auto& trackContext = trackIdAndTrackContext.second;
        auto& areasContexts = trackContext->areasContexts;
        for (auto& areaAndAreaContext: areasContexts)
        {
            const auto& area = areaAndAreaContext.first;
            auto& areaContext = *areaAndAreaContext.second;
            const microseconds timeIntervalInsideAreaUs =
                areaContext.intervalCalculator.duration();
            if (trackIdToPersonLostEventMap.find(trackId) != trackIdToPersonLostEventMap.end())
            {
                // This track was just lost; if it was inside the area, report
                // a disappearance and mark the context accordingly.
                if (areaContext.filteredState == AreaState::entered ||
                    areaContext.filteredState == AreaState::appeared ||
                    areaContext.filteredState == AreaState::inside)
                {
                    if (area->disappearanceDetectionEnabled)
                    {
                        const auto& personLostEvent = trackIdToPersonLostEventMap[trackId];
                        result.push_back(std::make_shared<DisappearanceInAreaDetected>(
                            DisappearanceInAreaDetected(
                                personLostEvent->timestampUs,
                                trackId,
                                area)));
                    }
                    areaContext.instantState = AreaState::disappeared;
                    areaContext.filteredState = AreaState::disappeared;
                }
            }
            else if (timeIntervalInsideAreaUs >= area->loiteringDetectionDuration)
            {
                loiteringActive = true;
            }
        }
    }
    // No surviving track keeps the loitering alive: emit the finish event.
    if (m_loiteringStartTimestampUs && !loiteringActive)
    {
        result.push_back(std::make_shared<Loitering>(
            timestampUs, timestampUs - *m_loiteringStartTimestampUs));
        m_loiteringStartTimestampUs = std::nullopt;
    }
    return result;
}
/**
 * Orchestrates one analysis pass: disappearance/loitering-finish events
 * first, then (unless detection was skipped this frame) line-crossing and
 * area monitoring, finally advancing each track's processed-detection index.
 */
RoiProcessor::Result RoiProcessor::runImpl(
    const TrackList& tracks,
    const EventList& events,
    bool analyzeOnlyForDisappearanceInAreaDetectedEvents)
{
    EventList result = generateDisappearanceInAreaAndLoiteringFinishEvents(events);
    if (analyzeOnlyForDisappearanceInAreaDetectedEvents)
        return result;
    ensureTracksContexts(tracks);
    EventList lineEvents = lineCrossedDetection(tracks);
    EventList areaEvents = monitorArea(tracks);
    // Mark all current detections as analyzed for the next pass.
    for (const auto& track: tracks)
    {
        auto &trackContext = *m_tracksContexts.at(track->id());
        trackContext.detectionIndex = (int) track->detections.size() - 1;
    }
    result.reserve(lineEvents.size() + areaEvents.size());
    result.insert(result.end(), lineEvents.begin(), lineEvents.end());
    result.insert(result.end(), areaEvents.begin(), areaEvents.end());
    return result;
}
// Extends the current "inside" interval (opening a new one after a pause).
// NOTE(review): 0 is used as the "unset" sentinel for timestamps — assumes
// real timestamps are never 0; confirm.
void RoiProcessor::IntervalCalculator::registerTimestamp(int64_t timestampUs)
{
    if (m_firstTimestamp == 0)
        m_firstTimestamp = timestampUs;
    if (m_lastIntervalStartTimestampUs == 0)
        m_lastIntervalStartTimestampUs = timestampUs;
    m_lastTimestamp = timestampUs;
}
/**
 * Closes the currently open interval, folding it into the accumulated total.
 * Fixed: calling pause() while already paused previously added
 * (m_lastTimestamp - 0) — an absolute timestamp — to the total, corrupting
 * the loitering duration; now a repeated pause is a no-op.
 */
void RoiProcessor::IntervalCalculator::pause()
{
    if (m_lastIntervalStartTimestampUs == 0)
        return; //< Already paused; nothing to accumulate.
    m_durationWithoutLastInterval +=
        microseconds(m_lastTimestamp - m_lastIntervalStartTimestampUs);
    m_lastIntervalStartTimestampUs = 0;
}
// Timestamp of the very first registered detection (0 if none yet).
int64_t RoiProcessor::IntervalCalculator::firstTimestamp() const
{
    return m_firstTimestamp;
}
/**
 * Total accumulated "inside" time, including the currently open interval.
 * Fixed: while paused (m_lastIntervalStartTimestampUs == 0) the previous
 * expression added the absolute m_lastTimestamp, producing an epoch-scale
 * duration that could spuriously satisfy the loitering threshold; now only
 * the accumulated total is returned in that case.
 */
std::chrono::microseconds RoiProcessor::IntervalCalculator::duration() const
{
    if (m_lastIntervalStartTimestampUs == 0)
        return m_durationWithoutLastInterval;
    return m_durationWithoutLastInterval +
        microseconds(m_lastTimestamp - m_lastIntervalStartTimestampUs);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 19,111
|
C++
|
.cpp
| 459
| 29.464052
| 100
| 0.588254
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,390
|
object_detector.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/object_detector.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_detector.h"
#include <opencv2/core/mat.hpp>
#include <inference_engine.hpp>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/utils.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/helpers/log_utils.h>
#include <chrono>
#include <fstream>
#include <vector>
#include "config.h"
#include "exceptions.h"
#include "openvino_object_detection_analytics_plugin_ini.h"
#include "utils.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Loads the object detection network from modelDir and prepares it for
 * inference (via the base class and init()).
 */
ObjectDetector::ObjectDetector(
    std::filesystem::path modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config>& config)
    :
    Network(std::move(modelDir), std::move(logUtils), config)
{
    init(config);
}
/**
 * Detects objects in the given frame.
 *
 * @return Detections found in the frame.
 * @throws ObjectDetectionError wrapping any underlying failure.
 */
DetectionList ObjectDetector::run(const Frame& frame)
{
    try
    {
        return runImpl(frame);
    }
    catch (const std::exception& e)
    {
        throw ObjectDetectionError(e.what());
    }
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Runs one synchronous detection pass: copies the frame into the input blob,
 * waits for the result and converts each raw proposal (m_objectSize floats)
 * into a Detection, dropping rejected proposals.
 */
DetectionList ObjectDetector::runImpl(const Frame& frame)
{
    if (!m_config)
        throw InferenceError("Config is not set.");
    using namespace std::chrono;
    using namespace InferenceEngine;
    const auto startTime = high_resolution_clock::now();
    Blob::Ptr frameBlob = m_inferenceRequest.GetBlob(m_inputName);
    matU8ToBlob<uint8_t>(frame.cvMat, frameBlob);
    m_inferenceRequest.StartAsync();
    DetectionList result;
    if (m_inferenceRequest.Wait(IInferRequest::WaitMode::RESULT_READY) == OK)
    {
        using FloatPrecision = PrecisionTrait<Precision::FP32>::value_type*;
        Blob::Ptr outputBlob = m_inferenceRequest.GetBlob(m_outputName);
        // Output layout: m_maxProposalCount rows of m_objectSize floats each.
        const float* const detections = outputBlob->buffer().as<FloatPrecision>();
        for (int i = 0; i < m_maxProposalCount; ++i)
        {
            const float* const detectionPtr = detections + i * m_objectSize;
            // Returns null for proposals below threshold / invalid ones.
            const auto detection = convertRawDetectionToDetection(
                /*rawDetection*/ detectionPtr,
                /*timestampUs*/ frame.timestampUs);
            if (detection)
                result.push_back(detection);
        }
    }
    const auto finishTime = high_resolution_clock::now();
    const auto duration = duration_cast<milliseconds>(finishTime - startTime);
    NX_OUTPUT << "Detection duration: " << duration.count() << " ms.";
    return result;
}
/**
* Loads person and vehicle detection model.
*
* Ability to select other models will be added in future versions of OpenVINO object detection
* plugin.
*
*/
InferenceEngine::CNNNetwork ObjectDetector::loadModel()
{
    // Read the class labels (one whitespace-separated token per label —
    // NOTE(review): multi-word labels would be split; confirm file format).
    const auto modelLabels = m_modelDir / modelBasePath().replace_extension("labels");
    NX_OUTPUT << " Read labels from: " << modelLabels;
    std::ifstream inputFile(modelLabels.string());
    m_labels.clear();
    std::copy(
        std::istream_iterator<std::string>(inputFile),
        std::istream_iterator<std::string>(),
        std::back_inserter(m_labels));
    if (m_labels.empty())
        NX_OUTPUT << " No labels were read, please check that file: '"
            << modelLabels.string() << "' is present, readable and contains labels.";
    // The network itself is loaded by the base class.
    return Network::loadModel();
}
// In addition to the base preparation, records the network's expected input
// dimensions (used by setImageInfoBlob()).
void ObjectDetector::prepareInputBlobs(InferenceEngine::CNNNetwork network)
{
    Network::prepareInputBlobs(network);
    InferenceEngine::InputsDataMap inputInfo(network.getInputsInfo());
    auto inputInfoItem = inputInfo.begin();
    const InferenceEngine::TensorDesc& inputDescription = inputInfoItem->second->getTensorDesc();
    m_networkInputHeight = getTensorHeight(inputDescription);
    m_networkInputWidth = getTensorWidth(inputDescription);
}
// Validates the detection output layer and caches its geometry; reconciles the label
// list with the class count declared by the network.
void ObjectDetector::prepareOutputBlobs(InferenceEngine::CNNNetwork network)
{
    Network::prepareOutputBlobs(network);

    InferenceEngine::CNNLayerPtr layer = network.getLayerByName(m_outputName.c_str());
    const int classesCount = layer->GetParamAsInt("num_classes");
    const int labelsSize = (int) m_labels.size();
    if (labelsSize != classesCount)
    {
        if (labelsSize == classesCount - 1)
        {
            // Off by exactly one: the labels file apparently omits the background class.
            m_labels.insert(m_labels.begin(), "Background");
            NX_OUTPUT << "Added missing background label.";
        }
        else
        {
            // Mismatch that cannot be repaired; fall back to index-based labels.
            m_labels.clear();
            NX_OUTPUT << "Label count does not match to number of classes of network.";
        }
    }

    // The output blob is expected to be 4-dimensional, with the proposal count and the
    // per-proposal record size in the last two dimensions.
    const auto outputDims = m_output->getTensorDesc().getDims();
    constexpr int kExpectedOutputDimsSize = 4;
    const auto outputDimsSize = (int) outputDims.size();
    std::string description = "as output dimension size";
    if (!checkLayerParameter(description, outputDimsSize, kExpectedOutputDimsSize))
        throw ModelLoadingError("Failed to prepare output of network.");

    constexpr int kMaxProposalCountIndex = 2;
    m_maxProposalCount = (int) outputDims[kMaxProposalCountIndex];

    constexpr int kObjectSizeIndex = 3;
    m_objectSize = (int) outputDims[kObjectSizeIndex];

    // Each proposal record holds 7 floats: imageId, label, confidence, xMin, yMin,
    // xMax, yMax (see convertRawDetectionToDetection()).
    constexpr int kExpectedObjectSize = 7;
    description = "as the last dimension of output";
    if (!checkLayerParameter(description, m_objectSize, kExpectedObjectSize))
        throw ModelLoadingError("Failed to prepare output of network.");
}
void ObjectDetector::setImageInfoBlob()
{
using namespace InferenceEngine;
auto blob = m_inferenceRequest.GetBlob(m_inputName);
auto data = blob->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
data[0] = static_cast<float>(m_networkInputHeight); //< height
data[1] = static_cast<float>(m_networkInputWidth); //< width
data[2] = 1;
}
// Creates the base inference request, then initializes the image-info input values.
void ObjectDetector::createInferenceRequest()
{
    Network::createInferenceRequest();
    setImageInfoBlob();
}
/**
 * Converts one raw 7-float detection record into a Detection.
 * @return Detection pointer, or null if the record is an end marker, not a person, or
 *     below the confidence threshold.
 */
std::shared_ptr<Detection> ObjectDetector::convertRawDetectionToDetection(
    const float rawDetection[],
    int64_t timestampUs) const
{
    // A negative image id marks the end of valid proposals for this frame
    // (proposal count for the frame is smaller than m_maxProposalCount).
    if (rawDetection[0] < 0)
        return {};

    const auto labelIndex = (std::vector<std::string>::size_type) rawDetection[1];
    std::string label;
    if (m_labels.empty())
        label = nx::kit::utils::format("#%d", labelIndex);
    else
        label = m_labels[labelIndex];

    const float confidence = rawDetection[2];

    // In current version of OpenVINO object detection plugin only persons are detected.
    const bool isConfident = confidence > m_config->minDetectionConfidence;
    if (label != "person" || !isConfident)
        return {};

    const float xMin = rawDetection[3];
    const float yMin = rawDetection[4];
    const float xMax = rawDetection[5];
    const float yMax = rawDetection[6];

    NX_OUTPUT << "Detection:";
    NX_OUTPUT << " Label: " << label;
    NX_OUTPUT << " Confidence: " << confidence;
    NX_OUTPUT << " Coordinates: (" << xMin << "; " << yMin << "); "
        << "(" << xMax << "; " << yMax << ")";

    return std::make_shared<Detection>(Detection{
        /*boundingBox*/ {{xMin, yMin}, {xMax, yMax}},
        /*confidence*/ confidence,
        /*trackId*/ Uuid(),
        /*timestampUs*/ timestampUs,
    });
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 7,502
|
C++
|
.cpp
| 186
| 35.172043
| 99
| 0.683278
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,391
|
openvino_log_utils.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/openvino_log_utils.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "openvino_log_utils.h"
#include <inference_engine.hpp>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#define NX_DEBUG_ENABLE_OUTPUT (this->logUtils.enableOutput)
#include <nx/kit/debug.h>
#include <nx/sdk/helpers/log_utils.h>
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/**
 * @param logUtils Logging helper; taken by value as a sink and moved into the member
 *     (the original copied it a second time).
 */
OpenVinoLogUtils::OpenVinoLogUtils(sdk::LogUtils logUtils):
    logUtils(std::move(logUtils))
{
}
// Logs the Inference Engine plugin's version triple: API version, name and build.
void OpenVinoLogUtils::printInferenceEnginePluginVersion(
    const InferenceEngine::Version& version) const
{
    const auto& api = version.apiVersion;
    NX_OUTPUT << " Plugin version: " << api.major << "." << api.minor;

    const char* const description = version.description;
    NX_OUTPUT << " Plugin name: " << (description ? description : "UNKNOWN");

    const char* const build = version.buildNumber;
    NX_OUTPUT << " Plugin build: " << (build ? build : "UNKNOWN");
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 1,000
|
C++
|
.cpp
| 21
| 44.952381
| 97
| 0.742798
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,392
|
person_tracker.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/person_tracker.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "person_tracker.h"
#include <boost/optional.hpp>
#include <opencv2/core/core.hpp>
#include "exceptions.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::tbm;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Creates a person tracker.
 * @param modelDir Directory with the re-identification model files.
 * @param logUtils Logging helper (moved in).
 * @param config Initial configuration, applied via setConfig().
 */
PersonTracker::PersonTracker(
    std::filesystem::path modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config>& config)
    :
    logUtils(std::move(logUtils)),
    m_modelDir(std::move(modelDir))
{
    setConfig(config);
}
/**
 * (Re)configures the tracker.
 * @param config New configuration.
 * @param updateFpsOnly If true, keeps the existing person re-id descriptor and only
 *     recreates the matching tracker, whose forget delay depends on the frame rate.
 */
void PersonTracker::setConfig(const std::shared_ptr<const Config> config, bool updateFpsOnly)
{
    if (!updateFpsOnly)
    {
        // Pass constructor arguments directly to make_shared; the original built a
        // PersonReIdDescriptor temporary and copied it into the shared object.
        m_personReIdDescriptor =
            std::make_shared<PersonReIdDescriptor>(m_modelDir, logUtils, config);
    }
    m_tracker = createPersonTrackerByMatching(config->minFrameCountIntervalBetweenTracks());
}
/**
 * Runs one tracking step for the given frame.
 * @throws ObjectTrackingError wrapping any OpenCV or standard exception from the
 *     underlying implementation.
 */
PersonTracker::Result PersonTracker::run(const Frame& frame, const DetectionList& detections)
{
    try
    {
        return runImpl(frame, detections);
    }
    catch (const cv::Exception& e)
    {
        // Translate OpenCV exceptions into the plugin's own exception type.
        throw ObjectTrackingError(cvExceptionToStdString(e));
    }
    catch (const std::exception& e)
    {
        throw ObjectTrackingError(e.what());
    }
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Performs one tracking step: feeds the frame's detections to the matching tracker,
 * updates the high-level tracks, and collects the resulting best shots, detections,
 * events and tracks.
 */
PersonTracker::Result PersonTracker::runImpl(const Frame& frame, const DetectionList& detections)
{
    TrackedObjects detectionsToTrack = convertDetectionsToTrackedObjects(frame, detections);

    // Perform tracking and extract tracked detections.
    m_tracker->process(frame.cvMat, detectionsToTrack, (uint64_t) frame.timestampUs);
    const TrackedObjects trackedDetections = m_tracker->trackedDetections();

    DetectionInternalList detectionsInternal =
        convertTrackedObjectsToDetections(frame, trackedDetections, m_idMapper.get());
    processDetections(frame, detectionsInternal);
    finishTracks();
    EventList events = generateEvents();
    BestShotList bestShots = extractBestShots();
    cleanup();

    DetectionList resultingDetections = extractDetectionList(detectionsInternal);
    return {
        std::move(bestShots),
        std::move(resultingDetections),
        std::move(events),
        // extractTracks() already yields a prvalue; the former std::move() around it
        // was redundant (clang-tidy: performance-move-const-arg).
        extractTracks(),
    };
}
// Collects the best shot of every track that currently has one.
BestShotList PersonTracker::extractBestShots() const
{
    BestShotList bestShots;
    for (const auto& [trackId, track]: m_tracks)
    {
        if (track->bestShot())
            bestShots.push_back(track->bestShot());
    }
    return bestShots;
}
// Returns all current tracks as a flat list.
TrackList PersonTracker::extractTracks() const
{
    TrackList tracks;
    tracks.reserve(m_tracks.size());
    for (const auto& [trackId, track]: m_tracks)
        tracks.push_back(track);
    return tracks;
}
void PersonTracker::finishTracks()
{
const std::unordered_map<size_t, cv::tbm::Track> cvTracks = m_tracker->tracks();
for (const auto& pair: cvTracks)
{
const size_t cvTrackId = pair.first;
const cv::tbm::Track& cvTrack = pair.second;
const Uuid trackId = m_idMapper->get(cvTrack.first_object.object_id);
if (m_tracker->isTrackForgotten(cvTrackId))
{
if (m_tracks.find(trackId) != m_tracks.end())
m_tracks[trackId]->finish();
}
}
}
/**
 * Creates and configures the tracking-by-matching engine.
 * @param forgetDelay Delay (in frames) after which a lost low-level track is forgotten.
 */
cv::Ptr<ITrackerByMatching> PersonTracker::createPersonTrackerByMatching(int forgetDelay)
{
    TrackerParams params;
    params.aff_thr_fast = 0.9F;
    params.aff_thr_strong = 0.5F;
    params.drop_forgotten_tracks = false; //< To detect just finished tracks
    params.forget_delay = forgetDelay;
    params.max_num_objects_in_track = 0; //< Unlimited

    cv::Ptr<ITrackerByMatching> tracker = createTrackerByMatching(params);

    // Fast affinity: template matching on small resized crops.
    static const Size kDescriptorFastSize(16, 32);
    std::shared_ptr<IImageDescriptor> descriptorFast =
        std::make_shared<ResizedImageDescriptor>(
            kDescriptorFastSize, InterpolationFlags::INTER_LINEAR);
    std::shared_ptr<IDescriptorDistance> distanceFast =
        std::make_shared<MatchTemplateDistance>();
    tracker->setDescriptorFast(descriptorFast);
    tracker->setDistanceFast(distanceFast);

    // Strong affinity: cosine distance between person re-identification descriptors.
    // Constructor arguments go directly to make_shared; the original built a
    // CosDistance temporary and copied it into the shared object.
    std::shared_ptr<IDescriptorDistance> distanceStrong =
        std::make_shared<CosDistance>(cv::Size(
            /*_width*/ PersonReId::kChannelCount,
            /*_height*/ 1));
    tracker->setDescriptorStrong(m_personReIdDescriptor);
    tracker->setDistanceStrong(distanceStrong);

    return tracker;
}
/**
* Cleanup ids of the objects that belong to the forgotten tracks.
*/
void PersonTracker::cleanupIds()
{
std::set<Uuid> validIds;
for (const auto& pair: m_tracks)
validIds.insert(pair.first);
m_idMapper->removeAllExcept(validIds);
}
void PersonTracker::cleanupTracks()
{
for (const auto& pair: m_tracker->tracks())
{
const auto cvTrackId = (int64_t) pair.first;
if (m_tracker->isTrackForgotten((size_t) cvTrackId))
{
const cv::tbm::Track& cvTrack = pair.second;
const Uuid trackId = m_idMapper->get(cvTrack.first_object.object_id);
m_tracks.erase(trackId);
}
}
}
// Post-frame cleanup. Order matters: our tracks must be removed while the tracker
// still reports which of its tracks are forgotten, then the tracker drops them, and
// finally the id mapping is pruned to the surviving tracks.
void PersonTracker::cleanup()
{
    cleanupTracks();
    m_tracker->dropForgottenTracks();
    cleanupIds();
}
std::shared_ptr<Track> PersonTracker::getOrCreateTrack(const Uuid& trackId)
{
std::shared_ptr<Track> result;
if (m_tracks.find(trackId) == m_tracks.end())
{
result = std::make_shared<Track>(
/*id*/ trackId,
/*maxDetectionCount*/ m_tracker->params().max_num_objects_in_track);
m_tracks.insert(std::make_pair(trackId, result));
}
else
{
result = m_tracks[trackId];
}
return result;
}
/**
 * Copies into the given high-level track all earlier detections accumulated by the
 * corresponding low-level cv::tbm track.
 * @param frame Current frame; its own detection is skipped (it was already added by
 *     processDetection()).
 */
void PersonTracker::copyDetectionsHistoryToTrack(
    const Frame& frame,
    int64_t cvTrackId,
    Track* track) const
{
    const cv::tbm::Track& cvTrack = m_tracker->tracks().at((size_t) cvTrackId);
    for (const TrackedObject& trackedDetection: cvTrack.objects)
    {
        // Skip the current frame's detection to avoid adding it twice.
        if ((int64_t) trackedDetection.timestamp == frame.timestampUs)
            continue;
        std::shared_ptr<const DetectionInternal> detection = convertTrackedObjectToDetection(
            frame,
            trackedDetection,
            m_idMapper.get());
        track->addDetection(
            (int64_t) trackedDetection.timestamp,
            detection->detection,
            /*isTrackStarted*/ false);
    }
}
// Adds one detection to its (possibly newly created) track; for a track that has just
// started, back-fills the detection history accumulated by the low-level tracker.
void PersonTracker::processDetection(
    const Frame& frame,
    const std::shared_ptr<DetectionInternal>& detection)
{
    const int64_t cvTrackId = detection->cvTrackId;
    TrackPtr track = getOrCreateTrack(m_idMapper->get(cvTrackId));
    track->addDetection(frame.timestampUs, detection->detection);
    const Status trackStatus = track->status();
    if (trackStatus == Status::started)
        copyDetectionsHistoryToTrack(frame, cvTrackId, track.get());
}
void PersonTracker::processDetections(
const Frame& frame,
const DetectionInternalList& detectionsInternal)
{
for (const DetectionInternalPtr& detection: detectionsInternal)
processDetection(frame, detection);
}
/**
 * Produces events from the current track states:
 * - PersonDetected for each track that has just started, plus the start of the
 *   prolonged PeopleDetected event when it is not active yet;
 * - PersonLost for each track that has just finished;
 * - the end of PeopleDetected once all tracks are finished.
 */
EventList PersonTracker::generateEvents()
{
    if (m_tracks.empty())
        return {};
    EventList result;
    bool allTracksAreFinished = true;
    for (const auto& pair: m_tracks)
    {
        const TrackPtr& track = pair.second;
        if (track->status() == Status::started)
        {
            // NOTE(review): activate() presumably moves the track out of the
            // "started" state so the enter event fires only once — confirm in Track.
            track->activate();
            const auto objectEnterEvent = std::make_shared<PersonDetected>(PersonDetected{
                track->detections[0]->timestampUs,
                track->id(),
            });
            result.push_back(objectEnterEvent);
            // Open the prolonged "people detected" event on the first active track.
            if (!m_personDetectionStartTimestampUs)
            {
                const auto timestampUs = track->detections[0]->timestampUs;
                result.push_back(std::make_shared<PeopleDetected>(timestampUs));
                m_personDetectionStartTimestampUs = timestampUs;
            }
            allTracksAreFinished = false;
        }
        else if (track->status() == Status::finished)
        {
            // The lost event carries the timestamp of the track's last detection.
            const auto event = std::make_shared<PersonLost>(PersonLost{
                track->detections[track->detections.size() - 1]->timestampUs,
                track->id(),
            });
            result.push_back(event);
        }
        else
        {
            allTracksAreFinished = false;
        }
    }
    // Close the prolonged event when no active track remains, reporting its duration.
    if (m_personDetectionStartTimestampUs && allTracksAreFinished)
    {
        const auto& track = m_tracks.begin()->second;
        const auto timestampUs = track->detections[track->detections.size() - 1]->timestampUs;
        result.push_back(std::make_shared<PeopleDetected>(
            timestampUs, timestampUs - *m_personDetectionStartTimestampUs));
        m_personDetectionStartTimestampUs = std::nullopt;
    }
    return result;
}
/**
 * Wraps PersonReId as a cv::tbm strong descriptor.
 * @param modelDir Directory with the re-id model; moved into the PersonReId (taken
 *     by value as a sink — the original copied it).
 * @param logUtils Logging helper, likewise moved.
 */
PersonTracker::PersonReIdDescriptor::PersonReIdDescriptor(
    std::filesystem::path modelDir,
    LogUtils logUtils,
    const std::shared_ptr<const Config> config):
    m_personReId(std::make_shared<PersonReId>(
        /*modelDir*/ std::move(modelDir),
        /*logUtils*/ std::move(logUtils),
        /*config*/ config))
{
}
// The descriptor is a single row of PersonReId::kChannelCount values.
Size PersonTracker::PersonReIdDescriptor::size() const
{
    return Size(/*_width*/ PersonReId::kChannelCount, /*_height*/ 1);
}
// Computes the re-identification descriptor for one image crop.
void PersonTracker::PersonReIdDescriptor::compute(const Mat& mat, Mat& descriptor)
{
    m_personReId->run(mat).copyTo(descriptor);
}
void PersonTracker::PersonReIdDescriptor::compute(
const std::vector<Mat>& mats,
std::vector<Mat>& descriptors)
{
descriptors.resize(mats.size());
for (size_t i = 0; i < mats.size(); ++i)
compute(mats[i], descriptors[i]);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 10,012
|
C++
|
.cpp
| 288
| 28.642361
| 99
| 0.672565
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,393
|
object_tracker_utils.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/object_tracker_utils.cpp
|
//// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_tracker_utils.h"
#include <nx/sdk/helpers/uuid_helper.h>
#include <opencv_tbm/tracking_by_matching.hpp>
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace cv;
using namespace cv::tbm;
using namespace nx::sdk;
// Returns the Uuid previously assigned to this id, generating one on first sight.
Uuid IdMapper::get(int64_t id)
{
    const auto existing = m_map.find(id);
    if (existing != m_map.end())
        return existing->second;

    const Uuid newUuid = UuidHelper::randomUuid();
    m_map[id] = newUuid;
    return newUuid;
}
void IdMapper::removeAllExcept(const std::set<Uuid>& idsToKeep)
{
for (auto it = m_map.begin(); it != m_map.end();)
{
if (idsToKeep.find(it->second) == idsToKeep.end())
it = m_map.erase(it);
else
++it;
}
}
/**
 * Convert detection from opencv::tbm format to the plugin format.
 */
DetectionInternalPtr convertTrackedObjectToDetection(
    const Frame& frame,
    const TrackedObject& trackedDetection,
    IdMapper* idMapper)
{
    Detection detection{
        /*boundingBox*/ convertCvRectToBoostRect(
            trackedDetection.rect, frame.width, frame.height),
        /*confidence*/ (float) trackedDetection.confidence,
        /*trackId*/ idMapper->get(trackedDetection.object_id),
        /*timestampUs*/ (int64_t) trackedDetection.timestamp,
    };
    return std::make_shared<DetectionInternal>(DetectionInternal{
        std::make_shared<Detection>(std::move(detection)),
        /*cvTrackId*/ trackedDetection.object_id,
    });
}
/**
 * Convert detections from opencv::tbm format to the plugin format, restoring the classLabels.
 */
DetectionInternalList convertTrackedObjectsToDetections(
    const Frame& frame,
    const TrackedObjects& trackedDetections,
    IdMapper* idMapper)
{
    DetectionInternalList detections;
    for (const auto& trackedDetection: trackedDetections)
    {
        detections.push_back(
            convertTrackedObjectToDetection(frame, trackedDetection, idMapper));
    }
    return detections;
}
/**
 * Convert detections from the plugin format to the format of opencv::tbm.
 */
TrackedObjects convertDetectionsToTrackedObjects(
    const Frame& frame,
    const DetectionList& detections)
{
    TrackedObjects trackedObjects;
    for (const auto& detection: detections)
    {
        trackedObjects.emplace_back(
            /*rect*/ convertBoostRectToCvRect(
                detection->boundingBox, frame.width, frame.height),
            /*confidence*/ detection->confidence,
            /*frame_idx*/ (int) frame.index,
            /*object_id*/ -1); //< Placeholder, to be filled in PersonTracker::process().
    }
    return trackedObjects;
}
// Strips the internal wrapper, keeping only the public Detection objects.
DetectionList extractDetectionList(const DetectionInternalList& detectionsInternal)
{
    DetectionList detections;
    for (const auto& detectionInternal: detectionsInternal)
        detections.push_back(detectionInternal->detection);
    return detections;
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 3,193
|
C++
|
.cpp
| 98
| 27.081633
| 96
| 0.688312
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,394
|
utils.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/utils.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "utils.h"
#include <inference_engine.hpp>
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/**
 * Parses a boolean from a string, case-insensitively.
 * @return true if str equals "true" (in any letter case) or "1", false otherwise.
 */
bool toBool(std::string str)
{
    // Lowercase in place. The original call passed (begin, begin, end) — an empty
    // input range with the output iterator at end() — so nothing was ever converted
    // and "True"/"TRUE" were rejected. The correct order is (first, last, d_first, op).
    std::transform(str.begin(), str.end(), str.begin(),
        [](unsigned char c) { return (char) std::tolower(c); });
    return str == "true" || str == "1";
}
// Returns the width dimension of a tensor, throwing if its layout has none.
int getTensorWidth(const InferenceEngine::TensorDesc& description)
{
    using InferenceEngine::Layout;
    const auto layout = description.getLayout();
    const auto& dims = description.getDims();

    const bool hasSpatialDims = dims.size() >= 2 && (
        layout == Layout::NCHW ||
        layout == Layout::NHWC ||
        layout == Layout::NCDHW ||
        layout == Layout::NDHWC ||
        layout == Layout::OIHW ||
        layout == Layout::CHW ||
        layout == Layout::HW);
    if (!hasSpatialDims)
        THROW_IE_EXCEPTION << "Tensor does not have width dimension";

    // Regardless of the layout, dimensions are stored in the fixed order.
    return (int) dims.back();
}
// Returns the height dimension of a tensor, throwing if its layout has none.
int getTensorHeight(const InferenceEngine::TensorDesc& description)
{
    using InferenceEngine::Layout;
    const auto layout = description.getLayout();
    const auto& dims = description.getDims();
    const auto size = dims.size();

    const bool hasSpatialDims = size >= 2 && (
        layout == Layout::NCHW ||
        layout == Layout::NHWC ||
        layout == Layout::NCDHW ||
        layout == Layout::NDHWC ||
        layout == Layout::OIHW ||
        layout == Layout::CHW ||
        layout == Layout::HW);
    if (!hasSpatialDims)
        THROW_IE_EXCEPTION << "Tensor does not have height dimension";

    // Regardless of the layout, dimensions are stored in the fixed order.
    return (int) dims.at(size - 2);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 2,063
|
C++
|
.cpp
| 56
| 31.142857
| 94
| 0.647
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,397
|
engine.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/plugin/engine.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "engine.h"
#include <thread>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/json.h>
#include <nx/sdk/analytics/helpers/engine.h>
#include <nx/sdk/analytics/helpers/plugin.h>
#include <nx/sdk/analytics/i_device_agent.h>
#include <nx/sdk/i_device_info.h>
#include <nx/sdk/analytics/i_uncompressed_video_frame.h>
#include <nx/sdk/uuid.h>
#include "device_agent.h"
#include "lib/openvino_object_detection_analytics_plugin_ini.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * @param plugin Parent plugin (not owned); used to query the utility provider for the
 *     plugin home directory.
 */
Engine::Engine(Plugin* plugin) noexcept:
    nx::sdk::analytics::Engine(NX_DEBUG_ENABLE_OUTPUT),
    m_plugin(plugin)
{
    obtainPluginHomeDir();
}
// Defaulted out of line; the class has no resources requiring manual cleanup.
Engine::~Engine() noexcept = default;
// Creates a DeviceAgent for the given device; ownership of the new agent passes to the
// caller via outResult.
void Engine::doObtainDeviceAgent(Result<IDeviceAgent*>* outResult, const IDeviceInfo* deviceInfo)
{
    *outResult = new DeviceAgent(this, deviceInfo);
}
// Queries the Server-provided utility provider for the plugin home dir and logs it.
void Engine::obtainPluginHomeDir() noexcept
{
    const auto utilityProvider = m_plugin->utilityProvider();
    NX_KIT_ASSERT(utilityProvider);

    m_pluginHomeDir = std::filesystem::path(utilityProvider->homeDir());

    if (!m_pluginHomeDir.empty())
        NX_PRINT << "Plugin home dir: " << nx::kit::utils::toString(m_pluginHomeDir.string());
    else
        NX_PRINT << "Plugin home dir: absent";
}
/**
 * Builds the Engine manifest JSON: plugin capabilities plus the DeviceAgent settings
 * model (performance, object detection, object tracking, line crossing and area
 * monitoring sections).
 */
std::string Engine::manifestString() const noexcept
{
    constexpr int kSecondsPerDay = 86400; //< Upper bound for duration-typed settings.
    // Used as both the default and the maximum of the "CPU Cores" setting.
    const int coreCount = (int) std::thread::hardware_concurrency();
    using nx::kit::Json;

    // One line figure per supported ROI line.
    std::vector<Json> lineItems;
    for (int i = 1; i <= RoiLine::kMaxCount; ++i)
    {
        using namespace settings::roi_line;
        lineItems.push_back(Json::object {
            { "type", "LineFigure"},
            { "name", polyline::name(i) },
            { "caption", "Line #" + std::to_string(i) },
            { "maxPoints", RoiLine::kMaxPoints },
        });
    }
    Json linesGroupBox = Json::object {
        { "type", "GroupBox" },
        { "caption", "Lines" },
        { "items", Json(lineItems) }
    };

    // One group box per supported ROI area: polygon figure, detection toggles and
    // per-area thresholds.
    std::vector<Json> areaMonitoringItems;
    for (int i = 1; i <= RoiArea::kMaxCount; ++i)
    {
        using namespace settings::roi_area;
        std::vector<Json> areaItems{};
        const std::string namePrefix = settingPrefix(i);
        areaItems.push_back(Json::object {
            { "type", "PolygonFigure" },
            { "name", polygon::name(i) },
            { "caption", "Area #" + std::to_string(i) },
            { "maxPoints", RoiArea::kMaxPoints },
        });
        areaItems.push_back(Json::object {
            { "type", "CheckBoxGroup" },
            { "name", namePrefix + "DetectionsEnabled" },
            { "caption", "Detections" },
            { "description", "Choose which events you want to capture with the plugin.<br>"
                "Define event response rules in Event Rules window." },
            { "defaultValue", Json::array {
                entrance_detection_enabled::name(i),
                exit_detection_enabled::name(i),
                appearance_detection_enabled::name(i),
                disappearance_detection_enabled::name(i),
                loitering_detection_enabled::name(i),
                crossing_enabled::name(i),
            }},
            { "range", Json::array {
                entrance_detection_enabled::name(i),
                exit_detection_enabled::name(i),
                appearance_detection_enabled::name(i),
                disappearance_detection_enabled::name(i),
                loitering_detection_enabled::name(i),
                crossing_enabled::name(i),
            }},
            { "itemCaptions", Json::object {
                { entrance_detection_enabled::name(i), "Entrance" },
                { exit_detection_enabled::name(i), "Exit" },
                { appearance_detection_enabled::name(i), "Appearance" },
                { disappearance_detection_enabled::name(i), "Disappearance" },
                { loitering_detection_enabled::name(i), "Loitering" },
                { crossing_enabled::name(i), "Crossing" },
            }},
        });
        areaItems.push_back(Json::object {
            { "type", "SpinBox" },
            { "name", detection_sensitivity::name(i) },
            { "caption", "Detection Sensitivity (%)" },
            { "description", "Sensitivity of object intersection with area." },
            { "defaultValue", (int) (detection_sensitivity::kDefault * 100) },
            { "minValue", 1 },
            { "maxValue", 100 },
        });
        areaItems.push_back(Json::object {
            { "type", "SpinBox" },
            { "name", loitering_detection_duration::name(i) },
            { "caption", "Loitering Duration (s)" },
            { "description", "Total time the object was in the area for triggering the event." },
            { "defaultValue", (int) loitering_detection_duration::kDefault.count() },
            { "minValue", 1 },
            { "maxValue", kSecondsPerDay },
        });
        areaMonitoringItems.push_back(Json::object {
            { "type", "GroupBox" },
            { "caption", "Area" },
            { "items", Json(areaItems) }
        });
    }

    using D = Config::Default;
    const auto performance = Json::object {
        { "type", "GroupBox" },
        { "caption", "Performance" },
        { "items", Json::array {
            Json::object {
                { "type", "SpinBox" },
                { "caption", "CPU Cores" },
                { "description", "Max number of logical CPU cores used for "
                    "object detection and tracking."},
                { "name", "threadCount" },
                { "defaultValue", coreCount },
                { "minValue", 1 },
                { "maxValue", coreCount },
            },
        } },
    };
    const auto objectDetection = Json::object {
        { "type", "GroupBox" },
        { "caption", "Object Detection" },
        { "items", Json::array {
            Json::object {
                { "type", "SpinBox" },
                { "caption", "Person Confidence (%)" },
                { "description",
                    "With this or greater percentage of confidence, the object "
                    "is identified as a person. "},
                { "name", "minDetectionConfidence" },
                { "defaultValue",
                    (int) round((double) D::kMinDetectionConfidence * 100) },
                { "minValue", 1 },
                { "maxValue", 100 },
            },
            Json::object {
                { "type", "SpinBox" },
                { "caption", "Detection Frequency (fps)" },
                { "description",
                    "Changing this value alters how often the detection runs."
                    "<br>A higher value will improve performance but can cause "
                    "inaccuracies." },
                { "name", "detectionFrequencyFps" },
                { "defaultValue", D::kDetectionFrequencyFps },
                { "minValue", 1 },
                { "maxValue", 100 },
            },
        } },
    };
    const auto objectTracking = Json::object {
        { "type", "GroupBox" },
        { "caption", "Object Tracking" },
        { "items", Json::array {
            Json::object {
                { "type", "SpinBox" },
                { "caption", "Track Reset Timeout (s)" },
                { "description",
                    "Tracks are considered different if detections of the same "
                    "person are separated by more than this value." },
                { "name", "minIntervalBetweenTracks" },
                { "defaultValue", (int) D::kMinIntervalBetweenTracks.count() },
                { "minValue", 1 },
                { "maxValue", kSecondsPerDay },
            },
        } },
    };

    // Top-level manifest: capabilities plus the three settings sections.
    const Json manifestJson = Json::object {
        { "capabilities", "needUncompressedVideoFrames_bgr" },
        { "streamTypeFilter", "motion|uncompressedVideo" },
        { "preferredStream", "secondary" },
        { "deviceAgentSettingsModel", Json::object {
            { "type", "Settings" },
            { "sections", Json::array {
                Json::object {
                    { "type", "Section" },
                    { "caption", "General" },
                    { "items", Json::array {
                        performance,
                        objectDetection,
                        objectTracking,
                    } },
                },
                Json::object {
                    { "type", "Section" },
                    { "caption", "Line Crossing" },
                    { "items", Json::array { linesGroupBox } },
                },
                Json::object {
                    { "type", "Section" },
                    { "caption", "Area Monitoring" },
                    { "items", Json(areaMonitoringItems) },
                },
            } },
        } },
    };
    return manifestJson.dump();
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
namespace {

// Static plugin manifest. The human-readable description is spliced between two raw
// JSON literal segments to keep its long text readable in source.
static const std::string kPluginManifest = R"json(
{
    "id": "nx.openvino_object_detection",
    "name": "OpenVINO Object Detection",
    "description": ")json"
        "This is an example open-source plugin demonstrating people detection and tracking. It "
        "may or may not fulfil the requirements of the particular video management system in "
        "terms of reliability and detection quality - use at your own risk. It is based on "
        "OpenVINO technology by Intel, and runs on Intel processors only."
    R"json(",
    "version": "1.0.0"
})json";

} // namespace
// Plugin entry point, called by the Server to instantiate the plugin; wires the
// Engine factory into it.
extern "C" NX_PLUGIN_API nx::sdk::IPlugin* createNxPlugin()
{
    return new nx::sdk::analytics::Plugin(
        kPluginManifest,
        [](nx::sdk::analytics::Plugin* plugin)
        {
            return new nx::vms_server_plugins::analytics::openvino_object_detection::Engine(plugin);
        });
}
| 10,055
|
C++
|
.cpp
| 247
| 30
| 100
| 0.539681
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,398
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/plugin/device_agent.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <string>
#include <system_error>
#include <optional>
#include <boost/predef.h>
#include <filesystem>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#include <nx/kit/debug.h>
#include <nx/kit/json.h>
#include <nx/kit/utils.h>
#include <nx/sdk/helpers/error.h>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_track_best_shot_packet.h>
#include <nx/sdk/analytics/i_motion_metadata_packet.h>
#include "engine.h"
#include "lib/config.h"
#include "lib/exceptions.h"
#include "lib/geometry.h"
#include "lib/openvino_object_detection_analytics_plugin_ini.h"
#include "lib/best_shot.h"
#include "lib/settings.h"
#if BOOST_OS_WINDOWS
#define WIN32_LEAN_AND_MEAN
#if _WIN32_WINNT < 0x0502
#define _WIN32_WINNT 0x0502
#endif
#include <windows.h>
#endif
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
using namespace std::chrono;
using namespace std::string_literals;
namespace {

#if BOOST_OS_WINDOWS

// RAII guard that temporarily overrides the Windows DLL search directory and restores
// the previous value on destruction.
class DllDirectorySetter
{
public:
    explicit DllDirectorySetter(std::filesystem::path const& newPath)
    {
        // Query the current DLL directory, growing the buffer until it fits.
        std::wstring oldPath(MAX_PATH, L'\0');
        while (true)
        {
            std::size_t size = GetDllDirectoryW(oldPath.size(), oldPath.data());
            if (size == 0) {
                if (DWORD errCode = GetLastError())
                    throw std::system_error(errCode, std::system_category(),
                        "GetDllDirectoryW failed");
                // Zero with no error code: no DLL directory is currently set.
                // m_oldPath stays empty and the destructor restores the default
                // search order by passing nullptr.
                break;
            }
            if (size <= oldPath.size())
            {
                // Success: keep exactly the returned number of characters.
                // NOTE(review): on success GetDllDirectoryW is assumed to return the
                // length without the terminating null — confirm against the Win32 docs.
                oldPath.resize(size);
                m_oldPath = oldPath;
                break;
            }
            // Buffer too small: the returned size includes the terminating null.
            oldPath.resize(size + 1);
        }
        if (!SetDllDirectoryW(newPath.c_str()))
            throw std::system_error(GetLastError(), std::system_category(),
                "SetDllDirectoryW("s + newPath.string() + ") failed"s);
    }

    // noexcept(false): failing to restore the directory is deliberately reported by
    // throwing. NOTE(review): throwing from a destructor terminates the program if it
    // happens during stack unwinding.
    ~DllDirectorySetter() noexcept(false)
    {
        if (!SetDllDirectoryW(m_oldPath ? m_oldPath->c_str() : nullptr))
            throw std::system_error(GetLastError(), std::system_category(),
                "SetDllDirectoryW("s + (m_oldPath ? m_oldPath->string() : "nullptr") + ") failed"s);
    }

private:
    std::optional<std::filesystem::path> m_oldPath;
};

#else

// Non-Windows platforms: DLL search directories do not apply; the guard is a no-op.
class DllDirectorySetter
{
public:
    explicit DllDirectorySetter(std::filesystem::path const& /*newPath*/)
    {
    }
};

#endif // BOOST_OS_WINDOWS

} // namespace
// Wraps a frame's detections into a single object metadata packet; returns null when
// there is nothing to report.
Ptr<ObjectMetadataPacket> convertDetectionsToObjectMetadataPacket(
    const DetectionList& detections,
    int64_t timestampUs)
{
    if (detections.empty())
        return nullptr;

    const auto packet = makePtr<ObjectMetadataPacket>();
    packet->setTimestampUs(timestampUs);
    for (const auto& detection: detections)
    {
        const auto metadata = makePtr<ObjectMetadata>();
        metadata->setTypeId(kPersonObjectType);
        metadata->setTrackId(detection->trackId);
        metadata->setConfidence(detection->confidence);
        metadata->setBoundingBox(convertBoostRectToNxRect(detection->boundingBox));
        packet->addItem(metadata.get());
    }
    return packet;
}
// Produces one best-shot packet per track best shot.
MetadataPacketList convertBestShotsToMetadataPacketList(const BestShotList& bestShots)
{
    MetadataPacketList packets;
    for (const auto& bestShot: bestShots)
    {
        packets.push_back(makePtr<ObjectTrackBestShotPacket>(
            bestShot->trackId,
            bestShot->timestampUs,
            convertBoostRectToNxRect(bestShot->boundingBox)));
    }
    return packets;
}
// Wraps each event into its own single-item event metadata packet.
MetadataPacketList convertEventsToEventMetadataPackets(const EventList& events)
{
    MetadataPacketList packets;
    for (const auto& event: events)
    {
        const auto metadata = makePtr<EventMetadata>();
        metadata->setTypeId(event->type());
        metadata->setCaption(event->caption());
        metadata->setDescription(event->description());
        metadata->setIsActive(event->isActive);

        const auto packet = makePtr<EventMetadataPacket>();
        packet->setTimestampUs(event->timestampUs);
        packet->addItem(metadata.get());
        packets.push_back(packet);
    }
    return packets;
}
/**
 * Flattens one processing result into the packet list sent to the Server: object
 * metadata, best shots, plugin events and ROI events, in that order.
 */
MetadataPacketList convertObjectDetectionResultToMetadataPacketList(
    const ObjectDetectionProcessor::Result& objectDetectionResult,
    int64_t timestampUs)
{
    MetadataPacketList result;

    // Appends packets by actually moving them. The original held each sub-list in a
    // const local and used std::make_move_iterator on it: moving from const elements
    // silently degrades to copying, so every Ptr was copied (extra refcount churn).
    const auto appendPackets =
        [&result](MetadataPacketList packets)
        {
            result.insert(
                result.end(),
                std::make_move_iterator(packets.begin()),
                std::make_move_iterator(packets.end()));
        };

    const Ptr<IMetadataPacket> objectMetadataPacket = convertDetectionsToObjectMetadataPacket(
        objectDetectionResult.detections,
        timestampUs);
    if (objectMetadataPacket)
        result.push_back(objectMetadataPacket);

    appendPackets(convertBestShotsToMetadataPacketList(objectDetectionResult.bestShots));
    appendPackets(convertEventsToEventMetadataPackets(objectDetectionResult.events));
    appendPackets(convertEventsToEventMetadataPackets(objectDetectionResult.roiEvents));

    return result;
}
//-------------------------------------------------------------------------------------------------
// public
DeviceAgent::DeviceAgent(Engine* engine, const nx::sdk::IDeviceInfo* deviceInfo) noexcept
    :
    // The SDK helper class owns the device info; output verbosity follows the
    // compile-time NX_DEBUG_ENABLE_OUTPUT setting.
    ConsumingDeviceAgent(deviceInfo, NX_DEBUG_ENABLE_OUTPUT),
    m_engine(engine) //< Non-owning back-pointer; used to reach pluginHomeDir().
{
}
std::string DeviceAgent::manifestString() const noexcept
{
    // DeviceAgent manifest: declares, in the type library, every event type this plugin
    // can emit, plus the supported object/event types. Ids and names are spliced from
    // the event classes' kType/kName constants so each id is defined in one place.
    // The leading `1 +` skips the first newline of the raw string literal.
    return /*suppress newline*/ 1 + (const char*)
R"json(
{
    "typeLibrary": {
        "eventTypes": [
            {
                "id": ")json" + PersonDetected::kType + R"json(",
                "name": ")json" + PersonDetected::kName + R"json("
            },
            {
                "id": ")json" + PersonLost::kType + R"json(",
                "name": ")json" + PersonLost::kName + R"json("
            },
            {
                "id": ")json" + PeopleDetected::kType + R"json(",
                "name": ")json" + PeopleDetected::kName + R"json(",
                "flags": "stateDependent"
            },
            {
                "id": ")json" + LineCrossed::kType + R"json(",
                "name": ")json" + LineCrossed::kName + R"json("
            },
            {
                "id": ")json" + AreaCrossed::kType + R"json(",
                "name": ")json" + AreaCrossed::kName + R"json("
            },
            {
                "id": ")json" + AreaEntranceDetected::kType + R"json(",
                "name": ")json" + AreaEntranceDetected::kName + R"json("
            },
            {
                "id": ")json" + AreaExitDetected::kType + R"json(",
                "name": ")json" + AreaExitDetected::kName + R"json("
            },
            {
                "id": ")json" + AppearanceInAreaDetected::kType + R"json(",
                "name": ")json" + AppearanceInAreaDetected::kName + R"json("
            },
            {
                "id": ")json" + DisappearanceInAreaDetected::kType + R"json(",
                "name": ")json" + DisappearanceInAreaDetected::kName + R"json("
            },
            {
                "id": ")json" + Loitering::kType + R"json(",
                "name": ")json" + Loitering::kName + R"json(",
                "flags": "stateDependent"
            }
        ]
    },
    "supportedTypes": [
        {
            "objectTypeId": ")json" + kPersonObjectType + R"json(",
            "attributes": []
        },
        {
            "eventTypeId": ")json" + PersonDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + PersonLost::kType + R"json("
        },
        {
            "eventTypeId": ")json" + PeopleDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + LineCrossed::kType + R"json("
        },
        {
            "eventTypeId": ")json" + AreaCrossed::kType + R"json("
        },
        {
            "eventTypeId": ")json" + AreaEntranceDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + AreaExitDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + AppearanceInAreaDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + DisappearanceInAreaDetected::kType + R"json("
        },
        {
            "eventTypeId": ")json" + Loitering::kType + R"json("
        }
    ]
})json";
}
Result<const ISettingsResponse*> DeviceAgent::settingsReceived()
{
    // Called by the Server whenever settings change. Parsing may (re)create the
    // detection processor, which loads models from the plugin home dir.
    try {
        // RAII guard; presumably adjusts the DLL search path to the plugin home dir
        // while model libraries are loaded - confirm against DllDirectorySetter.
        DllDirectorySetter dllDirecorySetter(m_engine->pluginHomeDir());
        parseSettings();
    } catch (std::exception const &e) {
        return error(ErrorCode::internalError, e.what());
    }
    return nullptr; //< No settings response payload is needed on success.
}
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame) noexcept
{
    // Per-frame entry point. Returning true means "no errors while processing";
    // true is returned even in the broken state so the problem is reported via a
    // plugin diagnostic event rather than a stream failure.
    if (!m_config)
        return true; //< Settings not parsed yet (or parsing failed): nothing to do.

    // Latch the processor's terminated state; once true it stays true.
    m_terminated = m_terminated || m_objectDetectionProcessor->isTerminated();
    if (m_terminated)
    {
        if (!m_terminatedPrevious)
        {
            // Report the broken state only once to avoid spamming diagnostics.
            pushPluginDiagnosticEvent(
                IPluginDiagnosticEvent::Level::error,
                "Plugin is in broken state.",
                "Disable the plugin.");
            m_terminatedPrevious = true;
        }
        return true;
    }

    // Time the per-frame processing for debug output.
    using namespace std::chrono;
    const auto startTime = high_resolution_clock::now();
    processFrame(videoFrame);
    const auto finishTime = high_resolution_clock::now();
    const auto duration = duration_cast<milliseconds>(finishTime - startTime);
    NX_OUTPUT << "Frame processing duration: " << duration.count() << " ms.";
    return true;
}
//-------------------------------------------------------------------------------------------------
// private
void DeviceAgent::pushMetadataPackets(const MetadataPacketList& metadataPackets)
{
for (const Ptr<IMetadataPacket>& metadataPacket: metadataPackets)
{
metadataPacket->addRef();
pushMetadataPacket(metadataPacket.get());
}
}
bool DeviceAgent::needToDetectObjects(const IUncompressedVideoFrame *nxFrame) const noexcept
{
    // Decides whether the (expensive) object detector should run on this frame.
    // Only every objectDetectionPeriod'th frame is a candidate at all.
    if (m_frameIndex % m_config->objectDetectionPeriod > 0)
        return false;

    // While exponential backoff is active (no recent detections), force a detection
    // on the stretched backoff period even without motion metadata.
    if (m_isExponentialBackoffActive &&
        m_frameIndex % (int) m_exponentialBackoffObjectDetectionPeriod == 0)
        return true;

    // Otherwise detect only when the frame carries non-empty motion metadata.
    const Ptr<IList<IMetadataPacket>> metadataPacketList = nxFrame->metadataList();
    if (!metadataPacketList)
        return false;
    const int metadataPacketCount = metadataPacketList->count();
    if (metadataPacketCount == 0)
        return false;
    for (int i = 0; i < metadataPacketCount; ++i)
    {
        const auto metadataPacket = metadataPacketList->at(i);
        const auto motionPacket = metadataPacket->queryInterface<IMotionMetadataPacket>();
        if (motionPacket)
            return !motionPacket->isEmpty(); //< The first motion packet decides.
    }
    return false;
}
void DeviceAgent::updateConfigOnFpsChange(const Frame& frame)
{
    // Tracks the measured stream FPS and rebuilds the detection period when it changes
    // substantially, so the effective detection rate stays near the configured one.
    m_mediaStreamStatistics->onData(microseconds(frame.timestampUs), 0, true);
    const float fps = m_mediaStreamStatistics->getFrameRate();

    // React only to a >10% relative FPS change, and not more often than every
    // 5 seconds (threshold is in microseconds).
    static const float kFpsChangeFpsDifferenceThreshold = 0.10F;
    static const int64_t kFpsChangeTimestampUsDifferenceThreshold = 5000000;
    if (fps > 0 &&
        fabs(fps - m_fps) / m_fps > kFpsChangeFpsDifferenceThreshold &&
        frame.timestampUs - m_fpsChangeTimestampUs > kFpsChangeTimestampUsDifferenceThreshold)
    {
        m_fpsChangeTimestampUs = frame.timestampUs;
        NX_OUTPUT << "FPS: " << fps;
        m_fps = fps;
        m_objectDetectionProcessor->setFps(fps);

        // The active Config is shared as const; publish a modified copy instead of
        // mutating it in place.
        Config newConfig(*m_config);
        newConfig.objectDetectionPeriod = calcObjectDetectionPeriod();
        m_config = std::make_shared<const Config>(newConfig);
        m_exponentialBackoffObjectDetectionPeriod = (float) m_config->objectDetectionPeriod;
    }
}
void DeviceAgent::processFrame(const IUncompressedVideoFrame* nxFrame) noexcept
{
    // Full per-frame pipeline: FPS tracking, object detection (with exponential
    // backoff while nothing is detected), and metadata publishing. Fatal errors set
    // m_terminated; the caller reports that state via a diagnostic event.
    ++m_frameIndex;
    Frame frame(nxFrame, m_frameIndex);
    if (m_frameIndex == 0) //< Very first frame (m_frameIndex presumably starts at -1).
        m_fpsChangeTimestampUs = frame.timestampUs;
    if (m_config)
        updateConfigOnFpsChange(frame);

    ObjectDetectionProcessor::Result objectDetectionResult;
    try
    {
        objectDetectionResult = m_objectDetectionProcessor->run(
            frame, needToDetectObjects(nxFrame));
    }
    catch (const FrameProcessingError& e)
    {
        // A frame-processing failure is fatal for this DeviceAgent.
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Frame processing error.",
            e.what());
        m_terminated = true;
        return;
    }

    // Exponential backoff: while no objects are detected, stretch the interval
    // between forced detections by 10% per period, up to the maximum; the first
    // detection resets it to the configured period and deactivates backoff.
    if (m_isExponentialBackoffActive)
    {
        if (objectDetectionResult.detections.empty())
        {
            if (m_frameIndex % (int) m_exponentialBackoffObjectDetectionPeriod == 0 &&
                m_exponentialBackoffObjectDetectionPeriod <
                    m_maxExponentialBackoffObjectDetectionPeriod)
            {
                m_exponentialBackoffObjectDetectionPeriod *= 1.1;
                NX_OUTPUT << "Exponential backoff object detection period: " <<
                    m_exponentialBackoffObjectDetectionPeriod << ".";
            }
        }
        else
        {
            m_exponentialBackoffObjectDetectionPeriod = (float) m_config->objectDetectionPeriod;
            m_isExponentialBackoffActive = false;
        }
    }
    else
    {
        m_isExponentialBackoffActive = objectDetectionResult.detections.empty();
    }

    // Publish detections, best shots and events to the Server.
    try
    {
        const MetadataPacketList metadataPacketList =
            convertObjectDetectionResultToMetadataPacketList(
                objectDetectionResult,
                frame.timestampUs);
        pushMetadataPackets(metadataPacketList);
    }
    catch (const std::exception& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Metadata sending error.",
            e.what());
        m_terminated = true;
    }
}
RoiLineList DeviceAgent::parseLinesSettings()
{
    using namespace settings::roi_line;

    // Collect every configured ROI line from the settings. Setting names are 1-based,
    // so iterate 1..kMaxCount inclusive. (Was `i < RoiLine::kMaxCount`, which skipped
    // the last line, inconsistently with parseAreasSettings() which uses `<=`.)
    RoiLineList result;
    for (int i = 1; i <= RoiLine::kMaxCount; ++i)
    {
        const std::string jsonString = settingValue(polyline::name(i));
        const auto roiLine = convertJsonStringToRoiLinePtr(jsonString);
        if (roiLine) //< Null when the setting is absent or not a valid polyline.
        {
            roiLine->index = i;
            result.push_back(roiLine);
        }
    }
    return result;
}
RoiAreaList DeviceAgent::parseAreasSettings()
{
    using namespace settings::roi_area;

    // Reads up to RoiArea::kMaxCount polygon areas (1-based setting names) together
    // with their per-area detection toggles and parameters.
    RoiAreaList result;
    for (int i = 1; i <= RoiArea::kMaxCount; ++i)
    {
        const std::string jsonString = settingValue(polygon::name(i));
        const auto roiArea = convertJsonStringToRoiAreaPtr(jsonString);
        if (roiArea) //< Null when the setting is absent or not a valid polygon.
        {
            roiArea->index = i;

            // The enabled detections arrive as one combined setting value; a flag is
            // on iff its setting name occurs as a substring of that value.
            const std::string detectionsEnabled = settingValue(
                settingPrefix(i) + "DetectionsEnabled");
            roiArea->entranceDetectionEnabled =
                detectionsEnabled.find(entrance_detection_enabled::name(i)) != std::string::npos;
            roiArea->exitDetectionEnabled =
                detectionsEnabled.find(exit_detection_enabled::name(i)) != std::string::npos;
            roiArea->appearanceDetectionEnabled =
                detectionsEnabled.find(appearance_detection_enabled::name(i)) != std::string::npos;
            roiArea->disappearanceDetectionEnabled =
                detectionsEnabled.find(disappearance_detection_enabled::name(i)) !=
                    std::string::npos;
            roiArea->loiteringDetectionEnabled =
                detectionsEnabled.find(loitering_detection_enabled::name(i)) != std::string::npos;
            roiArea->crossingEnabled =
                detectionsEnabled.find(crossing_enabled::name(i)) != std::string::npos;

            // The UI exposes sensitivity as a percentage; stored as [0..1].
            roiArea->detectionSensitivity = std::stof(settingValue(
                detection_sensitivity::name(i))) / 100.0F;
            roiArea->loiteringDetectionDuration = seconds(std::stoi(settingValue(
                loitering_detection_duration::name(i))));
            result.push_back(roiArea);
        }
    }
    return result;
}
int DeviceAgent::calcObjectDetectionPeriod() const
{
    // Run detection once every `period` frames so the effective detection rate
    // approximates m_detectionFrequencyFps; never less often than every frame.
    const float framesPerDetection = m_fps / (float) m_detectionFrequencyFps;
    const int period = std::max((int) framesPerDetection, 1);
    NX_OUTPUT << "Detection period: " << period;
    return period;
}
void DeviceAgent::parseSettings() noexcept
{
    // (Re)builds the immutable Config from current setting values and creates or
    // reconfigures the detection processor. Any failure terminates the agent and
    // clears the config (so pushUncompressedVideoFrame() becomes a no-op).
    if (m_terminated)
        return;
    try
    {
        // UI exposes confidence as a percentage; stored as [0..1].
        const float minDetectionConfidence =
            std::stof(settingValue("minDetectionConfidence")) / 100.0F;
        const int threadCount = std::stoi(settingValue("threadCount"));
        m_detectionFrequencyFps = std::stoi(settingValue("detectionFrequencyFps"));
        const seconds minIntervalBetweenTracks =
            seconds(std::stoi(settingValue("minIntervalBetweenTracks")));
        const auto lines = parseLinesSettings();
        const auto areas = parseAreasSettings();
        m_config = std::make_shared<const Config>(Config({
            minDetectionConfidence,
            threadCount,
            calcObjectDetectionPeriod(),
            Config::Default::kMinReIdCosineSimilarity,
            minIntervalBetweenTracks,
            m_fps,
            lines,
            areas,
        }));
        m_exponentialBackoffObjectDetectionPeriod = (float) m_config->objectDetectionPeriod;
        if (!m_objectDetectionProcessor)
        {
            // First settings delivery: create the processor (loads the models).
            m_objectDetectionProcessor = std::make_unique<ObjectDetectionProcessor>(
                m_engine->pluginHomeDir(), logUtils, m_config);
        }
        else
        {
            m_objectDetectionProcessor->setConfig(m_config);
        }
    }
    catch (const CpuIsIncompatibleError& e)
    {
        // NOTE(review): this handler is identical to the std::exception one below; if
        // CpuIsIncompatibleError derives from std::exception it is redundant - confirm.
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Error loading plugin.",
            "Error loading plugin: "s + e.what());
        m_terminated = true;
        m_config = nullptr;
    }
    catch (const std::exception& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Error loading plugin.",
            "Error loading plugin: "s + e.what());
        m_terminated = true;
        m_config = nullptr;
    }
}
void DeviceAgent::doSetNeededMetadataTypes(
    Result<void>* /*outResult*/, const IMetadataTypes* /*neededMetadataTypes*/)
{
    // Intentionally empty: metadata generation in this agent does not depend on which
    // types the Server declares as needed.
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 19,755
|
C++
|
.cpp
| 530
| 28.90566
| 104
| 0.626548
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,399
|
app.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/app/app.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "app.h"
#include <chrono>
#include <iomanip>
#include <sstream>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#define NX_PRINT_PREFIX (this->logUtils.printPrefix)
#define NX_DEBUG_ENABLE_OUTPUT (this->logUtils.enableOutput)
#include <nx/kit/debug.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/helpers/uuid_helper.h>
#include "lib/frame.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::chrono;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
App::App(std::filesystem::path inputFile, std::filesystem::path modelDir):
    // Verbose logging with no prefix, since this is a standalone console app.
    logUtils(/*enableOutput*/ true, /*printPrefix*/ ""),
    m_inputFile(std::move(inputFile)),
    m_modelDir(std::move(modelDir))
{
    using namespace std::string_literals;
    m_personDetector = std::make_unique<ObjectDetectionProcessor>(m_modelDir, logUtils);
    if (!m_videoInput.open(m_inputFile.string()))
        throw std::runtime_error("Failed to open '"s + m_inputFile.string() + "'."s);
    // Cache frame dimensions; used to convert between relative and pixel coordinates.
    m_width = (int) m_videoInput.get(cv::CAP_PROP_FRAME_WIDTH);
    m_height = (int) m_videoInput.get(cv::CAP_PROP_FRAME_HEIGHT);
}
void App::run()
{
    // Main loop: read frames, rebuild the config when the user finishes drawing a
    // line, run detection (on every frame, since detectionPeriod == 1) and render
    // the annotated frame. Exits when the video runs out of frames.
    cv::Mat cvFrame = readFrame();
    cv::imshow(m_windowName, cvFrame);
    cv::setMouseCallback(m_windowName, onMouse, m_drawingData.get());
    int64_t frameIndex = 0;
    static const int detectionPeriod = 1;
    for (;;)
    {
        if (cvFrame.empty())
            break; //< End of the video stream.
        if (m_drawingData->lineIsUpdated)
            updateConfig(); //< The user just finished drawing a crossing line.
        // Wall-clock "now", in microseconds, serves as the frame timestamp.
        const int64_t timestampUs = duration_cast<microseconds>(
            system_clock::now().time_since_epoch()).count();
        const Frame frame(cvFrame, timestampUs, frameIndex);
        const bool needToDetectObjects = frameIndex % detectionPeriod == 0;
        const auto result = m_personDetector->run(frame, needToDetectObjects);
        render(cvFrame, timestampUs, result);
        cvFrame = readFrame();
        ++frameIndex;
    }
}
//-------------------------------------------------------------------------------------------------
// private
void App::onMouse(int event, int x, int y, int /*flags*/, void* userData) noexcept
{
    // Mouse handler for drawing a single crossing line: press starts the line,
    // dragging moves its end point, release commits it.
    auto* const data = static_cast<DrawingData*>(userData);
    if (event == cv::EVENT_LBUTTONDOWN)
    {
        data->clicked = true;
        data->lineIsDrawn = true;
        data->p0.x = x;
        data->p0.y = y;
        data->p1.x = x;
        data->p1.y = y;
    }
    else if (event == cv::EVENT_MOUSEMOVE && data->clicked)
    {
        data->p1.x = x;
        data->p1.y = y;
    }
    else if (event == cv::EVENT_LBUTTONUP)
    {
        data->clicked = false;
        data->lineIsUpdated = true;
    }
}
void App::drawLine(cv::Mat frame) noexcept
{
    // Overlay the user-drawn crossing line (if any) in green.
    if (!m_drawingData->lineIsDrawn)
        return;
    static const cv::Scalar kGreenColor = cv::Scalar(0, 255, 0);
    cv::line(frame, m_drawingData->p0, m_drawingData->p1, kGreenColor);
}
cv::Mat App::readFrame() noexcept
{
    // Exception-safe wrapper around readFrameImpl(): any failure is logged and an
    // empty Mat is returned, which the caller treats as end-of-stream.
    try
    {
        return readFrameImpl();
    }
    catch (const std::exception& e)
    {
        NX_OUTPUT << "Error reading video frame: " << e.what();
        return {};
    }
    catch (...)
    {
        NX_OUTPUT << "Error reading video frame.";
        return {};
    }
}
cv::Mat App::readFrameImpl()
{
    // Pull the next frame from the capture; an empty Mat signals end of stream.
    cv::Mat frame;
    m_videoInput >> frame;
    if (frame.empty())
        NX_OUTPUT << "Video has been finished.";
    return frame;
}
void App::render(
    cv::Mat frame,
    int64_t timestampUs,
    const ObjectDetectionProcessor::Result& personDetectionResult) noexcept
{
    // Exception-safe wrapper around renderImpl(): rendering failures are logged and
    // otherwise ignored so the main loop keeps running.
    try
    {
        renderImpl(frame, timestampUs, personDetectionResult);
    }
    catch (const std::exception& e)
    {
        NX_OUTPUT << "Rendering error: " << e.what();
    }
    catch (...)
    {
        NX_OUTPUT << "Rendering error.";
    }
}
void App::renderImpl(
    cv::Mat frame,
    int64_t timestampUs,
    const ObjectDetectionProcessor::Result& personDetectionResult)
{
    // Draws per-detection bounding boxes with confidence / short track id / line
    // crossing counters, plus a per-frame timestamp overlay and the user-drawn line.
    using namespace std::chrono_literals;

    static const cv::Scalar kRedColor = cv::Scalar(0, 0, 255);

    for (const std::shared_ptr<Detection>& detection: personDetectionResult.detections)
    {
        const auto boundingBox = convertBoostRectToCvRect(
            /*rect*/ detection->boundingBox,
            /*width*/ m_width,
            /*height*/ m_height);
        cv::rectangle(frame, boundingBox, kRedColor); //< Draw bounding box.

        // Short human-readable id derived from the track uuid.
        const nx::sdk::Uuid& trackId = detection->trackId;
        std::hash<nx::sdk::Uuid> idHashFunction;
        size_t idHash = idHashFunction(trackId) % 1000;

        std::stringstream ss;
        ss << std::fixed << std::setprecision(2) << detection->confidence;
        std::string confidence = ss.str();

        // Accumulate per-direction line crossing counters for this track.
        const RoiProcessor::Result& lineCrossingEvents = personDetectionResult.roiEvents;
        for (const auto& event: lineCrossingEvents)
        {
            const auto lineCrossingEvent = std::dynamic_pointer_cast<LineCrossed>(event);
            if (!lineCrossingEvent)
                continue; //< roiEvents may contain events of other types; was a null deref.
            if (lineCrossingEvent->trackId == trackId)
            {
                if (m_lineCrossingCount.find(trackId) == m_lineCrossingCount.end())
                    m_lineCrossingCount[trackId] = {
                        {Direction::absent, 0},
                        {Direction::left, 0},
                        {Direction::right, 0},
                    };
                ++m_lineCrossingCount[trackId][lineCrossingEvent->direction];
            }
        }
        int lineCrossingCountA = m_lineCrossingCount[trackId][Direction::left];
        int lineCrossingCountB = m_lineCrossingCount[trackId][Direction::right];
        cv::putText(
            frame,
            confidence + " " + std::to_string(idHash) + " " +
                std::to_string(lineCrossingCountA) + "," + std::to_string(lineCrossingCountB),
            cv::Point(boundingBox.x, boundingBox.y /*vertical text displacement*/ - 8),
            cv::FONT_HERSHEY_COMPLEX_SMALL,
            /*font scale*/ 1,
            kRedColor);
    }

    // Timestamp overlay: drawn once per frame. (Previously it was drawn inside the
    // detection loop - once per detection, and never when there were no detections.)
    cv::putText(
        frame,
        std::to_string(timestampUs),
        cv::Point(0, 16),
        cv::FONT_HERSHEY_COMPLEX_SMALL,
        /*font scale*/ 1,
        kRedColor);

    drawLine(frame);
    cv::imshow(m_windowName, frame);
    constexpr std::chrono::milliseconds kDelay = 25ms;
    cv::waitKey(kDelay.count());
}
void App::updateConfig()
{
    // Rebuilds the processor config from the line the user just finished drawing,
    // replacing any previously configured lines.
    m_drawingData->lineIsUpdated = false;
    m_config->lines.clear();
    // Convert the line's pixel endpoints into relative [0..1] coordinates.
    const auto roiLine = std::make_shared<RoiLine>(RoiLine{
        {{{(float) m_drawingData->p0.x / m_width, (float) m_drawingData->p0.y / m_height},
        {(float) m_drawingData->p1.x / m_width, (float) m_drawingData->p1.y / m_height},}},
        Direction::absent, //< No direction filter: count crossings both ways.
    });
    m_config->lines.push_back(roiLine);
    m_personDetector->setConfig(m_config);
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 7,199
|
C++
|
.cpp
| 200
| 28.605
| 99
| 0.609844
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,400
|
main.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/app/main.cpp
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include <iostream>
#include <exception>
#include <filesystem>
#include "app.h"
int main(int argc, char** argv)
{
    // Expect exactly two arguments: the input video and the model directory.
    if (argc != 3)
    {
        std::cout << "Usage:\n"
            << argv[0] << " <input_video_file> <model_dir>\n";
        return 1;
    }
    try
    {
        nx::vms_server_plugins::analytics::openvino_object_detection::App app(
            std::filesystem::path(argv[1]), std::filesystem::path(argv[2]));
        app.run();
        return 0;
    }
    catch (const std::exception& e)
    {
        std::cerr << "Failure: " << e.what();
    }
    catch (...)
    {
        std::cerr << "Failure: unknown exception.";
    }
    return 1;
}
| 835
|
C++
|
.cpp
| 32
| 20.46875
| 99
| 0.570713
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,401
|
engine.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step1/src/nx/vms_server_plugins/analytics/sample/engine.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "engine.h"
#include "device_agent.h"
namespace nx {
namespace vms_server_plugins {
namespace analytics {
namespace sample {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Engine::Engine():
    // Call the Engine helper class constructor telling it to verbosely report to stderr.
    // (The previous comment said "DeviceAgent helper" - a copy-paste slip.)
    nx::sdk::analytics::Engine(/*enableOutput*/ true)
{
}
Engine::~Engine()
{
    // Nothing to release explicitly; the base class handles its own cleanup.
}
/**
 * Called when the Server opens a video-connection to the camera if the plugin is enabled for this
 * camera.
 *
 * @param outResult The pointer to the structure which needs to be filled with the resulting value
 *     or the error information.
 * @param deviceInfo Contains various information about the related device such as its id, vendor,
 *     model, etc.
 */
void Engine::doObtainDeviceAgent(Result<IDeviceAgent*>* outResult, const IDeviceInfo* deviceInfo)
{
    // Ownership passes to the caller; the object is freed via the SDK's reference
    // counting (released when the Server calls releaseRef()).
    *outResult = new DeviceAgent(deviceInfo);
}
/**
 * @return JSON with the particular structure. Note that it is possible to fill in the values
 *     that are not known at compile time, but should not depend on the Engine settings.
 */
std::string Engine::manifestString() const
{
    // Ask the Server to supply uncompressed video frames in YUV420 format (see
    // https://en.wikipedia.org/wiki/YUV).
    //
    // Note that this format is used internally by the Server, therefore requires minimum
    // resources for decoding, thus it is the recommended format.
    // The leading `1 +` skips the first newline of the raw string literal.
    return /*suppress newline*/ 1 + R"json(
{
    "capabilities": "needUncompressedVideoFrames_yuv420"
}
)json";
}
} // namespace sample
} // namespace analytics
} // namespace vms_server_plugins
} // namespace nx
| 1,743
|
C++
|
.cpp
| 51
| 32
| 98
| 0.748663
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,402
|
plugin.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step1/src/nx/vms_server_plugins/analytics/sample/plugin.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "plugin.h"
#include "engine.h"
namespace nx {
namespace vms_server_plugins {
namespace analytics {
namespace sample {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Result<IEngine*> Plugin::doObtainEngine()
{
    // Called by the Server to create this Plugin's Engine; the object is freed via
    // the SDK's reference counting (see createNxPlugin() below).
    return new Engine();
}
/**
 * JSON with the particular structure. Note that it is possible to fill in the values that are not
 * known at compile time.
 *
 * - id: Unique identifier for a plugin with format "{vendor_id}.{plugin_id}", where
 *     {vendor_id} is the unique identifier of the plugin creator (person or company name) and
 *     {plugin_id} is the unique (for a specific vendor) identifier of the plugin.
 * - name: A human-readable short name of the plugin (displayed in the "Camera Settings" window
 *     of the Client).
 * - description: Description of the plugin in a few sentences.
 * - version: Version of the plugin.
 * - vendor: Plugin creator (person or company) name.
 */
std::string Plugin::manifestString() const
{
    // The description is assembled from adjacent string literals spliced into the
    // raw JSON string. The leading `1 +` skips the raw string's first newline.
    return /*suppress newline*/ 1 + R"json(
{
    "id": "sample.opencv_object_detection",
    "name": "OpenCV object detection",
    "description": ")json"
        "This plugin is for object detection and tracking. It's based on OpenCV."
    R"json(",
    "version": "1.0.0",
    "vendor": "Sample Inc."
}
)json";
}
/**
 * Called by the Server to instantiate the Plugin object.
 *
 * The Server requires the function to have C linkage, which leads to no C++ name mangling in the
 * export table of the plugin dynamic library, so that makes it possible to write plugins in any
 * language and compiler.
 *
 * NX_PLUGIN_API is the macro defined by CMake scripts for exporting the function.
 */
extern "C" NX_PLUGIN_API nx::sdk::IPlugin* createNxPlugin()
{
    // The object will be freed when the Server calls releaseRef().
    return new Plugin();
}
} // namespace sample
} // namespace analytics
} // namespace vms_server_plugins
} // namespace nx
| 2,032
|
C++
|
.cpp
| 58
| 32.655172
| 98
| 0.719227
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,403
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step1/src/nx/vms_server_plugins/analytics/sample/device_agent.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/event_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
namespace nx {
namespace vms_server_plugins {
namespace analytics {
namespace sample {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * @param deviceInfo Various information about the related device, such as its id, vendor, model,
 *     etc.
 */
DeviceAgent::DeviceAgent(const nx::sdk::IDeviceInfo* deviceInfo):
    // Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
    ConsumingDeviceAgent(deviceInfo, /*enableOutput*/ true)
{
}
DeviceAgent::~DeviceAgent()
{
    // Nothing to release explicitly; the base class handles its own cleanup.
}
/**
 * @return JSON with the particular structure. Note that it is possible to fill in the values
 *     that are not known at compile time, but should not depend on the DeviceAgent settings.
 */
std::string DeviceAgent::manifestString() const
{
    // Tell the Server that the plugin can generate the events and objects of certain types.
    // Id values are strings and should be unique. Format of ids:
    // `{vendor_id}.{plugin_id}.{event_type_id/object_type_id}`.
    //
    // See the plugin manifest for the explanation of vendor_id and plugin_id.
    // kNewTrackEventType / kHelloWorldObjectType are this class's id constants.
    return /*suppress newline*/ 1 + R"json(
{
    "eventTypes": [
        {
            "id": ")json" + kNewTrackEventType + R"json(",
            "name": "New track started"
        }
    ],
    "objectTypes": [
        {
            "id": ")json" + kHelloWorldObjectType + R"json(",
            "name": "Hello, World!"
        }
    ]
}
)json";
}
/**
 * Called when the Server sends a new uncompressed frame from a camera.
 */
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame)
{
    ++m_frameIndex;
    m_lastVideoFrameTimestampUs = videoFrame->timestampUs();

    // An event packet is only produced on certain frames; push it when present.
    if (auto eventMetadataPacket = generateEventMetadataPacket())
        pushMetadataPacket(eventMetadataPacket.releasePtr());

    return true; //< There were no errors while processing the video frame.
}
/**
 * Serves the similar purpose as pushMetadataPacket(). The differences are:
 * - pushMetadataPacket() is called by the plugin, while pullMetadataPackets() is called by Server.
 * - pushMetadataPacket() expects one metadata packet, while pullMetadataPacket expects the
 *     std::vector of them.
 *
 * There are no strict rules for deciding which method is "better". A rule of thumb is to use
 * pushMetadataPacket() when you generate one metadata packet and do not want to store it in the
 * class field, and use pullMetadataPackets otherwise.
 */
bool DeviceAgent::pullMetadataPackets(std::vector<IMetadataPacket*>* metadataPackets)
{
    // releasePtr() transfers the reference to the Server.
    metadataPackets->push_back(generateObjectMetadataPacket().releasePtr());
    return true; //< There were no errors while filling metadataPackets.
}
void DeviceAgent::doSetNeededMetadataTypes(
    nx::sdk::Result<void>* /*outValue*/,
    const nx::sdk::analytics::IMetadataTypes* /*neededMetadataTypes*/)
{
    // Intentionally empty: this sample always generates its metadata regardless of
    // the types requested by the Server.
}
//-------------------------------------------------------------------------------------------------
// private
// Builds a "new track started" event packet every kTrackFrameCount'th frame;
// returns null on all other frames.
Ptr<IMetadataPacket> DeviceAgent::generateEventMetadataPacket()
{
    // Generate event every kTrackFrameCount'th frame.
    if (m_frameIndex % kTrackFrameCount != 0)
        return nullptr;

    // EventMetadataPacket contains arbitrary number of EventMetadata.
    const auto eventMetadataPacket = makePtr<EventMetadataPacket>();
    // Bind event metadata packet to the last video frame using a timestamp.
    eventMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);

    // EventMetadata contains an information about event.
    const auto eventMetadata = makePtr<EventMetadata>();
    // Set all required fields.
    eventMetadata->setTypeId(kNewTrackEventType);
    eventMetadata->setIsActive(true);
    eventMetadata->setCaption("New sample plugin track started");
    eventMetadata->setDescription("New track #" + std::to_string(m_trackIndex) + " started");
    eventMetadataPacket->addItem(eventMetadata.get());

    // Generate index and track id for the next track.
    ++m_trackIndex;
    m_trackId = nx::sdk::UuidHelper::randomUuid();
    return eventMetadataPacket;
}
// Builds one object packet carrying the demo "Hello, World!" object, whose bounding
// box sweeps diagonally across the frame over kTrackFrameCount frames.
Ptr<IMetadataPacket> DeviceAgent::generateObjectMetadataPacket()
{
    // ObjectMetadataPacket contains arbitrary number of ObjectMetadata.
    const auto objectMetadataPacket = makePtr<ObjectMetadataPacket>();
    // Bind the object metadata to the last video frame using a timestamp.
    objectMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);

    // ObjectMetadata contains information about an object on the frame.
    const auto objectMetadata = makePtr<ObjectMetadata>();
    // Set all required fields.
    objectMetadata->setTypeId(kHelloWorldObjectType);
    objectMetadata->setTrackId(m_trackId);

    // Calculate bounding box coordinates each frame so that it moves from the top left corner
    // to the bottom right corner during kTrackFrameCount frames.
    // Coordinates are relative (0..1); the box is half the frame in each dimension.
    static constexpr float d = 0.5F / kTrackFrameCount;
    static constexpr float width = 0.5F;
    static constexpr float height = 0.5F;
    const int frameIndexInsideTrack = m_frameIndex % kTrackFrameCount;
    const float x = d * frameIndexInsideTrack;
    const float y = d * frameIndexInsideTrack;
    objectMetadata->setBoundingBox(Rect(x, y, width, height));

    objectMetadataPacket->addItem(objectMetadata.get());
    return objectMetadataPacket;
}
} // namespace sample
} // namespace analytics
} // namespace vms_server_plugins
} // namespace nx
| 5,860
|
C++
|
.cpp
| 139
| 38.330935
| 99
| 0.730546
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,404
|
engine.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step3/src/sample_company/vms_server_plugins/opencv_object_detection/engine.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "engine.h"
#include "device_agent.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Engine::Engine(std::filesystem::path pluginHomeDir):
    // Call the Engine helper class constructor telling it to verbosely report to stderr.
    // (The previous comment said "DeviceAgent helper" - a copy-paste slip.)
    nx::sdk::analytics::Engine(/*enableOutput*/ true),
    m_pluginHomeDir(pluginHomeDir) //< Handed to each DeviceAgent (e.g. to find model files).
{
}
Engine::~Engine()
{
    // Nothing to release explicitly; the base class handles its own cleanup.
}
/**
 * Called when the Server opens a video-connection to the camera if the plugin is enabled for this
 * camera.
 *
 * @param outResult The pointer to the structure which needs to be filled with the resulting value
 *     or the error information.
 * @param deviceInfo Contains various information about the related device such as its id, vendor,
 *     model, etc.
 */
void Engine::doObtainDeviceAgent(Result<IDeviceAgent*>* outResult, const IDeviceInfo* deviceInfo)
{
    // The DeviceAgent receives the plugin home dir, presumably to locate the bundled
    // model files - confirm against DeviceAgent's constructor.
    *outResult = new DeviceAgent(deviceInfo, m_pluginHomeDir);
}
/**
 * @return JSON with the particular structure. Note that it is possible to fill in the values
 *     that are not known at compile time, but should not depend on the Engine settings.
 */
std::string Engine::manifestString() const
{
    // Ask the Server to supply uncompressed video frames in BGR format, as it is native format for
    // OpenCV. The leading `1 +` skips the raw string's first newline.
    return /*suppress newline*/ 1 + R"json(
{
    "capabilities": "needUncompressedVideoFrames_bgr"
}
)json";
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 1,668
|
C++
|
.cpp
| 47
| 33.382979
| 99
| 0.758065
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,405
|
object_detector.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step3/src/sample_company/vms_server_plugins/opencv_object_detection/object_detector.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_detector.h"
#include <opencv2/core.hpp>
#include "exceptions.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::dnn;
ObjectDetector::ObjectDetector(std::filesystem::path modelPath):
    m_modelPath(std::move(modelPath))
{
    // The network itself is loaded lazily, in ensureInitialized().
}
/**
 * Load the model if it is not loaded, do nothing otherwise. In case of errors terminate the
 * plugin and throw a specialized exception.
 */
void ObjectDetector::ensureInitialized()
{
    if (isTerminated())
    {
        throw ObjectDetectorIsTerminatedError(
            "Object detector initialization error: object detector is terminated.");
    }
    if (m_netLoaded)
        return; //< Already initialized; calling again is a no-op.
    try
    {
        loadModel();
    }
    catch (const cv::Exception& e)
    {
        // Loading failures are unrecoverable: poison the detector before rethrowing.
        terminate();
        throw ObjectDetectorInitializationError("Loading model: " + cvExceptionToStdString(e));
    }
    catch (const std::exception& e)
    {
        terminate();
        throw ObjectDetectorInitializationError("Loading model: Error: "s + e.what());
    }
}
bool ObjectDetector::isTerminated() const
{
return m_terminated;
}
void ObjectDetector::terminate()
{
m_terminated = true;
}
DetectionList ObjectDetector::run(const Frame& frame)
{
if (isTerminated())
throw ObjectDetectorIsTerminatedError("Detection error: object detector is terminated.");
try
{
return runImpl(frame);
}
catch (const cv::Exception& e)
{
terminate();
throw ObjectDetectionError(cvExceptionToStdString(e));
}
catch (const std::exception& e)
{
terminate();
throw ObjectDetectionError("Error: "s + e.what());
}
}
//-------------------------------------------------------------------------------------------------
// private
void ObjectDetector::loadModel()
{
// Prepare paths of model weights and definition.
static const auto modelBin = m_modelPath /
std::filesystem::path("MobileNetSSD.caffemodel");
static const auto modelTxt = m_modelPath /
std::filesystem::path("MobileNetSSD.prototxt");
// Load the model for future processing using OpenCV.
m_net = std::make_unique<Net>(
readNetFromCaffe(modelTxt.string(), modelBin.string()));
// Save the whether the net is loaded or not to prevent unnecessary load.
m_netLoaded = !m_net->empty();
if (!m_netLoaded)
throw ObjectDetectorInitializationError("Loading model: network is empty.");
}
std::shared_ptr<Detection> convertRawDetectionToDetection(
const Mat& rawDetections,
int detectionIndex,
const nx::sdk::Uuid trackId)
{
enum class OutputIndex
{
classIndex = 1,
confidence = 2,
xBottomLeft = 3,
yBottomLeft = 4,
xTopRight = 5,
yTopRight = 6,
};
static constexpr float confidenceThreshold = 0.5F; //< Chosen arbitrarily.
const int& i = detectionIndex;
const float confidence = rawDetections.at<float>(i, (int) OutputIndex::confidence);
const auto classIndex = (int) (rawDetections.at<float>(i, (int) OutputIndex::classIndex));
const std::string classLabel = kClasses[(size_t) classIndex];
const bool confidentDetection = confidence >= confidenceThreshold;
bool oneOfRequiredClasses = std::find(
kClassesToDetect.begin(), kClassesToDetect.end(), classLabel) != kClassesToDetect.end();
if (confidentDetection && oneOfRequiredClasses)
{
const float xBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::xBottomLeft);
const float yBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::yBottomLeft);
const float xTopRight = rawDetections.at<float>(i, (int) OutputIndex::xTopRight);
const float yTopRight = rawDetections.at<float>(i, (int) OutputIndex::yTopRight);
const float width = xTopRight - xBottomLeft;
const float height = yTopRight - yBottomLeft;
return std::make_shared<Detection>(Detection{
/*boundingBox*/ nx::sdk::analytics::Rect(xBottomLeft, yBottomLeft, width, height),
classLabel,
confidence,
trackId
});
}
return nullptr;
}
DetectionList ObjectDetector::runImpl(const Frame& frame)
{
if (isTerminated())
{
throw ObjectDetectorIsTerminatedError(
"Object detection error: object detector is terminated.");
}
const Mat image = frame.cvMat;
// MobileNet SSD parameters.
static const Size netInputImageSize(300, 300);
static constexpr double scaleFactor = 1.0 / 127.5;
static const Scalar mean(127.5, 127.5, 127.5);
static constexpr int kHeightIndex = 2;
static constexpr int kWidthIndex = 3;
const Mat netInputBlob = blobFromImage(image, scaleFactor, netInputImageSize, mean);
m_net->setInput(netInputBlob);
Mat rawDetections = m_net->forward();
const Mat detections(
/*_rows*/ rawDetections.size[kHeightIndex],
/*_cols*/ rawDetections.size[kWidthIndex],
/*_type*/ CV_32F,
/*_s*/ rawDetections.ptr<float>());
DetectionList result;
for (int i = 0; i < detections.rows; ++i)
{
const std::shared_ptr<Detection> detection = convertRawDetectionToDetection(
/*rawDetections*/ detections,
/*detectionIndex*/ i,
/*trackId*/ m_trackId);
if (detection)
{
result.push_back(detection);
return result;
}
}
return {};
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 5,755
|
C++
|
.cpp
| 165
| 29.193939
| 99
| 0.667266
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,406
|
plugin.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step3/src/sample_company/vms_server_plugins/opencv_object_detection/plugin.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "plugin.h"
#include "engine.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Result<IEngine*> Plugin::doObtainEngine()
{
const auto utilityProvider = this->utilityProvider();
const std::filesystem::path pluginHomeDir = utilityProvider->homeDir();
return new Engine(pluginHomeDir);
}
/**
* JSON with the particular structure. Note that it is possible to fill in the values that are not
* known at compile time.
*
* - id: Unique identifier for a plugin with format "{vendor_id}.{plugin_id}", where
* {vendor_id} is the unique identifier of the plugin creator (person or company name) and
* {plugin_id} is the unique (for a specific vendor) identifier of the plugin.
* - name: A human-readable short name of the plugin (displayed in the "Camera Settings" window
* of the Client).
* - description: Description of the plugin in a few sentences.
* - version: Version of the plugin.
* - vendor: Plugin creator (person or company) name.
*/
std::string Plugin::manifestString() const
{
return /*suppress newline*/ 1 + R"json(
{
"id": "sample.opencv_object_detection",
"name": "OpenCV object detection",
"description": ")json"
"This plugin is for object detection and tracking. It's based on OpenCV."
R"json(",
"version": "1.0.0",
"vendor": "Sample Inc."
}
)json";
}
/**
* Called by the Server to instantiate the Plugin object.
*
* The Server requires the function to have C linkage, which leads to no C++ name mangling in the
* export table of the plugin dynamic library, so that makes it possible to write plugins in any
* language and compiler.
*
* NX_PLUGIN_API is the macro defined by CMake scripts for exporting the function.
*/
extern "C" NX_PLUGIN_API nx::sdk::IPlugin* createNxPlugin()
{
// The object will be freed when the Server calls releaseRef().
return new Plugin();
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 2,190
|
C++
|
.cpp
| 58
| 35.241379
| 98
| 0.72646
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,407
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step3/src/sample_company/vms_server_plugins/opencv_object_detection/device_agent.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <exception>
#include <opencv2/core.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/event_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
#include <nx/sdk/helpers/string.h>
#include "detection.h"
#include "exceptions.h"
#include "frame.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
using namespace std::string_literals;
/**
* @param deviceInfo Various information about the related device, such as its id, vendor, model,
* etc.
*/
DeviceAgent::DeviceAgent(
const nx::sdk::IDeviceInfo* deviceInfo,
std::filesystem::path pluginHomeDir):
// Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
ConsumingDeviceAgent(deviceInfo, /*enableOutput*/ true),
m_objectDetector(std::make_unique<ObjectDetector>(pluginHomeDir))
{
}
DeviceAgent::~DeviceAgent()
{
}
/**
* @return JSON with the particular structure. Note that it is possible to fill in the values
* that are not known at compile time, but should not depend on the DeviceAgent settings.
*/
std::string DeviceAgent::manifestString() const
{
// Tell the Server that the plugin can generate the events and objects of certain types.
// Id values are strings and should be unique. Format of ids:
// `{vendor_id}.{plugin_id}.{event_type_id/object_type_id}`.
//
// See the plugin manifest for the explanation of vendor_id and plugin_id.
return /*suppress newline*/ 1 + R"json(
{
"eventTypes": [
{
"id": ")json" + kNewTrackEventType + R"json(",
"name": "New track started"
}
],
"supportedTypes": [
{
"objectTypeId": ")json" + kPersonObjectType + R"json("
},
{
"objectTypeId": ")json" + kCatObjectType + R"json("
},
{
"objectTypeId": ")json" + kDogObjectType + R"json("
}
]
}
)json";
}
/**
* Called when the Server sends a new uncompressed frame from a camera.
*/
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame)
{
m_terminated = m_terminated || m_objectDetector->isTerminated();
if (m_terminated)
{
if (!m_terminatedPrevious)
{
pushPluginDiagnosticEvent(
IPluginDiagnosticEvent::Level::error,
"Plugin is in broken state.",
"Disable the plugin.");
m_terminatedPrevious = true;
}
return true;
}
m_lastVideoFrameTimestampUs = videoFrame->timestampUs();
// Detecting objects only on every `kDetectionFramePeriod` frame.
if (m_frameIndex % kDetectionFramePeriod == 0)
{
const MetadataPacketList metadataPackets = processFrame(videoFrame);
for (const Ptr<IMetadataPacket>& metadataPacket: metadataPackets)
{
metadataPacket->addRef();
pushMetadataPacket(metadataPacket.get());
}
}
++m_frameIndex;
NX_PRINT << "ZZZZ: ";
return true;
}
void DeviceAgent::doSetNeededMetadataTypes(
nx::sdk::Result<void>* outValue,
const nx::sdk::analytics::IMetadataTypes* /*neededMetadataTypes*/)
{
if (m_terminated)
return;
try
{
m_objectDetector->ensureInitialized();
}
catch (const ObjectDetectorInitializationError& e)
{
*outValue = {ErrorCode::otherError, new String(e.what())};
m_terminated = true;
}
catch (const ObjectDetectorIsTerminatedError& /*e*/)
{
m_terminated = true;
}
};
//-------------------------------------------------------------------------------------------------
// private
Ptr<IMetadataPacket> DeviceAgent::generateEventMetadataPacket()
{
// Generate event every kTrackFrameCount'th frame.
if (m_frameIndex % kTrackFrameCount != 0)
return nullptr;
// EventMetadataPacket contains arbitrary number of EventMetadata.
const auto eventMetadataPacket = makePtr<EventMetadataPacket>();
// Bind event metadata packet to the last video frame using a timestamp.
eventMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);
// Zero duration means that the event is not sustained, but momental.
eventMetadataPacket->setDurationUs(0);
// EventMetadata contains an information about event.
const auto eventMetadata = makePtr<EventMetadata>();
// Set all required fields.
eventMetadata->setTypeId(kNewTrackEventType);
eventMetadata->setIsActive(true);
eventMetadata->setCaption("New sample plugin track started");
eventMetadata->setDescription("New track #" + std::to_string(m_trackIndex) + " started");
eventMetadataPacket->addItem(eventMetadata.get());
// Generate index and track id for the next track.
++m_trackIndex;
m_trackId = nx::sdk::UuidHelper::randomUuid();
return eventMetadataPacket;
}
Ptr<ObjectMetadataPacket> DeviceAgent::detectionsToObjectMetadataPacket(
const DetectionList& detections,
int64_t timestampUs)
{
if (detections.empty())
return nullptr;
const auto objectMetadataPacket = makePtr<ObjectMetadataPacket>();
for (const std::shared_ptr<Detection>& detection: detections)
{
const auto objectMetadata = makePtr<ObjectMetadata>();
objectMetadata->setBoundingBox(detection->boundingBox);
objectMetadata->setConfidence(detection->confidence);
objectMetadata->setTrackId(detection->trackId);
// Convert class label to object metadata type id.
if (detection->classLabel == "person")
objectMetadata->setTypeId(kPersonObjectType);
else if (detection->classLabel == "cat")
objectMetadata->setTypeId(kCatObjectType);
else if (detection->classLabel == "dog")
objectMetadata->setTypeId(kDogObjectType);
// There is no "else", because only the detections with those types are generated.
objectMetadataPacket->addItem(objectMetadata.get());
}
objectMetadataPacket->setTimestampUs(timestampUs);
return objectMetadataPacket;
}
DeviceAgent::MetadataPacketList DeviceAgent::processFrame(
const IUncompressedVideoFrame* videoFrame)
{
const Frame frame(videoFrame, m_frameIndex);
try
{
DetectionList detections = m_objectDetector->run(frame);
const auto& objectMetadataPacket =
detectionsToObjectMetadataPacket(detections, frame.timestampUs);
MetadataPacketList result;
if (objectMetadataPacket)
result.push_back(objectMetadataPacket);
return result;
}
catch (const ObjectDetectionError& e)
{
pushPluginDiagnosticEvent(
IPluginDiagnosticEvent::Level::error,
"Object detection error.",
e.what());
m_terminated = true;
}
return {};
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 7,295
|
C++
|
.cpp
| 200
| 30.86
| 99
| 0.686402
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,408
|
detection.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step3/src/sample_company/vms_server_plugins/opencv_object_detection/detection.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "detection.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
// Class labels for the MobileNet SSD model (VOC dataset).
const std::vector<std::string> kClasses{
"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
"chair", "cow", "dining table", "dog", "horse", "motorbike", "person", "potted plant",
"sheep", "sofa", "train", "tv monitor"
};
const std::vector<std::string> kClassesToDetect{"cat", "dog", "person"};
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 718
|
C++
|
.cpp
| 15
| 45.8
| 94
| 0.705293
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,409
|
engine.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step2/src/sample_company/vms_server_plugins/opencv_object_detection/engine.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "engine.h"
#include "device_agent.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Engine::Engine():
// Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
nx::sdk::analytics::Engine(/*enableOutput*/ true)
{
}
Engine::~Engine()
{
}
/**
* Called when the Server opens a video-connection to the camera if the plugin is enabled for this
* camera.
*
* @param outResult The pointer to the structure which needs to be filled with the resulting value
* or the error information.
* @param deviceInfo Contains various information about the related device such as its id, vendor,
* model, etc.
*/
void Engine::doObtainDeviceAgent(Result<IDeviceAgent*>* outResult, const IDeviceInfo* deviceInfo)
{
*outResult = new DeviceAgent(deviceInfo);
}
/**
* @return JSON with the particular structure. Note that it is possible to fill in the values
* that are not known at compile time, but should not depend on the Engine settings.
*/
std::string Engine::manifestString() const
{
// Ask the Server to supply uncompressed video frames in YUV420 format (see
// https://en.wikipedia.org/wiki/YUV).
//
// Note that this format is used internally by the Server, therefore requires minimum
// resources for decoding, thus it is the recommended format.
return /*suppress newline*/ 1 + R"json(
{
"capabilities": "needUncompressedVideoFrames_yuv420"
}
)json";
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 1,754
|
C++
|
.cpp
| 49
| 33.571429
| 98
| 0.752358
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,410
|
plugin.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step2/src/sample_company/vms_server_plugins/opencv_object_detection/plugin.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "plugin.h"
#include "engine.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
Result<IEngine*> Plugin::doObtainEngine()
{
return new Engine();
}
/**
* JSON with the particular structure. Note that it is possible to fill in the values that are not
* known at compile time.
*
* - id: Unique identifier for a plugin with format "{vendor_id}.{plugin_id}", where
* {vendor_id} is the unique identifier of the plugin creator (person or company name) and
* {plugin_id} is the unique (for a specific vendor) identifier of the plugin.
* - name: A human-readable short name of the plugin (displayed in the "Camera Settings" window
* of the Client).
* - description: Description of the plugin in a few sentences.
* - version: Version of the plugin.
* - vendor: Plugin creator (person or company) name.
*/
std::string Plugin::manifestString() const
{
return /*suppress newline*/ 1 + R"json(
{
"id": "sample.opencv_object_detection",
"name": "OpenCV object detection",
"description": ")json"
"This plugin is for object detection and tracking. It's based on OpenCV."
R"json(",
"version": "1.0.0",
"vendor": "Sample Inc."
}
)json";
}
/**
* Called by the Server to instantiate the Plugin object.
*
* The Server requires the function to have C linkage, which leads to no C++ name mangling in the
* export table of the plugin dynamic library, so that makes it possible to write plugins in any
* language and compiler.
*
* NX_PLUGIN_API is the macro defined by CMake scripts for exporting the function.
*/
extern "C" NX_PLUGIN_API nx::sdk::IPlugin* createNxPlugin()
{
// The object will be freed when the Server calls releaseRef().
return new Plugin();
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 2,043
|
C++
|
.cpp
| 56
| 34.053571
| 98
| 0.722587
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,411
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step2/src/sample_company/vms_server_plugins/opencv_object_detection/device_agent.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/event_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
* @param deviceInfo Various information about the related device, such as its id, vendor, model,
* etc.
*/
DeviceAgent::DeviceAgent(const nx::sdk::IDeviceInfo* deviceInfo):
// Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
ConsumingDeviceAgent(deviceInfo, /*enableOutput*/ true)
{
}
DeviceAgent::~DeviceAgent()
{
}
/**
* @return JSON with the particular structure. Note that it is possible to fill in the values
* that are not known at compile time, but should not depend on the DeviceAgent settings.
*/
std::string DeviceAgent::manifestString() const
{
// Tell the Server that the plugin can generate the events and objects of certain types.
// Id values are strings and should be unique. Format of ids:
// `{vendor_id}.{plugin_id}.{event_type_id/object_type_id}`.
//
// See the plugin manifest for the explanation of vendor_id and plugin_id.
return /*suppress newline*/ 1 + R"json(
{
"eventTypes": [
{
"id": ")json" + kNewTrackEventType + R"json(",
"name": "New track started"
}
],
"supportedTypes": [
{
"objectTypeId": ")json" + kPersonObjectType + R"json("
},
{
"objectTypeId": ")json" + kCatObjectType + R"json("
},
{
"objectTypeId": ")json" + kDogObjectType + R"json("
}
]
}
)json";
}
/**
* Called when the Server sends a new uncompressed frame from a camera.
*/
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame)
{
++m_frameIndex;
m_lastVideoFrameTimestampUs = videoFrame->timestampUs();
auto eventMetadataPacket = generateEventMetadataPacket();
if (eventMetadataPacket)
{
// Send generated metadata packet to the Server.
pushMetadataPacket(eventMetadataPacket.releasePtr());
}
return true; //< There were no errors while processing the video frame.
}
/**
* Serves the similar purpose as pushMetadataPacket(). The differences are:
* - pushMetadataPacket() is called by the plugin, while pullMetadataPackets() is called by Server.
* - pushMetadataPacket() expects one metadata packet, while pullMetadataPacket expects the
* std::vector of them.
*
* There are no strict rules for deciding which method is "better". A rule of thumb is to use
* pushMetadataPacket() when you generate one metadata packet and do not want to store it in the
* class field, and use pullMetadataPackets otherwise.
*/
bool DeviceAgent::pullMetadataPackets(std::vector<IMetadataPacket*>* metadataPackets)
{
metadataPackets->push_back(generateObjectMetadataPacket().releasePtr());
return true; //< There were no errors while filling metadataPackets.
}
void DeviceAgent::doSetNeededMetadataTypes(
nx::sdk::Result<void>* /*outValue*/,
const nx::sdk::analytics::IMetadataTypes* /*neededMetadataTypes*/)
{
}
//-------------------------------------------------------------------------------------------------
// private
Ptr<IMetadataPacket> DeviceAgent::generateEventMetadataPacket()
{
// Generate event every kTrackFrameCount'th frame.
if (m_frameIndex % kTrackFrameCount != 0)
return nullptr;
// EventMetadataPacket contains arbitrary number of EventMetadata.
const auto eventMetadataPacket = makePtr<EventMetadataPacket>();
// Bind event metadata packet to the last video frame using a timestamp.
eventMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);
// Zero duration means that the event is not sustained, but momental.
eventMetadataPacket->setDurationUs(0);
// EventMetadata contains an information about event.
const auto eventMetadata = makePtr<EventMetadata>();
// Set all required fields.
eventMetadata->setTypeId(kNewTrackEventType);
eventMetadata->setIsActive(true);
eventMetadata->setCaption("New sample plugin track started");
eventMetadata->setDescription("New track #" + std::to_string(m_trackIndex) + " started");
eventMetadataPacket->addItem(eventMetadata.get());
// Generate index and track id for the next track.
++m_trackIndex;
m_trackId = nx::sdk::UuidHelper::randomUuid();
return eventMetadataPacket;
}
Ptr<IMetadataPacket> DeviceAgent::generateObjectMetadataPacket()
{
// ObjectMetadataPacket contains arbitrary number of ObjectMetadata.
const auto objectMetadataPacket = makePtr<ObjectMetadataPacket>();
// Bind the object metadata to the last video frame using a timestamp.
objectMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);
// ObjectMetadata contains information about an object on the frame.
const auto objectMetadata = makePtr<ObjectMetadata>();
// Set all required fields.
objectMetadata->setTypeId(kPersonObjectType);
objectMetadata->setTrackId(m_trackId);
// Calculate bounding box coordinates each frame so that it moves from the top left corner
// to the bottom right corner during kTrackFrameCount frames.
static constexpr float d = 0.5F / kTrackFrameCount;
static constexpr float width = 0.5F;
static constexpr float height = 0.5F;
const int frameIndexInsideTrack = m_frameIndex % kTrackFrameCount;
const float x = d * frameIndexInsideTrack;
const float y = d * frameIndexInsideTrack;
objectMetadata->setBoundingBox(Rect(x, y, width, height));
objectMetadataPacket->addItem(objectMetadata.get());
return objectMetadataPacket;
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 6,126
|
C++
|
.cpp
| 144
| 38.451389
| 99
| 0.726067
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,412
|
object_detector.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step5/src/sample_company/vms_server_plugins/opencv_object_detection/object_detector.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_detector.h"
#include <opencv2/core.hpp>
#include "exceptions.h"
#include "frame.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::dnn;
ObjectDetector::ObjectDetector(std::filesystem::path modelPath):
m_modelPath(std::move(modelPath))
{
}
/**
* Load the model if it is not loaded, do nothing otherwise. In case of errors terminate the
* plugin and throw a specialized exception.
*/
void ObjectDetector::ensureInitialized()
{
if (isTerminated())
{
throw ObjectDetectorIsTerminatedError(
"Object detector initialization error: object detector is terminated.");
}
if (m_netLoaded)
return;
try
{
loadModel();
}
catch (const cv::Exception& e)
{
terminate();
throw ObjectDetectorInitializationError("Loading model: " + cvExceptionToStdString(e));
}
catch (const std::exception& e)
{
terminate();
throw ObjectDetectorInitializationError("Loading model: Error: "s + e.what());
}
}
bool ObjectDetector::isTerminated() const
{
return m_terminated;
}
void ObjectDetector::terminate()
{
m_terminated = true;
}
DetectionList ObjectDetector::run(const Frame& frame)
{
if (isTerminated())
throw ObjectDetectorIsTerminatedError("Detection error: object detector is terminated.");
try
{
return runImpl(frame);
}
catch (const cv::Exception& e)
{
terminate();
throw ObjectDetectionError(cvExceptionToStdString(e));
}
catch (const std::exception& e)
{
terminate();
throw ObjectDetectionError("Error: "s + e.what());
}
}
//-------------------------------------------------------------------------------------------------
// private
void ObjectDetector::loadModel()
{
// Prepare paths of model weights and definition.
static const auto modelBin = m_modelPath /
std::filesystem::path("MobileNetSSD.caffemodel");
static const auto modelTxt = m_modelPath /
std::filesystem::path("MobileNetSSD.prototxt");
// Load the model for future processing using OpenCV.
m_net = std::make_unique<Net>(
readNetFromCaffe(modelTxt.string(), modelBin.string()));
// Save the whether the net is loaded or not to prevent unnecessary load.
m_netLoaded = !m_net->empty();
if (!m_netLoaded)
throw ObjectDetectorInitializationError("Loading model: network is empty.");
}
std::shared_ptr<Detection> convertRawDetectionToDetection(
const Mat& rawDetections,
int detectionIndex)
{
enum class OutputIndex
{
classIndex = 1,
confidence = 2,
xBottomLeft = 3,
yBottomLeft = 4,
xTopRight = 5,
yTopRight = 6,
};
static constexpr float confidenceThreshold = 0.5F; //< Chosen arbitrarily.
const int& i = detectionIndex;
const float confidence = rawDetections.at<float>(i, (int) OutputIndex::confidence);
const auto classIndex = (int) (rawDetections.at<float>(i, (int) OutputIndex::classIndex));
const std::string classLabel = kClasses[(size_t) classIndex];
const bool confidentDetection = confidence >= confidenceThreshold;
const bool oneOfRequiredClasses = std::find(
kClassesToDetect.begin(), kClassesToDetect.end(), classLabel) != kClassesToDetect.end();
if (confidentDetection && oneOfRequiredClasses)
{
const float xBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::xBottomLeft);
const float yBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::yBottomLeft);
const float xTopRight = rawDetections.at<float>(i, (int) OutputIndex::xTopRight);
const float yTopRight = rawDetections.at<float>(i, (int) OutputIndex::yTopRight);
const float width = xTopRight - xBottomLeft;
const float height = yTopRight - yBottomLeft;
return std::make_shared<Detection>(Detection{
/*boundingBox*/ nx::sdk::analytics::Rect(xBottomLeft, yBottomLeft, width, height),
classLabel,
confidence,
/*trackId*/ nx::sdk::Uuid() //< Will be filled with real value in ObjectTracker.
});
}
return nullptr;
}
DetectionList ObjectDetector::runImpl(const Frame& frame)
{
if (isTerminated())
{
throw ObjectDetectorIsTerminatedError(
"Object detection error: object detector is terminated.");
}
const Mat image = frame.cvMat;
// MobileNet SSD parameters.
static const Size netInputImageSize(300, 300);
static constexpr double scaleFactor = 1.0 / 127.5;
static const Scalar mean(127.5, 127.5, 127.5);
static constexpr int kHeightIndex = 2;
static constexpr int kWidthIndex = 3;
const Mat netInputBlob = blobFromImage(image, scaleFactor, netInputImageSize, mean);
m_net->setInput(netInputBlob);
Mat rawDetections = m_net->forward();
const Mat detections(
/*_rows*/ rawDetections.size[kHeightIndex],
/*_cols*/ rawDetections.size[kWidthIndex],
/*_type*/ CV_32F,
/*_s*/ rawDetections.ptr<float>());
DetectionList result;
for (int i = 0; i < detections.rows; ++i)
{
const std::shared_ptr<Detection> detection = convertRawDetectionToDetection(detections, i);
if (detection)
result.push_back(detection);
}
return result;
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 5,680
|
C++
|
.cpp
| 159
| 30.289308
| 99
| 0.675287
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,413
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step5/src/sample_company/vms_server_plugins/opencv_object_detection/device_agent.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <exception>
#include <cctype>
#include <opencv2/core.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/event_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
#include <nx/sdk/helpers/string.h>
#include "detection.h"
#include "exceptions.h"
#include "frame.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
using namespace std::string_literals;
/**
 * @param deviceInfo Various information about the related device, such as its id, vendor, model,
 *     etc.
 * @param pluginHomeDir Directory containing the plugin's model files; moved into the
 *     ObjectDetector (which takes the path by value) to avoid a redundant copy.
 */
DeviceAgent::DeviceAgent(
    const nx::sdk::IDeviceInfo* deviceInfo,
    std::filesystem::path pluginHomeDir):
    // Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
    ConsumingDeviceAgent(deviceInfo, /*enableOutput*/ true),
    m_objectDetector(std::make_unique<ObjectDetector>(std::move(pluginHomeDir))),
    m_objectTracker(std::make_unique<ObjectTracker>())
{
}
// Trivial destructor; the detector and tracker are released by their unique_ptr members.
DeviceAgent::~DeviceAgent()
{
}
/**
 * @return JSON with the particular structure. Note that it is possible to fill in the values
 * that are not known at compile time, but should not depend on the DeviceAgent settings.
 */
std::string DeviceAgent::manifestString() const
{
    // Tell the Server that the plugin can generate the events and objects of certain types.
    // Id values are strings and should be unique. Format of ids:
    // `{vendor_id}.{plugin_id}.{event_type_id/object_type_id}`.
    //
    // See the plugin manifest for the explanation of vendor_id and plugin_id.

    // The leading `1 +` skips the first newline of the raw string literal.
    return /*suppress newline*/ 1 + R"json(
{
"eventTypes": [
{
"id": ")json" + kDetectionEventType + R"json(",
"name": "Object detected"
},
{
"id": ")json" + kProlongedDetectionEventType + R"json(",
"name": "Object detected (prolonged)",
"flags": "stateDependent"
}
],
"supportedTypes": [
{
"objectTypeId": ")json" + kPersonObjectType + R"json("
},
{
"objectTypeId": ")json" + kCatObjectType + R"json("
},
{
"objectTypeId": ")json" + kDogObjectType + R"json("
}
]
}
)json";
}
/**
 * Called by the Server for each new uncompressed frame from the camera.
 * Runs detection/tracking once per kDetectionFramePeriod frames and pushes the resulting
 * metadata packets. Once the plugin is broken, reports a diagnostic event a single time and
 * ignores all further frames.
 */
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame)
{
    if (!m_terminated && m_objectDetector->isTerminated())
        m_terminated = true;

    if (m_terminated)
    {
        if (!m_terminatedPrevious)
        {
            pushPluginDiagnosticEvent(
                IPluginDiagnosticEvent::Level::error,
                "Plugin is in broken state.",
                "Disable the plugin.");
            m_terminatedPrevious = true;
        }
        return true;
    }

    // Detecting objects only on every `kDetectionFramePeriod` frame.
    const bool shouldProcess = (m_frameIndex % kDetectionFramePeriod == 0);
    if (shouldProcess)
    {
        for (const Ptr<IMetadataPacket>& packet: processFrame(videoFrame))
        {
            packet->addRef();
            pushMetadataPacket(packet.get());
        }
    }

    ++m_frameIndex;
    return true;
}
/**
 * Called by the Server with the metadata types it expects. The requested types are ignored;
 * this hook is used to lazily initialize the object detector, terminating the plugin and
 * reporting an error through outValue on initialization failure.
 */
void DeviceAgent::doSetNeededMetadataTypes(
    nx::sdk::Result<void>* outValue,
    const nx::sdk::analytics::IMetadataTypes* /*neededMetadataTypes*/)
{
    if (m_terminated)
        return;

    try
    {
        m_objectDetector->ensureInitialized();
    }
    catch (const ObjectDetectorInitializationError& e)
    {
        // The plugin cannot work without the model: report the error to the Server.
        *outValue = {ErrorCode::otherError, new String(e.what())};
        m_terminated = true;
    }
    catch (const ObjectDetectorIsTerminatedError& /*e*/)
    {
        m_terminated = true;
    }
} //< Removed a stray semicolon that followed the function body.
//-------------------------------------------------------------------------------------------------
// private
/**
 * Convert the ObjectTracker's event list into SDK metadata packets.
 *
 * Each prolonged (started/finished) event gets its own packet stamped with the event's own
 * timestamp; all momentary object_detected events are merged into one packet stamped with the
 * frame timestamp. Fix: the merged packet is only emitted when it actually contains items,
 * instead of unconditionally pushing a possibly empty packet.
 */
DeviceAgent::MetadataPacketList DeviceAgent::eventsToEventMetadataPacketList(
    const EventList& events,
    int64_t timestampUs)
{
    if (events.empty())
        return {};

    MetadataPacketList result;
    const auto objectDetectedEventMetadataPacket = makePtr<EventMetadataPacket>();
    bool hasObjectDetectedEvents = false; //< Guards against pushing an empty packet.
    for (const std::shared_ptr<Event>& event: events)
    {
        const auto eventMetadata = makePtr<EventMetadata>();
        if (event->eventType == EventType::detection_started ||
            event->eventType == EventType::detection_finished)
        {
            static const std::string kStartedSuffix = " STARTED";
            static const std::string kFinishedSuffix = " FINISHED";
            const std::string suffix = (event->eventType == EventType::detection_started) ?
                kStartedSuffix : kFinishedSuffix;
            const std::string caption = kClassesToDetectPluralCapitalized.at(event->classLabel) +
                " detection" + suffix;
            const std::string description = caption;
            eventMetadata->setCaption(caption);
            eventMetadata->setDescription(description);
            eventMetadata->setIsActive(event->eventType == EventType::detection_started);
            eventMetadata->setTypeId(kProlongedDetectionEventType);
            const auto eventMetadataPacket = makePtr<EventMetadataPacket>();
            eventMetadataPacket->addItem(eventMetadata.get());
            eventMetadataPacket->setTimestampUs(event->timestampUs);
            result.push_back(eventMetadataPacket);
        }
        else if (event->eventType == EventType::object_detected)
        {
            // Capitalize the class label for the user-visible caption and description.
            std::string caption = event->classLabel + kDetectionEventCaptionSuffix;
            caption[0] = (char) toupper(caption[0]);
            std::string description = event->classLabel + kDetectionEventDescriptionSuffix;
            description[0] = (char) toupper(description[0]);
            eventMetadata->setCaption(caption);
            eventMetadata->setDescription(description);
            eventMetadata->setIsActive(true);
            eventMetadata->setTypeId(kDetectionEventType);
            objectDetectedEventMetadataPacket->addItem(eventMetadata.get());
            hasObjectDetectedEvents = true;
        }
    }
    if (hasObjectDetectedEvents)
    {
        objectDetectedEventMetadataPacket->setTimestampUs(timestampUs);
        result.push_back(objectDetectedEventMetadataPacket);
    }
    return result;
}
/**
 * Pack the given detections into a single ObjectMetadataPacket stamped with the frame
 * timestamp; return null when there is nothing to report.
 */
Ptr<ObjectMetadataPacket> DeviceAgent::detectionsToObjectMetadataPacket(
    const DetectionList& detections,
    int64_t timestampUs)
{
    if (detections.empty())
        return nullptr;

    const auto packet = makePtr<ObjectMetadataPacket>();
    for (const std::shared_ptr<Detection>& detection: detections)
    {
        const auto metadata = makePtr<ObjectMetadata>();
        metadata->setBoundingBox(detection->boundingBox);
        metadata->setConfidence(detection->confidence);
        metadata->setTrackId(detection->trackId);

        // Map the class label onto the corresponding object type id. No fallback branch:
        // only detections with these labels are generated upstream.
        const std::string& label = detection->classLabel;
        if (label == "person")
            metadata->setTypeId(kPersonObjectType);
        else if (label == "cat")
            metadata->setTypeId(kCatObjectType);
        else if (label == "dog")
            metadata->setTypeId(kDogObjectType);

        packet->addItem(metadata.get());
    }
    packet->setTimestampUs(timestampUs);
    return packet;
}
/**
 * Recreate the ObjectTracker whenever the incoming frame resolution changes; on the very
 * first frame only the size is recorded.
 */
void DeviceAgent::reinitializeObjectTrackerOnFrameSizeChanges(const Frame& frame)
{
    const bool sizeKnown = (m_previousFrameWidth != 0 || m_previousFrameHeight != 0);
    const bool sizeChanged =
        frame.width != m_previousFrameWidth || frame.height != m_previousFrameHeight;

    if (sizeKnown && sizeChanged)
        m_objectTracker = std::make_unique<ObjectTracker>();

    // Remember the current resolution for the next frame.
    m_previousFrameWidth = frame.width;
    m_previousFrameHeight = frame.height;
}
/**
 * Run detection and tracking on one frame and convert the results into metadata packets:
 * an optional object packet followed by any event packets. On a detection or tracking error,
 * reports a diagnostic event, marks the plugin terminated, and returns an empty list.
 */
DeviceAgent::MetadataPacketList DeviceAgent::processFrame(
    const IUncompressedVideoFrame* videoFrame)
{
    const Frame frame(videoFrame, m_frameIndex);
    reinitializeObjectTrackerOnFrameSizeChanges(frame);
    try
    {
        DetectionList detections = m_objectDetector->run(frame);
        ObjectTracker::Result objectTrackerResult = m_objectTracker->run(frame, detections);
        const auto& objectMetadataPacket =
            detectionsToObjectMetadataPacket(objectTrackerResult.detections, frame.timestampUs);
        const auto& eventMetadataPacketList = eventsToEventMetadataPacketList(
            objectTrackerResult.events, frame.timestampUs);
        MetadataPacketList result;
        if (objectMetadataPacket)
            result.push_back(objectMetadataPacket);
        // Append all event packets after the (optional) object packet.
        result.insert(
            result.end(),
            std::make_move_iterator(eventMetadataPacketList.begin()),
            std::make_move_iterator(eventMetadataPacketList.end()));
        return result;
    }
    catch (const ObjectDetectionError& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Object detection error.",
            e.what());
        m_terminated = true;
    }
    catch (const ObjectTrackingError& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Object tracking error.",
            e.what());
        m_terminated = true;
    }
    return {};
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 9,974
|
C++
|
.cpp
| 262
| 31.206107
| 99
| 0.672838
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,414
|
detection.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step5/src/sample_company/vms_server_plugins/opencv_object_detection/detection.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "detection.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
// Class labels for the MobileNet SSD model (VOC dataset); the index matches the model's
// numeric class id.
const std::vector<std::string> kClasses{
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
    "chair", "cow", "dining table", "dog", "horse", "motorbike", "person", "potted plant",
    "sheep", "sofa", "train", "tv monitor"
};

// The subset of kClasses this plugin actually reports to the Server.
const std::vector<std::string> kClassesToDetect{"cat", "dog", "person"};

// Capitalized plural forms used in user-visible event captions.
const std::map<std::string, std::string> kClassesToDetectPluralCapitalized{
    {"cat", "Cats"}, {"dog", "Dogs"}, {"person", "People"}};
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 855
|
C++
|
.cpp
| 17
| 48.117647
| 94
| 0.696643
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,415
|
object_tracker.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step5/src/sample_company/vms_server_plugins/opencv_object_detection/object_tracker.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_tracker.h"
#include <opencv2/core/core.hpp>
#include "exceptions.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::detail::tracking::tbm;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Build a tracking-by-matching tracker configured with a fast resized-image descriptor.
 *
 * This function implementation is based on the sample from opencv_contrib repository:
 * https://github.com/opencv/opencv_contrib/blob/0a2179b328/modules/tracking/samples/tracking_by_matching.cpp
 */
cv::Ptr<ITrackerByMatching> createTrackerByMatchingWithFastDescriptor()
{
    TrackerParams params;

    // The effective forget delay is `params.forget_delay * kDetectionFramePeriod` frames.
    params.forget_delay = 75;

    // Keep forgotten tracks around: our bookkeeping cleans up both our tracks and the
    // cv::detail::tracking::tbm tracks manually.
    params.drop_forgotten_tracks = false;

    cv::Ptr<ITrackerByMatching> tracker = createTrackerByMatching(params);

    static const Size kDescriptorFastSize(16, 32);
    tracker->setDescriptorFast(
        std::make_shared<ResizedImageDescriptor>(
            kDescriptorFastSize, InterpolationFlags::INTER_LINEAR));
    tracker->setDistanceFast(std::make_shared<MatchTemplateDistance>());

    return tracker;
}
/**
 * Accumulates the recent detections of one tracked object and derives the track status
 * (inactive -> started -> active) from consecutive additions.
 */
class ObjectTracker::Track
{
public:
    enum class Status { inactive, started, active, finished };

public:
    // maxDetectionCount caps the history size; the oldest entry is dropped when full.
    explicit Track(int maxDetectionCount): m_maxDetectionCount(maxDetectionCount) {}

    /**
     * Append a detection at the given timestamp. When isTrackStarted is true, the status
     * advances one step: inactive becomes started, started becomes active.
     */
    void addDetection(
        int64_t timestampUs,
        const std::shared_ptr<const Detection>& detection,
        bool isTrackStarted = true)
    {
        if (m_detections.size() == (size_t) m_maxDetectionCount)
            m_detections.erase(m_detections.begin());
        if (isTrackStarted)
        {
            if (m_status == Status::inactive)
                m_status = Status::started;
            else if (m_status == Status::started)
                m_status = Status::active;
        }
        m_detections.insert(std::make_pair(timestampUs, detection));
    }

    Status status() const { return m_status; }

    // Timestamp of the oldest detection still kept in the history.
    int64_t startTimeUs() const { return m_detections.begin()->first; }

    // Class label of the oldest kept detection (assumed constant within one track).
    std::string classLabel() const { return m_detections.begin()->second->classLabel; };

private:
    // Detection history ordered by timestamp.
    std::map<
        /*timestampUs*/ int64_t,
        /*detection*/ std::shared_ptr<const Detection>
    > m_detections;
    Status m_status = Status::inactive;
    int m_maxDetectionCount;
};
//-------------------------------------------------------------------------------------------------
// public
/**
 * Create the underlying tbm tracker and mark detection as inactive for every class of
 * interest.
 */
ObjectTracker::ObjectTracker():
    m_tracker(createTrackerByMatchingWithFastDescriptor())
{
    for (const std::string& label: kClassesToDetect)
        m_detectionActive.emplace(label, false);
}
/**
 * Run tracking for one frame, translating any OpenCV or standard exception into an
 * ObjectTrackingError.
 */
ObjectTracker::Result ObjectTracker::run(const Frame& frame, const DetectionList& detections)
{
    std::string errorMessage;
    try
    {
        return runImpl(frame, detections);
    }
    catch (const cv::Exception& e)
    {
        errorMessage = cvExceptionToStdString(e);
    }
    catch (const std::exception& e)
    {
        errorMessage = "Error: "s + e.what();
    }
    throw ObjectTrackingError(errorMessage);
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Emit a detection_finished event for every class whose detection was active but which no
 * longer has any live track of that class.
 */
EventList ObjectTracker::generateDetectionFinishedEvents(int64_t timestampUs)
{
    EventList result;
    for (const std::string& classLabel: kClassesToDetect)
    {
        if (!m_detectionActive[classLabel])
            continue;

        bool noActiveTracks = true;
        for (const auto& pair: m_tracks)
        {
            const std::shared_ptr<Track> track = pair.second;
            // Bug fix: compare against the class currently being checked, not the
            // hard-coded "person" label; otherwise cat/dog detection never finishes (or
            // finishes spuriously) depending on whether a person track exists.
            if (track->classLabel() == classLabel)
            {
                noActiveTracks = false;
                break;
            }
        }
        if (noActiveTracks)
        {
            result.push_back(std::make_shared<Event>(Event{
                /*eventType*/ EventType::detection_finished,
                /*timestampUs*/ timestampUs,
                /*classLabel*/ classLabel,
            }));
            m_detectionActive[classLabel] = false;
        }
    }
    return result;
}
/**
 * Backfill the given Track with the detection history the tbm tracker accumulated for
 * cvTrackId, stopping once the current frame's own entry is reached. History entries are
 * added with isTrackStarted=false so they do not advance the track status.
 */
void ObjectTracker::copyDetectionsHistoryToTrack(
    const Frame& frame,
    int64_t cvTrackId,
    Track* track,
    const std::string& classLabel) const
{
    const cv::detail::tracking::tbm::Track& cvTrack = m_tracker->tracks().at((size_t) cvTrackId);
    for (const TrackedObject& trackedDetection: cvTrack.objects)
    {
        // The entry for the current frame (and anything after it) is handled by the caller.
        if ((int64_t) trackedDetection.timestamp == frame.timestampUs)
            break;
        std::shared_ptr<const DetectionInternal> detection = convertTrackedObjectToDetection(
            /*frame*/ frame,
            /*trackedDetection*/ trackedDetection,
            /*classLabel*/ classLabel,
            /*idMapper*/ m_idMapper.get());
        track->addDetection(
            /*timestampUs*/ (int64_t) trackedDetection.timestamp,
            /*detection*/ detection->detection,
            /*isTrackStarted*/ false);
    }
}
/**
 * Find the Track registered under trackId, creating and registering a new one if absent.
 * Uses a single try_emplace instead of the original find-then-insert double lookup.
 */
std::shared_ptr<ObjectTracker::Track> ObjectTracker::getOrCreateTrack(const Uuid& trackId)
{
    const auto [it, inserted] = m_tracks.try_emplace(trackId);
    if (inserted)
        it->second = std::make_shared<Track>(m_tracker->params().max_num_objects_in_track);
    return it->second;
}
/**
 * Update the Track corresponding to one tracked detection and emit the resulting events:
 * - object_detected when the track has just become valid (status == started);
 * - detection_started when a class of interest becomes active for the first time.
 */
EventList ObjectTracker::processDetection(
    const Frame& frame,
    const std::shared_ptr<DetectionInternal>& detection)
{
    EventList events;
    const int64_t cvTrackId = detection->cvTrackId;
    std::shared_ptr<Track> track = getOrCreateTrack(m_idMapper->get(cvTrackId));
    // The track status only advances once the tbm tracker considers the track valid.
    const bool isTrackStarted = m_tracker->isTrackValid((size_t) cvTrackId);
    track->addDetection(
        /*timestampUs*/ frame.timestampUs,
        /*detection*/ detection->detection,
        /*isTrackStarted*/ isTrackStarted);
    const Track::Status& trackStatus = track->status();
    if (trackStatus == Track::Status::started)
    {
        const std::string classLabel = detection->detection->classLabel;
        // Import the pre-validation history so startTimeUs() reflects the real track start.
        copyDetectionsHistoryToTrack(
            /*frame*/ frame,
            /*cvTrackId*/ cvTrackId,
            /*track*/ track.get(),
            /*classLabel*/ classLabel);
        events.push_back(std::make_shared<Event>(Event{
            /*eventType*/ EventType::object_detected,
            /*timestampUs*/ track->startTimeUs(),
            /*classLabel*/ classLabel,
        }));
    }
    const std::string& classLabel = detection->detection->classLabel;
    if ((trackStatus == Track::Status::started || trackStatus == Track::Status::active) &&
        std::find(kClassesToDetect.begin(), kClassesToDetect.end(), classLabel) !=
        kClassesToDetect.end())
    {
        // First active track of this class: raise the prolonged "detection started" event.
        if (!m_detectionActive[classLabel])
        {
            events.push_back(std::make_shared<Event>(Event{
                /*eventType*/ EventType::detection_started,
                /*timestampUs*/ track->startTimeUs(),
                /*classLabel*/ classLabel,
            }));
            m_detectionActive[classLabel] = true;
        }
    }
    return events;
}
/**
 * Produce the full event list for this frame: per-detection events first, then — after
 * cleanup of forgotten tracks — the detection_finished events.
 */
EventList ObjectTracker::generateEvents(
    const Frame& frame,
    const DetectionInternalList& detectionsInternal)
{
    EventList result;

    // Move-append a sub-list of events onto the result.
    const auto appendMoved =
        [&result](EventList&& events)
        {
            result.insert(
                result.end(),
                std::make_move_iterator(events.begin()),
                std::make_move_iterator(events.end()));
        };

    for (const std::shared_ptr<DetectionInternal>& detection: detectionsInternal)
        appendMoved(processDetection(frame, detection));

    cleanup();
    appendMoved(generateDetectionFinishedEvents(frame.timestampUs));

    return result;
}
/**
 * Core per-frame pipeline: convert detections to tbm format, run the matcher, convert the
 * tracked objects back, and derive the event list.
 */
ObjectTracker::Result ObjectTracker::runImpl(
    const Frame& frame,
    const DetectionList& detections)
{
    // Unfortunately the OpenCV tbm module does not support preserving classLabel during tracking.
    // See issue: https://github.com/opencv/opencv_contrib/issues/2298
    // Therefore, we save information about classLabels in the map from unique id of the detection
    // (bounding box + timestamp) to classLabel.
    std::map<const CompositeDetectionId, std::string> classLabels;
    TrackedObjects detectionsToTrack = convertDetectionsToTrackedObjects(
        /*frame*/ frame,
        /*detections*/ detections,
        /*classLabels*/ &classLabels);
    // Perform tracking and extract tracked detections.
    m_tracker->process(frame.cvMat, detectionsToTrack, (uint64_t) frame.timestampUs);
    const TrackedObjects trackedDetections = m_tracker->trackedDetections();
    // Restore class labels and map tbm ids to stable Uuids.
    DetectionInternalList detectionsInternal =
        convertTrackedObjectsToDetections(
            /*frame*/ frame,
            /*trackedDetections*/ trackedDetections,
            /*classLabels*/ classLabels,
            /*idMapper*/ m_idMapper.get());
    EventList events = generateEvents(
        /*frame*/ frame,
        /*detectionsInternal*/ detectionsInternal);
    return {
        /*detections*/ extractDetectionList(detectionsInternal),
        /*events*/ std::move(events),
    };
}
/**
* Cleanup ids of the objects that belong to the forgotten tracks.
*/
void ObjectTracker::cleanupIds()
{
std::set<Uuid> validIds;
for (const auto& pair: m_tracks)
validIds.insert(pair.first);
m_idMapper->removeAllExcept(validIds);
}
void ObjectTracker::cleanupTracks()
{
for (const auto& pair: m_tracker->tracks())
{
auto cvTrackId = (int64_t) pair.first;
if (m_tracker->isTrackForgotten((size_t) cvTrackId))
{
const cv::detail::tracking::tbm::Track& cvTrack = pair.second;
const Uuid& trackId = m_idMapper->get(cvTrack.first_object.object_id);
m_tracks.erase(trackId);
}
}
}
// Per-frame cleanup in dependency order: first prune our Track objects (they are found via
// the tbm tracks), then let the tbm tracker drop its forgotten tracks, then prune id mappings.
void ObjectTracker::cleanup()
{
    cleanupTracks();
    m_tracker->dropForgottenTracks();
    cleanupIds();
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 10,605
|
C++
|
.cpp
| 287
| 30.069686
| 111
| 0.650268
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,416
|
object_tracker_utils.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step5/src/sample_company/vms_server_plugins/opencv_object_detection/object_tracker_utils.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_tracker_utils.h"
#include "geometry.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace cv;
using namespace cv::detail::tracking::tbm;
using namespace nx::sdk;
/**
 * Return the stable Uuid mapped to the given numeric tbm track id, generating and
 * remembering a fresh random Uuid on first use. Uses a single emplace on the miss path
 * instead of the original default-construct-then-assign via operator[].
 */
Uuid IdMapper::get(int64_t id)
{
    auto it = m_map.find(id);
    if (it == m_map.end())
        it = m_map.emplace(id, UuidHelper::randomUuid()).first;
    return it->second;
}
void IdMapper::removeAllExcept(const std::set<Uuid>& idsToKeep)
{
for (auto it = m_map.begin(); it != m_map.end(); )
{
if (idsToKeep.find(it->second) == idsToKeep.end())
it = m_map.erase(it);
else
++it;
}
}
/**
 * Convert detections from the plugin format to the format of opencv::detail::tracking::tbm,
 * preserving classLabels.
 *
 * Since tbm does not carry labels, each detection's class label is recorded in
 * inOutClassLabels keyed by (frame index, pixel rect) for later restoration.
 */
TrackedObjects convertDetectionsToTrackedObjects(
    const Frame& frame,
    const DetectionList& detections,
    ClassLabelMap* inOutClassLabels)
{
    TrackedObjects result;
    for (const std::shared_ptr<Detection>& detection: detections)
    {
        // Translate the bounding box into pixel coordinates (see nxRectToCvRect).
        const cv::Rect cvRect = nxRectToCvRect(
            detection->boundingBox,
            frame.width,
            frame.height);
        inOutClassLabels->insert(std::make_pair(CompositeDetectionId{
            frame.index,
            cvRect},
            detection->classLabel));
        result.push_back(TrackedObject(
            cvRect,
            detection->confidence,
            (int) frame.index,
            /*object_id*/ -1)); //< Placeholder, to be filled in ObjectTracker::process().
    }
    return result;
}
/**
 * Convert one detection from tbm format to our format, attaching the given classLabel and
 * mapping the numeric tbm object id to a stable Uuid via idMapper.
 */
std::shared_ptr<DetectionInternal> convertTrackedObjectToDetection(
    const Frame& frame,
    const TrackedObject& trackedDetection,
    const std::string& classLabel,
    IdMapper* idMapper)
{
    auto detection = std::make_shared<Detection>(Detection{
        /*boundingBox*/ cvRectToNxRect(trackedDetection.rect, frame.width, frame.height),
        classLabel,
        (float) trackedDetection.confidence,
        /*trackId*/ idMapper->get(trackedDetection.object_id)});
    // The internal wrapper keeps the raw tbm id alongside the public Detection.
    return std::make_shared<DetectionInternal>(DetectionInternal{
        detection,
        trackedDetection.object_id,
    });
}
/**
 * Convert detections from opencv::detail::tracking::tbm format back to the plugin format,
 * restoring each detection's class label from the (frame index, rect) keyed map.
 */
DetectionInternalList convertTrackedObjectsToDetections(
    const Frame& frame,
    const TrackedObjects& trackedDetections,
    const ClassLabelMap& classLabels,
    IdMapper* idMapper)
{
    DetectionInternalList result;
    for (const auto& trackedDetection: trackedDetections)
    {
        const CompositeDetectionId detectionId{frame.index, trackedDetection.rect};
        result.push_back(convertTrackedObjectToDetection(
            frame,
            trackedDetection,
            classLabels.at(detectionId),
            idMapper));
    }
    return result;
}
/**
 * Extract the public Detection objects from the internal wrappers.
 * Reserves the output up front since the result size is known.
 */
DetectionList extractDetectionList(const DetectionInternalList& detectionsInternal)
{
    DetectionList result;
    result.reserve(detectionsInternal.size());
    for (const std::shared_ptr<DetectionInternal>& detection: detectionsInternal)
        result.push_back(detection->detection);
    return result;
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 3,549
|
C++
|
.cpp
| 109
| 26.834862
| 116
| 0.687701
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,418
|
object_detector.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step4/src/sample_company/vms_server_plugins/opencv_object_detection/object_detector.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_detector.h"
#include <opencv2/core.hpp>
#include "exceptions.h"
#include "frame.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::dnn;
// Store the directory containing the model files; the model itself is loaded lazily in
// ensureInitialized().
ObjectDetector::ObjectDetector(std::filesystem::path modelPath):
    m_modelPath(std::move(modelPath))
{
}
/**
 * Load the model if it is not loaded, do nothing otherwise. In case of errors terminate the
 * plugin and throw a specialized exception.
 * @throws ObjectDetectorIsTerminatedError if the detector is already terminated.
 * @throws ObjectDetectorInitializationError if loading the model fails.
 */
void ObjectDetector::ensureInitialized()
{
    if (isTerminated())
    {
        throw ObjectDetectorIsTerminatedError(
            "Object detector initialization error: object detector is terminated.");
    }
    if (m_netLoaded)
        return;
    try
    {
        loadModel();
    }
    catch (const cv::Exception& e)
    {
        // Wrap OpenCV errors into the plugin's own exception type.
        terminate();
        throw ObjectDetectorInitializationError("Loading model: " + cvExceptionToStdString(e));
    }
    catch (const std::exception& e)
    {
        terminate();
        throw ObjectDetectorInitializationError("Loading model: Error: "s + e.what());
    }
}
// Whether the detector has entered the unrecoverable terminated state.
bool ObjectDetector::isTerminated() const
{
    return m_terminated;
}

// Put the detector into the terminated state; subsequent run()/ensureInitialized() calls throw.
void ObjectDetector::terminate()
{
    m_terminated = true;
}
/**
 * Detect objects on the given frame, translating OpenCV and standard exceptions into
 * ObjectDetectionError and moving the detector into the terminated state on failure.
 * @throws ObjectDetectorIsTerminatedError if the detector was already terminated.
 */
DetectionList ObjectDetector::run(const Frame& frame)
{
    if (isTerminated())
        throw ObjectDetectorIsTerminatedError("Detection error: object detector is terminated.");

    std::string errorMessage;
    try
    {
        return runImpl(frame);
    }
    catch (const cv::Exception& e)
    {
        errorMessage = cvExceptionToStdString(e);
    }
    catch (const std::exception& e)
    {
        errorMessage = "Error: "s + e.what();
    }
    terminate();
    throw ObjectDetectionError(errorMessage);
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * Load the MobileNet SSD Caffe model from the model directory into m_net.
 * @throws ObjectDetectorInitializationError if the loaded network is empty.
 */
void ObjectDetector::loadModel()
{
    // Prepare paths of model weights and definition. NOTE: these must not be `static` — the
    // paths depend on the per-instance m_modelPath, and a static local would freeze the
    // values computed for the first ObjectDetector instance ever constructed.
    const auto modelBin = m_modelPath /
        std::filesystem::path("MobileNetSSD.caffemodel");
    const auto modelTxt = m_modelPath /
        std::filesystem::path("MobileNetSSD.prototxt");

    // Load the model for future processing using OpenCV.
    m_net = std::make_unique<Net>(
        readNetFromCaffe(modelTxt.string(), modelBin.string()));

    // Remember whether the net is loaded to prevent unnecessary reloads.
    m_netLoaded = !m_net->empty();
    if (!m_netLoaded)
        throw ObjectDetectorInitializationError("Loading model: network is empty.");
}
std::shared_ptr<Detection> convertRawDetectionToDetection(
const Mat& rawDetections,
int detectionIndex)
{
enum class OutputIndex
{
classIndex = 1,
confidence = 2,
xBottomLeft = 3,
yBottomLeft = 4,
xTopRight = 5,
yTopRight = 6,
};
static constexpr float confidenceThreshold = 0.5F; //< Chosen arbitrarily.
const int& i = detectionIndex;
const float confidence = rawDetections.at<float>(i, (int) OutputIndex::confidence);
const auto classIndex = (int) (rawDetections.at<float>(i, (int) OutputIndex::classIndex));
const std::string classLabel = kClasses[(size_t) classIndex];
const bool confidentDetection = confidence >= confidenceThreshold;
bool oneOfRequiredClasses = std::find(
kClassesToDetect.begin(), kClassesToDetect.end(), classLabel) != kClassesToDetect.end();
if (confidentDetection && oneOfRequiredClasses)
{
const float xBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::xBottomLeft);
const float yBottomLeft = rawDetections.at<float>(i, (int) OutputIndex::yBottomLeft);
const float xTopRight = rawDetections.at<float>(i, (int) OutputIndex::xTopRight);
const float yTopRight = rawDetections.at<float>(i, (int) OutputIndex::yTopRight);
const float width = xTopRight - xBottomLeft;
const float height = yTopRight - yBottomLeft;
return std::make_shared<Detection>(Detection{
/*boundingBox*/ nx::sdk::analytics::Rect(xBottomLeft, yBottomLeft, width, height),
classLabel,
confidence,
/*trackId*/ nx::sdk::Uuid() //< Will be filled with real value in ObjectTracker.
});
}
return nullptr;
}
/**
 * Run the MobileNet SSD forward pass on the frame and convert each raw output row into a
 * Detection, keeping only confident detections of the classes of interest.
 */
DetectionList ObjectDetector::runImpl(const Frame& frame)
{
    if (isTerminated())
    {
        throw ObjectDetectorIsTerminatedError(
            "Object detection error: object detector is terminated.");
    }
    const Mat image = frame.cvMat;
    // MobileNet SSD parameters: 300x300 input with mean 127.5 and scale 1/127.5 (effectively
    // normalizing pixel values to roughly [-1, 1]).
    static const Size netInputImageSize(300, 300);
    static constexpr double scaleFactor = 1.0 / 127.5;
    static const Scalar mean(127.5, 127.5, 127.5);
    static constexpr int kHeightIndex = 2;
    static constexpr int kWidthIndex = 3;
    const Mat netInputBlob = blobFromImage(image, scaleFactor, netInputImageSize, mean);
    m_net->setInput(netInputBlob);
    Mat rawDetections = m_net->forward();
    // Reinterpret the 4D output blob as a 2D matrix: one row per detection.
    const Mat detections(
        /*_rows*/ rawDetections.size[kHeightIndex],
        /*_cols*/ rawDetections.size[kWidthIndex],
        /*_type*/ CV_32F,
        /*_s*/ rawDetections.ptr<float>());
    DetectionList result;
    for (int i = 0; i < detections.rows; ++i)
    {
        const std::shared_ptr<Detection> detection = convertRawDetectionToDetection(detections, i);
        if (detection)
            result.push_back(detection);
    }
    return result;
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 5,674
|
C++
|
.cpp
| 159
| 30.251572
| 99
| 0.675114
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,419
|
device_agent.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step4/src/sample_company/vms_server_plugins/opencv_object_detection/device_agent.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "device_agent.h"
#include <chrono>
#include <exception>
#include <opencv2/core.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <nx/sdk/analytics/helpers/event_metadata.h>
#include <nx/sdk/analytics/helpers/event_metadata_packet.h>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/helpers/object_metadata_packet.h>
#include <nx/sdk/helpers/string.h>
#include "detection.h"
#include "exceptions.h"
#include "frame.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace nx::sdk;
using namespace nx::sdk::analytics;
using namespace std::string_literals;
/**
 * @param deviceInfo Various information about the related device, such as its id, vendor, model,
 *     etc.
 * @param pluginHomeDir Directory with the plugin's model files; moved into the ObjectDetector
 *     (which takes the path by value) to avoid an unnecessary copy.
 */
DeviceAgent::DeviceAgent(
    const nx::sdk::IDeviceInfo* deviceInfo,
    std::filesystem::path pluginHomeDir):
    // Call the DeviceAgent helper class constructor telling it to verbosely report to stderr.
    ConsumingDeviceAgent(deviceInfo, /*enableOutput*/ true),
    m_objectDetector(std::make_unique<ObjectDetector>(std::move(pluginHomeDir))),
    m_objectTracker(std::make_unique<ObjectTracker>())
{
}
// Trivial destructor; the detector and tracker are released by their unique_ptr members.
DeviceAgent::~DeviceAgent()
{
}
/**
 * @return JSON with the particular structure. Note that it is possible to fill in the values
 * that are not known at compile time, but should not depend on the DeviceAgent settings.
 */
std::string DeviceAgent::manifestString() const
{
    // Tell the Server that the plugin can generate the events and objects of certain types.
    // Id values are strings and should be unique. Format of ids:
    // `{vendor_id}.{plugin_id}.{event_type_id/object_type_id}`.
    //
    // See the plugin manifest for the explanation of vendor_id and plugin_id.

    // The leading `1 +` skips the first newline of the raw string literal.
    return /*suppress newline*/ 1 + R"json(
{
"eventTypes": [
{
"id": ")json" + kNewTrackEventType + R"json(",
"name": "New track started"
}
],
"supportedTypes": [
{
"objectTypeId": ")json" + kPersonObjectType + R"json("
},
{
"objectTypeId": ")json" + kCatObjectType + R"json("
},
{
"objectTypeId": ")json" + kDogObjectType + R"json("
}
]
}
)json";
}
/**
 * Called when the Server sends a new uncompressed frame from a camera.
 * Records the frame timestamp, then runs detection/tracking once per kDetectionFramePeriod
 * frames and pushes the resulting metadata packets. When broken, reports a diagnostic event
 * once and ignores all further frames.
 */
bool DeviceAgent::pushUncompressedVideoFrame(const IUncompressedVideoFrame* videoFrame)
{
    m_terminated = m_terminated || m_objectDetector->isTerminated();
    if (m_terminated)
    {
        if (!m_terminatedPrevious)
        {
            pushPluginDiagnosticEvent(
                IPluginDiagnosticEvent::Level::error,
                "Plugin is in broken state.",
                "Disable the plugin.");
            m_terminatedPrevious = true;
        }
        return true;
    }
    // Remember the timestamp so generated events can be bound to the last frame.
    m_lastVideoFrameTimestampUs = videoFrame->timestampUs();
    // Detecting objects only on every `kDetectionFramePeriod` frame.
    if (m_frameIndex % kDetectionFramePeriod == 0)
    {
        const MetadataPacketList metadataPackets = processFrame(videoFrame);
        for (const Ptr<IMetadataPacket>& metadataPacket: metadataPackets)
        {
            metadataPacket->addRef();
            pushMetadataPacket(metadataPacket.get());
        }
    }
    ++m_frameIndex;
    return true;
}
/**
 * Called by the Server with the metadata types it expects. The requested types are ignored;
 * this hook is used to lazily initialize the object detector, terminating the plugin and
 * reporting an error through outValue on initialization failure.
 */
void DeviceAgent::doSetNeededMetadataTypes(
    nx::sdk::Result<void>* outValue,
    const nx::sdk::analytics::IMetadataTypes* /*neededMetadataTypes*/)
{
    if (m_terminated)
        return;

    try
    {
        m_objectDetector->ensureInitialized();
    }
    catch (const ObjectDetectorInitializationError& e)
    {
        // The plugin cannot work without the model: report the error to the Server.
        *outValue = {ErrorCode::otherError, new String(e.what())};
        m_terminated = true;
    }
    catch (const ObjectDetectorIsTerminatedError& /*e*/)
    {
        m_terminated = true;
    }
} //< Removed a stray semicolon that followed the function body.
//-------------------------------------------------------------------------------------------------
// private
/**
 * Emit a demo "new track started" event once every kTrackFrameCount frames.
 * @return The event packet, or null on frames where no event is due.
 */
Ptr<IMetadataPacket> DeviceAgent::generateEventMetadataPacket()
{
    // Generate event every kTrackFrameCount'th frame.
    if (m_frameIndex % kTrackFrameCount != 0)
        return nullptr;
    // EventMetadataPacket contains arbitrary number of EventMetadata.
    const auto eventMetadataPacket = makePtr<EventMetadataPacket>();
    // Bind event metadata packet to the last video frame using a timestamp.
    eventMetadataPacket->setTimestampUs(m_lastVideoFrameTimestampUs);
    // Zero duration means that the event is not sustained, but momental.
    eventMetadataPacket->setDurationUs(0);
    // EventMetadata contains an information about event.
    const auto eventMetadata = makePtr<EventMetadata>();
    // Set all required fields.
    eventMetadata->setTypeId(kNewTrackEventType);
    eventMetadata->setIsActive(true);
    eventMetadata->setCaption("New sample plugin track started");
    eventMetadata->setDescription("New track #" + std::to_string(m_trackIndex) + " started");
    eventMetadataPacket->addItem(eventMetadata.get());
    // Generate index for the next track.
    ++m_trackIndex;
    return eventMetadataPacket;
}
/**
 * Packs a list of detections into a single ObjectMetadataPacket bound to the given
 * timestamp; returns null when there is nothing to report.
 */
Ptr<ObjectMetadataPacket> DeviceAgent::detectionsToObjectMetadataPacket(
    const DetectionList& detections,
    int64_t timestampUs)
{
    if (detections.empty())
        return nullptr;

    auto packet = makePtr<ObjectMetadataPacket>();
    packet->setTimestampUs(timestampUs);

    for (const auto& detection: detections)
    {
        auto metadata = makePtr<ObjectMetadata>();
        metadata->setBoundingBox(detection->boundingBox);
        metadata->setConfidence(detection->confidence);
        metadata->setTrackId(detection->trackId);

        // Map the detector's class label onto the object type id from the manifest.
        const std::string& label = detection->classLabel;
        if (label == "person")
            metadata->setTypeId(kPersonObjectType);
        else if (label == "cat")
            metadata->setTypeId(kCatObjectType);
        else if (label == "dog")
            metadata->setTypeId(kDogObjectType);
        // No final "else": only detections with the labels above are generated.

        packet->addItem(metadata.get());
    }
    return packet;
}
/**
 * Resets the tracker whenever the incoming frame size differs from the previous one,
 * since tracker state computed for one resolution is invalid for another. The very first
 * frame only records the size.
 */
void DeviceAgent::reinitializeObjectTrackerOnFrameSizeChanges(const Frame& frame)
{
    const bool firstFrame = m_previousFrameWidth == 0 && m_previousFrameHeight == 0;
    const bool sameSize = frame.width == m_previousFrameWidth
        && frame.height == m_previousFrameHeight;

    if (!firstFrame && sameSize)
        return; //< Nothing changed.

    if (!firstFrame)
        m_objectTracker = std::make_unique<ObjectTracker>(); //< Size changed: state is stale.

    m_previousFrameWidth = frame.width;
    m_previousFrameHeight = frame.height;
}
/**
 * Runs detection and tracking on the given frame and packs the results into metadata
 * packets. On a detector or tracker failure, reports a plugin diagnostic event and
 * switches the agent into the terminated state, returning an empty list.
 */
DeviceAgent::MetadataPacketList DeviceAgent::processFrame(
    const IUncompressedVideoFrame* videoFrame)
{
    const Frame frame(videoFrame, m_frameIndex);
    // The tracker must be reset before use if the resolution changed.
    reinitializeObjectTrackerOnFrameSizeChanges(frame);
    try
    {
        // Detect first, then let the tracker assign stable track ids to the detections.
        DetectionList detections = m_objectDetector->run(frame);
        detections = m_objectTracker->run(frame, detections);
        const auto& objectMetadataPacket =
            detectionsToObjectMetadataPacket(detections, frame.timestampUs);
        MetadataPacketList result;
        if (objectMetadataPacket)
            result.push_back(objectMetadataPacket);
        return result;
    }
    catch (const ObjectDetectionError& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Object detection error.",
            e.what());
        m_terminated = true;
    }
    catch (const ObjectTrackingError& e)
    {
        pushPluginDiagnosticEvent(
            IPluginDiagnosticEvent::Level::error,
            "Object tracking error.",
            e.what());
        m_terminated = true;
    }
    return {};
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 8,237
|
C++
|
.cpp
| 227
| 30.497797
| 99
| 0.686238
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,420
|
object_tracker.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step4/src/sample_company/vms_server_plugins/opencv_object_detection/object_tracker.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_tracker.h"
#include <opencv2/core/core.hpp>
#include "exceptions.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace std::string_literals;
using namespace cv;
using namespace cv::detail::tracking::tbm;
using namespace nx::sdk;
using namespace nx::sdk::analytics;
/**
 * Builds a tracking-by-matching tracker configured with a fast, resize-based appearance
 * descriptor compared via template matching.
 *
 * Based on the sample from the opencv_contrib repository:
 * https://github.com/opencv/opencv_contrib/blob/0a2179b328/modules/tracking/samples/tracking_by_matching.cpp
 */
cv::Ptr<ITrackerByMatching> createTrackerByMatchingWithFastDescriptor()
{
    TrackerParams params;
    // Real forget delay will be `params.forget_delay * kDetectionFramePeriod`.
    params.forget_delay = 75;

    auto tracker = createTrackerByMatching(params);

    // Cheap appearance model: fixed-size downscaled patches, template-match distance.
    static const Size kDescriptorFastSize(16, 32);
    tracker->setDescriptorFast(
        std::make_shared<ResizedImageDescriptor>(
            kDescriptorFastSize, InterpolationFlags::INTER_LINEAR));
    tracker->setDistanceFast(std::make_shared<MatchTemplateDistance>());

    return tracker;
}
//-------------------------------------------------------------------------------------------------
// public
// Constructs the tracker with the fast-descriptor tbm backend.
ObjectTracker::ObjectTracker():
    m_tracker(createTrackerByMatchingWithFastDescriptor())
{
}
/**
 * Runs one tracking step, translating any underlying failure into ObjectTrackingError.
 * cv::Exception is caught first since it derives from std::exception.
 */
DetectionList ObjectTracker::run(
    const Frame& frame,
    const DetectionList& detections)
{
    try
    {
        return runImpl(frame, detections);
    }
    catch (const cv::Exception& e)
    {
        throw ObjectTrackingError(cvExceptionToStdString(e));
    }
    catch (const std::exception& e)
    {
        throw ObjectTrackingError("Error: "s + e.what());
    }
}
//-------------------------------------------------------------------------------------------------
// private
/**
 * One tracking step: converts detections to tbm format, runs the tracker, converts the
 * tracked objects back (restoring class labels and mapping int ids to Uuids), then drops
 * id mappings of forgotten tracks.
 */
DetectionList ObjectTracker::runImpl(
    const Frame& frame,
    const DetectionList& detections)
{
    // Unfortunately the OpenCV tbm module does not support preserving classLabel during tracking.
    // See issue: https://github.com/opencv/opencv_contrib/issues/2298
    // Therefore, we save information about classLabels in the map from unique id of the detection
    // (bounding box + timestamp) to classLabel.
    std::map<const CompositeDetectionId, std::string> classLabels;
    TrackedObjects detectionsToTrack = convertDetectionsToTrackedObjects(
        /*frame*/ frame,
        /*detections*/ detections,
        /*classLabels*/ &classLabels);

    // Perform tracking and extract tracked detections.
    m_tracker->process(frame.cvMat, detectionsToTrack, (uint64_t) frame.timestampUs);
    const TrackedObjects trackedDetections = m_tracker->trackedDetections();

    DetectionList result = convertTrackedObjectsToDetections(
        /*frame*/ frame,
        /*trackedDetections*/ trackedDetections,
        /*classLabels*/ classLabels,
        /*idMapper*/ m_idMapper.get());

    cleanupIds(); //< Forgotten tracks must not keep Uuid mappings alive.
    return result;
}
/**
 * Drops id mappings for objects whose tracks the tracker has already forgotten, keeping
 * the IdMapper from growing without bound.
 */
void ObjectTracker::cleanupIds()
{
    std::set<int64_t> liveIds;
    for (const auto& trackEntry: m_tracker->tracks())
        liveIds.insert(trackEntry.second.first_object.object_id);
    m_idMapper->removeAllExcept(liveIds);
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 3,620
|
C++
|
.cpp
| 94
| 34.297872
| 109
| 0.701228
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,421
|
object_tracker_utils.cpp
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/opencv_object_detection_analytics_plugin/step4/src/sample_company/vms_server_plugins/opencv_object_detection/object_tracker_utils.cpp
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#include "object_tracker_utils.h"
#include "geometry.h"
namespace sample_company {
namespace vms_server_plugins {
namespace opencv_object_detection {
using namespace cv;
using namespace cv::detail::tracking::tbm;
using namespace nx::sdk;
/**
 * Returns the Uuid mapped to the given tracker id, creating and remembering a fresh
 * random Uuid on first use.
 */
Uuid IdMapper::get(int64_t id)
{
    if (const auto it = m_map.find(id); it != m_map.end())
        return it->second;

    const Uuid newUuid = UuidHelper::randomUuid();
    m_map.emplace(id, newUuid);
    return newUuid;
}
/** Erases every mapping whose key is not listed in idsToKeep. */
void IdMapper::removeAllExcept(const std::set<int64_t>& idsToKeep)
{
    auto it = m_map.begin();
    while (it != m_map.end())
    {
        if (idsToKeep.count(it->first) == 0)
            it = m_map.erase(it); //< erase() returns the next valid iterator.
        else
            ++it;
    }
}
/**
 * Converts detections from the plugin format into the cv tbm format, remembering each
 * detection's classLabel keyed by (frame index, rect) so it can be restored after
 * tracking (tbm does not carry labels through).
 */
TrackedObjects convertDetectionsToTrackedObjects(
    const Frame& frame,
    const DetectionList& detections,
    ClassLabelMap* inOutClassLabels)
{
    TrackedObjects result;
    result.reserve(detections.size());
    for (const auto& detection: detections)
    {
        const cv::Rect rect =
            nxRectToCvRect(detection->boundingBox, frame.width, frame.height);

        // insert() keeps the first label if the same (frame, rect) key repeats.
        inOutClassLabels->insert(
            std::make_pair(CompositeDetectionId{frame.index, rect}, detection->classLabel));

        result.emplace_back(
            rect,
            detection->confidence,
            (int) frame.index,
            /*object_id*/ -1); //< Placeholder, to be filled in ObjectTracker::process().
    }
    return result;
}
/**
* Convert detection from tbm format to our format, restoring the classLabels.
*/
std::shared_ptr<Detection> convertTrackedObjectToDetection(
const Frame& frame,
const TrackedObject& trackedDetection,
const std::string& classLabel,
IdMapper* idMapper)
{
return std::make_shared<Detection>(Detection{
/*boundingBox*/ cvRectToNxRect(trackedDetection.rect, frame.width, frame.height),
classLabel,
(float) trackedDetection.confidence,
/*trackId*/ idMapper->get(trackedDetection.object_id)});
}
/**
 * Converts detections from the cv tbm format back to the plugin format, restoring each
 * one's classLabel from the map populated by convertDetectionsToTrackedObjects().
 */
DetectionList convertTrackedObjectsToDetections(
    const Frame& frame,
    const TrackedObjects& trackedDetections,
    const ClassLabelMap& classLabels,
    IdMapper* idMapper)
{
    DetectionList result;
    result.reserve(trackedDetections.size());
    for (const auto& tracked: trackedDetections)
    {
        // at() throws if the (frame, rect) key is missing -- every tracked detection of
        // this frame was registered during conversion to tbm format.
        const auto& label = classLabels.at({frame.index, tracked.rect});
        result.push_back(
            convertTrackedObjectToDetection(frame, tracked, label, idMapper));
    }
    return result;
}
/** Extracts the plugin-format detections from a list of internal detections. */
DetectionList extractDetectionList(const DetectionInternalList& detectionsInternal)
{
    DetectionList result;
    result.reserve(detectionsInternal.size());
    for (const auto& internal: detectionsInternal)
        result.push_back(internal->detection);
    return result;
}
} // namespace opencv_object_detection
} // namespace vms_server_plugins
} // namespace sample_company
| 3,388
|
C++
|
.cpp
| 105
| 26.590476
| 116
| 0.685242
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,422
|
object_tracker_utils.h
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/object_tracker_utils.h
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#pragma once
#include <opencv_tbm/tracking_by_matching.hpp>
#include "detection.h"
#include "frame.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/**
 * Provides conversion from int ids coming from the tracker to Uuid ids that are needed by the
 * Server.
 */
class IdMapper
{
public:
    /** Returns the Uuid mapped to id, creating a new mapping on first use. */
    nx::sdk::Uuid get(int64_t id);

    /** Drops every mapping whose Uuid is not listed in idsToKeep. */
    void removeAllExcept(const std::set<nx::sdk::Uuid>& idsToKeep);

private:
    std::map<int64_t, nx::sdk::Uuid> m_map;
};
/** Pairs a plugin-format detection with the int track id assigned by the cv tbm tracker. */
struct DetectionInternal
{
    DetectionPtr detection;
    int64_t cvTrackId; //< Tracker-assigned id; mapped to a Uuid via IdMapper.
};
using DetectionInternalPtr = std::shared_ptr<DetectionInternal>;
using DetectionInternalList = std::vector<DetectionInternalPtr>;

/** Converts one tbm tracked object back to the plugin detection format. */
DetectionInternalPtr convertTrackedObjectToDetection(
    const Frame& frame,
    const cv::tbm::TrackedObject& trackedDetection,
    IdMapper* idMapper);

/** Converts a list of tbm tracked objects back to the plugin detection format. */
DetectionInternalList convertTrackedObjectsToDetections(
    const Frame& frame,
    const cv::tbm::TrackedObjects& trackedDetections,
    IdMapper* idMapper);

/** Converts plugin-format detections into the tbm input format. */
cv::tbm::TrackedObjects convertDetectionsToTrackedObjects(
    const Frame& frame,
    const DetectionList& detections);

/** Extracts the plugin-format detections from a list of internal detections. */
DetectionList extractDetectionList(const DetectionInternalList& detectionsInternal);
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 1,391
|
C++
|
.h
| 38
| 33.815789
| 96
| 0.785821
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,423
|
person_tracker.h
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/person_tracker.h
|
// Copyright 2018-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#pragma once
#include <algorithm>
#include <map>
#include <vector>
#include <optional>
#include <filesystem>
#include <opencv2/core/mat.hpp>
#include <opencv_tbm/tracking_by_matching.hpp>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/rect.h>
#include <nx/sdk/helpers/log_utils.h>
#include <nx/sdk/helpers/uuid_helper.h>
#include <nx/sdk/uuid.h>
#include "config.h"
#include "detection.h"
#include "event.h"
#include "frame.h"
#include "geometry.h"
#include "object_tracker_utils.h"
#include "person_re_id.h"
#include "track.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
using namespace std::chrono_literals;
/**
 * Tracks persons across frames using the OpenCV tracking-by-matching (tbm) module with a
 * person re-identification network as the appearance descriptor. Produces best shots,
 * detections, events and tracks for each processed frame.
 */
class PersonTracker
{
public:
    /** Everything the tracker produces for one processed frame. */
    struct Result
    {
        BestShotList bestShots;
        DetectionList detections;
        EventList events;
        TrackList tracks;
    };

public:
    PersonTracker(
        std::filesystem::path modelDir,
        nx::sdk::LogUtils logUtils,
        const std::shared_ptr<const Config>& config);

    // When updateFpsOnly is true, presumably only frame-rate-related settings are
    // applied. NOTE(review): semantics defined in the .cpp -- confirm.
    void setConfig(const std::shared_ptr<const Config> config, bool updateFpsOnly = false);

    /** Runs one tracking step for the given frame and its detections. */
    Result run(const Frame& frame, const DetectionList& detections);

private:
    /** Appearance descriptor backed by the person re-id network, plugged into cv::tbm. */
    class PersonReIdDescriptor: public cv::tbm::IImageDescriptor
    {
    public:
        PersonReIdDescriptor(
            std::filesystem::path modelDir,
            nx::sdk::LogUtils logUtils,
            const std::shared_ptr<const Config> config);

        virtual cv::Size size() const override;

        virtual void compute(const cv::Mat &mat, CV_OUT cv::Mat& descriptor) override;

        virtual void compute(
            const std::vector<cv::Mat> &mats,
            CV_OUT std::vector<cv::Mat>& descriptors) override;

    private:
        std::shared_ptr<PersonReId> m_personReId;
    };

private:
    Result runImpl(const Frame& frame, const DetectionList& detections);
    std::shared_ptr<Track> getOrCreateTrack(const nx::sdk::Uuid& trackId);
    void copyDetectionsHistoryToTrack(const Frame& frame, int64_t cvTrackId, Track* track) const;
    void processDetection(const Frame& frame, const std::shared_ptr<DetectionInternal>& detection);
    void processDetections(const Frame& frame, const DetectionInternalList& detectionsInternal);
    EventList generateEvents();
    void cleanupIds();
    void cleanupTracks();
    void cleanup();
    cv::Ptr<cv::tbm::ITrackerByMatching> createPersonTrackerByMatching(int forgetDelay);
    void finishTracks();
    TrackList extractTracks() const;
    BestShotList extractBestShots() const;

private:
    nx::sdk::LogUtils logUtils; //< NOTE(review): no m_ prefix, unlike the other members.
    std::filesystem::path m_modelDir;
    std::shared_ptr<PersonReIdDescriptor> m_personReIdDescriptor;
    cv::Ptr<cv::tbm::ITrackerByMatching> m_tracker;
    std::unique_ptr<IdMapper> m_idMapper = std::make_unique<IdMapper>();
    TrackMap m_tracks;
    std::optional<int64_t> m_personDetectionStartTimestampUs;
};
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 3,074
|
C++
|
.h
| 81
| 33.246914
| 99
| 0.726539
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,424
|
utils.h
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/utils.h
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#pragma once
#include <string>
#include <vector>
#include <inference_engine.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/** Parses a string into a bool. NOTE(review): accepted forms are defined in the .cpp -- confirm. */
bool toBool(std::string str);

/** Returns the width dimension of the given tensor description. */
int getTensorWidth(const InferenceEngine::TensorDesc& description);

/** Returns the height dimension of the given tensor description. */
int getTensorHeight(const InferenceEngine::TensorDesc& description);
/**
 * Sets the image data stored in cv::Mat object to a given Blob object.
 *
 * The blob is assumed to have NCHW layout (dims = {batch, channels, height, width}), as
 * fixed by the dimension indices below. The image is read via cv::Mat::at<cv::Vec3b>,
 * i.e. it must be an 8-bit 3-channel image. NOTE(review): there is no check that the
 * blob's channel count equals 3 -- confirm callers guarantee this.
 *
 * @param image - given cv::Mat object with an image data.
 * @param blob - Blob object which to be filled by an image data.
 * @param batchIndex - batch index of an image inside of the blob.
 */
template<typename Item>
void matU8ToBlob(const cv::Mat& image, const InferenceEngine::Blob::Ptr& blob, int batchIndex = 0)
{
    InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
    // NCHW dimension order.
    static constexpr int kWidthIndex = 3;
    static constexpr int kHeightIndex = 2;
    static constexpr int kChannelIndex = 1;
    const int width = (int) blobSize[kWidthIndex];
    const int height = (int) blobSize[kHeightIndex];
    const int channels = (int) blobSize[kChannelIndex];
    Item* const blobData = blob->buffer().as<Item*>();

    // Resize only when the image's spatial size differs from the blob's.
    cv::Mat resizedImage(image);
    if (width != image.size().width || height != image.size().height)
        cv::resize(image, resizedImage, cv::Size(width, height));

    const int batchOffset = batchIndex * width * height * channels;
    // Repack interleaved (HWC) pixels into the planar (CHW) blob slice for this batch.
    for (int c = 0; c < channels; ++c)
    {
        for (int h = 0; h < height; ++h)
        {
            for (int w = 0; w < width; ++w)
            {
                blobData[batchOffset + c * width * height + h * width + w] =
                    resizedImage.at<cv::Vec3b>(h, w)[c];
            }
        }
    }
}
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 1,940
|
C++
|
.h
| 46
| 37.304348
| 98
| 0.679596
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,425
|
roi_processor.h
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/roi_processor.h
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#pragma once
#include <chrono>
#include <limits>
#include <map>
#include <set>
#include <utility>
#include <vector>
#include <optional>
#include <nx/sdk/analytics/helpers/object_metadata.h>
#include <nx/sdk/analytics/rect.h>
#include <nx/sdk/helpers/log_utils.h>
#include <nx/sdk/uuid.h>
#include "config.h"
#include "event.h"
#include "track.h"
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/**
 * Evaluates tracks against configured regions of interest (lines and areas), generating
 * events such as line crossing, area entry/exit/appearance/disappearance and loitering.
 */
class RoiProcessor
{
public:
    using Result = EventList;

public:
    RoiProcessor(nx::sdk::LogUtils logUtils, const std::shared_ptr<const Config> config);

    /** Processes the given tracks/events and returns newly generated ROI events. */
    Result run(
        const TrackList& tracks,
        const EventList& events,
        bool analyzeOnlyForDisappearanceInAreaDetectedEvents);

    void setConfig(const std::shared_ptr<const Config> config);

private:
    /** Position of a tracked object relative to an ROI area. */
    enum class AreaState
    {
        none,
        outside,
        inside,
        entered,
        exited,
        appeared,
        disappeared,
    };

    /** Accumulates total time spent across possibly interrupted intervals. */
    class IntervalCalculator
    {
    public:
        void registerTimestamp(int64_t timestampUs);
        void pause();
        int64_t firstTimestamp() const;
        std::chrono::microseconds duration() const;

    private:
        std::chrono::microseconds m_durationWithoutLastInterval{0};
        int64_t m_firstTimestamp = 0;
        int64_t m_lastTimestamp = 0;
        int64_t m_lastIntervalStartTimestampUs = 0;
    };

    /** Per-track state kept between run() calls. */
    struct TrackContext
    {
        /** Per-(track, area) state used for entry/exit filtering and dwell timing. */
        struct AreaContext
        {
            AreaState instantState = AreaState::none;
            AreaState filteredState = AreaState::none;
            AreaState newFilteredState = AreaState::none;
            int64_t newFilteredStateTimestampUs = std::numeric_limits<int64_t>::max();
            // NOTE(review): boost::optional here while the rest of this header uses
            // std::optional, and no boost include is visible -- presumably pulled in
            // transitively; confirm and consider unifying.
            boost::optional<Point> detectionBoundingBoxCenterBeforeAreaEnter;
            IntervalCalculator intervalCalculator;
        };
        using AreaContextPtr = std::unique_ptr<AreaContext>;

        /**
         * ROI line is removed when bounding box of detection stops crossing the line.
         */
        std::set<RoiLinePtr> linesCrossed;

        std::map<RoiAreaPtr, AreaContextPtr> areasContexts;
        int detectionIndex = 0;
    };
    using TrackContextPtr = std::unique_ptr<TrackContext>;

private:
    static AreaState newInstantState(
        AreaState state,
        const Rect& boundingBox,
        const RoiAreaPtr& area);
    static EventList generateImpulseAreaEvents(
        TrackContext::AreaContext& areaContext,
        const RoiAreaPtr& area,
        const TrackPtr& track);
    static EventPtr generateAreaCrossedEvent(
        TrackContext::AreaContext& areaContext,
        const RoiAreaPtr& area,
        const TrackPtr& track,
        int detectionIndex);
    Result runImpl(
        const TrackList& tracks,
        const EventList& events,
        bool analyzeOnlyForDisappearanceInAreaDetectedEvents);
    static bool objectIntersectedArea(const Line& centerMovement, const RoiAreaPtr& area);
    void ensureTracksContexts(const TrackList &tracks);
    EventList lineCrossedDetection(const TrackList &tracks);
    EventList monitorArea(const TrackList &tracks);
    EventList generateDisappearanceInAreaAndLoiteringFinishEvents(const EventList& events);

private:
    std::shared_ptr<const Config> m_config;
    nx::sdk::LogUtils logUtils;
    std::map<const nx::sdk::Uuid, TrackContextPtr> m_tracksContexts;
    std::optional<int64_t> m_loiteringStartTimestampUs;
};
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 3,608
|
C++
|
.h
| 102
| 29.04902
| 94
| 0.703533
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,540,426
|
openvino_log_utils.h
|
networkoptix_nx_open_integrations/cpp/vms_server_plugins/openvino_object_detection_analytics_plugin/src/lib/openvino_log_utils.h
|
// Copyright 2019-present Network Optix, Inc. Licensed under MPL 2.0: www.mozilla.org/MPL/2.0/
#pragma once
#include <inference_engine.hpp>
#include <nx/sdk/helpers/log_utils.h>
namespace nx::vms_server_plugins::analytics::openvino_object_detection {
/**
 * Small base/mixin holding nx LogUtils and a helper for logging the version of an
 * Inference Engine plugin.
 */
class OpenVinoLogUtils
{
public:
    // NOTE(review): single-argument constructor is implicit -- confirm conversions from
    // LogUtils are intended.
    OpenVinoLogUtils(nx::sdk::LogUtils logUtils = nx::sdk::LogUtils(true, ""));

    void printInferenceEnginePluginVersion(const InferenceEngine::Version& version) const;

protected:
    nx::sdk::LogUtils logUtils;
};
} // namespace nx::vms_server_plugins::analytics::openvino_object_detection
| 584
|
C++
|
.h
| 14
| 39.357143
| 94
| 0.779751
|
networkoptix/nx_open_integrations
| 33
| 29
| 1
|
MPL-2.0
|
9/20/2024, 10:45:08 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.